Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Kun Zhang <zhangk@codeaurora.org>
-Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
+Matt Hanselman <mjhanselman@gmail.com>
+Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Smith <mike@w3.org>
Mike Gilbert <floppymaster@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
}
},
'arch:ia32': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
+ 'CPPDEFINES': ['V8_TARGET_ARCH_IA32', 'WIN32'],
'LINKFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
* Debug message callback function.
*
* \param message the debug message handler message object
-
+ *
* A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
static bool SetDebugEventListener(v8::Handle<v8::Object> that,
Handle<Value> data = Handle<Value>());
- // Schedule a debugger break to happen when JavaScript code is run.
- static void DebugBreak();
-
- // Remove scheduled debugger break if it has not happened yet.
- static void CancelDebugBreak();
-
- // Break execution of JavaScript (this method can be invoked from a
- // non-VM thread) for further client command execution on a VM
- // thread. Client data is then passed in EventDetails to
- // EventCallback at the moment when the VM actually stops.
- static void DebugBreakForCommand(ClientData* data = NULL);
+ // Schedule a debugger break to happen when JavaScript code is run
+ // in the given isolate. If no isolate is provided, the default
+ // isolate is used.
+ static void DebugBreak(Isolate* isolate = NULL);
+
+ // Remove a scheduled debugger break in the given isolate if it has
+ // not happened yet. If no isolate is provided, the default isolate
+ // is used.
+ static void CancelDebugBreak(Isolate* isolate = NULL);
+
+ // Break execution of JavaScript in the given isolate (this method
+ // can be invoked from a non-VM thread) for further client command
+ // execution on a VM thread. Client data is then passed in
+ // EventDetails to EventCallback at the moment when the VM actually
+ // stops. If no isolate is provided, the default isolate is used.
+ static void DebugBreakForCommand(ClientData* data = NULL,
+ Isolate* isolate = NULL);
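
A hedged usage sketch of the per-isolate break calls declared above (this assumes debugger support is compiled in; the worker isolate exists only for illustration):

    #include <v8-debug.h>

    int main() {
      v8::Isolate* worker = v8::Isolate::New();
      // Schedule a break that fires as soon as JavaScript runs in this
      // isolate, then withdraw the request before it ever triggers.
      v8::Debug::DebugBreak(worker);
      v8::Debug::CancelDebugBreak(worker);
      worker->Dispose();
      return 0;
    }
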
// Message based interface. The message protocol is JSON. NOTE: the message
// handler thread is no longer supported; the parameter must be false.
class Arguments;
class Object;
class Heap;
-class Top;
+class HeapObject;
+class Isolate;
}
* Creates a new handle with the given value.
*/
static internal::Object** CreateHandle(internal::Object* value);
+ // Faster version, uses HeapObject to obtain the current Isolate.
+ static internal::Object** CreateHandle(internal::HeapObject* value);
private:
// Make it impossible to create heap-allocated or illegal handle
internal::Object** next;
internal::Object** limit;
int level;
-
inline void Initialize() {
next = limit = NULL;
level = 0;
void Leave();
+ internal::Isolate* isolate_;
internal::Object** prev_next_;
internal::Object** prev_limit_;
class RetainedObjectInfo;
+/**
+ * Isolate represents an isolated instance of the V8 engine. V8
+ * isolates have completely separate states. Objects from one isolate
+ * must not be used in other isolates. When V8 is initialized, a
+ * default isolate is implicitly created and entered. The embedder
+ * can create additional isolates and use them in parallel in multiple
+ * threads. An isolate can be entered by at most one thread at any
+ * given time. The Locker/Unlocker API can be used to synchronize.
+ */
+class V8EXPORT Isolate {
+ public:
+ /**
+ * Stack-allocated class which sets the isolate for all operations
+ * executed within a local scope.
+ */
+ class V8EXPORT Scope {
+ public:
+ explicit Scope(Isolate* isolate) : isolate_(isolate) {
+ isolate->Enter();
+ }
+
+ ~Scope() { isolate_->Exit(); }
+
+ private:
+ Isolate* const isolate_;
+
+ // Prevent copying of Scope objects.
+ Scope(const Scope&);
+ Scope& operator=(const Scope&);
+ };
+
+ /**
+ * Creates a new isolate. Does not change the currently entered
+ * isolate.
+ *
+   * When an isolate is no longer used, its resources should be freed
+ * by calling Dispose(). Using the delete operator is not allowed.
+ */
+ static Isolate* New();
+
+ /**
+ * Returns the entered isolate for the current thread or NULL in
+ * case there is no current isolate.
+ */
+ static Isolate* GetCurrent();
+
+ /**
+ * Methods below this point require holding a lock (using Locker) in
+ * a multi-threaded environment.
+ */
+
+ /**
+ * Sets this isolate as the entered one for the current thread.
+ * Saves the previously entered one (if any), so that it can be
+ * restored when exiting. Re-entering an isolate is allowed.
+ */
+ void Enter();
+
+ /**
+ * Exits this isolate by restoring the previously entered one in the
+ * current thread. The isolate may still stay the same, if it was
+ * entered more than once.
+ *
+ * Requires: this == Isolate::GetCurrent().
+ */
+ void Exit();
+
+ /**
+ * Disposes the isolate. The isolate must not be entered by any
+   * thread while it is being disposed.
+ */
+ void Dispose();
+
+ private:
+ Isolate();
+ Isolate(const Isolate&);
+ ~Isolate();
+ Isolate& operator=(const Isolate&);
+ void* operator new(size_t size);
+ void operator delete(void*, size_t);
+};
+
+
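For illustration, a minimal lifecycle sketch built only from the declarations above (the context setup is the usual embedder boilerplate):

    v8::Isolate* isolate = v8::Isolate::New();  // created, but not entered
    {
      v8::Isolate::Scope iscope(isolate);       // Enter()/Exit() via RAII
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      // ... compile and run scripts in this isolate ...
      context.Dispose();
    }
    isolate->Dispose();  // legal only once no thread has it entered
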
/**
* Container class for static utility functions.
*/
static void TerminateExecution(int thread_id);
/**
- * Forcefully terminate the current thread of JavaScript execution.
+ * Forcefully terminate the current thread of JavaScript execution
+ * in the given isolate. If no isolate is provided, the default
+ * isolate is used.
*
* This method can be used by any thread even if that thread has not
* acquired the V8 lock with a Locker object.
+ *
+ * \param isolate The isolate in which to terminate the current JS execution.
*/
- static void TerminateExecution();
+ static void TerminateExecution(Isolate* isolate = NULL);
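
As a hedged illustration of the comment above, a watchdog thread needs nothing but the raw isolate pointer to stop a runaway script (StopRunawayScript is a hypothetical name):

    // May run on any thread; TerminateExecution does not require the
    // caller to hold the Locker for the target isolate.
    void StopRunawayScript(v8::Isolate* script_isolate) {
      v8::V8::TerminateExecution(script_isolate);
    }
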
/**
* Is V8 terminating JavaScript execution.
bool capture_message_ : 1;
bool rethrow_ : 1;
- friend class v8::internal::Top;
+ friend class v8::internal::Isolate;
};
/**
* Multiple threads in V8 are allowed, but only one thread at a time
- * is allowed to use V8. The definition of 'using V8' includes
- * accessing handles or holding onto object pointers obtained from V8
- * handles. It is up to the user of V8 to ensure (perhaps with
- * locking) that this constraint is not violated.
+ * is allowed to use any given V8 isolate. See the Isolate class
+ * comments. The definition of 'using a V8 isolate' includes
+ * accessing handles or holding onto object pointers obtained
+ * from V8 handles while in the particular V8 isolate. It is up
+ * to the user of V8 to ensure (perhaps with locking) that this
+ * constraint is not violated.
+ *
+ * More than one thread and multiple V8 isolates can be used
+ * without any locking if each isolate is created and accessed
+ * by a single thread only. For example, one thread can use
+ * multiple isolates or multiple threads can each create and run
+ * their own isolate.
*
- * If you wish to start using V8 in a thread you can do this by constructing
- * a v8::Locker object. After the code using V8 has completed for the
- * current thread you can call the destructor. This can be combined
- * with C++ scope-based construction as follows:
+ * If you wish to use a V8 isolate from more than one thread, you
+ * can do this by constructing a v8::Locker object to guard access
+ * to the isolate. After the code using V8 has completed for the
+ * current thread you can call the destructor. This can be combined
+ * with C++ scope-based construction as follows (this assumes the
+ * default isolate, which is used when none is passed to the
+ * Locker):
*
* \code
* ...
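
A hedged sketch of the no-locking arrangement described above: each thread owns one isolate outright, so no Locker is needed (RunOwnIsolate is a hypothetical per-thread entry point):

    void RunOwnIsolate() {
      v8::Isolate* isolate = v8::Isolate::New();
      {
        v8::Isolate::Scope iscope(isolate);
        v8::HandleScope handle_scope;
        v8::Persistent<v8::Context> context = v8::Context::New();
        v8::Context::Scope context_scope(context);
        // No other thread ever enters this isolate, so no Locker is
        // required here.
        context.Dispose();
      }
      isolate->Dispose();
    }
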
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
- static const int kMapInstanceTypeOffset = kApiPointerSize + kApiIntSize;
+ static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset =
InternalConstants<kApiPointerSize>::kStringResourceOffset;
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<T*>(addr);
}
+
+ static inline bool CanCastToHeapObject(void* o) { return false; }
+ static inline bool CanCastToHeapObject(Context* o) { return true; }
+ static inline bool CanCastToHeapObject(String* o) { return true; }
+ static inline bool CanCastToHeapObject(Object* o) { return true; }
+ static inline bool CanCastToHeapObject(Message* o) { return true; }
+ static inline bool CanCastToHeapObject(StackTrace* o) { return true; }
+ static inline bool CanCastToHeapObject(StackFrame* o) { return true; }
};
} // namespace internal
template <class T>
Local<T> Local<T>::New(Handle<T> that) {
if (that.IsEmpty()) return Local<T>();
- internal::Object** p = reinterpret_cast<internal::Object**>(*that);
+ T* that_ptr = *that;
+ internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
+ if (internal::Internals::CanCastToHeapObject(that_ptr)) {
+ return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
+ reinterpret_cast<internal::HeapObject*>(*p))));
+ }
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
}
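
For reference, a small sketch of what this dispatch means at the call site: types that are always heap objects, such as String, take the HeapObject overload and recover the isolate from the object itself, while Local<Value> may hold a Smi and falls back to the generic path:

    v8::Handle<v8::String> name = v8::String::New("example");
    // String is always a heap object, so this copy uses the fast
    // HeapObject overload of CreateHandle.
    v8::Local<v8::String> copy = v8::Local<v8::String>::New(name);
    // A Value may be a Smi, so this copy goes through the generic
    // overload and Isolate::Current().
    v8::Handle<v8::Value> num = v8::Integer::New(42);
    v8::Local<v8::Value> num_copy = v8::Local<v8::Value>::New(num);
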
#include <stdio.h>
#include <stdlib.h>
+#include "../src/v8.h"
+// TODO(isolates):
+// o Either use V8's internal platform code for every platform or
+//   re-implement it.
+// o Do not assume that non-WIN32 implies pthreads.
+#ifndef WIN32
+#include <pthread.h> // NOLINT
+#include <unistd.h> // NOLINT
+#endif
+
+static void ExitShell(int exit_code) {
+ // Use _exit instead of exit to avoid races between isolate
+ // threads and static destructors.
+ fflush(stdout);
+ fflush(stderr);
+ _exit(exit_code);
+}
+
+v8::Persistent<v8::Context> CreateShellContext();
void RunShell(v8::Handle<v8::Context> context);
bool ExecuteString(v8::Handle<v8::String> source,
v8::Handle<v8::Value> name,
void ReportException(v8::TryCatch* handler);
+#ifndef WIN32
+void* IsolateThreadEntry(void* arg);
+#endif
+
+static bool last_run = true;
+
+class SourceGroup {
+ public:
+ SourceGroup() : argv_(NULL),
+ begin_offset_(0),
+ end_offset_(0),
+ next_semaphore_(NULL),
+ done_semaphore_(NULL) {
+#ifndef WIN32
+ next_semaphore_ = v8::internal::OS::CreateSemaphore(0);
+ done_semaphore_ = v8::internal::OS::CreateSemaphore(0);
+ thread_ = 0;
+#endif
+ }
+
+ void Begin(char** argv, int offset) {
+ argv_ = const_cast<const char**>(argv);
+ begin_offset_ = offset;
+ }
+
+ void End(int offset) { end_offset_ = offset; }
+
+ void Execute() {
+ for (int i = begin_offset_; i < end_offset_; ++i) {
+ const char* arg = argv_[i];
+ if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
+ // Execute argument given to -e option directly.
+ v8::HandleScope handle_scope;
+ v8::Handle<v8::String> file_name = v8::String::New("unnamed");
+ v8::Handle<v8::String> source = v8::String::New(argv_[i + 1]);
+ if (!ExecuteString(source, file_name, false, true)) {
+ ExitShell(1);
+ return;
+ }
+ ++i;
+ } else if (arg[0] == '-') {
+ // Ignore other options. They have been parsed already.
+ } else {
+ // Use all other arguments as names of files to load and run.
+ v8::HandleScope handle_scope;
+ v8::Handle<v8::String> file_name = v8::String::New(arg);
+ v8::Handle<v8::String> source = ReadFile(arg);
+        if (source.IsEmpty()) {
+          printf("Error reading '%s'\n", arg);
+          ExitShell(1);
+          return;
+        }
+ if (!ExecuteString(source, file_name, false, true)) {
+ ExitShell(1);
+ return;
+ }
+ }
+ }
+ }
+
+#ifdef WIN32
+ void StartExecuteInThread() { ExecuteInThread(); }
+ void WaitForThread() {}
+
+#else
+ void StartExecuteInThread() {
+ if (thread_ == 0) {
+ pthread_attr_t attr;
+      // On some systems (OSX 10.6) the default stack size is 0.5 MB or
+      // less, which is not enough to parse the big literal expressions
+      // used in tests. The stack size should be at least
+      // StackGuard::kLimitSize plus some OS-specific padding for
+      // thread startup code.
+      size_t stacksize = 2 << 20;  // 2 MB seems to be enough.
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, stacksize);
+ int error = pthread_create(&thread_, &attr, &IsolateThreadEntry, this);
+ if (error != 0) {
+ fprintf(stderr, "Error creating isolate thread.\n");
+ ExitShell(1);
+ }
+ }
+ next_semaphore_->Signal();
+ }
+
+ void WaitForThread() {
+ if (thread_ == 0) return;
+ if (last_run) {
+ pthread_join(thread_, NULL);
+ thread_ = 0;
+ } else {
+ done_semaphore_->Wait();
+ }
+ }
+#endif // WIN32
+
+ private:
+ void ExecuteInThread() {
+ v8::Isolate* isolate = v8::Isolate::New();
+ do {
+ if (next_semaphore_ != NULL) next_semaphore_->Wait();
+ {
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope;
+ v8::Persistent<v8::Context> context = CreateShellContext();
+ {
+ v8::Context::Scope cscope(context);
+ Execute();
+ }
+ context.Dispose();
+ }
+ if (done_semaphore_ != NULL) done_semaphore_->Signal();
+ } while (!last_run);
+ isolate->Dispose();
+ }
+
+ const char** argv_;
+ int begin_offset_;
+ int end_offset_;
+ v8::internal::Semaphore* next_semaphore_;
+ v8::internal::Semaphore* done_semaphore_;
+#ifndef WIN32
+ pthread_t thread_;
+#endif
+
+ friend void* IsolateThreadEntry(void* arg);
+};
+
+#ifndef WIN32
+void* IsolateThreadEntry(void* arg) {
+ reinterpret_cast<SourceGroup*>(arg)->ExecuteInThread();
+ return NULL;
+}
+#endif
+
+
+static SourceGroup* isolate_sources = NULL;
+
+
int RunMain(int argc, char* argv[]) {
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::HandleScope handle_scope;
- // Create a template for the global object.
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
- // Bind the global 'print' function to the C++ Print callback.
- global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
- // Bind the global 'read' function to the C++ Read callback.
- global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
- // Bind the global 'load' function to the C++ Load callback.
- global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
- // Bind the 'quit' function
- global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
- // Bind the 'version' function
- global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
- // Create a new execution environment containing the built-in
- // functions
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+  v8::Persistent<v8::Context> context = CreateShellContext();
  if (context.IsEmpty()) {
    printf("Error creating context\n");
    return 1;
  }
+  // Enter the newly created execution environment.
+  context->Enter();
bool run_shell = (argc == 1);
+ int num_isolates = 1;
for (int i = 1; i < argc; i++) {
- // Enter the execution environment before evaluating any code.
- v8::Context::Scope context_scope(context);
- const char* str = argv[i];
- if (strcmp(str, "--shell") == 0) {
- run_shell = true;
- } else if (strcmp(str, "-f") == 0) {
- // Ignore any -f flags for compatibility with the other stand-
- // alone JavaScript engines.
- continue;
- } else if (strncmp(str, "--", 2) == 0) {
- printf("Warning: unknown flag %s.\nTry --help for options\n", str);
- } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
- // Execute argument given to -e option directly
- v8::HandleScope handle_scope;
- v8::Handle<v8::String> file_name = v8::String::New("unnamed");
- v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
- if (!ExecuteString(source, file_name, false, true))
- return 1;
- i++;
- } else {
- // Use all other arguments as names of files to load and run.
- v8::HandleScope handle_scope;
- v8::Handle<v8::String> file_name = v8::String::New(str);
- v8::Handle<v8::String> source = ReadFile(str);
- if (source.IsEmpty()) {
- printf("Error reading '%s'\n", str);
- return 1;
+ if (strcmp(argv[i], "--isolate") == 0) ++num_isolates;
+ }
+ if (isolate_sources == NULL) {
+ isolate_sources = new SourceGroup[num_isolates];
+ SourceGroup* current = isolate_sources;
+ current->Begin(argv, 1);
+ for (int i = 1; i < argc; i++) {
+ const char* str = argv[i];
+ if (strcmp(str, "--isolate") == 0) {
+ current->End(i);
+ current++;
+ current->Begin(argv, i + 1);
+ } else if (strcmp(str, "--shell") == 0) {
+ run_shell = true;
+ } else if (strcmp(str, "-f") == 0) {
+ // Ignore any -f flags for compatibility with the other stand-
+ // alone JavaScript engines.
+ continue;
+ } else if (strncmp(str, "--", 2) == 0) {
+ printf("Warning: unknown flag %s.\nTry --help for options\n", str);
}
- if (!ExecuteString(source, file_name, false, true))
- return 1;
}
+ current->End(argc);
}
+ for (int i = 1; i < num_isolates; ++i) {
+ isolate_sources[i].StartExecuteInThread();
+ }
+ isolate_sources[0].Execute();
if (run_shell) RunShell(context);
+ for (int i = 1; i < num_isolates; ++i) {
+ isolate_sources[i].WaitForThread();
+ }
+ if (last_run) {
+ delete[] isolate_sources;
+ isolate_sources = NULL;
+ }
+ context->Exit();
context.Dispose();
return 0;
}
printf("============ Stress %d/%d ============\n",
i + 1, stress_runs);
v8::Testing::PrepareStressRun(i);
+ last_run = (i == stress_runs - 1);
result = RunMain(argc, argv);
}
printf("======== Full Deoptimization =======\n");
}
+// Creates a new execution environment containing the built-in
+// functions.
+v8::Persistent<v8::Context> CreateShellContext() {
+ // Create a template for the global object.
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+ // Bind the global 'print' function to the C++ Print callback.
+ global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+ // Bind the global 'read' function to the C++ Read callback.
+ global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
+ // Bind the global 'load' function to the C++ Load callback.
+ global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
+ // Bind the 'quit' function
+ global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
+ // Bind the 'version' function
+ global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
+ return v8::Context::New(NULL, global);
+}
+
+
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
// If no arguments are given, args[0] will yield undefined, which
// converts to the integer value 0.
int exit_code = args[0]->Int32Value();
- exit(exit_code);
+ ExitShell(exit_code);
return v8::Undefined();
}
ic.cc
inspector.cc
interpreter-irregexp.cc
+ isolate.cc
jsregexp.cc
jump-target.cc
lithium-allocator.cc
#include "factory.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
-#include "top.h"
namespace v8 {
namespace internal {
template <class C>
static C* FindInPrototypeChain(Object* obj, bool* found_it) {
ASSERT(!*found_it);
+ Heap* heap = HEAP;
while (!Is<C>(obj)) {
- if (obj == Heap::null_value()) return NULL;
+ if (obj == heap->null_value()) return NULL;
obj = obj->GetPrototype();
}
*found_it = true;
Object* Accessors::FlattenNumber(Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
- ASSERT(
- Top::context()->global_context()->number_function()->has_initial_map());
- Map* number_map =
- Top::context()->global_context()->number_function()->initial_map();
+ ASSERT(Isolate::Current()->context()->global_context()->number_function()->
+ has_initial_map());
+ Map* number_map = Isolate::Current()->context()->global_context()->
+ number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
// This means one of the object's prototypes is a JSArray and
// the object does not have a 'length' property.
// Calling SetProperty causes an infinite loop.
- return object->SetLocalPropertyIgnoreAttributes(Heap::length_symbol(),
+ return object->SetLocalPropertyIgnoreAttributes(HEAP->length_symbol(),
value, NONE);
}
}
- return Top::Throw(*Factory::NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ return Isolate::Current()->Throw(
+ *FACTORY->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
}
ASSERT(script->line_ends()->IsFixedArray());
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
// We do not want anyone to modify this array from JS.
- ASSERT(*line_ends == Heap::empty_fixed_array() ||
- line_ends->map() == Heap::fixed_cow_array_map());
- Handle<JSArray> js_array = Factory::NewJSArrayWithElements(line_ends);
+ ASSERT(*line_ends == HEAP->empty_fixed_array() ||
+ line_ends->map() == HEAP->fixed_cow_array_map());
+ Handle<JSArray> js_array = FACTORY->NewJSArrayWithElements(line_ends);
return *js_array;
}
return *GetScriptWrapper(eval_from_script);
}
}
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// If this is not a script compiled through eval there is no eval position.
int compilation_type = Smi::cast(script->compilation_type())->value();
if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// Get the function from where eval was called and find the source position
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
+ if (!found_it) return HEAP->undefined_value();
while (!function->should_have_prototype()) {
found_it = false;
function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
if (!function->has_prototype()) {
Object* prototype;
- { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
+ { MaybeObject* maybe_prototype = HEAP->AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
Object* result;
void*) {
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
+ if (!found_it) return HEAP->undefined_value();
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(),
+ return object->SetLocalPropertyIgnoreAttributes(HEAP->prototype_symbol(),
value,
NONE);
}
MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
+ if (!found_it) return HEAP->undefined_value();
return holder->shared()->name();
}
if (Smi::IsValid(value)) {
return Handle<Object>(Smi::FromInt(value));
} else {
- return Factory::NewNumberFromInt(value);
+ return Isolate::Current()->factory()->NewNumberFromInt(value);
}
}
case DOUBLE: {
double value = Memory::double_at(addr_);
- return Factory::NewNumber(value);
+ return Isolate::Current()->factory()->NewNumber(value);
}
case LITERAL:
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
+ Factory* factory = Isolate::Current()->factory();
int args_count = inlined_function->shared()->formal_parameter_count();
ScopedVector<SlotRef> args_slots(args_count);
ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
Handle<JSObject> arguments =
- Factory::NewArgumentsObject(inlined_function, args_count);
- Handle<FixedArray> array = Factory::NewFixedArray(args_count);
+ factory->NewArgumentsObject(inlined_function, args_count);
+ Handle<FixedArray> array = factory->NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue();
array->set(i, *value);
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
- Handle<JSFunction> function(holder);
+ if (!found_it) return isolate->heap()->undefined_value();
+ Handle<JSFunction> function(holder, isolate);
// Find the top invocation of the function by traversing frames.
List<JSFunction*> functions(2);
if (!frame->is_optimized()) {
// If there is an arguments variable in the stack, we return that.
Handle<SerializedScopeInfo> info(function->shared()->scope_info());
- int index = info->StackSlotIndex(Heap::arguments_symbol());
+ int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
if (index >= 0) {
- Handle<Object> arguments(frame->GetExpression(index));
+ Handle<Object> arguments(frame->GetExpression(index), isolate);
if (!arguments->IsArgumentsMarker()) return *arguments;
}
}
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
- length);
- Handle<FixedArray> array = Factory::NewFixedArray(length);
+ Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
+ function, length);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
// Copy the parameters to the arguments object.
ASSERT(array->length() == length);
}
// No frame corresponding to the given function found. Return null.
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
AssertNoAllocation no_alloc;
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
- Handle<JSFunction> function(holder);
+ if (!found_it) return isolate->heap()->undefined_value();
+ Handle<JSFunction> function(holder, isolate);
List<JSFunction*> functions(2);
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
}
ASSERT(functions.length() == 1);
}
- if (it.done()) return Heap::null_value();
+ if (it.done()) return isolate->heap()->null_value();
break;
}
}
}
// No frame corresponding to the given function found. Return null.
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_INL_H_
+#define V8_ALLOCATION_INL_H_
+
+#include "allocation.h"
+
+namespace v8 {
+namespace internal {
+
+
+void* PreallocatedStorage::New(size_t size) {
+ return Isolate::Current()->PreallocatedStorageNew(size);
+}
+
+
+void PreallocatedStorage::Delete(void* p) {
+ return Isolate::Current()->PreallocatedStorageDelete(p);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_INL_H_
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdlib.h>
+#include "v8.h"
+#include "isolate.h"
+#include "allocation.h"
+/* TODO(isolates): This is what is included in bleeding_edge. The
+   include of v8.h was replaced with the headers below in
+   http://codereview.chromium.org/5005001/, but we need Isolate, and
+   Isolate needs a lot more, so v8.h is included again for now.
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
#include "allocation.h"
#include "utils.h"
+*/
namespace v8 {
namespace internal {
+#ifdef DEBUG
+
+NativeAllocationChecker::NativeAllocationChecker(
+ NativeAllocationChecker::NativeAllocationAllowed allowed)
+ : allowed_(allowed) {
+ if (allowed == DISALLOW) {
+ Isolate* isolate = Isolate::Current();
+ isolate->set_allocation_disallowed(isolate->allocation_disallowed() + 1);
+ }
+}
+
+
+NativeAllocationChecker::~NativeAllocationChecker() {
+ Isolate* isolate = Isolate::Current();
+ if (allowed_ == DISALLOW) {
+ isolate->set_allocation_disallowed(isolate->allocation_disallowed() - 1);
+ }
+ ASSERT(isolate->allocation_disallowed() >= 0);
+}
+
+
+bool NativeAllocationChecker::allocation_allowed() {
+ // TODO(isolates): either find a way to make this work that doesn't
+ // require initializing an isolate before we can use malloc or drop
+ // it completely.
+ return true;
+ // return Isolate::Current()->allocation_disallowed() == 0;
+}
+
+#endif // DEBUG
+
+
void* Malloced::New(size_t size) {
ASSERT(NativeAllocationChecker::allocation_allowed());
void* result = malloc(size);
}
-int NativeAllocationChecker::allocation_disallowed_ = 0;
-
-
-PreallocatedStorage PreallocatedStorage::in_use_list_(0);
-PreallocatedStorage PreallocatedStorage::free_list_(0);
-bool PreallocatedStorage::preallocated_ = false;
-
-
-void PreallocatedStorage::Init(size_t size) {
+void Isolate::PreallocatedStorageInit(size_t size) {
ASSERT(free_list_.next_ == &free_list_);
ASSERT(free_list_.previous_ == &free_list_);
PreallocatedStorage* free_chunk =
free_list_.next_ = free_list_.previous_ = free_chunk;
free_chunk->next_ = free_chunk->previous_ = &free_list_;
free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_ = true;
+ preallocated_storage_preallocated_ = true;
}
-void* PreallocatedStorage::New(size_t size) {
- if (!preallocated_) {
+void* Isolate::PreallocatedStorageNew(size_t size) {
+ if (!preallocated_storage_preallocated_) {
return FreeStoreAllocationPolicy::New(size);
}
ASSERT(free_list_.next_ != &free_list_);
// We don't attempt to coalesce.
-void PreallocatedStorage::Delete(void* p) {
+void Isolate::PreallocatedStorageDelete(void* p) {
if (p == NULL) {
return;
}
- if (!preallocated_) {
+ if (!preallocated_storage_preallocated_) {
FreeStoreAllocationPolicy::Delete(p);
return;
}
// the C++ heap only!
class NativeAllocationChecker {
public:
- typedef enum { ALLOW, DISALLOW } NativeAllocationAllowed;
- explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed)
- : allowed_(allowed) {
+ enum NativeAllocationAllowed { ALLOW, DISALLOW };
#ifdef DEBUG
- if (allowed == DISALLOW) {
- allocation_disallowed_++;
- }
-#endif
- }
- ~NativeAllocationChecker() {
-#ifdef DEBUG
- if (allowed_ == DISALLOW) {
- allocation_disallowed_--;
- }
-#endif
- ASSERT(allocation_disallowed_ >= 0);
- }
- static inline bool allocation_allowed() {
- return allocation_disallowed_ == 0;
- }
+ explicit NativeAllocationChecker(NativeAllocationAllowed allowed);
+ ~NativeAllocationChecker();
+ static bool allocation_allowed();
private:
- // This static counter ensures that NativeAllocationCheckers can be nested.
- static int allocation_disallowed_;
// This flag applies to this particular instance.
NativeAllocationAllowed allowed_;
+#else
+ explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed) {}
+ static inline bool allocation_allowed() { return true; }
+#endif
};
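
A hedged usage sketch of the checker declared above (effective in DEBUG builds only; note the TODO in allocation.cc that currently short-circuits allocation_allowed()):

    {
      // While this checker is alive, Malloced::New will ASSERT in debug
      // builds if anything allocates on the C++ heap.
      NativeAllocationChecker no_alloc(NativeAllocationChecker::DISALLOW);
      // ... code that must not allocate on the C++ heap ...
    }
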
// Allocation policy for allocating in preallocated space.
// Used as an allocation policy for ScopeInfo when generating
// stack traces.
-class PreallocatedStorage : public AllStatic {
+class PreallocatedStorage {
public:
explicit PreallocatedStorage(size_t size);
size_t size() { return size_; }
- static void* New(size_t size);
- static void Delete(void* p);
- // Preallocate a set number of bytes.
- static void Init(size_t size);
+  // TODO(isolates): Get rid of these; we'll have to change the allocator
+  // interface to include a pointer to an isolate to do this
+  // efficiently.
+ static inline void* New(size_t size);
+ static inline void Delete(void* p);
private:
size_t size_;
PreallocatedStorage* previous_;
PreallocatedStorage* next_;
- static bool preallocated_;
-
- static PreallocatedStorage in_use_list_;
- static PreallocatedStorage free_list_;
void LinkTo(PreallocatedStorage* other);
void Unlink();
+
+ friend class Isolate;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
};
#include "runtime-profiler.h"
#include "serialize.h"
#include "snapshot.h"
-#include "top.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
-#define LOG_API(expr) LOG(ApiEntryCall(expr))
+#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
+// TODO(isolates): avoid repeated TLS reads in function prologues.
#ifdef ENABLE_VMSTATE_TRACKING
-#define ENTER_V8 ASSERT(i::V8::IsRunning()); i::VMState __state__(i::OTHER)
-#define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
+#define ENTER_V8 \
+ ASSERT(i::Isolate::Current()->IsInitialized()); \
+ i::VMState __state__(i::Isolate::Current(), i::OTHER)
+#define LEAVE_V8 \
+ i::VMState __state__(i::Isolate::Current(), i::EXTERNAL)
#else
#define ENTER_V8 ((void) 0)
#define LEAVE_V8 ((void) 0)
namespace v8 {
-#define ON_BAILOUT(location, code) \
- if (IsDeadCheck(location) || v8::V8::IsExecutionTerminating()) { \
+#define ON_BAILOUT(isolate, location, code) \
+ if (IsDeadCheck(isolate, location) || \
+ v8::V8::IsExecutionTerminating()) { \
code; \
UNREACHABLE(); \
}
-#define EXCEPTION_PREAMBLE() \
- thread_local.IncrementCallDepth(); \
- ASSERT(!i::Top::external_caught_exception()); \
+#define EXCEPTION_PREAMBLE() \
+ i::Isolate::Current()->handle_scope_implementer()->IncrementCallDepth(); \
+ ASSERT(!i::Isolate::Current()->external_caught_exception()); \
bool has_pending_exception = false
#define EXCEPTION_BAILOUT_CHECK(value) \
do { \
- thread_local.DecrementCallDepth(); \
+ i::HandleScopeImplementer* handle_scope_implementer = \
+ isolate->handle_scope_implementer(); \
+ handle_scope_implementer->DecrementCallDepth(); \
if (has_pending_exception) { \
- if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
- if (!thread_local.ignore_out_of_memory()) \
+ if (handle_scope_implementer->CallDepthIsZero() && \
+ i::Isolate::Current()->is_out_of_memory()) { \
+ if (!handle_scope_implementer->ignore_out_of_memory()) \
i::V8::FatalProcessOutOfMemory(NULL); \
} \
- bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
- i::Top::OptionalRescheduleException(call_depth_is_zero); \
+ bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
+ i::Isolate::Current()->OptionalRescheduleException(call_depth_is_zero); \
return value; \
} \
} while (false)
+// TODO(isolates): Add a parameter to this macro for an isolate.
#define API_ENTRY_CHECK(msg) \
do { \
if (v8::Locker::IsActive()) { \
- ApiCheck(i::ThreadManager::IsLockedByCurrentThread(), \
+ ApiCheck(i::Isolate::Current()->thread_manager()-> \
+ IsLockedByCurrentThread(), \
msg, \
"Entering the V8 API without proper locking in place"); \
} \
} while (false)
-// --- D a t a t h a t i s s p e c i f i c t o a t h r e a d ---
-
-
-static i::HandleScopeImplementer thread_local;
-
-
// --- E x c e p t i o n B e h a v i o r ---
-static FatalErrorCallback exception_behavior = NULL;
-
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
#ifdef ENABLE_VMSTATE_TRACKING
- i::VMState __state__(i::OTHER);
+ i::VMState __state__(i::Isolate::Current(), i::OTHER);
#endif
API_Fatal(location, message);
}
-static FatalErrorCallback& GetFatalErrorHandler() {
- if (exception_behavior == NULL) {
- exception_behavior = DefaultFatalErrorHandler;
+static FatalErrorCallback GetFatalErrorHandler() {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->exception_behavior() == NULL) {
+ isolate->set_exception_behavior(DefaultFatalErrorHandler);
}
- return exception_behavior;
+ return isolate->exception_behavior();
}
heap_stats.os_error = &os_error;
int end_marker;
heap_stats.end_marker = &end_marker;
- i::Heap::RecordStats(&heap_stats, take_snapshot);
+ HEAP->RecordStats(&heap_stats, take_snapshot);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
}
-void V8::SetFatalErrorHandler(FatalErrorCallback that) {
- exception_behavior = that;
-}
-
-
bool Utils::ReportApiFailure(const char* location, const char* message) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, message);
* advantage over ON_BAILOUT that it actually initializes the VM if this has not
* yet been done.
*/
-static inline bool IsDeadCheck(const char* location) {
- return !i::V8::IsRunning()
+static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
+  return isolate != NULL && !isolate->IsInitialized()
&& i::V8::IsDead() ? ReportV8Dead(location) : false;
}
// --- S t a t i c s ---
-static i::StringInputBuffer write_input_buffer;
+static bool InitializeHelper() {
+ if (i::Snapshot::Initialize()) return true;
+ return i::V8::Initialize(NULL);
+}
-static inline bool EnsureInitialized(const char* location) {
- if (i::V8::IsRunning()) {
- return true;
+static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
+ const char* location) {
+ if (IsDeadCheck(isolate, location)) return false;
+ if (isolate != NULL) {
+ if (isolate->IsInitialized()) return true;
}
- if (IsDeadCheck(location)) {
- return false;
- }
- return ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
+ return ApiCheck(InitializeHelper(), location, "Error initializing V8");
}
+static inline bool EnsureInitialized(const char* location) {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ return EnsureInitializedForIsolate(isolate, location);
+}
-ImplementationUtilities::HandleScopeData*
- ImplementationUtilities::CurrentHandleScope() {
- return &i::HandleScope::current_;
+// Some initializing API functions are called early and may be
+// called on a thread different from the static initializer thread.
+// If the Isolate API is used, Isolate::Enter() will initialize TLS
+// so that Isolate::Current() works. In the legacy case, the thread
+// may not have TLS initialized yet. However, in initializing APIs
+// it may be too early to call EnsureInitialized(), since some
+// pre-init parameters still have to be configured.
+static inline i::Isolate* EnterIsolateIfNeeded() {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  if (isolate != NULL) return isolate;
+
+ i::Isolate::EnterDefaultIsolate();
+ isolate = i::Isolate::Current();
+ return isolate;
+}
+
+
+void V8::SetFatalErrorHandler(FatalErrorCallback that) {
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+ isolate->set_exception_behavior(that);
}
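
A hedged sketch of the embedder side of this API; the callback signature mirrors DefaultFatalErrorHandler above, and the handler name is hypothetical:

    #include <cstdio>
    #include <cstdlib>

    static void MyFatalErrorHandler(const char* location,
                                    const char* message) {
      fprintf(stderr, "V8 fatal error in %s: %s\n", location, message);
      abort();
    }

    static void InstallFatalErrorHandler() {
      // Install once at startup, before heavy use of the API.
      v8::V8::SetFatalErrorHandler(MyFatalErrorHandler);
    }
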
v8::Handle<v8::Primitive> ImplementationUtilities::Undefined() {
if (!EnsureInitialized("v8::Undefined()")) return v8::Handle<v8::Primitive>();
- return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
+ return v8::Handle<Primitive>(ToApi<Primitive>(FACTORY->undefined_value()));
}
v8::Handle<v8::Primitive> ImplementationUtilities::Null() {
- if (!EnsureInitialized("v8::Null()")) return v8::Handle<v8::Primitive>();
- return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::null_value()));
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ if (!EnsureInitializedForIsolate(isolate, "v8::Null()"))
+ return v8::Handle<v8::Primitive>();
+ return v8::Handle<Primitive>(
+ ToApi<Primitive>(isolate->factory()->null_value()));
}
v8::Handle<v8::Boolean> ImplementationUtilities::True() {
if (!EnsureInitialized("v8::True()")) return v8::Handle<v8::Boolean>();
- return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::true_value()));
+ return v8::Handle<v8::Boolean>(ToApi<Boolean>(FACTORY->true_value()));
}
v8::Handle<v8::Boolean> ImplementationUtilities::False() {
if (!EnsureInitialized("v8::False()")) return v8::Handle<v8::Boolean>();
- return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::false_value()));
+ return v8::Handle<v8::Boolean>(ToApi<Boolean>(FACTORY->false_value()));
}
-
void V8::SetFlagsFromString(const char* str, int length) {
i::FlagList::SetFlagsFromString(str, length);
}
v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- if (IsDeadCheck("v8::ThrowException()")) return v8::Handle<Value>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ThrowException()")) {
+ return v8::Handle<Value>();
+ }
ENTER_V8;
// If we're passed an empty handle, we throw an undefined exception
// to deal more gracefully with out of memory situations.
if (value.IsEmpty()) {
- i::Top::ScheduleThrow(i::Heap::undefined_value());
+ isolate->ScheduleThrow(HEAP->undefined_value());
} else {
- i::Top::ScheduleThrow(*Utils::OpenHandle(*value));
+ isolate->ScheduleThrow(*Utils::OpenHandle(*value));
}
return v8::Undefined();
}
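
For context, a typical embedder callback that reaches this scheduled-throw path; a hedged sketch with a hypothetical callback name:

    static v8::Handle<v8::Value> RequireOneArg(const v8::Arguments& args) {
      if (args.Length() < 1) {
        return v8::ThrowException(
            v8::String::New("expected at least one argument"));
      }
      return v8::Undefined();
    }
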
void RegisteredExtension::Register(RegisteredExtension* that) {
- that->next_ = RegisteredExtension::first_extension_;
- RegisteredExtension::first_extension_ = that;
+ that->next_ = first_extension_;
+ first_extension_ = that;
}
v8::Handle<Primitive> Undefined() {
- LOG_API("Undefined");
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "Undefined");
return ImplementationUtilities::Undefined();
}
v8::Handle<Primitive> Null() {
- LOG_API("Null");
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "Null");
return ImplementationUtilities::Null();
}
v8::Handle<Boolean> True() {
- LOG_API("True");
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "True");
return ImplementationUtilities::True();
}
v8::Handle<Boolean> False() {
- LOG_API("False");
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "False");
return ImplementationUtilities::False();
}
bool SetResourceConstraints(ResourceConstraints* constraints) {
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+
int young_space_size = constraints->max_young_space_size();
int old_gen_size = constraints->max_old_space_size();
int max_executable_size = constraints->max_executable_size();
if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
- bool result = i::Heap::ConfigureHeap(young_space_size / 2,
- old_gen_size,
- max_executable_size);
+ // After initialization it's too late to change Heap constraints.
+ ASSERT(!isolate->IsInitialized());
+ bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
+ old_gen_size,
+ max_executable_size);
if (!result) return false;
}
if (constraints->stack_limit() != NULL) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
- i::StackGuard::SetStackLimit(limit);
+ isolate->stack_guard()->SetStackLimit(limit);
}
return true;
}
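
A hedged sketch of the call order the ASSERT above enforces: constraints must be applied before the isolate's heap is initialized. The values are illustrative only; see v8.h for the units and defaults:

    v8::ResourceConstraints constraints;
    constraints.set_max_young_space_size(2 * 1024);  // illustrative value
    constraints.set_max_old_space_size(32 * 1024);   // illustrative value
    v8::SetResourceConstraints(&constraints);        // before running any JS
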
i::Object** V8::GlobalizeReference(i::Object** obj) {
- if (IsDeadCheck("V8::Persistent::New")) return NULL;
- LOG_API("Persistent::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
+ LOG_API(isolate, "Persistent::New");
i::Handle<i::Object> result =
- i::GlobalHandles::Create(*obj);
+ isolate->global_handles()->Create(*obj);
return result.location();
}
void V8::MakeWeak(i::Object** object, void* parameters,
WeakReferenceCallback callback) {
- LOG_API("MakeWeak");
- i::GlobalHandles::MakeWeak(object, parameters, callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "MakeWeak");
+ isolate->global_handles()->MakeWeak(object, parameters,
+ callback);
}
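
These internals back the public Persistent::MakeWeak; a hedged sketch of that public-side pattern, with hypothetical names:

    static void OnWeakObject(v8::Persistent<v8::Value> object, void* param) {
      object.Dispose();  // last chance before the object is collected
      object.Clear();
    }

    static v8::Persistent<v8::Object> MakeWeakRef(v8::Handle<v8::Object> obj) {
      v8::Persistent<v8::Object> weak = v8::Persistent<v8::Object>::New(obj);
      weak.MakeWeak(NULL, OnWeakObject);
      return weak;
    }
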
void V8::ClearWeak(i::Object** obj) {
- LOG_API("ClearWeak");
- i::GlobalHandles::ClearWeakness(obj);
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "ClearWeak");
+ isolate->global_handles()->ClearWeakness(obj);
}
bool V8::IsGlobalNearDeath(i::Object** obj) {
- LOG_API("IsGlobalNearDeath");
- if (!i::V8::IsRunning()) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "IsGlobalNearDeath");
+ if (!isolate->IsInitialized()) return false;
return i::GlobalHandles::IsNearDeath(obj);
}
bool V8::IsGlobalWeak(i::Object** obj) {
- LOG_API("IsGlobalWeak");
- if (!i::V8::IsRunning()) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "IsGlobalWeak");
+ if (!isolate->IsInitialized()) return false;
return i::GlobalHandles::IsWeak(obj);
}
void V8::DisposeGlobal(i::Object** obj) {
- LOG_API("DisposeGlobal");
- if (!i::V8::IsRunning()) return;
- i::GlobalHandles::Destroy(obj);
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "DisposeGlobal");
+ if (!isolate->IsInitialized()) return;
+ isolate->global_handles()->Destroy(obj);
}
// --- H a n d l e s ---
-HandleScope::HandleScope()
- : prev_next_(i::HandleScope::current_.next),
- prev_limit_(i::HandleScope::current_.limit),
- is_closed_(false) {
+HandleScope::HandleScope() {
API_ENTRY_CHECK("HandleScope::HandleScope");
- i::HandleScope::current_.level++;
+ i::Isolate* isolate = i::Isolate::Current();
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate_ = isolate;
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ is_closed_ = false;
+ current->level++;
}
void HandleScope::Leave() {
- i::HandleScope::current_.level--;
- ASSERT(i::HandleScope::current_.level >= 0);
- i::HandleScope::current_.next = prev_next_;
- if (i::HandleScope::current_.limit != prev_limit_) {
- i::HandleScope::current_.limit = prev_limit_;
- i::HandleScope::DeleteExtensions();
+ ASSERT(isolate_ == i::Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ current->level--;
+ ASSERT(current->level >= 0);
+ current->next = prev_next_;
+ if (current->limit != prev_limit_) {
+ current->limit = prev_limit_;
+ i::HandleScope::DeleteExtensions(isolate_);
}
#ifdef DEBUG
int HandleScope::NumberOfHandles() {
+ EnsureInitialized("HandleScope::NumberOfHandles");
return i::HandleScope::NumberOfHandles();
}
-i::Object** v8::HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(value);
+i::Object** HandleScope::CreateHandle(i::Object* value) {
+ return i::HandleScope::CreateHandle(value, i::Isolate::Current());
+}
+
+
+i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
+ ASSERT(value->IsHeapObject());
+ return reinterpret_cast<i::Object**>(
+ i::HandleScope::CreateHandle(value, value->GetIsolate()));
}
void Context::Enter() {
- if (IsDeadCheck("v8::Context::Enter()")) return;
+ // TODO(isolates): Context should have a pointer to isolate.
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
ENTER_V8;
i::Handle<i::Context> env = Utils::OpenHandle(this);
- thread_local.EnterContext(env);
+ isolate->handle_scope_implementer()->EnterContext(env);
- thread_local.SaveContext(i::Top::context());
- i::Top::set_context(*env);
+ isolate->handle_scope_implementer()->SaveContext(isolate->context());
+ isolate->set_context(*env);
}
void Context::Exit() {
- if (!i::V8::IsRunning()) return;
- if (!ApiCheck(thread_local.LeaveLastContext(),
+ // TODO(isolates): Context should have a pointer to isolate.
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return;
+
+ if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
"v8::Context::Exit()",
"Cannot exit non-entered context")) {
return;
}
// Content of 'last_context' could be NULL.
- i::Context* last_context = thread_local.RestoreContext();
- i::Top::set_context(last_context);
+ i::Context* last_context =
+ isolate->handle_scope_implementer()->RestoreContext();
+ isolate->set_context(last_context);
}
void Context::SetData(v8::Handle<String> data) {
- if (IsDeadCheck("v8::Context::SetData()")) return;
+ // TODO(isolates): Context should have a pointer to isolate.
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
ENTER_V8;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
ASSERT(env->IsGlobalContext());
v8::Local<v8::Value> Context::GetData() {
- if (IsDeadCheck("v8::Context::GetData()")) return v8::Local<Value>();
+ // TODO(isolates): Context should have a pointer to isolate.
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
+ return v8::Local<Value>();
+ }
ENTER_V8;
i::Object* raw_result = NULL;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
ASSERT(env->IsGlobalContext());
if (env->IsGlobalContext()) {
"Local scope has already been closed")) {
return 0;
}
- LOG_API("CloseHandleScope");
+ LOG_API(isolate_, "CloseHandleScope");
// Read the result before popping the handle block.
i::Object* result = NULL;
NeanderObject::NeanderObject(int size) {
EnsureInitialized("v8::Nowhere");
ENTER_V8;
- value_ = i::Factory::NewNeanderObject();
- i::Handle<i::FixedArray> elements = i::Factory::NewFixedArray(size);
+ value_ = FACTORY->NewNeanderObject();
+ i::Handle<i::FixedArray> elements = FACTORY->NewFixedArray(size);
value_->set_elements(*elements);
}
int length = this->length();
int size = obj_.size();
if (length == size - 1) {
- i::Handle<i::FixedArray> new_elms = i::Factory::NewFixedArray(2 * size);
+ i::Handle<i::FixedArray> new_elms = FACTORY->NewFixedArray(2 * size);
for (int i = 0; i < length; i++)
new_elms->set(i + 1, get(i));
obj_.value()->set_elements(*new_elms);
void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
- if (IsDeadCheck("v8::Template::Set()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
if (list->IsUndefined()) {
list = NeanderArray().value();
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- if (IsDeadCheck("v8::FunctionTemplate::PrototypeTemplate()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) {
return Local<ObjectTemplate>();
}
ENTER_V8;
void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
- if (IsDeadCheck("v8::FunctionTemplate::Inherit()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return;
ENTER_V8;
Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
}
-// To distinguish the function templates, so that we can find them in the
-// function cache of the global context.
-static int next_serial_number = 0;
-
-
Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
v8::Handle<Value> data, v8::Handle<Signature> signature) {
- EnsureInitialized("v8::FunctionTemplate::New()");
- LOG_API("FunctionTemplate::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
+ LOG_API(isolate, "FunctionTemplate::New");
ENTER_V8;
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
+ FACTORY->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
InitializeFunctionTemplate(obj);
- obj->set_serial_number(i::Smi::FromInt(next_serial_number++));
+ int next_serial_number = isolate->next_serial_number();
+ isolate->set_next_serial_number(next_serial_number + 1);
+ obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
if (data.IsEmpty()) data = v8::Undefined();
Utils::ToLocal(obj)->SetCallHandler(callback, data);
Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
int argc, Handle<FunctionTemplate> argv[]) {
- EnsureInitialized("v8::Signature::New()");
- LOG_API("Signature::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Signature::New()");
+ LOG_API(isolate, "Signature::New");
ENTER_V8;
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::SIGNATURE_INFO_TYPE);
+ FACTORY->NewStruct(i::SIGNATURE_INFO_TYPE);
i::Handle<i::SignatureInfo> obj =
i::Handle<i::SignatureInfo>::cast(struct_obj);
if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
if (argc > 0) {
- i::Handle<i::FixedArray> args = i::Factory::NewFixedArray(argc);
+ i::Handle<i::FixedArray> args = FACTORY->NewFixedArray(argc);
for (int i = 0; i < argc; i++) {
if (!argv[i].IsEmpty())
args->set(i, *Utils::OpenHandle(*argv[i]));
Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
- EnsureInitialized("v8::TypeSwitch::New()");
- LOG_API("TypeSwitch::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::TypeSwitch::New()");
+ LOG_API(isolate, "TypeSwitch::New");
ENTER_V8;
- i::Handle<i::FixedArray> vector = i::Factory::NewFixedArray(argc);
+ i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
for (int i = 0; i < argc; i++)
vector->set(i, *Utils::OpenHandle(*types[i]));
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::TYPE_SWITCH_INFO_TYPE);
+ isolate->factory()->NewStruct(i::TYPE_SWITCH_INFO_TYPE);
i::Handle<i::TypeSwitchInfo> obj =
i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
obj->set_types(*vector);
int TypeSwitch::match(v8::Handle<Value> value) {
- LOG_API("TypeSwitch::match");
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "TypeSwitch::match");
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
i::FixedArray* types = i::FixedArray::cast(info->types());
void FunctionTemplate::SetCallHandler(InvocationCallback callback,
v8::Handle<Value> data) {
- if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ FACTORY->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
- i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
+ i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
ASSERT(getter != NULL);
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
- if (IsDeadCheck("v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate,
+ "v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
return;
}
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
getter, setter, data,
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
- if (IsDeadCheck("v8::FunctionTemplate::InstanceTemplate()")
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
|| EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
return Local<ObjectTemplate>();
ENTER_V8;
void FunctionTemplate::SetClassName(Handle<String> name) {
- if (IsDeadCheck("v8::FunctionTemplate::SetClassName()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
ENTER_V8;
Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
}
void FunctionTemplate::SetHiddenPrototype(bool value) {
- if (IsDeadCheck("v8::FunctionTemplate::SetHiddenPrototype()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) {
+ return;
+ }
ENTER_V8;
Utils::OpenHandle(this)->set_hidden_prototype(value);
}
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck("v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate,
+ "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
return;
}
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
IndexedPropertyDeleter remover,
IndexedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck(
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate,
"v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
return;
}
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
void FunctionTemplate::SetInstanceCallAsFunctionHandler(
InvocationCallback callback,
Handle<Value> data) {
- if (IsDeadCheck("v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate,
+ "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
return;
}
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
Local<ObjectTemplate> ObjectTemplate::New(
v8::Handle<FunctionTemplate> constructor) {
- if (IsDeadCheck("v8::ObjectTemplate::New()")) return Local<ObjectTemplate>();
- EnsureInitialized("v8::ObjectTemplate::New()");
- LOG_API("ObjectTemplate::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) {
+ return Local<ObjectTemplate>();
+ }
+ EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
+ LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8;
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
+ FACTORY->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
i::Handle<i::ObjectTemplateInfo> obj =
i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attribute) {
- if (IsDeadCheck("v8::ObjectTemplate::SetAccessor()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
+ return;
+ }
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
void ObjectTemplate::MarkAsUndetectable() {
- if (IsDeadCheck("v8::ObjectTemplate::MarkAsUndetectable()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return;
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
IndexedSecurityCallback indexed_callback,
Handle<Value> data,
bool turned_on_by_default) {
- if (IsDeadCheck("v8::ObjectTemplate::SetAccessCheckCallbacks()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) {
+ return;
+ }
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::Handle<i::Struct> struct_info =
- i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+ FACTORY->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
IndexedPropertyDeleter remover,
IndexedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck("v8::ObjectTemplate::SetIndexedPropertyHandler()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
+ return;
+ }
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
Handle<Value> data) {
- if (IsDeadCheck("v8::ObjectTemplate::SetCallAsFunctionHandler()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate,
+ "v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
+ return;
+ }
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
int ObjectTemplate::InternalFieldCount() {
- if (IsDeadCheck("v8::ObjectTemplate::InternalFieldCount()")) {
+ if (IsDeadCheck(i::Isolate::Current(),
+ "v8::ObjectTemplate::InternalFieldCount()")) {
return 0;
}
return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
void ObjectTemplate::SetInternalFieldCount(int value) {
- if (IsDeadCheck("v8::ObjectTemplate::SetInternalFieldCount()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) {
+ return;
+ }
if (!ApiCheck(i::Smi::IsValid(value),
"v8::ObjectTemplate::SetInternalFieldCount()",
"Invalid internal field count")) {
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- ON_BAILOUT("v8::Script::New()", return Local<Script>());
- LOG_API("Script::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
+ LOG_API(isolate, "Script::New");
ENTER_V8;
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
- LOG_API("Script::Compile");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
+ LOG_API(isolate, "Script::Compile");
ENTER_V8;
Local<Script> generic = New(source, origin, pre_data, script_data);
if (generic.IsEmpty())
i::Handle<i::SharedFunctionInfo> function =
i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
i::Handle<i::JSFunction> result =
- i::Factory::NewFunctionFromSharedFunctionInfo(function,
- i::Top::global_context());
+ FACTORY->NewFunctionFromSharedFunctionInfo(
+ function,
+ i::Isolate::Current()->global_context());
return Local<Script>(ToApi<Script>(result));
}
Local<Value> Script::Run() {
- ON_BAILOUT("v8::Script::Run()", return Local<Value>());
- LOG_API("Script::Run");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
+ LOG_API(isolate, "Script::Run");
ENTER_V8;
i::Object* raw_result = NULL;
{
if (obj->IsSharedFunctionInfo()) {
i::Handle<i::SharedFunctionInfo>
function_info(i::SharedFunctionInfo::cast(*obj));
- fun = i::Factory::NewFunctionFromSharedFunctionInfo(
- function_info, i::Top::global_context());
+ fun = FACTORY->NewFunctionFromSharedFunctionInfo(
+ function_info, i::Isolate::Current()->global_context());
} else {
fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj));
}
EXCEPTION_PREAMBLE();
- i::Handle<i::Object> receiver(i::Top::context()->global_proxy());
+ i::Handle<i::Object> receiver(
+ i::Isolate::Current()->context()->global_proxy());
i::Handle<i::Object> result =
i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(Local<Value>());
Local<Value> Script::Id() {
- ON_BAILOUT("v8::Script::Id()", return Local<Value>());
- LOG_API("Script::Id");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
+ LOG_API(isolate, "Script::Id");
i::Object* raw_id = NULL;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
i::Handle<i::Object> id(script->id());
void Script::SetData(v8::Handle<String> data) {
- ON_BAILOUT("v8::Script::SetData()", return);
- LOG_API("Script::SetData");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::SetData()", return);
+ LOG_API(isolate, "Script::SetData");
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
v8::TryCatch::TryCatch()
- : next_(i::Top::try_catch_handler_address()),
- exception_(i::Heap::the_hole_value()),
+ : next_(i::Isolate::Current()->try_catch_handler_address()),
+ exception_(HEAP->the_hole_value()),
message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
rethrow_(false) {
- i::Top::RegisterTryCatchHandler(this);
+ i::Isolate::Current()->RegisterTryCatchHandler(this);
}
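+ // Try-catch handlers are now registered on a per-isolate chain
+ // (Register/UnregisterTryCatchHandler), so a v8::TryCatch guards only
+ // JavaScript executed in the isolate that was current at construction.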
if (rethrow_) {
v8::HandleScope scope;
v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
- i::Top::UnregisterTryCatchHandler(this);
+ i::Isolate::Current()->UnregisterTryCatchHandler(this);
v8::ThrowException(exc);
} else {
- i::Top::UnregisterTryCatchHandler(this);
+ i::Isolate::Current()->UnregisterTryCatchHandler(this);
}
}
if (!raw_obj->IsJSObject()) return v8::Local<Value>();
v8::HandleScope scope;
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj));
- i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("stack");
+ i::Handle<i::String> name = FACTORY->LookupAsciiSymbol("stack");
if (!obj->HasProperty(*name))
return v8::Local<Value>();
return scope.Close(v8::Utils::ToLocal(i::GetProperty(obj, name)));
void v8::TryCatch::Reset() {
- exception_ = i::Heap::the_hole_value();
+ exception_ = HEAP->the_hole_value();
message_ = i::Smi::FromInt(0);
}
Local<String> Message::Get() const {
- ON_BAILOUT("v8::Message::Get()", return Local<String>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
ENTER_V8;
HandleScope scope;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
v8::Handle<Value> Message::GetScriptResourceName() const {
- if (IsDeadCheck("v8::Message::GetScriptResourceName()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) {
return Local<String>();
}
ENTER_V8;
v8::Handle<Value> Message::GetScriptData() const {
- if (IsDeadCheck("v8::Message::GetScriptResourceData()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) {
return Local<Value>();
}
ENTER_V8;
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
- if (IsDeadCheck("v8::Message::GetStackTrace()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) {
return Local<v8::StackTrace>();
}
ENTER_V8;
int argc,
i::Object** argv[],
bool* has_pending_exception) {
- i::Handle<i::String> fmt_str = i::Factory::LookupAsciiSymbol(name);
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
i::Object* object_fun =
- i::Top::builtins()->GetPropertyNoExceptionThrown(*fmt_str);
+ isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
i::Handle<i::JSFunction> fun =
i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
i::Handle<i::Object> value =
bool* has_pending_exception) {
i::Object** argv[1] = { data.location() };
return CallV8HeapFunction(name,
- i::Top::builtins(),
+ i::Isolate::Current()->js_builtins_object(),
1,
argv,
has_pending_exception);
int Message::GetLineNumber() const {
- ON_BAILOUT("v8::Message::GetLineNumber()", return kNoLineNumberInfo);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Message::GetLineNumber()", return kNoLineNumberInfo);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
int Message::GetStartPosition() const {
- if (IsDeadCheck("v8::Message::GetStartPosition()")) return 0;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0;
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
return message->start_position();
int Message::GetEndPosition() const {
- if (IsDeadCheck("v8::Message::GetEndPosition()")) return 0;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0;
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
return message->end_position();
int Message::GetStartColumn() const {
- if (IsDeadCheck("v8::Message::GetStartColumn()")) return kNoColumnInfo;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) {
+ return kNoColumnInfo;
+ }
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
int Message::GetEndColumn() const {
- if (IsDeadCheck("v8::Message::GetEndColumn()")) return kNoColumnInfo;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return kNoColumnInfo;
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
Local<String> Message::GetSourceLine() const {
- ON_BAILOUT("v8::Message::GetSourceLine()", return Local<String>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
ENTER_V8;
HandleScope scope;
EXCEPTION_PREAMBLE();
void Message::PrintCurrentStackTrace(FILE* out) {
- if (IsDeadCheck("v8::Message::PrintCurrentStackTrace()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
ENTER_V8;
- i::Top::PrintCurrentStackTrace(out);
+ isolate->PrintCurrentStackTrace(out);
}
// --- S t a c k T r a c e ---
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
- if (IsDeadCheck("v8::StackTrace::GetFrame()")) return Local<StackFrame>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
+ return Local<StackFrame>();
+ }
ENTER_V8;
HandleScope scope;
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
int StackTrace::GetFrameCount() const {
- if (IsDeadCheck("v8::StackTrace::GetFrameCount()")) return -1;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
ENTER_V8;
return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}
Local<Array> StackTrace::AsArray() {
- if (IsDeadCheck("v8::StackTrace::AsArray()")) Local<Array>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
ENTER_V8;
return Utils::ToLocal(Utils::OpenHandle(this));
}
Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
StackTraceOptions options) {
- if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) Local<StackTrace>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
+ return Local<StackTrace>();
+ }
ENTER_V8;
i::Handle<i::JSArray> stackTrace =
- i::Top::CaptureCurrentStackTrace(frame_limit, options);
+ isolate->CaptureCurrentStackTrace(frame_limit, options);
return Utils::StackTraceToLocal(stackTrace);
}
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
- if (IsDeadCheck("v8::StackFrame::GetLineNumber()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
return Message::kNoLineNumberInfo;
}
ENTER_V8;
- i::HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> line = GetProperty(self, "lineNumber");
if (!line->IsSmi()) {
int StackFrame::GetColumn() const {
- if (IsDeadCheck("v8::StackFrame::GetColumn()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
return Message::kNoColumnInfo;
}
ENTER_V8;
- i::HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> column = GetProperty(self, "column");
if (!column->IsSmi()) {
Local<String> StackFrame::GetScriptName() const {
- if (IsDeadCheck("v8::StackFrame::GetScriptName()")) return Local<String>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
+ return Local<String>();
+ }
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
- if (IsDeadCheck("v8::StackFrame::GetScriptNameOrSourceURL()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
return Local<String>();
}
ENTER_V8;
Local<String> StackFrame::GetFunctionName() const {
- if (IsDeadCheck("v8::StackFrame::GetFunctionName()")) return Local<String>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
+ return Local<String>();
+ }
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
bool StackFrame::IsEval() const {
- if (IsDeadCheck("v8::StackFrame::IsEval()")) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
ENTER_V8;
- i::HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> is_eval = GetProperty(self, "isEval");
return is_eval->IsTrue();
bool StackFrame::IsConstructor() const {
- if (IsDeadCheck("v8::StackFrame::IsConstructor()")) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
ENTER_V8;
- i::HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor");
return is_constructor->IsTrue();
// --- D a t a ---
bool Value::IsUndefined() const {
- if (IsDeadCheck("v8::Value::IsUndefined()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
+ return false;
+ }
return Utils::OpenHandle(this)->IsUndefined();
}
bool Value::IsNull() const {
- if (IsDeadCheck("v8::Value::IsNull()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
return Utils::OpenHandle(this)->IsNull();
}
bool Value::IsTrue() const {
- if (IsDeadCheck("v8::Value::IsTrue()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
return Utils::OpenHandle(this)->IsTrue();
}
bool Value::IsFalse() const {
- if (IsDeadCheck("v8::Value::IsFalse()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
return Utils::OpenHandle(this)->IsFalse();
}
bool Value::IsFunction() const {
- if (IsDeadCheck("v8::Value::IsFunction()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
+ return false;
+ }
return Utils::OpenHandle(this)->IsJSFunction();
}
bool Value::FullIsString() const {
- if (IsDeadCheck("v8::Value::IsString()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
bool result = Utils::OpenHandle(this)->IsString();
ASSERT_EQ(result, QuickIsString());
return result;
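+ // Value::IsString() is inlined in v8.h via the QuickIsString() fast
+ // path; FullIsString() is the out-of-line fallback, and the ASSERT_EQ
+ // above checks that the two stay in agreement.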
bool Value::IsArray() const {
- if (IsDeadCheck("v8::Value::IsArray()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
return Utils::OpenHandle(this)->IsJSArray();
}
bool Value::IsObject() const {
- if (IsDeadCheck("v8::Value::IsObject()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
}
bool Value::IsNumber() const {
- if (IsDeadCheck("v8::Value::IsNumber()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
return Utils::OpenHandle(this)->IsNumber();
}
bool Value::IsBoolean() const {
- if (IsDeadCheck("v8::Value::IsBoolean()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
+ return false;
+ }
return Utils::OpenHandle(this)->IsBoolean();
}
bool Value::IsExternal() const {
- if (IsDeadCheck("v8::Value::IsExternal()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
+ return false;
+ }
return Utils::OpenHandle(this)->IsProxy();
}
bool Value::IsInt32() const {
- if (IsDeadCheck("v8::Value::IsInt32()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
bool Value::IsUint32() const {
- if (IsDeadCheck("v8::Value::IsUint32()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
bool Value::IsDate() const {
- if (IsDeadCheck("v8::Value::IsDate()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(i::Heap::Date_symbol());
+ return obj->HasSpecificClassOf(HEAP->Date_symbol());
}
bool Value::IsRegExp() const {
- if (IsDeadCheck("v8::Value::IsRegExp()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsJSRegExp();
}
Local<String> Value::ToString() const {
- if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
- LOG_API("ToString");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::ToString()")) {
+ return Local<String>();
+ }
+ LOG_API(isolate, "ToString");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> str;
if (obj->IsString()) {
Local<String> Value::ToDetailString() const {
- if (IsDeadCheck("v8::Value::ToDetailString()")) return Local<String>();
- LOG_API("ToDetailString");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
+ return Local<String>();
+ }
+ LOG_API(isolate, "ToDetailString");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> str;
if (obj->IsString()) {
Local<v8::Object> Value::ToObject() const {
- if (IsDeadCheck("v8::Value::ToObject()")) return Local<v8::Object>();
- LOG_API("ToObject");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToObject()")) return Local<v8::Object>();
+ LOG_API(isolate, "ToObject");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> val;
if (obj->IsJSObject()) {
Local<Boolean> Value::ToBoolean() const {
- if (IsDeadCheck("v8::Value::ToBoolean()")) return Local<Boolean>();
- LOG_API("ToBoolean");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::ToBoolean()")) {
+ return Local<Boolean>();
+ }
+ LOG_API(isolate, "ToBoolean");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) {
return Local<Boolean>(ToApi<Boolean>(obj));
Local<Number> Value::ToNumber() const {
- if (IsDeadCheck("v8::Value::ToNumber()")) return Local<Number>();
- LOG_API("ToNumber");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) return Local<Number>();
+ LOG_API(isolate, "ToNumber");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
Local<Integer> Value::ToInteger() const {
- if (IsDeadCheck("v8::Value::ToInteger()")) return Local<Integer>();
- LOG_API("ToInteger");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
+ LOG_API(isolate, "ToInteger");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
void External::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::External::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsProxy(),
"v8::External::Cast()",
void v8::Object::CheckCast(Value* that) {
- if (IsDeadCheck("v8::Object::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSObject(),
"v8::Object::Cast()",
void v8::Function::CheckCast(Value* that) {
- if (IsDeadCheck("v8::Function::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSFunction(),
"v8::Function::Cast()",
void v8::String::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::String::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsString(),
"v8::String::Cast()",
void v8::Number::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::Number::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Number::Cast()",
void v8::Integer::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::Integer::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Integer::Cast()",
void v8::Array::CheckCast(Value* that) {
- if (IsDeadCheck("v8::Array::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArray(),
"v8::Array::Cast()",
void v8::Date::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::Date::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(i::Heap::Date_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(HEAP->Date_symbol()),
"v8::Date::Cast()",
"Could not convert to date");
}
void v8::RegExp::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::RegExp::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSRegExp(),
"v8::RegExp::Cast()",
bool Value::BooleanValue() const {
- if (IsDeadCheck("v8::Value::BooleanValue()")) return false;
- LOG_API("BooleanValue");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::BooleanValue()")) return false;
+ LOG_API(isolate, "BooleanValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) {
return obj->IsTrue();
double Value::NumberValue() const {
- if (IsDeadCheck("v8::Value::NumberValue()")) return i::OS::nan_value();
- LOG_API("NumberValue");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
+ return i::OS::nan_value();
+ }
+ LOG_API(isolate, "NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
int64_t Value::IntegerValue() const {
- if (IsDeadCheck("v8::Value::IntegerValue()")) return 0;
- LOG_API("IntegerValue");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
+ LOG_API(isolate, "IntegerValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
Local<Int32> Value::ToInt32() const {
- if (IsDeadCheck("v8::Value::ToInt32()")) return Local<Int32>();
- LOG_API("ToInt32");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
+ LOG_API(isolate, "ToInt32");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
Local<Uint32> Value::ToUint32() const {
- if (IsDeadCheck("v8::Value::ToUint32()")) return Local<Uint32>();
- LOG_API("ToUInt32");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
+ LOG_API(isolate, "ToUInt32");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
Local<Uint32> Value::ToArrayIndex() const {
- if (IsDeadCheck("v8::Value::ToArrayIndex()")) return Local<Uint32>();
- LOG_API("ToArrayIndex");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
+ LOG_API(isolate, "ToArrayIndex");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
value = i::Handle<i::Object>(i::Smi::FromInt(index));
} else {
- value = i::Factory::NewNumber(index);
+ value = FACTORY->NewNumber(index);
}
return Utils::Uint32ToLocal(value);
}
int32_t Value::Int32Value() const {
- if (IsDeadCheck("v8::Value::Int32Value()")) return 0;
- LOG_API("Int32Value");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
+ LOG_API(isolate, "Int32Value");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
- LOG_API("Int32Value (slow)");
+ LOG_API(isolate, "Int32Value (slow)");
ENTER_V8;
EXCEPTION_PREAMBLE();
i::Handle<i::Object> num =
bool Value::Equals(Handle<Value> that) const {
- if (IsDeadCheck("v8::Value::Equals()")
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::Equals()")
|| EmptyCheck("v8::Value::Equals()", this)
|| EmptyCheck("v8::Value::Equals()", that)) {
return false;
}
- LOG_API("Equals");
+ LOG_API(isolate, "Equals");
ENTER_V8;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
bool Value::StrictEquals(Handle<Value> that) const {
- if (IsDeadCheck("v8::Value::StrictEquals()")
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
|| EmptyCheck("v8::Value::StrictEquals()", this)
|| EmptyCheck("v8::Value::StrictEquals()", that)) {
return false;
}
- LOG_API("StrictEquals");
+ LOG_API(isolate, "StrictEquals");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// Must check HeapNumber first, since NaN !== NaN.
uint32_t Value::Uint32Value() const {
- if (IsDeadCheck("v8::Value::Uint32Value()")) return 0;
- LOG_API("Uint32Value");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
+ LOG_API(isolate, "Uint32Value");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
- ON_BAILOUT("v8::Object::Set()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::Set()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
- ON_BAILOUT("v8::Object::Set()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::Set()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE();
bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
- ON_BAILOUT("v8::Object::ForceSet()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::ForceSet()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
- ON_BAILOUT("v8::Object::ForceDelete()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::ForceDelete()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
Local<Value> v8::Object::Get(v8::Handle<Value> key) {
- ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
ENTER_V8;
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
Local<Value> v8::Object::Get(uint32_t index) {
- ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE();
Local<Value> v8::Object::GetPrototype() {
- ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::GetPrototype()",
+ return Local<v8::Value>());
ENTER_V8;
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> result = i::GetPrototype(self);
bool v8::Object::SetPrototype(Handle<Value> value) {
- ON_BAILOUT("v8::Object::SetPrototype()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::SetPrototype()", return false);
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Handle<FunctionTemplate> tmpl) {
- ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
+ ON_BAILOUT(i::Isolate::Current(),
+ "v8::Object::FindInstanceInPrototypeChain()",
return Local<v8::Object>());
ENTER_V8;
i::JSObject* object = *Utils::OpenHandle(this);
Local<Array> v8::Object::GetPropertyNames() {
- ON_BAILOUT("v8::Object::GetPropertyNames()", return Local<v8::Array>());
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::GetPropertyNames()",
+ return Local<v8::Array>());
ENTER_V8;
v8::HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
- i::Handle<i::FixedArray> elms = i::Factory::CopyFixedArray(value);
- i::Handle<i::JSArray> result = i::Factory::NewJSArrayWithElements(elms);
+ i::Handle<i::FixedArray> elms = FACTORY->CopyFixedArray(value);
+ i::Handle<i::JSArray> result = FACTORY->NewJSArrayWithElements(elms);
return scope.Close(Utils::ToLocal(result));
}
Local<String> v8::Object::ObjectProtoToString() {
- ON_BAILOUT("v8::Object::ObjectProtoToString()", return Local<v8::String>());
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::ObjectProtoToString()",
+ return Local<v8::String>());
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
Local<String> v8::Object::GetConstructorName() {
- ON_BAILOUT("v8::Object::GetConstructorName()", return Local<v8::String>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
+ return Local<v8::String>());
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> name(self->constructor_name());
bool v8::Object::Delete(v8::Handle<String> key) {
- ON_BAILOUT("v8::Object::Delete()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return i::DeleteProperty(self, key_obj)->IsTrue();
bool v8::Object::Has(v8::Handle<String> key) {
- ON_BAILOUT("v8::Object::Has()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::Has()", return false);
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
bool v8::Object::Delete(uint32_t index) {
- ON_BAILOUT("v8::Object::DeleteProperty()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::DeleteProperty()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
bool v8::Object::Has(uint32_t index) {
- ON_BAILOUT("v8::Object::HasProperty()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::HasProperty()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return self->HasElement(index);
}
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attributes) {
- ON_BAILOUT("v8::Object::SetAccessor()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
bool v8::Object::HasRealNamedProperty(Handle<String> key) {
- ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::HasRealNamedProperty()",
+ return false);
return Utils::OpenHandle(this)->HasRealNamedProperty(
*Utils::OpenHandle(*key));
}
bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- ON_BAILOUT("v8::Object::HasRealIndexedProperty()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::HasRealIndexedProperty()",
+ return false);
return Utils::OpenHandle(this)->HasRealElementProperty(index);
}
bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
- ON_BAILOUT("v8::Object::HasRealNamedCallbackProperty()", return false);
+ ON_BAILOUT(i::Isolate::Current(),
+ "v8::Object::HasRealNamedCallbackProperty()",
+ return false);
ENTER_V8;
return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
*Utils::OpenHandle(*key));
bool v8::Object::HasNamedLookupInterceptor() {
- ON_BAILOUT("v8::Object::HasNamedLookupInterceptor()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::HasNamedLookupInterceptor()",
+ return false);
return Utils::OpenHandle(this)->HasNamedInterceptor();
}
bool v8::Object::HasIndexedLookupInterceptor() {
- ON_BAILOUT("v8::Object::HasIndexedLookupInterceptor()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::HasIndexedLookupInterceptor()",
+ return false);
return Utils::OpenHandle(this)->HasIndexedInterceptor();
}
Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Handle<String> key) {
- ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
+ ON_BAILOUT(i::Isolate::Current(),
+ "v8::Object::GetRealNamedPropertyInPrototypeChain()",
return Local<Value>());
ENTER_V8;
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
- ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local<Value>());
+ ON_BAILOUT(i::Isolate::Current(), "v8::Object::GetRealNamedProperty()",
+ return Local<Value>());
ENTER_V8;
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
// Because the object gets a new map, existing inline cache caching
// the old map of this object will fail.
void v8::Object::TurnOnAccessCheck() {
- ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::TurnOnAccessCheck()", return);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
// When turning on access checks for a global object deoptimize all functions
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
i::Handle<i::Map> new_map =
- i::Factory::CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
+ FACTORY->CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
new_map->set_is_access_check_needed(true);
obj->set_map(*new_map);
}
Local<v8::Object> v8::Object::Clone() {
- ON_BAILOUT("v8::Object::Clone()", return Local<Object>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::Clone()", return Local<Object>());
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE();
int v8::Object::GetIdentityHash() {
- ON_BAILOUT("v8::Object::GetIdentityHash()", return 0);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::GetIdentityHash()", return 0);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
if (!hidden_props_obj->IsJSObject()) {
}
i::Handle<i::JSObject> hidden_props =
i::Handle<i::JSObject>::cast(hidden_props_obj);
- i::Handle<i::String> hash_symbol = i::Factory::identity_hash_symbol();
+ i::Handle<i::String> hash_symbol = FACTORY->identity_hash_symbol();
if (hidden_props->HasLocalProperty(*hash_symbol)) {
i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
CHECK(!hash.is_null());
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
- hash_value = i::V8::Random() & i::Smi::kMaxValue;
+ hash_value = i::V8::Random(self->GetIsolate()) & i::Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
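+ // i::V8::Random() now takes the isolate, so the RNG state is
+ // presumably kept per isolate; the hash is masked to the smi range and
+ // 0 is reserved as the "no hash yet" marker, hence the retry loop.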
bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
- ON_BAILOUT("v8::Object::SetHiddenValue()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
- ON_BAILOUT("v8::Object::GetHiddenValue()", return Local<v8::Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Object::GetHiddenValue()",
+ return Local<v8::Value>());
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
- ON_BAILOUT("v8::DeleteHiddenValue()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::DeleteHiddenValue()", return false);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
if (hidden_props->IsUndefined()) {
ExternalArrayType array_type,
int length) {
i::Handle<i::ExternalArray> array =
- i::Factory::NewExternalArray(length, array_type, data);
+ FACTORY->NewExternalArray(length, array_type, data);
// If the object already has external elements, create a new, unique
// map if the element type is now changing, because assumptions about
bool force_unique_map =
elements->map()->IsUndefined() ||
!elements->map()->has_external_array_elements() ||
- elements->map() != i::Heap::MapForExternalArrayType(array_type);
+ elements->map() != HEAP->MapForExternalArrayType(array_type);
if (force_unique_map) {
i::Handle<i::Map> external_array_map =
- i::Factory::NewExternalArrayElementsMap(
+ FACTORY->NewExternalArrayElementsMap(
i::Handle<i::Map>(object->map()));
object->set_map(*external_array_map);
}
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
- ON_BAILOUT("v8::SetElementsToPixelData()", return);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
if (!ApiCheck(length <= i::ExternalPixelArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
bool v8::Object::HasIndexedPropertiesInPixelData() {
- ON_BAILOUT("v8::HasIndexedPropertiesInPixelData()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::HasIndexedPropertiesInPixelData()",
+ return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return self->HasExternalPixelElements();
}
uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
- ON_BAILOUT("v8::GetIndexedPropertiesPixelData()", return NULL);
+ ON_BAILOUT(i::Isolate::Current(), "v8::GetIndexedPropertiesPixelData()",
+ return NULL);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasExternalPixelElements()) {
return i::ExternalPixelArray::cast(self->elements())->
int v8::Object::GetIndexedPropertiesPixelDataLength() {
- ON_BAILOUT("v8::GetIndexedPropertiesPixelDataLength()", return -1);
+ ON_BAILOUT(i::Isolate::Current(), "v8::GetIndexedPropertiesPixelDataLength()",
+ return -1);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasExternalPixelElements()) {
return i::ExternalPixelArray::cast(self->elements())->length();
void* data,
ExternalArrayType array_type,
int length) {
- ON_BAILOUT("v8::SetIndexedPropertiesToExternalArrayData()", return);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
ENTER_V8;
- HandleScope scope;
+ i::HandleScope scope(isolate);
if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"length exceeds max acceptable value")) {
bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
- ON_BAILOUT("v8::HasIndexedPropertiesInExternalArrayData()", return false);
+ ON_BAILOUT(i::Isolate::Current(),
+ "v8::HasIndexedPropertiesInExternalArrayData()",
+ return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return self->HasExternalArrayElements();
}
void* v8::Object::GetIndexedPropertiesExternalArrayData() {
- ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayData()", return NULL);
+ ON_BAILOUT(i::Isolate::Current(),
+ "v8::GetIndexedPropertiesExternalArrayData()",
+ return NULL);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->external_pointer();
ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
- ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataType()",
+ ON_BAILOUT(i::Isolate::Current(),
+ "v8::GetIndexedPropertiesExternalArrayDataType()",
return static_cast<ExternalArrayType>(-1));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
switch (self->elements()->map()->instance_type()) {
int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
- ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataLength()", return 0);
+ ON_BAILOUT(i::Isolate::Current(),
+ "v8::GetIndexedPropertiesExternalArrayDataLength()",
+ return 0);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->length();
Local<v8::Object> Function::NewInstance(int argc,
v8::Handle<v8::Value> argv[]) const {
- ON_BAILOUT("v8::Function::NewInstance()", return Local<v8::Object>());
- LOG_API("Function::NewInstance");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Function::NewInstance()",
+ return Local<v8::Object>());
+ LOG_API(isolate, "Function::NewInstance");
ENTER_V8;
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
v8::Handle<v8::Value> argv[]) {
- ON_BAILOUT("v8::Function::Call()", return Local<v8::Value>());
- LOG_API("Function::Call");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
+ LOG_API(isolate, "Function::Call");
ENTER_V8;
i::Object* raw_result = NULL;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
}
-namespace {
-
-// Tracks string usage to help make better decisions when
-// externalizing strings.
-//
-// Implementation note: internally this class only tracks fresh
-// strings and keeps a single use counter for them.
-class StringTracker {
- public:
- // Records that the given string's characters were copied to some
- // external buffer. If this happens often we should honor
- // externalization requests for the string.
- static void RecordWrite(i::Handle<i::String> string) {
- i::Address address = reinterpret_cast<i::Address>(*string);
- i::Address top = i::Heap::NewSpaceTop();
- if (IsFreshString(address, top)) {
- IncrementUseCount(top);
- }
- }
-
- // Estimates freshness and use frequency of the given string based
- // on how close it is to the new space top and the recorded usage
- // history.
- static inline bool IsFreshUnusedString(i::Handle<i::String> string) {
- i::Address address = reinterpret_cast<i::Address>(*string);
- i::Address top = i::Heap::NewSpaceTop();
- return IsFreshString(address, top) && IsUseCountLow(top);
- }
-
- private:
- static inline bool IsFreshString(i::Address string, i::Address top) {
- return top - kFreshnessLimit <= string && string <= top;
- }
-
- static inline bool IsUseCountLow(i::Address top) {
- if (last_top_ != top) return true;
- return use_count_ < kUseLimit;
- }
-
- static inline void IncrementUseCount(i::Address top) {
- if (last_top_ != top) {
- use_count_ = 0;
- last_top_ = top;
- }
- ++use_count_;
- }
-
- // How close to the new space top a fresh string has to be.
- static const int kFreshnessLimit = 1024;
-
- // The number of uses required to consider a string useful.
- static const int kUseLimit = 32;
-
- // Single use counter shared by all fresh strings.
- static int use_count_;
-
- // Last new space top when the use count above was valid.
- static i::Address last_top_;
-};
-
-int StringTracker::use_count_ = 0;
-i::Address StringTracker::last_top_ = NULL;
-
-} // namespace
-
-
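+ // The file-static StringTracker above has moved into the isolate;
+ // callers below use isolate->string_tracker()->RecordWrite() so the
+ // freshness/use-count heuristics are kept per isolate instead of in
+ // process-wide statics.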
int String::Length() const {
- if (IsDeadCheck("v8::String::Length()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::String::Length()")) return 0;
return Utils::OpenHandle(this)->length();
}
int String::Utf8Length() const {
- if (IsDeadCheck("v8::String::Utf8Length()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::String::Utf8Length()")) return 0;
return Utils::OpenHandle(this)->Utf8Length();
}
int capacity,
int* nchars_ref,
WriteHints hints) const {
- if (IsDeadCheck("v8::String::WriteUtf8()")) return 0;
- LOG_API("String::WriteUtf8");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
+ LOG_API(isolate, "String::WriteUtf8");
ENTER_V8;
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
i::Handle<i::String> str = Utils::OpenHandle(this);
- StringTracker::RecordWrite(str);
+ isolate->string_tracker()->RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
int start,
int length,
WriteHints hints) const {
- if (IsDeadCheck("v8::String::WriteAscii()")) return 0;
- LOG_API("String::WriteAscii");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
+ LOG_API(isolate, "String::WriteAscii");
ENTER_V8;
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- StringTracker::RecordWrite(str);
+ isolate->string_tracker()->RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
int start,
int length,
WriteHints hints) const {
- if (IsDeadCheck("v8::String::Write()")) return 0;
- LOG_API("String::Write");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
+ LOG_API(isolate, "String::Write");
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- StringTracker::RecordWrite(str);
+ isolate->string_tracker()->RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
double Number::Value() const {
- if (IsDeadCheck("v8::Number::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
}
bool Boolean::Value() const {
- if (IsDeadCheck("v8::Boolean::Value()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsTrue();
}
int64_t Integer::Value() const {
- if (IsDeadCheck("v8::Integer::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
int32_t Int32::Value() const {
- if (IsDeadCheck("v8::Int32::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
uint32_t Uint32::Value() const {
- if (IsDeadCheck("v8::Uint32::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
int v8::Object::InternalFieldCount() {
- if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Object::InternalFieldCount()")) {
+ return 0;
+ }
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
return obj->GetInternalFieldCount();
}
Local<Value> v8::Object::CheckedGetInternalField(int index) {
- if (IsDeadCheck("v8::Object::GetInternalField()")) return Local<Value>();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Object::GetInternalField()")) {
+ return Local<Value>();
+ }
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
if (!ApiCheck(index < obj->GetInternalFieldCount(),
"v8::Object::GetInternalField()",
void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
- if (IsDeadCheck("v8::Object::SetInternalField()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Object::SetInternalField()")) {
+ return;
+ }
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
if (!ApiCheck(index < obj->GetInternalFieldCount(),
"v8::Object::SetInternalField()",
} else {
HandleScope scope;
i::Handle<i::Proxy> proxy =
- i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+ FACTORY->NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
if (!proxy.is_null())
Utils::OpenHandle(this)->SetInternalField(index, *proxy);
}
// --- E n v i r o n m e n t ---
+
bool v8::V8::Initialize() {
- if (i::V8::IsRunning()) return true;
- HandleScope scope;
- if (i::Snapshot::Initialize()) return true;
- return i::V8::Initialize(NULL);
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ if (isolate != NULL && isolate->IsInitialized()) {
+ return true;
+ }
+ return InitializeHelper();
}
bool v8::V8::Dispose() {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
+ "v8::V8::Dispose()",
+ "Use v8::Isolate::Dispose() for a non-default isolate.")) {
+ return false;
+ }
i::V8::TearDown();
return true;
}
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- heap_statistics->set_total_heap_size(i::Heap::CommittedMemory());
+ heap_statistics->set_total_heap_size(HEAP->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
- i::Heap::CommittedMemoryExecutable());
- heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
- heap_statistics->set_heap_size_limit(i::Heap::MaxReserved());
+ HEAP->CommittedMemoryExecutable());
+ heap_statistics->set_used_heap_size(HEAP->SizeOfObjects());
+ heap_statistics->set_heap_size_limit(HEAP->MaxReserved());
}
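A usage sketch for the embedder side (variable names are ours, not from the
patch):

    v8::HeapStatistics stats;
    v8::V8::GetHeapStatistics(&stats);
    // stats.used_heap_size() / stats.total_heap_size() report bytes;
    // stats.heap_size_limit() is the reserved upper bound.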
bool v8::V8::IdleNotification() {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
- if (!i::V8::IsRunning()) return true;
+ if (!i::Isolate::Current()->IsInitialized()) return true;
return i::V8::IdleNotification();
}
void v8::V8::LowMemoryNotification() {
- if (!i::V8::IsRunning()) return;
- i::Heap::CollectAllGarbage(true);
+ if (!i::Isolate::Current()->IsInitialized()) return;
+ HEAP->CollectAllGarbage(true);
}
int v8::V8::ContextDisposedNotification() {
- if (!i::V8::IsRunning()) return 0;
- return i::Heap::NotifyContextDisposed();
+ if (!i::Isolate::Current()->IsInitialized()) return 0;
+ return HEAP->NotifyContextDisposed();
}
const char* v8::V8::GetVersion() {
- static v8::internal::EmbeddedVector<char, 128> buffer;
- v8::internal::Version::GetString(buffer);
- return buffer.start();
+ return i::Version::GetVersion();
}
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
- EnsureInitialized("v8::Context::New()");
- LOG_API("Context::New");
- ON_BAILOUT("v8::Context::New()", return Persistent<Context>());
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Context::New()");
+ LOG_API(isolate, "Context::New");
+ ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
// Enter V8 via an ENTER_V8 scope.
i::Handle<i::Context> env;
proxy_constructor->set_needs_access_check(
global_constructor->needs_access_check());
global_constructor->set_needs_access_check(false);
- global_constructor->set_access_check_info(i::Heap::undefined_value());
+ global_constructor->set_access_check_info(HEAP->undefined_value());
}
}
// Create the environment.
- env = i::Bootstrapper::CreateEnvironment(
+ env = isolate->bootstrapper()->CreateEnvironment(
Utils::OpenHandle(*global_object),
proxy_template,
extensions);
global_constructor->set_needs_access_check(
proxy_constructor->needs_access_check());
}
- i::RuntimeProfiler::Reset();
+    isolate->runtime_profiler()->Reset();
}
// Leave V8.
void v8::Context::SetSecurityToken(Handle<Value> token) {
- if (IsDeadCheck("v8::Context::SetSecurityToken()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::SetSecurityToken()")) {
+ return;
+ }
ENTER_V8;
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
void v8::Context::UseDefaultSecurityToken() {
- if (IsDeadCheck("v8::Context::UseDefaultSecurityToken()")) return;
+ if (IsDeadCheck(i::Isolate::Current(),
+ "v8::Context::UseDefaultSecurityToken()")) {
+ return;
+ }
ENTER_V8;
i::Handle<i::Context> env = Utils::OpenHandle(this);
env->set_security_token(env->global());
Handle<Value> v8::Context::GetSecurityToken() {
- if (IsDeadCheck("v8::Context::GetSecurityToken()")) return Handle<Value>();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::GetSecurityToken()")) {
+ return Handle<Value>();
+ }
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Object* security_token = env->security_token();
i::Handle<i::Object> token_handle(security_token);
bool Context::InContext() {
- return i::Top::context() != NULL;
+ return i::Isolate::Current()->context() != NULL;
}
v8::Local<v8::Context> Context::GetEntered() {
- if (IsDeadCheck("v8::Context::GetEntered()")) return Local<Context>();
- i::Handle<i::Object> last = thread_local.LastEnteredContext();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::GetEntered()")) {
+ return Local<Context>();
+ }
+ i::Handle<i::Object> last =
+ i::Isolate::Current()->handle_scope_implementer()->LastEnteredContext();
if (last.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
return Utils::ToLocal(context);
v8::Local<v8::Context> Context::GetCurrent() {
- if (IsDeadCheck("v8::Context::GetCurrent()")) return Local<Context>();
- i::Handle<i::Object> current = i::Top::global_context();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::GetCurrent()")) {
+ return Local<Context>();
+ }
+ i::Handle<i::Object> current = i::Isolate::Current()->global_context();
if (current.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
return Utils::ToLocal(context);
v8::Local<v8::Context> Context::GetCalling() {
- if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
- i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::GetCalling()")) {
+ return Local<Context>();
+ }
+ i::Handle<i::Object> calling =
+ i::Isolate::Current()->GetCallingGlobalContext();
if (calling.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
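A sketch contrasting the three accessors (assumed typical embedder usage):

    v8::Local<v8::Context> entered = v8::Context::GetEntered();  // Context::Scope
    v8::Local<v8::Context> current = v8::Context::GetCurrent();  // running now
    v8::Local<v8::Context> calling = v8::Context::GetCalling();  // JS caller's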
v8::Local<v8::Object> Context::Global() {
- if (IsDeadCheck("v8::Context::Global()")) return Local<v8::Object>();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
+ return Local<v8::Object>();
+ }
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
void Context::DetachGlobal() {
- if (IsDeadCheck("v8::Context::DetachGlobal()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::DetachGlobal()")) return;
ENTER_V8;
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Bootstrapper::DetachGlobal(context);
+  isolate->bootstrapper()->DetachGlobal(context);
}
void Context::ReattachGlobal(Handle<Object> global_object) {
- if (IsDeadCheck("v8::Context::ReattachGlobal()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
ENTER_V8;
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Bootstrapper::ReattachGlobal(context, Utils::OpenHandle(*global_object));
+ isolate->bootstrapper()->ReattachGlobal(
+ context,
+ Utils::OpenHandle(*global_object));
}
Local<v8::Object> ObjectTemplate::NewInstance() {
- ON_BAILOUT("v8::ObjectTemplate::NewInstance()", return Local<v8::Object>());
- LOG_API("ObjectTemplate::NewInstance");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
+ return Local<v8::Object>());
+ LOG_API(isolate, "ObjectTemplate::NewInstance");
ENTER_V8;
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj =
Local<v8::Function> FunctionTemplate::GetFunction() {
- ON_BAILOUT("v8::FunctionTemplate::GetFunction()",
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::FunctionTemplate::GetFunction()",
return Local<v8::Function>());
- LOG_API("FunctionTemplate::GetFunction");
+ LOG_API(isolate, "FunctionTemplate::GetFunction");
ENTER_V8;
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj =
bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
- ON_BAILOUT("v8::FunctionTemplate::HasInstanceOf()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
+ return false);
i::Object* obj = *Utils::OpenHandle(*value);
return obj->IsInstanceOf(*Utils::OpenHandle(this));
}
static Local<External> ExternalNewImpl(void* data) {
- return Utils::ToLocal(i::Factory::NewProxy(static_cast<i::Address>(data)));
+ return Utils::ToLocal(FACTORY->NewProxy(static_cast<i::Address>(data)));
}
static void* ExternalValueImpl(i::Handle<i::Object> obj) {
Local<Value> v8::External::Wrap(void* data) {
+ i::Isolate* isolate = i::Isolate::Current();
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- LOG_API("External::Wrap");
- EnsureInitialized("v8::External::Wrap()");
+ LOG_API(isolate, "External::Wrap");
+ EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
ENTER_V8;
v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
- if (IsDeadCheck("v8::External::Unwrap()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
void* result;
if (obj->IsSmi()) {
Local<External> v8::External::New(void* data) {
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- LOG_API("External::New");
- EnsureInitialized("v8::External::New()");
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "External::New");
+ EnsureInitializedForIsolate(isolate, "v8::External::New()");
ENTER_V8;
return ExternalNewImpl(data);
}
void* External::Value() const {
- if (IsDeadCheck("v8::External::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return ExternalValueImpl(obj);
}
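A round-trip sketch (the pointer is ours):

    static int payload;
    v8::Local<v8::External> ext = v8::External::New(&payload);
    void* back = ext->Value();  // == &payload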
Local<String> v8::String::Empty() {
- EnsureInitialized("v8::String::Empty()");
- LOG_API("String::Empty()");
- return Utils::ToLocal(i::Factory::empty_symbol());
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
+ LOG_API(isolate, "String::Empty()");
+ return Utils::ToLocal(isolate->factory()->empty_symbol());
}
Local<String> v8::String::New(const char* data, int length) {
- EnsureInitialized("v8::String::New()");
- LOG_API("String::New(char)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::New()");
+ LOG_API(isolate, "String::New(char)");
if (length == 0) return Empty();
ENTER_V8;
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+ isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
return Utils::ToLocal(result);
}
Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
- EnsureInitialized("v8::String::New()");
- LOG_API("String::New(char)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::New()");
+ LOG_API(isolate, "String::New(char)");
ENTER_V8;
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
-  i::Handle<i::String> result = i::Factory::NewConsString(left_string,
-                                                          right_string);
+  i::Handle<i::String> result =
+      isolate->factory()->NewConsString(left_string, right_string);
return Utils::ToLocal(result);
}
Local<String> v8::String::NewUndetectable(const char* data, int length) {
- EnsureInitialized("v8::String::NewUndetectable()");
- LOG_API("String::NewUndetectable(char)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
+ LOG_API(isolate, "String::NewUndetectable(char)");
ENTER_V8;
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+      isolate->factory()->NewStringFromUtf8(
+          i::Vector<const char>(data, length));
result->MarkAsUndetectable();
return Utils::ToLocal(result);
}
Local<String> v8::String::New(const uint16_t* data, int length) {
- EnsureInitialized("v8::String::New()");
- LOG_API("String::New(uint16_)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::New()");
+ LOG_API(isolate, "String::New(uint16_)");
if (length == 0) return Empty();
ENTER_V8;
if (length == -1) length = TwoByteStringLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+ isolate->factory()->NewStringFromTwoByte(
+ i::Vector<const uint16_t>(data, length));
return Utils::ToLocal(result);
}
Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
- EnsureInitialized("v8::String::NewUndetectable()");
- LOG_API("String::NewUndetectable(uint16_)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
+ LOG_API(isolate, "String::NewUndetectable(uint16_)");
ENTER_V8;
if (length == -1) length = TwoByteStringLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+ isolate->factory()->NewStringFromTwoByte(
+ i::Vector<const uint16_t>(data, length));
result->MarkAsUndetectable();
return Utils::ToLocal(result);
}
-i::Handle<i::String> NewExternalStringHandle(
+i::Handle<i::String> NewExternalStringHandle(i::Isolate* isolate,
v8::String::ExternalStringResource* resource) {
i::Handle<i::String> result =
- i::Factory::NewExternalStringFromTwoByte(resource);
+ isolate->factory()->NewExternalStringFromTwoByte(resource);
return result;
}
-i::Handle<i::String> NewExternalAsciiStringHandle(
+i::Handle<i::String> NewExternalAsciiStringHandle(i::Isolate* isolate,
v8::String::ExternalAsciiStringResource* resource) {
i::Handle<i::String> result =
- i::Factory::NewExternalStringFromAscii(resource);
+ isolate->factory()->NewExternalStringFromAscii(resource);
return result;
}
Local<String> v8::String::NewExternal(
v8::String::ExternalStringResource* resource) {
- EnsureInitialized("v8::String::NewExternal()");
- LOG_API("String::NewExternal");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
+ LOG_API(isolate, "String::NewExternal");
ENTER_V8;
- i::Handle<i::String> result = NewExternalStringHandle(resource);
- i::ExternalStringTable::AddString(*result);
+ i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
+ isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
- if (IsDeadCheck("v8::String::MakeExternal()")) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
- if (StringTracker::IsFreshUnusedString(obj)) return false;
+ if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
+ return false;
+ }
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- i::ExternalStringTable::AddString(*obj);
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
Local<String> v8::String::NewExternal(
v8::String::ExternalAsciiStringResource* resource) {
- EnsureInitialized("v8::String::NewExternal()");
- LOG_API("String::NewExternal");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
+ LOG_API(isolate, "String::NewExternal");
ENTER_V8;
- i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
- i::ExternalStringTable::AddString(*result);
+ i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
+ isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
bool v8::String::MakeExternal(
v8::String::ExternalAsciiStringResource* resource) {
- if (IsDeadCheck("v8::String::MakeExternal()")) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
- if (StringTracker::IsFreshUnusedString(obj)) return false;
+ if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
+ return false;
+ }
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- i::ExternalStringTable::AddString(*obj);
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
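A sketch of a matching resource class (the class is ours; V8 calls Dispose()
on the resource when the external string dies):

    class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
     public:
      explicit StaticAsciiResource(const char* s) : data_(s) {}
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return strlen(data_); }  // <string.h>
     private:
      const char* data_;
    };
    // v8::Local<v8::String> s =
    //     v8::String::NewExternal(new StaticAsciiResource("hello"));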
bool v8::String::CanMakeExternal() {
- if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
- if (StringTracker::IsFreshUnusedString(obj)) return false;
+ if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
+ return false;
+ }
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kSize)
return false;
Local<v8::Object> v8::Object::New() {
- EnsureInitialized("v8::Object::New()");
- LOG_API("Object::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Object::New()");
+ LOG_API(isolate, "Object::New");
ENTER_V8;
i::Handle<i::JSObject> obj =
- i::Factory::NewJSObject(i::Top::object_function());
+      isolate->factory()->NewJSObject(isolate->object_function());
return Utils::ToLocal(obj);
}
Local<v8::Value> v8::Date::New(double time) {
- EnsureInitialized("v8::Date::New()");
- LOG_API("Date::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Date::New()");
+ LOG_API(isolate, "Date::New");
if (isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
time = i::OS::nan_value();
double v8::Date::NumberValue() const {
- if (IsDeadCheck("v8::Date::NumberValue()")) return 0;
- LOG_API("Date::NumberValue");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
+ LOG_API(isolate, "Date::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
return jsvalue->value()->Number();
void v8::Date::DateTimeConfigurationChangeNotification() {
- ON_BAILOUT("v8::Date::DateTimeConfigurationChangeNotification()", return);
- LOG_API("Date::DateTimeConfigurationChangeNotification");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
+ return);
+ LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
ENTER_V8;
- HandleScope scope;
-
+ i::HandleScope scope(isolate);
// Get the function ResetDateCache (defined in date-delay.js).
i::Handle<i::String> func_name_str =
- i::Factory::LookupAsciiSymbol("ResetDateCache");
- i::MaybeObject* result = i::Top::builtins()->GetProperty(*func_name_str);
+ isolate->factory()->LookupAsciiSymbol("ResetDateCache");
+ i::MaybeObject* result =
+ isolate->js_builtins_object()->GetProperty(*func_name_str);
i::Object* object_func;
if (!result->ToObject(&object_func)) {
return;
  // Call ResetDateCache() but expect no exceptions.
bool caught_exception = false;
i::Handle<i::Object> result =
- i::Execution::TryCall(func, i::Top::builtins(), 0, NULL,
+ i::Execution::TryCall(func, isolate->js_builtins_object(), 0, NULL,
&caught_exception);
}
}
if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
- return i::Factory::LookupSymbol(
+ return FACTORY->LookupSymbol(
i::Vector<const char>(flags_buf, num_flags));
}
Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
Flags flags) {
- EnsureInitialized("v8::RegExp::New()");
- LOG_API("RegExp::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::RegExp::New()");
+ LOG_API(isolate, "RegExp::New");
ENTER_V8;
EXCEPTION_PREAMBLE();
i::Handle<i::JSRegExp> obj = i::Execution::NewJSRegExp(
Local<v8::String> v8::RegExp::GetSource() const {
- if (IsDeadCheck("v8::RegExp::GetSource()")) return Local<v8::String>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
+ return Local<v8::String>();
+ }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
}
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
- if (IsDeadCheck("v8::RegExp::GetFlags()")) return v8::RegExp::kNone;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
+ return v8::RegExp::kNone;
+ }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return static_cast<RegExp::Flags>(obj->GetFlags().value());
}
Local<v8::Array> v8::Array::New(int length) {
- EnsureInitialized("v8::Array::New()");
- LOG_API("Array::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Array::New()");
+ LOG_API(isolate, "Array::New");
ENTER_V8;
int real_length = length > 0 ? length : 0;
- i::Handle<i::JSArray> obj = i::Factory::NewJSArray(real_length);
- obj->set_length(*i::Factory::NewNumberFromInt(real_length));
+ i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
+ obj->set_length(*isolate->factory()->NewNumberFromInt(real_length));
return Utils::ToLocal(obj);
}
uint32_t v8::Array::Length() const {
- if (IsDeadCheck("v8::Array::Length()")) return 0;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object* length = obj->length();
if (length->IsSmi()) {
Local<Object> Array::CloneElementAt(uint32_t index) {
- ON_BAILOUT("v8::Array::CloneElementAt()", return Local<Object>());
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!self->HasFastElements()) {
return Local<Object>();
Local<String> v8::String::NewSymbol(const char* data, int length) {
- EnsureInitialized("v8::String::NewSymbol()");
- LOG_API("String::NewSymbol(char)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()");
+ LOG_API(isolate, "String::NewSymbol(char)");
ENTER_V8;
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
- i::Factory::LookupSymbol(i::Vector<const char>(data, length));
+      isolate->factory()->LookupSymbol(i::Vector<const char>(data, length));
return Utils::ToLocal(result);
}
Local<Number> v8::Number::New(double value) {
- EnsureInitialized("v8::Number::New()");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Number::New()");
if (isnan(value)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
value = i::OS::nan_value();
}
ENTER_V8;
- i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
return Utils::NumberToLocal(result);
}
Local<Integer> v8::Integer::New(int32_t value) {
- EnsureInitialized("v8::Integer::New()");
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
if (i::Smi::IsValid(value)) {
- return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value)));
+ return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
+ isolate));
}
ENTER_V8;
- i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
return Integer::New(static_cast<int32_t>(value));
}
ENTER_V8;
- i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ i::Handle<i::Object> result = FACTORY->NewNumber(value);
return Utils::IntegerToLocal(result);
}
void V8::IgnoreOutOfMemoryException() {
- thread_local.set_ignore_out_of_memory(true);
+ EnterIsolateIfNeeded()->handle_scope_implementer()->set_ignore_out_of_memory(
+ true);
}
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
- EnsureInitialized("v8::V8::AddMessageListener()");
- ON_BAILOUT("v8::V8::AddMessageListener()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
+ ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
ENTER_V8;
- HandleScope scope;
- NeanderArray listeners(i::Factory::message_listeners());
+ i::HandleScope scope(isolate);
+ NeanderArray listeners(isolate->factory()->message_listeners());
NeanderObject obj(2);
- obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
+ obj.set(0, *isolate->factory()->NewProxy(FUNCTION_ADDR(that)));
obj.set(1, data.IsEmpty() ?
- i::Heap::undefined_value() :
+      isolate->heap()->undefined_value() :
*Utils::OpenHandle(*data));
listeners.add(obj.value());
return true;
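A registration sketch (the callback body is ours):

    static void OnMessage(v8::Handle<v8::Message> message,
                          v8::Handle<v8::Value> data) {
      // e.g. forward message->Get() to the embedder's log.
    }
    // ...
    v8::V8::AddMessageListener(OnMessage);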
void V8::RemoveMessageListeners(MessageCallback that) {
- EnsureInitialized("v8::V8::RemoveMessageListener()");
- ON_BAILOUT("v8::V8::RemoveMessageListeners()", return);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::V8::RemoveMessageListener()");
+ ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
ENTER_V8;
- HandleScope scope;
- NeanderArray listeners(i::Factory::message_listeners());
+ i::HandleScope scope(isolate);
+ NeanderArray listeners(isolate->factory()->message_listeners());
for (int i = 0; i < listeners.length(); i++) {
if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
NeanderObject listener(i::JSObject::cast(listeners.get(i)));
i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
- listeners.set(i, i::Heap::undefined_value());
+      listeners.set(i, isolate->heap()->undefined_value());
}
}
}
bool capture,
int frame_limit,
StackTrace::StackTraceOptions options) {
- i::Top::SetCaptureStackTraceForUncaughtExceptions(
+ i::Isolate::Current()->SetCaptureStackTraceForUncaughtExceptions(
capture,
frame_limit,
options);
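A typical call, assuming the embedder wants detailed traces capped at ten
frames:

    v8::V8::SetCaptureStackTraceForUncaughtExceptions(
        true, 10, v8::StackTrace::kDetailed);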
void V8::SetCounterFunction(CounterLookupCallback callback) {
- if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
- i::StatsTable::SetCounterFunction(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
+ isolate->stats_table()->SetCounterFunction(callback);
}
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
- if (IsDeadCheck("v8::V8::SetCreateHistogramFunction()")) return;
- i::StatsTable::SetCreateHistogramFunction(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
+ isolate->stats_table()->SetCreateHistogramFunction(callback);
}
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
- if (IsDeadCheck("v8::V8::SetAddHistogramSampleFunction()")) return;
- i::StatsTable::SetAddHistogramSampleFunction(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
+  isolate->stats_table()->SetAddHistogramSampleFunction(callback);
}
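A counters hookup sketch; V8 only needs a stable int* per counter name (a real
embedder would key a map on the name):

    static int* LookupCounter(const char* name) {
      static int placeholder;  // ours: one slot shared by every name
      return &placeholder;
    }
    // ...
    v8::V8::SetCounterFunction(LookupCounter);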
void V8::EnableSlidingStateWindow() {
- if (IsDeadCheck("v8::V8::EnableSlidingStateWindow()")) return;
- i::Logger::EnableSlidingStateWindow();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
+ isolate->logger()->EnableSlidingStateWindow();
}
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
- if (IsDeadCheck("v8::V8::SetFailedAccessCheckCallbackFunction()")) return;
- i::Top::SetFailedAccessCheckCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
+ return;
+ }
+ isolate->SetFailedAccessCheckCallback(callback);
}
-
void V8::AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
- if (IsDeadCheck("v8::V8::AddObjectGroup()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- i::GlobalHandles::AddObjectGroup(
+ isolate->global_handles()->AddObjectGroup(
reinterpret_cast<i::Object***>(objects), length, info);
}
void V8::AddImplicitReferences(Persistent<Object> parent,
Persistent<Value>* children,
size_t length) {
- if (IsDeadCheck("v8::V8::AddImplicitReferences()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- i::GlobalHandles::AddImplicitReferences(
+ isolate->global_handles()->AddImplicitReferences(
*Utils::OpenHandle(*parent),
reinterpret_cast<i::Object***>(children), length);
}
int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
- if (IsDeadCheck("v8::V8::AdjustAmountOfExternalAllocatedMemory()")) return 0;
- return i::Heap::AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+ return 0;
+ }
+ return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
+ change_in_bytes);
}
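The usual pairing, so externally held memory is visible to the GC heuristics
(`buffer_size` is ours):

    v8::V8::AdjustAmountOfExternalAllocatedMemory(buffer_size);   // on allocation
    v8::V8::AdjustAmountOfExternalAllocatedMemory(-buffer_size);  // on release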
void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- if (IsDeadCheck("v8::V8::SetGlobalGCPrologueCallback()")) return;
- i::Heap::SetGlobalGCPrologueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
+ isolate->heap()->SetGlobalGCPrologueCallback(callback);
}
void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- if (IsDeadCheck("v8::V8::SetGlobalGCEpilogueCallback()")) return;
- i::Heap::SetGlobalGCEpilogueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
+ isolate->heap()->SetGlobalGCEpilogueCallback(callback);
}
void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
- if (IsDeadCheck("v8::V8::AddGCPrologueCallback()")) return;
- i::Heap::AddGCPrologueCallback(callback, gc_type);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
+ isolate->heap()->AddGCPrologueCallback(callback, gc_type);
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
- if (IsDeadCheck("v8::V8::RemoveGCPrologueCallback()")) return;
- i::Heap::RemoveGCPrologueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
+ isolate->heap()->RemoveGCPrologueCallback(callback);
}
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
- if (IsDeadCheck("v8::V8::AddGCEpilogueCallback()")) return;
- i::Heap::AddGCEpilogueCallback(callback, gc_type);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
+ isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
- if (IsDeadCheck("v8::V8::RemoveGCEpilogueCallback()")) return;
- i::Heap::RemoveGCEpilogueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
+ isolate->heap()->RemoveGCEpilogueCallback(callback);
}
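A prologue-callback sketch (the body is ours):

    static void OnGCStart(v8::GCType type, v8::GCCallbackFlags flags) {
      // e.g. record the start time of the collection.
    }
    // ...
    v8::V8::AddGCPrologueCallback(OnGCStart, v8::kGCTypeAll);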
void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action) {
- if (IsDeadCheck("v8::V8::AddMemoryAllocationCallback()")) return;
- i::MemoryAllocator::AddMemoryAllocationCallback(callback,
- space,
- action);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
+ isolate->memory_allocator()->AddMemoryAllocationCallback(
+ callback, space, action);
}
void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
- if (IsDeadCheck("v8::V8::RemoveMemoryAllocationCallback()")) return;
- i::MemoryAllocator::RemoveMemoryAllocationCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
+  isolate->memory_allocator()->RemoveMemoryAllocationCallback(callback);
}
bool V8::IsProfilerPaused() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- return i::Logger::GetActiveProfilerModules() & PROFILER_MODULE_CPU;
+ return LOGGER->GetActiveProfilerModules() & PROFILER_MODULE_CPU;
#else
return true;
#endif
// snapshot.
// Make a GC prior to taking a snapshot.
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
// Reset snapshot flag and CPU module flags.
flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
- const int current_flags = i::Logger::GetActiveProfilerModules();
- i::Logger::ResumeProfiler(flags, tag);
- i::Heap::CollectAllGarbage(false);
- i::Logger::PauseProfiler(~current_flags & flags, tag);
+ const int current_flags = LOGGER->GetActiveProfilerModules();
+ LOGGER->ResumeProfiler(flags, tag);
+ HEAP->CollectAllGarbage(false);
+ LOGGER->PauseProfiler(~current_flags & flags, tag);
} else {
- i::Logger::ResumeProfiler(flags, tag);
+ LOGGER->ResumeProfiler(flags, tag);
}
#endif
}
void V8::PauseProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler(flags, tag);
+ LOGGER->PauseProfiler(flags, tag);
#endif
}
int V8::GetActiveProfilerModules() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- return i::Logger::GetActiveProfilerModules();
+ return LOGGER->GetActiveProfilerModules();
#else
return PROFILER_MODULE_NONE;
#endif
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
- return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
+ return LOGGER->GetLogLines(from_pos, dest_buf, max_size);
#endif
return 0;
}
int V8::GetCurrentThreadId() {
API_ENTRY_CHECK("V8::GetCurrentThreadId()");
EnsureInitialized("V8::GetCurrentThreadId()");
- return i::Top::thread_id();
+ return i::Isolate::Current()->thread_id();
}
void V8::TerminateExecution(int thread_id) {
- if (!i::V8::IsRunning()) return;
+  i::Isolate* isolate = i::Isolate::Current();
+  if (!isolate->IsInitialized()) return;
  API_ENTRY_CHECK("V8::TerminateExecution()");
// If the thread_id identifies the current thread just terminate
// execution right away. Otherwise, ask the thread manager to
// terminate the thread with the given id if any.
- if (thread_id == i::Top::thread_id()) {
- i::StackGuard::TerminateExecution();
+ if (thread_id == isolate->thread_id()) {
+ isolate->stack_guard()->TerminateExecution();
} else {
- i::ThreadManager::TerminateExecution(thread_id);
+ isolate->thread_manager()->TerminateExecution(thread_id);
}
}
-void V8::TerminateExecution() {
- if (!i::V8::IsRunning()) return;
- i::StackGuard::TerminateExecution();
+void V8::TerminateExecution(Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->TerminateExecution();
+ } else {
+ i::Isolate::GetDefaultIsolateStackGuard()->TerminateExecution();
+ }
}
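Callable from any thread; passing NULL (or nothing) targets the default
isolate (`worker_isolate` is ours):

    v8::V8::TerminateExecution(worker_isolate);  // a specific isolate
    v8::V8::TerminateExecution();                // the default isolate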
bool V8::IsExecutionTerminating() {
- if (!i::V8::IsRunning()) return false;
- if (i::Top::has_scheduled_exception()) {
- return i::Top::scheduled_exception() == i::Heap::termination_exception();
+  i::Isolate* isolate = i::Isolate::Current();
+  if (!isolate->IsInitialized()) return false;
+  if (isolate->has_scheduled_exception()) {
+    return isolate->scheduled_exception() ==
+        isolate->heap()->termination_exception();
}
return false;
}
+Isolate* Isolate::GetCurrent() {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ return reinterpret_cast<Isolate*>(isolate);
+}
+
+
+Isolate* Isolate::New() {
+ i::Isolate* isolate = new i::Isolate();
+ return reinterpret_cast<Isolate*>(isolate);
+}
+
+
+void Isolate::Dispose() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ if (!ApiCheck(!isolate->IsInUse(),
+ "v8::Isolate::Dispose()",
+ "Disposing the isolate that is entered by a thread.")) {
+ return;
+ }
+ isolate->TearDown();
+}
+
+
+void Isolate::Enter() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->Enter();
+}
+
+
+void Isolate::Exit() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->Exit();
+}
+
+
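A lifecycle sketch for the embedder-visible API defined above:

    v8::Isolate* isolate = v8::Isolate::New();
    isolate->Enter();    // at most one thread may be inside at a time
    // ... create contexts, compile and run scripts ...
    isolate->Exit();
    isolate->Dispose();  // only when no thread has the isolate entered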
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
EnsureInitialized("v8::String::Utf8Value::Utf8Value()");
if (obj.IsEmpty()) {
}
Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
- LOG_API("RangeError");
- ON_BAILOUT("v8::Exception::RangeError()", return Local<Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "RangeError");
+ ON_BAILOUT(isolate, "v8::Exception::RangeError()", return Local<Value>());
ENTER_V8;
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewRangeError(message);
+    i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
error = *result;
}
i::Handle<i::Object> result(error);
}
Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
- LOG_API("ReferenceError");
- ON_BAILOUT("v8::Exception::ReferenceError()", return Local<Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "ReferenceError");
+ ON_BAILOUT(isolate, "v8::Exception::ReferenceError()", return Local<Value>());
ENTER_V8;
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewReferenceError(message);
+    i::Handle<i::Object> result =
+        isolate->factory()->NewReferenceError(message);
error = *result;
}
i::Handle<i::Object> result(error);
}
Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
- LOG_API("SyntaxError");
- ON_BAILOUT("v8::Exception::SyntaxError()", return Local<Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "SyntaxError");
+ ON_BAILOUT(isolate, "v8::Exception::SyntaxError()", return Local<Value>());
ENTER_V8;
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewSyntaxError(message);
+    i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
error = *result;
}
i::Handle<i::Object> result(error);
}
Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
- LOG_API("TypeError");
- ON_BAILOUT("v8::Exception::TypeError()", return Local<Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "TypeError");
+ ON_BAILOUT(isolate, "v8::Exception::TypeError()", return Local<Value>());
ENTER_V8;
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewTypeError(message);
+    i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
error = *result;
}
i::Handle<i::Object> result(error);
}
Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
- LOG_API("Error");
- ON_BAILOUT("v8::Exception::Error()", return Local<Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "Error");
+ ON_BAILOUT(isolate, "v8::Exception::Error()", return Local<Value>());
ENTER_V8;
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewError(message);
+    i::Handle<i::Object> result = isolate->factory()->NewError(message);
error = *result;
}
i::Handle<i::Object> result(error);
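These factories only construct the error value; raising it is a separate step:

    return v8::ThrowException(
        v8::Exception::TypeError(v8::String::New("bad argument")));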
#ifdef ENABLE_DEBUGGER_SUPPORT
-static v8::Debug::EventCallback event_callback = NULL;
-
static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
- if (event_callback) {
- event_callback(event_details.GetEvent(),
- event_details.GetExecutionState(),
- event_details.GetEventData(),
- event_details.GetCallbackData());
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->debug_event_callback() != NULL) {
+ isolate->debug_event_callback()(event_details.GetEvent(),
+ event_details.GetExecutionState(),
+ event_details.GetEventData(),
+ event_details.GetCallbackData());
}
}
bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
- EnsureInitialized("v8::Debug::SetDebugEventListener()");
- ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
+ ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8;
- event_callback = that;
+ isolate->set_debug_event_callback(that);
- HandleScope scope;
- i::Handle<i::Object> proxy = i::Factory::undefined_value();
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
if (that != NULL) {
- proxy = i::Factory::NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
+ proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
}
- i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(proxy, Utils::OpenHandle(*data));
return true;
}
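A listener sketch for the function-pointer variant (the body is ours):

    static void OnDebugEvent(v8::DebugEvent event,
                             v8::Handle<v8::Object> exec_state,
                             v8::Handle<v8::Object> event_data,
                             v8::Handle<v8::Value> data) {
      if (event == v8::Break) { /* inspect exec_state */ }
    }
    // ...
    v8::Debug::SetDebugEventListener(OnDebugEvent);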
bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
- EnsureInitialized("v8::Debug::SetDebugEventListener2()");
- ON_BAILOUT("v8::Debug::SetDebugEventListener2()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
+ ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false);
ENTER_V8;
- HandleScope scope;
- i::Handle<i::Object> proxy = i::Factory::undefined_value();
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
if (that != NULL) {
- proxy = i::Factory::NewProxy(FUNCTION_ADDR(that));
+ proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(that));
}
- i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
+  isolate->debugger()->SetEventListener(proxy, Utils::OpenHandle(*data));
return true;
}
bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
Handle<Value> data) {
- ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8;
- i::Debugger::SetEventListener(Utils::OpenHandle(*that),
- Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
+ Utils::OpenHandle(*data));
return true;
}
-void Debug::DebugBreak() {
- if (!i::V8::IsRunning()) return;
- i::StackGuard::DebugBreak();
+void Debug::DebugBreak(Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->DebugBreak();
+ } else {
+ i::Isolate::GetDefaultIsolateStackGuard()->DebugBreak();
+ }
}
-void Debug::CancelDebugBreak() {
- i::StackGuard::Continue(i::DEBUGBREAK);
+void Debug::CancelDebugBreak(Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->stack_guard()->Continue(i::DEBUGBREAK);
+ } else {
+ i::Isolate::GetDefaultIsolateStackGuard()->Continue(i::DEBUGBREAK);
+ }
}
-void Debug::DebugBreakForCommand(ClientData* data) {
- if (!i::V8::IsRunning()) return;
- i::Debugger::EnqueueDebugCommand(data);
+void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debugger()->EnqueueDebugCommand(data);
+ } else {
+ i::Isolate::GetDefaultIsolateDebugger()->EnqueueDebugCommand(data);
+ }
}
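With the new parameter a watchdog thread can target a specific isolate
(`worker_isolate` is ours):

    v8::Debug::DebugBreak(worker_isolate);
    // v8::Debug::CancelDebugBreak(worker_isolate) revokes it while pending.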
-static v8::Debug::MessageHandler message_handler = NULL;
-
static void MessageHandlerWrapper(const v8::Debug::Message& message) {
- if (message_handler) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->message_handler()) {
v8::String::Value json(message.GetJSON());
- message_handler(*json, json.length(), message.GetClientData());
+    (isolate->message_handler())(*json, json.length(),
+                                 message.GetClientData());
}
}
bool message_handler_thread) {
EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
  // The message handler thread is not supported anymore. The parameter is
  // temporarily left in the API for client compatibility reasons.
CHECK(!message_handler_thread);
// TODO(sgjesse) support the old message handler API through a simple wrapper.
- message_handler = handler;
- if (message_handler != NULL) {
- i::Debugger::SetMessageHandler(MessageHandlerWrapper);
+ isolate->set_message_handler(handler);
+ if (handler != NULL) {
+    isolate->debugger()->SetMessageHandler(MessageHandlerWrapper);
} else {
- i::Debugger::SetMessageHandler(NULL);
+    isolate->debugger()->SetMessageHandler(NULL);
}
}
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8;
- i::Debugger::SetMessageHandler(handler);
+ i::Isolate::Current()->debugger()->SetMessageHandler(handler);
}
void Debug::SendCommand(const uint16_t* command, int length,
ClientData* client_data) {
- if (!i::V8::IsRunning()) return;
- i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length),
- client_data);
+  i::Isolate* isolate = i::Isolate::Current();
+  if (!isolate->IsInitialized()) return;
+  isolate->debugger()->ProcessCommand(
+      i::Vector<const uint16_t>(command, length), client_data);
}
int period) {
EnsureInitialized("v8::Debug::SetHostDispatchHandler");
ENTER_V8;
- i::Debugger::SetHostDispatchHandler(handler, period);
+ i::Isolate::Current()->debugger()->SetHostDispatchHandler(handler, period);
}
DebugMessageDispatchHandler handler, bool provide_locker) {
EnsureInitialized("v8::Debug::SetDebugMessageDispatchHandler");
ENTER_V8;
- i::Debugger::SetDebugMessageDispatchHandler(handler, provide_locker);
+ i::Isolate::Current()->debugger()->SetDebugMessageDispatchHandler(
+ handler, provide_locker);
}
Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
v8::Handle<v8::Value> data) {
- if (!i::V8::IsRunning()) return Local<Value>();
- ON_BAILOUT("v8::Debug::Call()", return Local<Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return Local<Value>();
+ ON_BAILOUT(isolate, "v8::Debug::Call()", return Local<Value>());
ENTER_V8;
i::Handle<i::Object> result;
EXCEPTION_PREAMBLE();
if (data.IsEmpty()) {
- result = i::Debugger::Call(Utils::OpenHandle(*fun),
- i::Factory::undefined_value(),
- &has_pending_exception);
+    result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
+                                       isolate->factory()->undefined_value(),
+                                       &has_pending_exception);
} else {
- result = i::Debugger::Call(Utils::OpenHandle(*fun),
- Utils::OpenHandle(*data),
- &has_pending_exception);
+    result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
+                                       Utils::OpenHandle(*data),
+                                       &has_pending_exception);
}
EXCEPTION_BAILOUT_CHECK(Local<Value>());
return Utils::ToLocal(result);
Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
- if (!i::V8::IsRunning()) return Local<Value>();
- ON_BAILOUT("v8::Debug::GetMirror()", return Local<Value>());
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return Local<Value>();
+ ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
ENTER_V8;
v8::HandleScope scope;
- i::Debug::Load();
- i::Handle<i::JSObject> debug(i::Debug::debug_context()->global());
- i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("MakeMirror");
+  i::Debug* isolate_debug = isolate->debug();
+  isolate_debug->Load();
+  i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global());
+  i::Handle<i::String> name =
+      isolate->factory()->LookupAsciiSymbol("MakeMirror");
i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
- return i::Debugger::StartAgent(name, port, wait_for_connection);
+ return i::Isolate::Current()->debugger()->StartAgent(name, port,
+ wait_for_connection);
}
void Debug::ProcessDebugMessages() {
Local<Context> Debug::GetDebugContext() {
EnsureInitialized("v8::Debug::GetDebugContext()");
ENTER_V8;
- return Utils::ToLocal(i::Debugger::GetDebugContext());
+ return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext());
}
#endif // ENABLE_DEBUGGER_SUPPORT
#ifdef ENABLE_LOGGING_AND_PROFILING
Handle<String> CpuProfileNode::GetFunctionName() const {
- IsDeadCheck("v8::CpuProfileNode::GetFunctionName");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
return Handle<String>(ToApi<String>(
- i::Factory::LookupAsciiSymbol(entry->name())));
+ isolate->factory()->LookupAsciiSymbol(entry->name())));
} else {
- return Handle<String>(ToApi<String>(i::Factory::NewConsString(
- i::Factory::LookupAsciiSymbol(entry->name_prefix()),
- i::Factory::LookupAsciiSymbol(entry->name()))));
+ return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
+ isolate->factory()->LookupAsciiSymbol(entry->name_prefix()),
+ isolate->factory()->LookupAsciiSymbol(entry->name()))));
}
}
Handle<String> CpuProfileNode::GetScriptResourceName() const {
- IsDeadCheck("v8::CpuProfileNode::GetScriptResourceName");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+  return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
node->entry()->resource_name())));
}
int CpuProfileNode::GetLineNumber() const {
- IsDeadCheck("v8::CpuProfileNode::GetLineNumber");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
}
double CpuProfileNode::GetTotalTime() const {
- IsDeadCheck("v8::CpuProfileNode::GetTotalTime");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
}
double CpuProfileNode::GetSelfTime() const {
- IsDeadCheck("v8::CpuProfileNode::GetSelfTime");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
}
double CpuProfileNode::GetTotalSamplesCount() const {
- IsDeadCheck("v8::CpuProfileNode::GetTotalSamplesCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
}
double CpuProfileNode::GetSelfSamplesCount() const {
- IsDeadCheck("v8::CpuProfileNode::GetSelfSamplesCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
}
unsigned CpuProfileNode::GetCallUid() const {
- IsDeadCheck("v8::CpuProfileNode::GetCallUid");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
}
int CpuProfileNode::GetChildrenCount() const {
- IsDeadCheck("v8::CpuProfileNode::GetChildrenCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
}
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
- IsDeadCheck("v8::CpuProfileNode::GetChild");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
return reinterpret_cast<const CpuProfileNode*>(child);
unsigned CpuProfile::GetUid() const {
- IsDeadCheck("v8::CpuProfile::GetUid");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
return reinterpret_cast<const i::CpuProfile*>(this)->uid();
}
Handle<String> CpuProfile::GetTitle() const {
- IsDeadCheck("v8::CpuProfile::GetTitle");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+  return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
profile->title())));
}
const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
- IsDeadCheck("v8::CpuProfile::GetBottomUpRoot");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
}
const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
- IsDeadCheck("v8::CpuProfile::GetTopDownRoot");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
}
int CpuProfiler::GetProfilesCount() {
- IsDeadCheck("v8::CpuProfiler::GetProfilesCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
return i::CpuProfiler::GetProfilesCount();
}
const CpuProfile* CpuProfiler::GetProfile(int index,
Handle<Value> security_token) {
- IsDeadCheck("v8::CpuProfiler::GetProfile");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::GetProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
Handle<Value> security_token) {
- IsDeadCheck("v8::CpuProfiler::FindProfile");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::FindProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
void CpuProfiler::StartProfiling(Handle<String> title) {
- IsDeadCheck("v8::CpuProfiler::StartProfiling");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
}
const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
Handle<Value> security_token) {
- IsDeadCheck("v8::CpuProfiler::StopProfiling");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::StopProfiling(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
}
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
- IsDeadCheck("v8::HeapGraphEdge::GetType");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
}
Handle<Value> HeapGraphEdge::GetName() const {
- IsDeadCheck("v8::HeapGraphEdge::GetName");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
i::HeapGraphEdge* edge = ToInternal(this);
switch (edge->type()) {
case i::HeapGraphEdge::kContextVariable:
case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::kProperty:
case i::HeapGraphEdge::kShortcut:
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(FACTORY->LookupAsciiSymbol(
edge->name())));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
- return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
+ return Handle<Number>(ToApi<Number>(FACTORY->NewNumberFromInt(
edge->index())));
default: UNREACHABLE();
}
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
- IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = ToInternal(this)->From();
return reinterpret_cast<const HeapGraphNode*>(from);
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
- IsDeadCheck("v8::HeapGraphEdge::GetToNode");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
}
HeapGraphNode::Type HeapGraphNode::GetType() const {
- IsDeadCheck("v8::HeapGraphNode::GetType");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
}
Handle<String> HeapGraphNode::GetName() const {
- IsDeadCheck("v8::HeapGraphNode::GetName");
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
+ return Handle<String>(ToApi<String>(FACTORY->LookupAsciiSymbol(
ToInternal(this)->name())));
}
uint64_t HeapGraphNode::GetId() const {
- IsDeadCheck("v8::HeapGraphNode::GetId");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
return ToInternal(this)->id();
}
int HeapGraphNode::GetInstancesCount() const {
- IsDeadCheck("v8::HeapGraphNode::GetInstancesCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetInstancesCount");
ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
return static_cast<int>(ToInternal(this)->id());
}
int HeapGraphNode::GetSelfSize() const {
- IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
}
int HeapGraphNode::GetRetainedSize(bool exact) const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainedSize");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
return ToInternal(this)->RetainedSize(exact);
}
int HeapGraphNode::GetChildrenCount() const {
- IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
return ToInternal(this)->children().length();
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- IsDeadCheck("v8::HeapSnapshot::GetChild");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
&ToInternal(this)->children()[index]);
}
int HeapGraphNode::GetRetainersCount() const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
return ToInternal(this)->retainers().length();
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainer");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->retainers()[index]);
}
int HeapGraphNode::GetRetainingPathsCount() const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainingPathsCount");
return ToInternal(this)->GetRetainingPaths()->length();
}
const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainingPath");
return reinterpret_cast<const HeapGraphPath*>(
ToInternal(this)->GetRetainingPaths()->at(index));
}
const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
- IsDeadCheck("v8::HeapSnapshot::GetDominatorNode");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
}
const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
- IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshotsDiff::GetAdditionsRoot");
i::HeapSnapshotsDiff* diff =
const_cast<i::HeapSnapshotsDiff*>(
reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
- IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshotsDiff::GetDeletionsRoot");
i::HeapSnapshotsDiff* diff =
const_cast<i::HeapSnapshotsDiff*>(
reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
HeapSnapshot::Type HeapSnapshot::GetType() const {
- IsDeadCheck("v8::HeapSnapshot::GetType");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
}
unsigned HeapSnapshot::GetUid() const {
- IsDeadCheck("v8::HeapSnapshot::GetUid");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
}
Handle<String> HeapSnapshot::GetTitle() const {
- IsDeadCheck("v8::HeapSnapshot::GetTitle");
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
+ return Handle<String>(ToApi<String>(FACTORY->LookupAsciiSymbol(
ToInternal(this)->title())));
}
const HeapGraphNode* HeapSnapshot::GetRoot() const {
- IsDeadCheck("v8::HeapSnapshot::GetHead");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
}
const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
- IsDeadCheck("v8::HeapSnapshot::GetNodeById");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
}
const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
const HeapSnapshot* snapshot) const {
- IsDeadCheck("v8::HeapSnapshot::CompareWith");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::CompareWith");
return reinterpret_cast<const HeapSnapshotsDiff*>(
ToInternal(this)->CompareWith(ToInternal(snapshot)));
}
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- IsDeadCheck("v8::HeapSnapshot::Serialize");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
ApiCheck(format == kJSON,
"v8::HeapSnapshot::Serialize",
"Unknown serialization format");
int HeapProfiler::GetSnapshotsCount() {
- IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();
}
const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- IsDeadCheck("v8::HeapProfiler::GetSnapshot");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::GetSnapshot(index));
}
const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- IsDeadCheck("v8::HeapProfiler::FindSnapshot");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::FindSnapshot(uid));
}
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
ActivityControl* control) {
- IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
switch (type) {
case HeapSnapshot::kFull:
void HeapProfiler::DefineWrapperClass(uint16_t class_id,
WrapperInfoCallback callback) {
- i::HeapProfiler::DefineWrapperClass(class_id, callback);
+ i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
+ callback);
}
#endif // ENABLE_LOGGING_AND_PROFILING
namespace internal {
-HandleScopeImplementer* HandleScopeImplementer::instance() {
- return &thread_local;
-}
-
-
void HandleScopeImplementer::FreeThreadResources() {
- thread_local.Free();
+ Free();
}
char* HandleScopeImplementer::ArchiveThread(char* storage) {
- return thread_local.ArchiveThreadHelper(storage);
-}
-
-
-char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
+ Isolate* isolate = Isolate::Current();
v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
+ isolate->handle_scope_data();
handle_scope_data_ = *current;
memcpy(storage, this, sizeof(*this));
int HandleScopeImplementer::ArchiveSpacePerThread() {
- return sizeof(thread_local);
+ return sizeof(HandleScopeImplementer);
}
char* HandleScopeImplementer::RestoreThread(char* storage) {
- return thread_local.RestoreThreadHelper(storage);
-}
-
-
-char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
memcpy(this, storage, sizeof(*this));
- *v8::ImplementationUtilities::CurrentHandleScope() = handle_scope_data_;
+ *Isolate::Current()->handle_scope_data() = handle_scope_data_;
return storage + ArchiveSpacePerThread();
}
void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
- thread_local.handle_scope_data_ = *current;
- thread_local.IterateThis(v);
+ Isolate::Current()->handle_scope_data();
+ handle_scope_data_ = *current;
+ IterateThis(v);
}
template <typename T>
static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return v8::internal::Factory::NewProxy(
+ return FACTORY->NewProxy(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
RegisteredExtension* next_auto_;
ExtensionTraversalState state_;
static RegisteredExtension* first_extension_;
- static RegisteredExtension* first_auto_extension_;
};
namespace internal {
+// Tracks string usage to help make better decisions when
+// externalizing strings.
+//
+// Implementation note: internally this class only tracks fresh
+// strings and keeps a single use counter for them.
+class StringTracker {
+ public:
+ // Records that the given string's characters were copied to some
+ // external buffer. If this happens often we should honor
+ // externalization requests for the string.
+ void RecordWrite(Handle<String> string) {
+ Address address = reinterpret_cast<Address>(*string);
+ Address top = isolate_->heap()->NewSpaceTop();
+ if (IsFreshString(address, top)) {
+ IncrementUseCount(top);
+ }
+ }
+
+ // Estimates freshness and use frequency of the given string based
+ // on how close it is to the new space top and the recorded usage
+ // history.
+ inline bool IsFreshUnusedString(Handle<String> string) {
+ Address address = reinterpret_cast<Address>(*string);
+ Address top = isolate_->heap()->NewSpaceTop();
+ return IsFreshString(address, top) && IsUseCountLow(top);
+ }
+
+ private:
+ StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
+
+ static inline bool IsFreshString(Address string, Address top) {
+ return top - kFreshnessLimit <= string && string <= top;
+ }
+
+ inline bool IsUseCountLow(Address top) {
+ if (last_top_ != top) return true;
+ return use_count_ < kUseLimit;
+ }
+
+ inline void IncrementUseCount(Address top) {
+ if (last_top_ != top) {
+ use_count_ = 0;
+ last_top_ = top;
+ }
+ ++use_count_;
+ }
+
+ // Single use counter shared by all fresh strings.
+ int use_count_;
+
+ // Last new space top when the use count above was valid.
+ Address last_top_;
+
+ Isolate* isolate_;
+
+  // How close to the new space top a string must be to count as fresh.
+ static const int kFreshnessLimit = 1024;
+
+ // The number of uses required to consider a string useful.
+ static const int kUseLimit = 32;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(StringTracker);
+};
+
+
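A plausible consumer of this heuristic is the string externalization path in
api.cc. A minimal sketch, assuming the isolate exposes the tracker through a
string_tracker() accessor (both the accessor name and the call site here are
illustrative):

  bool v8::String::CanMakeExternal() {
    i::Handle<i::String> str = Utils::OpenHandle(this);
    // A fresh, rarely used string is cheaper to copy once more than to
    // carry external-string bookkeeping for the rest of its lifetime.
    if (i::Isolate::Current()->string_tracker()->IsFreshUnusedString(str)) {
      return false;
    }
    return true;
  }
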
// This class is here in order to be able to declare it a friend of
// HandleScope. Moving these methods to be members of HandleScope would be
-// neat in some ways, but it would expose external implementation details in
+// neat in some ways, but it would expose internal implementation details in
// our public header file, which is undesirable.
//
-// There is a singleton instance of this class to hold the per-thread data.
-// For multithreaded V8 programs this data is copied in and out of storage
+// An isolate has a single instance of this class to hold the current thread's
+// data. In multithreaded V8 programs this data is copied in and out of storage
// so that the currently executing thread always has its own copy of this
// data.
-class HandleScopeImplementer {
+ISOLATED_CLASS HandleScopeImplementer {
public:
HandleScopeImplementer()
ignore_out_of_memory_(false),
call_depth_(0) { }
- static HandleScopeImplementer* instance();
-
// Threading support for handle data.
static int ArchiveSpacePerThread();
- static char* RestoreThread(char* from);
- static char* ArchiveThread(char* to);
- static void FreeThreadResources();
+ char* RestoreThread(char* from);
+ char* ArchiveThread(char* to);
+ void FreeThreadResources();
// Garbage collection support.
- static void Iterate(v8::internal::ObjectVisitor* v);
+ void Iterate(v8::internal::ObjectVisitor* v);
static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
// to access the HandleScope data.
typedef v8::HandleScope::Data HandleScopeData;
- static HandleScopeData* CurrentHandleScope();
-
#ifdef DEBUG
static void ZapHandleRange(internal::Object** begin, internal::Object** end);
#endif
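
The threading hooks above pair up around a thread switch. A rough sketch of
the round trip, assuming the isolate exposes the implementer through a
handle_scope_implementer() accessor (the accessor name is an assumption):

  // Park this thread's handle scope state in 'storage'...
  void SwitchOut(Isolate* isolate, char* storage) {
    isolate->handle_scope_implementer()->ArchiveThread(storage);
  }
  // ...and revive it on the thread that picks the state back up.
  void SwitchIn(Isolate* isolate, char* storage) {
    isolate->handle_scope_implementer()->RestoreThread(storage);
  }
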
int length() const { return length_; }
Object** arguments() { return arguments_; }
-
private:
int length_;
Object** arguments_;
// can.
class CustomArguments : public Relocatable {
public:
- inline CustomArguments(Object* data,
+ inline CustomArguments(Isolate* isolate,
+ Object* data,
Object* self,
- JSObject* holder) {
+ JSObject* holder) : Relocatable(isolate) {
values_[2] = self;
values_[1] = holder;
values_[0] = data;
}
- inline CustomArguments() {
+ inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
#ifdef DEBUG
for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
values_[i] = reinterpret_cast<Object*>(kZapValue);
Object* values_[3];
};
+#define RUNTIME_CALLING_CONVENTION Arguments args, Isolate* isolate
+#define RUNTIME_GET_ISOLATE ASSERT(isolate == Isolate::Current())
} } // namespace v8::internal
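
Taken together, the two macros let a runtime function receive the isolate as
an explicit parameter while asserting that it matches the thread-local current
isolate. A representative (hypothetical) runtime entry:

  static MaybeObject* Runtime_Illustration(RUNTIME_CALLING_CONVENTION) {
    RUNTIME_GET_ISOLATE;  // ASSERT(isolate == Isolate::Current())
    // From here on the explicit isolate replaces the old static accessors.
    return isolate->heap()->undefined_value();
  }
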
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
template<typename StaticVisitor>
-void RelocInfo::Visit() {
+void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(target_object_address());
+ StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
+ } else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
namespace v8 {
namespace internal {
-// Safe default is no features.
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::enabled_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
+CpuFeatures::CpuFeatures()
+ : supported_(0),
+ enabled_(0),
+ found_by_runtime_probing_(0) {
+}
#ifdef __arm__
static uint64_t CpuFeaturesImpliedByCompiler() {
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
allow_peephole_optimization_(false),
emit_debug_code_(FLAG_debug_code) {
+ Isolate* isolate = Isolate::Current();
allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate->assembler_spare_buffer() != NULL) {
+ buffer = isolate->assembler_spare_buffer();
+ isolate->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
Assembler::~Assembler() {
+ Isolate* isolate = Isolate::Current();
ASSERT(const_pool_blocked_nesting_ == 0);
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
*instr ^= kMovMvnFlip;
return true;
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (imm32 < 0x10000) {
*instr ^= kMovwLeaveCCFlip;
*instr |= EncodeMovwImmediate(imm32);
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
+ if (must_use_constant_pool() ||
+ !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
} else {
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
+ if (x.must_use_constant_pool() ||
+ !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
} else {
const Operand& src,
Condition cond) {
// v6 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.rm_.is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
int width,
Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
int width,
Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
ASSERT(!dst.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
int width,
Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(ARMv7));
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
ASSERT_EQ(0, dst1.code() % 2);
ASSERT(!src1.is(lr)); // r14.
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
// Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
// VMOV can accept an immediate of the form:
//
const Condition cond) {
// Dd = immediate
// Instruction details available in ARM DDI 0406B, A8-640.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
uint32_t enc;
if (FitsVMOVDoubleImmediate(imm, &enc)) {
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
ASSERT(!src1.is(pc) && !src2.is(pc));
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
src1.code()*B12 | 0xB*B8 | B4 | dst.code());
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
dst1.code()*B12 | 0xB*B8 | B4 | src.code());
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
src1.code()*B12 | 0x5*B9 | B8 | B6);
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
const Condition cond) {
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
+class CpuFeatures {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- static void Probe(bool portable);
+ void Probe(bool portable);
// Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
+ bool IsSupported(CpuFeature f) const {
if (f == VFP3 && !FLAG_enable_vfp3) return false;
return (supported_ & (1u << f)) != 0;
}
// Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
+ bool IsEnabled(CpuFeature f) const {
return (enabled_ & (1u << f)) != 0;
}
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(CpuFeature f) {
- ASSERT(CpuFeatures::IsSupported(f));
+ explicit Scope(CpuFeature f)
+ : cpu_features_(Isolate::Current()->cpu_features()),
+ isolate_(Isolate::Current()) {
+ ASSERT(cpu_features_->IsSupported(f));
ASSERT(!Serializer::enabled() ||
- (found_by_runtime_probing_ & (1u << f)) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= 1u << f;
+ (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
+ old_enabled_ = cpu_features_->enabled_;
+ cpu_features_->enabled_ |= 1u << f;
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::Current(), isolate_);
+ cpu_features_->enabled_ = old_enabled_;
}
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
unsigned old_enabled_;
+ CpuFeatures* cpu_features_;
+ Isolate* isolate_;
#else
public:
explicit Scope(CpuFeature f) {}
};
private:
- static unsigned supported_;
- static unsigned enabled_;
- static unsigned found_by_runtime_probing_;
+ CpuFeatures();
+
+ unsigned supported_;
+ unsigned enabled_;
+ unsigned found_by_runtime_probing_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
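
Call sites keep their old shape; only the support check now routes through the
isolate, as the hunks below show repeatedly. The typical guarded-emission
pattern, taken from the surrounding stubs:

  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);  // records VFP3 as enabled (DEBUG only)
    __ vmov(s0, r2);                 // VFP3 instructions are safe here
  }
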
r5,
JSArray::kPreallocatedArrayElements,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1, r3, r4);
// Setup return value, remove receiver from stack and return.
__ mov(r0, r2);
__ add(sp, sp, Operand(kPointerSize));
r7,
true,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1, r2, r4);
// Setup return value, remove receiver and argument from stack and return.
__ mov(r0, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
r7,
false,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1, r2, r6);
// Fill arguments as array elements. Copy from the top of the stack (last
// element) to the array backing store filling it backwards. Note:
// Jump to the generic array code if the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- __ IncrementCounter(&Counters::string_ctor_calls, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_ctor_calls(), 1, r2, r3);
Register function = r1;
if (FLAG_debug_code) {
r5, // Scratch.
false, // Is it a Smi?
&not_cached);
- __ IncrementCounter(&Counters::string_ctor_cached_number, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
__ tst(r3, Operand(kIsNotStringMask));
__ b(ne, &convert_argument);
__ mov(argument, r0);
- __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->string_ctor_conversions(), 1, r3, r4);
__ b(&argument_is_string);
// Invoke the conversion builtin and put the result into r2.
__ bind(&convert_argument);
__ push(function); // Preserve the function.
- __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->string_ctor_conversions(), 1, r3, r4);
__ EnterInternalFrame();
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);
// At this point the argument is already a string. Call runtime to
// create a string wrapper.
__ bind(&gc_required);
- __ IncrementCounter(&Counters::string_ctor_gc_required, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->string_ctor_gc_required(), 1, r3, r4);
__ EnterInternalFrame();
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ __ Jump(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET);
}
if (is_api_function) {
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::HandleApiCallConstruct));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::HandleApiCallConstruct));
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, CALL_FUNCTION);
__ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->constructed_objects(), 1, r1, r2);
__ Jump(lr);
}
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
- RelocInfo::CODE_TARGET);
+ __ Call(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructCall)), RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION);
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Probe the CPU to set the supported features, because this builtin
// may be called before initialization has performed CPU setup.
- CpuFeatures::Probe(false);
+ Isolate::Current()->cpu_features()->Probe(false);
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ __ Jump(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET);
__ bind(&function);
}
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r2, r0); // Check formal and actual parameter counts.
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET, ne);
+ __ Jump(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET, ne);
ParameterCount expected(0);
__ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
__ vmov(d7.high(), scratch1);
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+ destination == kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
// Load the double from tagged HeapNumber to double register.
__ sub(scratch1, object, Operand(kHeapObjectTag));
// Handle loading a double from a smi.
__ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi to double using VFP instructions.
__ SmiUntag(scratch1, object);
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(single_scratch, scratch1);
__ vcvt_f64_s32(double_dst, single_scratch);
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
SwVfpRegister single_scratch = double_scratch.low();
// Load the double value.
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
}
// Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
// Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
__ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
}
// Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load the double from lhs, tagged HeapNumber r1, to d7.
__ sub(r7, lhs, Operand(kHeapObjectTag));
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
scratch1,
__ bind(&load_result_from_cache);
__ ldr(result,
FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native,
+ __ IncrementCounter(COUNTERS->number_to_string_native(),
1,
scratch1,
scratch2);
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
__ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
Label no_nan;
__ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_compare_native(), 1, r2, r3);
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
lhs_,
rhs_,
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses VFP3 instructions.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
Label false_result;
Label not_heap_number;
Register rhs,
const Builtins::JavaScript& builtin) {
Label slow, slow_reverse, do_the_call;
- bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
+ bool use_fp_registers =
+ Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+ Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
Register heap_number_map = r6;
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(rhs, ASR, kSmiTagSize));
__ vmov(s15, r7);
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r0 to double in d7.
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r1 to double in d6.
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
// The code below for writing into heap numbers isn't capable of writing
// the register as an unsigned int, so we go to the slow case when that
// happens.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, &slow);
// result.
__ mov(r0, Operand(r5));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r2);
const char* TypeRecordingBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+ op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int, so we go to the slow case
// when that happens.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
// result.
__ mov(r0, Operand(r5));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP3);
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+ op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// to return a heap number if we can.
// The non vfp3 code does not support this special case, so jump to
// runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
__ b(mi,
(result_type_ <= TRBinaryOpIC::INT32) ? &transition
: &return_heap_number);
__ Ret();
__ bind(&return_heap_number);
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
heap_number_result = r5;
GenerateHeapResultAllocation(masm,
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (tagged) {
// Argument is a number and is on stack and in r0.
__ eor(r1, r2, Operand(r3));
__ eor(r1, r1, Operand(r1, ASR, 16));
__ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.
__ mov(cache_entry,
Operand(ExternalReference::transcendental_cache_array_address()));
// r0 points to cache array.
- __ ldr(cache_entry, MemOperand(cache_entry,
- type_ * sizeof(TranscendentalCache::caches_[0])));
+ __ ldr(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
// r0 points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ cmp(cache_entry, Operand(0, RelocInfo::NONE));
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
+ { TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
__ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
}
__ Ret();
- } // if (CpuFeatures::IsSupported(VFP3))
+ } // if (Isolate::Current()->cpu_features()->IsSupported(VFP3))
__ bind(&calculate);
if (tagged) {
__ bind(&invalid_cache);
__ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
} else {
- if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
+ if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) UNREACHABLE();
CpuFeatures::Scope scope(VFP3);
Label no_update;
__ mov(r0, Operand(r2));
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label base_not_smi;
}
#endif
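+  // Pass a pointer to the current isolate in r2.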
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
+
// TODO(1242173): To let the GC traverse the return address of the exit
// frames, we need to know where the return address is. Right now,
// we store it on the stack to be able to find it again, but we never
// Retrieve the pending exception and clear the variable.
__ mov(ip, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r3, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address)));
__ ldr(r0, MemOperand(ip));
__ str(r3, MemOperand(ip));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ cmp(r0, Operand(Factory::termination_exception()));
+ __ cmp(r0, Operand(FACTORY->termination_exception()));
__ b(eq, throw_termination_exception);
// Handle normal exception.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ mov(r5, Operand(ExternalReference(Isolate::k_c_entry_fp_address)));
__ ldr(r5, MemOperand(r5));
__ Push(r8, r7, r6, r5);
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address);
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
__ cmp(r6, Operand(0, RelocInfo::NONE));
// exception field in the JSEnv and return a failure sentinel.
// Coming in here the fp will be invalid because the PushTryHandler below
// sets it to 0 to signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address)));
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
// Clear any pending exceptions.
__ mov(ip, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r5, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address)));
__ str(r5, MemOperand(ip));
// Invoke the function by calling through JS entry trampoline builtin.
// displacement since the current stack pointer (sp) points directly
// to the stack handler.
__ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_handler_address)));
__ str(r3, MemOperand(ip));
// No need to restore registers
__ add(sp, sp, Operand(StackHandlerConstants::kSize));
__ bind(&exit); // r0 holds result
// Restore the top frame descriptors from the stack.
__ pop(r3);
- __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address)));
__ str(r3, MemOperand(ip));
// Reset the stack to the callee saved registers.
__ b(ne, &slow);
// Null is not instance of anything.
- __ cmp(scratch, Operand(Factory::null_value()));
+ __ cmp(scratch, Operand(FACTORY->null_value()));
__ b(ne, &object_not_null);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(HasArgsInRegisters() ? 0 : 2);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
+ __ IncrementCounter(COUNTERS->regexp_entry_native(), 1, r0, r2);
- static const int kRegExpExecuteArguments = 7;
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
static const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
+ // Argument 8 (sp[16]): Pass current isolate address.
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ str(r0, MemOperand(sp, 4 * kPointerSize));
+
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r1, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r1, MemOperand(r1, 0));
- __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address)));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
__ bind(&failure);
// For failure and exception return null.
- __ mov(r0, Operand(Factory::null_value()));
+ __ mov(r0, Operand(FACTORY->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
// Interleave operations for better latency.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(Factory::empty_fixed_array()));
+ __ mov(r4, Operand(FACTORY->empty_fixed_array()));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
// r5: Number of elements in array, untagged.
// Set map.
- __ mov(r2, Operand(Factory::fixed_array_map()));
+ __ mov(r2, Operand(FACTORY->fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(Factory::the_hole_value()));
+ __ mov(r2, Operand(FACTORY->the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Fill fixed array elements with hole.
// r0: JSArray, tagged.
__ mov(r0, Operand(argc_)); // Set up the number of arguments.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+ __ Jump(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
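GetName() now reaches its allocator through the current isolate's bootstrapper, but the lazy, OOM-tolerant caching shape is unchanged. A sketch of that shape; AllocateName stands in for AllocateAutoDeletedArray and all names are illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // May return NULL on OOM, like AllocateAutoDeletedArray; the real array
    // is auto-deleted at VM teardown, the toy one is freed by the caller.
    static char* AllocateName(std::size_t n) {
      return static_cast<char*>(std::malloc(n));
    }

    static const char* StubName(char** cache) {
      if (*cache != NULL) return *cache;            // compute once, reuse
      const std::size_t kMaxNameLength = 100;
      *cache = AllocateName(kMaxNameLength);
      if (*cache == NULL) return "OOM";             // degrade, don't crash
      std::snprintf(*cache, kMaxNameLength, "CompareStub_%s", "EQ");
      return *cache;
    }

    int main() {
      char* cache = NULL;
      std::puts(StubName(&cache));   // prints "CompareStub_EQ" (or "OOM")
      std::free(cache);
      return 0;
    }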
const char* cc_name;
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ bind(&make_two_character_string);
__ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
__ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->string_compare_native(), 1, r1, r2);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
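The mechanical change in these counter hunks: &Counters::foo took the address of a process-wide static, whereas COUNTERS->foo() fetches the counter owned by the isolate current on this thread. A toy model, names illustrative:

    #include <cassert>

    // Per-isolate counters instead of one process-wide static per counter.
    struct ToyCounter {
      int value = 0;
      void Increment(int by) { value += by; }
    };
    struct ToyCounters {
      ToyCounter string_compare_native_;
      ToyCounter* string_compare_native() { return &string_compare_native_; }
    };

    void Bump(ToyCounters* counters) {
      counters->string_compare_native()->Increment(1);
    }

    int main() {
      ToyCounters a, b;  // one per toy isolate
      Bump(&a);
      assert(a.string_compare_native_.value == 1);
      assert(b.string_compare_native_.value == 0);  // no cross-isolate bleed
      return 0;
    }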
__ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
// Compare flat ASCII strings natively. Remove arguments from stack first.
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_compare_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
__ cmp(r3, Operand(Smi::FromInt(0)), ne);
__ b(ne, &strings_not_empty); // If either string was empty, return r0.
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
__ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
__ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
__ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// r7: result string.
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
__ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
__ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load left and right operand
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
- use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+ use_vfp3_ = Isolate::Current()->cpu_features()->IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
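use_vfp3_ now snapshots the probe result from the current isolate's CpuFeatures when the stub is constructed, so later code generation never re-queries global state. A sketch of the idiom, names illustrative:

    #include <cassert>

    struct ToyFeatures { bool vfp3; };

    struct ToyStub {
      explicit ToyStub(const ToyFeatures* f) : use_vfp3_(f->vfp3) {}
      bool use_vfp3_;  // cached at construction; codegen never re-probes
    };

    int main() {
      ToyFeatures features = { true };  // per-isolate probe result
      ToyStub stub(&features);
      assert(stub.use_vfp3_);
      return 0;
    }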
// -------------------------------------------------------------------------
// CodeGenerator implementation
-int CodeGenerator::inlined_write_barrier_size_ = -1;
-
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
masm_(masm),
if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
+ bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
bool should_trace =
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
true_target->Branch(eq);
// Slow case.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Implements the slow case by using ToBooleanStub.
// The ToBooleanStub takes a single argument, and
void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
ASSERT(Token::IsBitOp(op_));
- if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
+ if ((op_ == Token::SHR) &&
+ !Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
// >>> requires an unsigned-to-double conversion and the non-VFP code
// does not support this conversion.
__ b(cond, entry_label());
void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
Register heap_number,
Register scratch) {
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, answer);
if (op_ == Token::SHR) {
// SHR is special because it is required to produce a positive answer.
__ cmp(int32, Operand(0, RelocInfo::NONE));
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
// Non-VFP code cannot convert from unsigned to double, so fall back
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
frame_->Dup();
frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
frame_->EmitPush(r0);
__ JumpIfSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ Handle<Code> apply_code(
+ Isolate::Current()->builtins()->builtin(Builtins::FunctionApply));
__ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
__ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r1, Operand(apply_code));
// If we have a function or a constant, we need to initialize the variable.
Expression* val = NULL;
if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
+ val = new Literal(FACTORY->the_hole_value());
} else {
val = node->fun(); // NULL if we don't have a function
}
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// If we can fall off the end of the try block, unlink from try chain.
if (has_valid_frame()) {
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// If we can fall off the end of the try block, unlink from the try
// chain and set the state on the frame to FALLING.
frame_->EmitPush(cp);
frame_->EmitPush(Operand(function_info));
frame_->EmitPush(Operand(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? FACTORY->true_value()
+ : FACTORY->false_value()));
frame_->CallRuntime(Runtime::kNewClosure, 3);
frame_->EmitPush(r0);
}
// else fall through
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Initialize));
Load(value);
if (property->emit_store()) {
frame_->PopToR0();
frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
frame_->EmitPush(Operand(node->constant_elements()));
int length = node->values()->length();
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->cow_arrays_created_stub(), 1, r1, r2);
} else if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
// Setup the name register and call the IC initialization code.
__ mov(r2, Operand(var->name()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> stub =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
arg_count + 1);
__ mov(r2, Operand(name));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub =
- StubCache::ComputeCallInitialize(arg_count, in_loop);
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
// Load the key into r2 and call the IC initialization code.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub =
- StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count,
+ in_loop);
CodeForSourcePosition(node->position());
frame_->SpillAll();
__ ldr(r2, frame_->ElementAt(arg_count + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
- Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructCall));
frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
frame_->EmitPush(r0);
// Functions have class 'Function'.
function.Bind();
- __ mov(tos, Operand(Factory::function_class_symbol()));
+ __ mov(tos, Operand(FACTORY->function_class_symbol()));
frame_->EmitPush(tos);
leave.Jump();
// Objects with a non-function constructor have class 'Object'.
non_function_constructor.Bind();
- __ mov(tos, Operand(Factory::Object_symbol()));
+ __ mov(tos, Operand(FACTORY->Object_symbol()));
frame_->EmitPush(tos);
leave.Jump();
Load(args->at(0));
Load(args->at(1));
- if (!CpuFeatures::IsSupported(VFP3)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
frame_->CallRuntime(Runtime::kMath_pow, 2);
frame_->EmitPush(r0);
} else {
ASSERT(args->length() == 1);
Load(args->at(0));
- if (!CpuFeatures::IsSupported(VFP3)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
frame_->CallRuntime(Runtime::kMath_sqrt, 1);
frame_->EmitPush(r0);
} else {
Label entry, loop;
// The use of ip to store the valueOf symbol assumes that it is not otherwise
// used in the loop below.
- __ mov(ip, Operand(Factory::value_of_symbol()));
+ __ mov(ip, Operand(FACTORY->value_of_symbol()));
__ jmp(&entry);
__ bind(&loop);
__ ldr(scratch2_, MemOperand(map_result_, 0));
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
__ PrepareCallCFunction(0, r1);
__ CallCFunction(ExternalReference::random_uint32_function(), 0);
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ Isolate::Current()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
frame_->SpillAllButCopyTOSToR0();
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
frame_->SpillAllButCopyTOSToR0();
void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
frame_->SpillAllButCopyTOSToR0();
ZoneList<Expression*>* args = node->arguments();
Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
+ const Runtime::Function* function = node->function();
if (function == NULL) {
// Prepare stack for calling JS runtime function.
// Call the JS runtime function.
__ mov(r2, Operand(node->name()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> stub =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
frame_->EmitPush(r0);
Register scratch = VirtualFrame::scratch0();
- if (check->Equals(Heap::number_symbol())) {
+ if (check->Equals(HEAP->number_symbol())) {
__ tst(tos, Operand(kSmiTagMask));
true_target()->Branch(eq);
__ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
__ cmp(tos, ip);
cc_reg_ = eq;
- } else if (check->Equals(Heap::string_symbol())) {
+ } else if (check->Equals(HEAP->string_symbol())) {
__ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
__ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
cc_reg_ = lt;
- } else if (check->Equals(Heap::boolean_symbol())) {
+ } else if (check->Equals(HEAP->boolean_symbol())) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(tos, ip);
true_target()->Branch(eq);
__ cmp(tos, ip);
cc_reg_ = eq;
- } else if (check->Equals(Heap::undefined_symbol())) {
+ } else if (check->Equals(HEAP->undefined_symbol())) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(tos, ip);
true_target()->Branch(eq);
cc_reg_ = eq;
- } else if (check->Equals(Heap::function_symbol())) {
+ } else if (check->Equals(HEAP->function_symbol())) {
__ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
Register map_reg = scratch;
__ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
cc_reg_ = eq;
- } else if (check->Equals(Heap::object_symbol())) {
+ } else if (check->Equals(HEAP->object_symbol())) {
__ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
- __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
- __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
+ __ DecrementCounter(COUNTERS->named_load_inline(), 1, scratch1, scratch2);
+ __ IncrementCounter(COUNTERS->named_load_inline_miss(), 1,
+ scratch1, scratch2);
// Ensure receiver in r0 and name in r2 to match load ic calling convention.
__ Move(r0, receiver_);
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
RelocInfo::Mode mode = is_contextual_
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
+ __ DecrementCounter(COUNTERS->keyed_load_inline(), 1, scratch1, scratch2);
+ __ IncrementCounter(COUNTERS->keyed_load_inline_miss(),
+ 1, scratch1, scratch2);
// Ensure key in r0 and receiver in r1 to match keyed load ic calling
// convention.
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed load IC. It has the arguments key and receiver in r0 and r1.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// keyed load has been inlined.
void DeferredReferenceSetKeyedValue::Generate() {
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
- __ IncrementCounter(
- &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
+ __ DecrementCounter(COUNTERS->keyed_store_inline(), 1, scratch1, scratch2);
+ __ IncrementCounter(COUNTERS->keyed_store_inline_miss(),
+ 1, scratch1, scratch2);
// Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
// calling convention.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed store IC. It has the arguments value, key and receiver in r0,
// r1 and r2.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed store IC. It has the arguments value, key and receiver in r0,
// r1 and r2.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
bool contextual_load_in_builtin =
is_contextual &&
- (Bootstrapper::IsActive() ||
+ (ISOLATE->bootstrapper()->IsActive() ||
(!info_->closure().is_null() && info_->closure()->IsBuiltin()));
if (scope()->is_global_scope() ||
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
if (is_contextual) {
- __ IncrementCounter(&Counters::named_load_global_inline, 1,
+ __ IncrementCounter(COUNTERS->named_load_global_inline(), 1,
frame_->scratch0(), frame_->scratch1());
} else {
- __ IncrementCounter(&Counters::named_load_inline, 1,
+ __ IncrementCounter(COUNTERS->named_load_inline(), 1,
frame_->scratch0(), frame_->scratch1());
}
}
}
if (is_dont_delete) {
- __ IncrementCounter(&Counters::dont_delete_hint_hit, 1,
+ __ IncrementCounter(COUNTERS->dont_delete_hint_hit(), 1,
frame_->scratch0(), frame_->scratch1());
}
}
// Check the map. The null map used below is patched by the inline cache
// code. Therefore we can't use a LoadRoot call.
__ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(scratch2, Operand(Factory::null_value()));
+ __ mov(scratch2, Operand(FACTORY->null_value()));
__ cmp(scratch, scratch2);
deferred->Branch(ne);
InlinedNamedLoadInstructions += 1;
#endif
// Load the (initially invalid) cell and get its value.
- masm()->mov(receiver, Operand(Factory::null_value()));
+ masm()->mov(receiver, Operand(FACTORY->null_value()));
__ ldr(receiver,
FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
#ifdef DEBUG
InlinedNamedLoadInstructions += 3;
#endif
- __ cmp(receiver, Operand(Factory::the_hole_value()));
+ __ cmp(receiver, Operand(FACTORY->the_hole_value()));
deferred->Branch(eq);
} else if (FLAG_debug_code) {
#ifdef DEBUG
InlinedNamedLoadInstructions += 3;
#endif
- __ cmp(receiver, Operand(Factory::the_hole_value()));
+ __ cmp(receiver, Operand(FACTORY->the_hole_value()));
__ b(&check_the_hole, eq);
__ bind(&cont);
}
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
- __ mov(scratch0, Operand(Factory::null_value()));
+ __ mov(scratch0, Operand(FACTORY->null_value()));
__ cmp(scratch0, scratch1);
deferred->Branch(ne);
// Check that this is the first inlined write barrier or that
// this inlined write barrier has the same size as all the other
// inlined write barriers.
- ASSERT((inlined_write_barrier_size_ == -1) ||
- (inlined_write_barrier_size_ ==
+ ASSERT((Isolate::Current()->inlined_write_barrier_size() == -1) ||
+ (Isolate::Current()->inlined_write_barrier_size() ==
masm()->InstructionsGeneratedSince(&record_write_start)));
- inlined_write_barrier_size_ =
- masm()->InstructionsGeneratedSince(&record_write_start);
+ Isolate::Current()->set_inlined_write_barrier_size(
+ masm()->InstructionsGeneratedSince(&record_write_start));
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
- __ IncrementCounter(&Counters::keyed_load_inline, 1,
+ __ IncrementCounter(COUNTERS->keyed_load_inline(), 1,
frame_->scratch0(), frame_->scratch1());
// Load the key and receiver from the stack.
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
- __ mov(scratch2, Operand(Factory::null_value()));
+ __ mov(scratch2, Operand(FACTORY->null_value()));
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
- __ IncrementCounter(&Counters::keyed_store_inline, 1,
+ __ IncrementCounter(COUNTERS->keyed_store_inline(), 1,
scratch1, scratch2);
// comparison to always fail so that we will hit the IC call in the
// deferred code which will allow the debugger to break for fast case
// stores.
- __ mov(scratch3, Operand(Factory::fixed_array_map()));
+ __ mov(scratch3, Operand(FACTORY->fixed_array_map()));
__ cmp(scratch2, scratch3);
deferred->Branch(ne);
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
return name_;
}
-
#undef __
} } // namespace v8::internal
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(inlined_write_barrier_size_ != -1);
- return inlined_write_barrier_size_ + 4;
+ ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+ return Isolate::Current()->inlined_write_barrier_size() + 4;
}
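Here the mutable static inlined_write_barrier_size_ becomes a per-isolate field behind a getter/setter pair. A self-contained model of that migration, names illustrative:

    #include <cassert>

    struct ToyIsolate {
      int inlined_write_barrier_size() const { return size_; }
      void set_inlined_write_barrier_size(int s) { size_ = s; }
     private:
      int size_ = -1;  // -1 means "not measured yet", as the ASSERT expects
    };

    int main() {
      ToyIsolate a, b;
      a.set_inlined_write_barrier_size(7);
      assert(a.inlined_write_barrier_size() == 7);
      assert(b.inlined_write_barrier_size() == -1);  // isolates independent
      return 0;
    }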
private:
// to some unlinking code).
bool function_return_is_shadowed_;
- // Size of inlined write barriers generated by EmitNamedStore.
- static int inlined_write_barrier_size_;
-
friend class VirtualFrame;
+ friend class Isolate;
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
+ friend class InlineRuntimeFunctionsTable;
friend class LCodeGen;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
namespace internal {
void CPU::Setup() {
- CpuFeatures::Probe(true);
- if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) {
+ CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
+ cpu_features->Probe(true);
+ if (!cpu_features->IsSupported(VFP3) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
}
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(start, size);
+ Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
- patcher.Emit(Debug::debug_break_return()->entry());
+ patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
- patcher.Emit(Debug::debug_break_slot()->entry());
+ patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
}
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- node->set_next(deoptimizing_code_list_);
- deoptimizing_code_list_ = node;
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
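The push-front list logic is unchanged; only the head pointer moves from a static into per-isolate DeoptimizerData. A sketch, names illustrative:

    struct ToyNode {
      ToyNode* next = nullptr;
      void set_next(ToyNode* n) { next = n; }
    };
    struct ToyDeoptimizerData {
      ToyNode* deoptimizing_code_list = nullptr;  // head, was a static
    };

    void Register(ToyDeoptimizerData* data, ToyNode* node) {
      node->set_next(data->deoptimizing_code_list);  // old head linked behind
      data->deoptimizing_code_list = node;           // new node becomes head
    }

    int main() {
      ToyDeoptimizerData data;
      ToyNode n1, n2;
      Register(&data, &n1);
      Register(&data, &n2);
      return data.deoptimizing_code_list == &n2 ? 0 : 1;  // n2 is new head
    }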
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
- Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ Code* continuation = Isolate::Current()->builtins()->builtin(
+ Builtins::NotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
+
// Set the continuation for the topmost frame.
if (is_topmost) {
+ Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
- ? Builtins::builtin(Builtins::NotifyDeoptimized)
- : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ ? builtins->builtin(Builtins::NotifyDeoptimized)
+ : builtins->builtin(Builtins::NotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
}
const char* NameConverter::NameOfAddress(byte* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
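The old NameOfAddress returned a pointer into a function-local static buffer, so every call, from any thread or isolate, clobbered the previous result; the member buffer ties the result's lifetime to its converter instead. A minimal illustration, names illustrative:

    #include <cstdio>

    struct ToyNameConverter {
      char buf_[32];  // per-instance, replacing the old static buffer
      const char* NameOfAddress(const void* addr) {
        std::snprintf(buf_, sizeof(buf_), "%p", const_cast<void*>(addr));
        return buf_;  // valid while this converter instance is alive
      }
    };

    int main() {
      ToyNameConverter a, b;
      int x, y;
      const char* pa = a.NameOfAddress(&x);
      const char* pb = b.NameOfAddress(&y);  // does not clobber pa
      std::printf("%s %s\n", pa, pb);
      return 0;
    }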
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
+#include "memory.h"
+
namespace v8 {
namespace internal {
void FullCodeGenerator::DoTest(Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Emit the inlined tests assumed by the stub.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
prop->key()->AsLiteral()->handle()->IsSmi());
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(
- is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(is_strict_mode()
+ ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements).
}
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ mov(r0, Operand(key_literal->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
EmitCallIC(ic, mode);
}
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
__ mov(r0, Operand(key_literal->handle()));
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
context()->Plug(r0);
}
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_elements()));
__ Push(r3, r2, r1);
- if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
__ CallStub(&stub);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+ __ IncrementCounter(
+ isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ pop(r2);
}
__ pop(r0); // Restore value.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
__ pop(r1);
}
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ pop(r2);
}
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
// Restore context register.
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
// Record source code position for IC call.
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ ldr(r1, GlobalObjectOperand());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
// also use the fast code generator.
FunctionLiteral* lit = fun->AsFunctionLiteral();
if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
+ lit->name()->Equals(isolate()->heap()->empty_string()) &&
loop_depth() == 0) {
lit->set_try_full_codegen(true);
}
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> construct_builtin(isolate()->builtins()->builtin(
+ Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
context()->Plug(r0);
}
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (isolate()->cpu_features()->IsSupported(VFP3)) {
__ PrepareCallCFunction(0, r1);
__ CallCFunction(ExternalReference::random_uint32_function(), 0);
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ isolate()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
if (expr->is_jsruntime()) {
// Call the JS runtime function.
__ mov(r2, Operand(expr->name()));
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, NOT_IN_LOOP);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count, NOT_IN_LOOP);
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
case KEYED_PROPERTY: {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
Comment cmnt(masm_, "Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- if (check->Equals(Heap::number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
__ JumpIfSmi(r0, if_false);
// Check for undetectable objects => false.
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(eq, if_true);
__ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r0, FIRST_FUNCTION_CLASS_TYPE);
Split(ge, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false);
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
mode == RelocInfo::CODE_TARGET_CONTEXT);
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->named_load_full(), 1, r1, r2);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->keyed_load_full(), 1, r1, r2);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->named_store_full(), 1, r1, r2);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->keyed_store_full(), 1, r1, r2);
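+ // Fall through.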
default:
break;
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->named_load_full(), 1, r1, r2);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->keyed_load_full(), 1, r1, r2);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->named_store_full(), 1, r1, r2);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->keyed_store_full(), 1, r1, r2);
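+ // Fall through.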
default:
break;
}
Code::kNoExtraICState,
NORMAL,
argc);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r1, r2, r3, r4, r5);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r1, r2, r3, r4, r5);
__ bind(&miss);
}
// -----------------------------------
if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(&Counters::call_miss, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->call_miss(), 1, r3, r4);
} else {
- __ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->keyed_call_miss(), 1, r3, r4);
}
// Get the receiver of the function from the stack.
GenerateFastArrayLoad(
masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_smi_fast(), 1, r0, r3);
__ bind(&do_call);
// receiver in r1 is not used after this point.
__ mov(r0, Operand(r2, ASR, kSmiTagSize));
// r0: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_smi_dict(), 1, r0, r3);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
- __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_slow_load(), 1, r0, r3);
__ EnterInternalFrame();
__ push(r2); // save the key
__ Push(r1, r2); // pass the receiver and the key
__ b(ne, &lookup_monomorphic_cache);
GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_lookup_dict(), 1, r0, r3);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_lookup_cache(), 1, r0, r3);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
// Fall through on miss.
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
- __ IncrementCounter(&Counters::keyed_call_generic_slow, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_slow(), 1, r0, r3);
GenerateMiss(masm, argc);
__ bind(&index_string);
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, r0, r2, r3, r4, r5);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r0, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
// -- sp[0] : receiver
// -----------------------------------
- __ IncrementCounter(&Counters::load_miss, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->load_miss(), 1, r3, r4);
__ mov(r3, r0);
__ Push(r3, r2);
// Update the offsets if initializing the inlined store. No reason
// to update the offsets when clearing the inlined version because
// it will bail out in the map check.
- if (map != Heap::null_value()) {
+ if (map != HEAP->null_value()) {
// Patch the offset in the actual store instruction.
Address str_property_instr_address =
ldr_map_instr_address + 3 * Assembler::kInstrSize;
// -- r1 : receiver
// -----------------------------------
- __ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->keyed_load_miss(), 1, r3, r4);
__ Push(r1, r0);
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_smi(), 1, r2, r3);
__ Ret();
__ bind(&check_number_dictionary);
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_slow(), 1, r2, r3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
__ add(r6, r6, r5); // Index from start of object.
__ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
__ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_lookup_cache(), 1, r2, r3);
__ Ret();
// Load property array property.
__ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_lookup_cache(), 1, r2, r3);
__ Ret();
// Do a quick inline probe of the receiver's dictionary, if it
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_symbol(), 1, r2, r3);
__ Ret();
__ bind(&index_string);
NOT_IN_LOOP,
MONOMORPHIC,
strict_mode);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
+
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
- __ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5);
+ __ IncrementCounter(COUNTERS->store_normal_hit(), 1, r4, r5);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5);
+ __ IncrementCounter(COUNTERS->store_normal_miss(), 1, r4, r5);
GenerateMiss(masm);
}
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- Runtime::Function* function() const { return hydrogen()->function(); }
+ const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "arm/lithium-codegen-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
}
-void LCodeGen::CallRuntime(Runtime::Function* function,
+void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
ASSERT(instr != NULL);
if (length == 0) return;
ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
- Factory::NewDeoptimizationInputData(length, TENURED);
+ factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
- Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
Label true_value, done;
__ tst(r0, r0);
- __ mov(r0, Operand(Factory::false_value()), LeaveCC, ne);
- __ mov(r0, Operand(Factory::true_value()), LeaveCC, eq);
+ __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
+ __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
// We use factory()->the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
- __ mov(ip, Operand(Factory::the_hole_value()));
+ __ mov(ip, Operand(factory()->the_hole_value()));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
// We use factory()->the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
- __ mov(result, Operand(Factory::the_hole_value()));
+ __ mov(result, Operand(factory()->the_hole_value()));
__ b(&done);
// The inlined call site cache did not match. Check null and string before
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(
+ isolate()->builtins()->builtin(Builtins::LoadIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arity, NOT_IN_LOOP);
__ mov(r2, Operand(instr->name()));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Restore context register.
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(r2, Operand(instr->name()));
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
ASSERT(ToRegister(instr->InputAt(0)).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> builtin(isolate()->builtins()->builtin(
+ Builtins::JSConstructCall));
__ mov(r0, Operand(instr->arity()));
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
void LCodeGen::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- if (Heap::InNewSpace(*object)) {
+ if (heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(object);
+ factory()->NewJSGlobalPropertyCell(object);
__ mov(result, Operand(cell));
__ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
} else {
__ mov(r2, Operand(shared_info));
__ mov(r1, Operand(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? factory()->true_value()
+ : factory()->false_value()));
__ Push(cp, r2, r1);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
Handle<String> type_name) {
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(Heap::number_symbol())) {
+ if (type_name->Equals(heap()->number_symbol())) {
__ JumpIfSmi(input, true_label);
__ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(input, Operand(ip));
final_branch_condition = eq;
- } else if (type_name->Equals(Heap::string_symbol())) {
+ } else if (type_name->Equals(heap()->string_symbol())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
__ tst(ip, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
- } else if (type_name->Equals(Heap::boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ b(eq, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = eq;
- } else if (type_name->Equals(Heap::undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ b(eq, true_label);
__ JumpIfSmi(input, false_label);
__ tst(ip, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
- } else if (type_name->Equals(Heap::function_symbol())) {
+ } else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE);
final_branch_condition = ge;
- } else if (type_name->Equals(Heap::object_symbol())) {
+ } else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
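The new accessors are pure delegation: everything is derived from the CompilationInfo the code generator already owns, leaving no global lookups. A sketch, names illustrative:

    struct ToyHeap {};
    struct ToyFactory {};
    struct ToyIsolate {
      ToyHeap* heap() { return &heap_; }
      ToyFactory* factory() { return &factory_; }
      ToyHeap heap_;
      ToyFactory factory_;
    };
    struct ToyInfo {
      ToyIsolate* isolate() const { return isolate_; }
      ToyIsolate* isolate_;
    };

    struct ToyCodeGen {
      ToyIsolate* isolate() const { return info_->isolate(); }
      ToyFactory* factory() const { return isolate()->factory(); }
      ToyHeap* heap() const { return isolate()->heap(); }
      ToyInfo* info_;
    };

    int main() {
      ToyIsolate isolate;
      ToyInfo info = { &isolate };
      ToyCodeGen gen = { &info };
      return gen.factory() == isolate.factory() ? 0 : 1;
    }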
// Support for converting LOperands to assembler types.
// LOperand must be a register.
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
- void CallRuntime(Runtime::Function* function,
+ void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
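Runtime::Function descriptors are entries in an immutable table, so passing them as const Runtime::Function* documents that fact and lets the table live in read-only memory. A small model, names illustrative:

    struct ToyRuntimeFunction { const char* name; int nargs; };

    static const ToyRuntimeFunction kToyFunctions[] = {
      { "NewClosure", 3 },
    };

    const ToyRuntimeFunction* ToyFunctionForId(int id) {
      return &kToyFunctions[id];  // pointer into a read-only table
    }

    void ToyCallRuntime(const ToyRuntimeFunction* f, int num_arguments) {
      // Callee can read f->name / f->nargs but cannot mutate the table.
      (void)f; (void)num_arguments;
    }

    int main() {
      ToyCallRuntime(ToyFunctionForId(0), 3);
      return 0;
    }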
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "arm/lithium-gap-resolver-arm.h"
#include "arm/lithium-codegen-arm.h"
: Assembler(buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ code_object_(HEAP->undefined_value()) {
}
} else if (!src2.is_single_instruction() &&
!src2.must_use_constant_pool() &&
- CpuFeatures::IsSupported(ARMv7) &&
+ Isolate::Current()->cpu_features()->IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
} else {
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
ASSERT_EQ(dst1.code() + 1, dst2.code());
// Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
ASSERT_EQ(src1.code() + 1, src2.code());
// Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address)));
str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address)));
str(cp, MemOperand(ip));
// Optionally save all double registers.
// Clear top frame.
mov(r3, Operand(0, RelocInfo::NONE));
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address)));
str(r3, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address)));
ldr(cp, MemOperand(ip));
#ifdef DEBUG
str(r3, MemOperand(ip));
}
Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::ArgumentsAdaptorTrampoline));
if (flag == CALL_FUNCTION) {
if (call_wrapper != NULL) {
call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address)));
ldr(r1, MemOperand(r3));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r1);
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r7, Operand(ExternalReference(Isolate::k_handler_address)));
ldr(r6, MemOperand(r7));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r6);
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
pop(r1);
- mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_handler_address)));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(ip));
}
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address)));
ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
}
// Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address)));
ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address);
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address)));
str(r0, MemOperand(r2));
}
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
Register scratch1,
Register scratch2,
CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(VFP3));
CpuFeatures::Scope scope(VFP3);
Register prev_fpscr = scratch1;
Register scratch = scratch2;
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
}
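
A scalar sketch of what GetLeastBitsFromSmi computes, on the assumption (carried over from the surrounding ARM code) that kSmiTagSize is 1:

    // Untag the smi, then keep the low num_least_bits bits. On ARMv7
    // this is the single ubfx above; pre-ARMv7 it is the arithmetic
    // shift followed by a mask.
    int GetLeastBitsFromSmi(int smi_value, int num_least_bits) {
      const int kSmiTagSize = 1;  // assumption: ARM smi tag is 1 bit
      return (smi_value >> kSmiTagSize) & ((1 << num_least_bits) - 1);
    }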
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function)));
CEntryStub stub(1);
b(ne, failure);
}
+static const int kRegisterPassedArguments = 4;
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = ActivationFrameAlignment();
+
+  // Reserve space for the Isolate address, which is always passed as
+  // the last parameter.
+ num_arguments += 1;
+
// Up to four simple arguments are passed in registers r0..r3.
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments;
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- mov(ip, Operand(function));
- CallCFunction(ip, num_arguments);
+ CallCFunctionHelper(no_reg, function, ip, num_arguments);
+}
+
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
+ int num_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(),
+ scratch,
+ num_arguments);
}
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments) {
+  // Pass the Isolate address as an implicit last argument.
+ if (num_arguments < kRegisterPassedArguments) {
+ Register arg_to_reg[] = {r0, r1, r2, r3};
+ Register r = arg_to_reg[num_arguments];
+ mov(r, Operand(ExternalReference::isolate_address()));
+ } else {
+ int stack_passed_arguments = num_arguments - kRegisterPassedArguments;
+    // Store the Isolate address on the stack after the other arguments.
+ mov(scratch, Operand(ExternalReference::isolate_address()));
+ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ }
+ num_arguments += 1;
+
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ if (function.is(no_reg)) {
+ mov(scratch, Operand(function_reference));
+ function = scratch;
+ }
Call(function);
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments;
if (OS::ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
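
The net effect of PrepareCallCFunction and CallCFunctionHelper above is that the isolate pointer becomes argument number num_arguments of every C call made from generated code. A self-contained sketch of just that placement rule (the printf wording is illustrative; the register/stack split mirrors the code above):

    #include <cstdio>

    static const int kRegisterPassedArguments = 4;  // r0..r3 on ARM

    // Where does the appended Isolate* land for a call that already
    // has num_arguments real arguments?
    void DescribeIsolateSlot(int num_arguments) {
      if (num_arguments < kRegisterPassedArguments) {
        std::printf("%d args: isolate goes in r%d\n",
                    num_arguments, num_arguments);
      } else {
        int stack_passed = num_arguments - kRegisterPassedArguments;
        std::printf("%d args: isolate stored at sp + %d words\n",
                    num_arguments, stack_passed);
      }
    }

    int main() {
      for (int n = 0; n <= 6; ++n) DescribeIsolateSlot(n);
      return 0;
    }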
Condition cond = al);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(Register function, Register scratch, int num_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
private:
+ void CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments);
+
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
+ * - fp[52] Isolate* isolate (Address of the current isolate)
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[44] stack_area_base (High end of the memory area to use as
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = Factory::NewCode(code_desc,
+ Handle<Code> code = FACTORY->NewCode(code_desc,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
- PROFILE(RegExpCodeCreateEvent(*code, *source));
+ PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}
int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
- if (StackGuard::IsStackOverflow()) {
- Top::StackOverflow();
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
return EXCEPTION;
}
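
CheckStackGuardState can now recover the isolate without a global lookup because the frame layout (see kIsolate below) stores it at a fixed offset from re_frame. A hedged standalone sketch of the frame_entry helper this relies on; the real helper is not shown in this excerpt:

    #include <cstdint>

    // Treat a fixed offset from the saved RegExp frame pointer as a
    // typed slot. Offsets such as kIsolate come from the frame layout.
    template <typename T>
    T& frame_entry(uint8_t* re_frame, int frame_offset) {
      return *reinterpret_cast<T*>(re_frame + frame_offset);
    }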
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
-// The Debugger class is used by the simulator while debugging simulated ARM
+// The ArmDebugger class is used by the simulator while debugging simulated ARM
// code.
-class Debugger {
+class ArmDebugger {
public:
- explicit Debugger(Simulator* sim);
- ~Debugger();
+ explicit ArmDebugger(Simulator* sim);
+ ~ArmDebugger();
void Stop(Instruction* instr);
void Debug();
};
-Debugger::Debugger(Simulator* sim) {
+ArmDebugger::ArmDebugger(Simulator* sim) {
sim_ = sim;
}
-Debugger::~Debugger() {
+ArmDebugger::~ArmDebugger() {
}
}
-void Debugger::Stop(Instruction* instr) {
+void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
}
-void Debugger::Stop(Instruction* instr) {
+void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
#endif
-int32_t Debugger::GetRegisterValue(int regnum) {
+int32_t ArmDebugger::GetRegisterValue(int regnum) {
if (regnum == kPCRegister) {
return sim_->get_pc();
} else {
}
-double Debugger::GetVFPDoubleRegisterValue(int regnum) {
+double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum);
}
-bool Debugger::GetValue(const char* desc, int32_t* value) {
+bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
}
-bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
+bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && !is_double) {
}
-bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
+bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && is_double) {
}
-bool Debugger::SetBreakpoint(Instruction* breakpc) {
+bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
return false;
}
-bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-void Debugger::UndoBreakpoints() {
+void ArmDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-void Debugger::RedoBreakpoints() {
+void ArmDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-void Debugger::Debug() {
+void ArmDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the Debugger.\n");
+ PrintF(" stop and and give control to the ArmDebugger.\n");
PrintF(" The first %d stop codes are watched:\n",
Simulator::kNumOfWatchedStops);
PrintF(" - They can be enabled / disabled: the Simulator\n");
}
-void Simulator::FlushICache(void* start_addr, size_t size) {
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+ void* start_addr,
+ size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
int offset = (start & CachePage::kPageMask);
while (!AllOnOnePage(start, size - 1)) {
int bytes_to_flush = CachePage::kPageSize - offset;
- FlushOnePage(start, bytes_to_flush);
+ FlushOnePage(i_cache, start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
ASSERT_EQ(0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
- FlushOnePage(start, size);
+ FlushOnePage(i_cache, start, size);
}
}
-CachePage* Simulator::GetCachePage(void* page) {
- v8::internal::HashMap::Entry* entry = i_cache_->Lookup(page,
- ICacheHash(page),
- true);
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+ ICacheHash(page),
+ true);
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(intptr_t start, int size) {
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+ intptr_t start,
+ int size) {
ASSERT(size <= CachePage::kPageSize);
ASSERT(AllOnOnePage(start, size - 1));
ASSERT((start & CachePage::kLineMask) == 0);
ASSERT((size & CachePage::kLineMask) == 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(page);
+ CachePage* cache_page = GetCachePage(i_cache, page);
char* valid_bytemap = cache_page->ValidityByte(offset);
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(Instruction* instr) {
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
int offset = (address & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(page);
+ CachePage* cache_page = GetCachePage(i_cache, page);
char* cache_valid_byte = cache_page->ValidityByte(offset);
bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
}
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key;
-
-
-bool Simulator::initialized_ = false;
-
-
void Simulator::Initialize() {
- if (initialized_) return;
- simulator_key = v8::internal::Thread::CreateThreadLocalKey();
- initialized_ = true;
+ if (Isolate::Current()->simulator_initialized()) return;
+ Isolate::Current()->set_simulator_initialized(true);
::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
}
-v8::internal::HashMap* Simulator::i_cache_ = NULL;
-
-
-Simulator::Simulator() {
+Simulator::Simulator() : isolate_(Isolate::Current()) {
+ i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
}
Initialize();
// Setup simulator support first. Some of this information is needed to
: external_function_(external_function),
swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
type_(type),
- next_(list_) {
- Simulator::current()->
- FlushICache(reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- list_ = this;
+ next_(NULL) {
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->
+ FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_),
+ Instruction::kInstrSize);
+ isolate->set_simulator_redirection(this);
}
void* address_of_swi_instruction() {
static Redirection* Get(void* external_function,
ExternalReference::Type type) {
- Redirection* current;
- for (current = list_; current != NULL; current = current->next_) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
return new Redirection(external_function, type);
uint32_t swi_instruction_;
ExternalReference::Type type_;
Redirection* next_;
- static Redirection* list_;
};
-Redirection* Redirection::list_ = NULL;
-
-
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
// Get the active Simulator for the current thread.
-Simulator* Simulator::current() {
- Initialize();
- Simulator* sim = reinterpret_cast<Simulator*>(
- v8::internal::Thread::GetThreadLocal(simulator_key));
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ Isolate::CurrentPerIsolateThreadData();
+ if (isolate_data == NULL) {
+ Isolate::EnterDefaultIsolate();
+ isolate_data = Isolate::CurrentPerIsolateThreadData();
+ }
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread goes away.
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator();
- v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+ isolate_data->set_simulator(sim);
}
return sim;
}
int32_t arg1,
int32_t arg2,
int32_t arg3,
- int32_t arg4);
+ int32_t arg4,
+ int32_t arg5);
typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg2 = get_register(r2);
int32_t arg3 = get_register(r3);
int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
- int32_t arg4 = *stack_pointer;
+ int32_t arg4 = stack_pointer[0];
+ int32_t arg5 = stack_pointer[1];
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
- "Call to host function at %p args %08x, %08x, %08x, %08x, %0xc",
+ "Call to host function at %p"
+ "args %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
arg3,
- arg4);
+ arg4,
+ arg5);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
break;
}
case kBreakpoint: {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
dbg.Debug();
break;
}
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
if (isEnabledStop(code)) {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
dbg.Stop(instr);
} else {
set_pc(get_pc() + 2 * Instruction::kInstrSize);
break;
}
case BKPT: {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
PrintF("Simulator hit BKPT.\n");
dbg.Debug();
break;
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(instr);
+ CheckICache(isolate_->simulator_i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
dbg.Debug();
} else {
InstructionDecode(instr);
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
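
The widened CALL_GENERATED_REGEXP_CODE implies the matcher signature gained a trailing Isolate* (nine parameters, with the NULL dummy still fifth). A compilable sketch reconstructed from the p0..p7 arguments rather than quoted from the patch; the stand-in typedefs are illustrative:

    typedef unsigned char byte;   // stand-in for v8::internal::byte
    typedef void* Address;        // stand-in for v8::internal::Address
    class String;
    class Isolate;

    typedef int (*arm_regexp_matcher)(String* input,
                                      int start_offset,
                                      const byte* input_start,
                                      const byte* input_end,
                                      void* dummy_return_address,  // NULL
                                      int* output,
                                      Address stack_base,
                                      int direct_call,
                                      Isolate* isolate);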
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
+ reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
static inline void UnregisterCTryCatch() { }
};
-} } // namespace v8::internal
-
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
class Simulator {
public:
- friend class Debugger;
+ friend class ArmDebugger;
enum Register {
no_reg = -1,
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current();
+ static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the ARM
 // architecture specification and is off by 8 from the currently executing
uintptr_t PopAddress();
// ICache checking.
- static void FlushICache(void* start, size_t size);
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
void InstructionDecode(Instruction* instr);
// ICache.
- static void CheckICache(Instruction* instr);
- static void FlushOnePage(intptr_t start, int size);
- static CachePage* GetCachePage(void* page);
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
char* stack_;
bool pc_modified_;
int icount_;
- static bool initialized_;
// Icache simulation
- static v8::internal::HashMap* i_cache_;
+ v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
+ v8::internal::Isolate* isolate_;
+
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
static const uint32_t kNumOfWatchedStops = 256;
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current()->Call( \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == \
- NULL ? NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return Simulator::current()->StackLimit();
+ return Simulator::current(Isolate::Current())->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current();
+ Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
- Simulator::current()->PopAddress();
+ Simulator::current(Isolate::Current())->PopAddress();
}
};
#define __ ACCESS_MASM(masm)
-static void ProbeTable(MacroAssembler* masm,
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
Register offset,
Register scratch,
Register scratch2) {
- ExternalReference key_offset(SCTableReference::keyReference(table));
- ExternalReference value_offset(SCTableReference::valueReference(table));
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
Register scratch0,
Register scratch1) {
ASSERT(name->IsSymbol());
- __ IncrementCounter(&Counters::negative_lookups, 1, scratch0, scratch1);
- __ IncrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+ __ IncrementCounter(COUNTERS->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(COUNTERS->negative_lookups_miss(), 1, scratch0, scratch1);
Label done;
}
}
__ bind(&done);
- __ DecrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+ __ DecrementCounter(COUNTERS->negative_lookups_miss(), 1, scratch0, scratch1);
}
Register scratch,
Register extra,
Register extra2) {
+ Isolate* isolate = Isolate::Current();
Label miss;
// Make sure that code is valid. The shifting code relies on the
Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch, extra, extra2);
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name));
Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch, extra, extra2);
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Check we're still in the same context.
__ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, Top::global());
+ __ Move(ip, Isolate::Current()->global());
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
- JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ JSFunction* function = JSFunction::cast(
+ Isolate::Current()->global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
+ code = Isolate::Current()->builtins()->builtin(Builtins::LoadIC_Miss);
} else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ code = Isolate::Current()->builtins()->builtin(Builtins::KeyedLoadIC_Miss);
}
Handle<Code> ic(code);
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
+ ASSERT(!HEAP->InNewSpace(interceptor));
Register scratch = name;
__ mov(scratch, Operand(Handle<Object>(interceptor)));
__ push(scratch);
// Pass the additional arguments FastHandleApiCall expects.
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (Heap::InNewSpace(call_data)) {
+ if (HEAP->InNewSpace(call_data)) {
__ Move(r0, api_call_info_handle);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
name,
holder,
miss);
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
}
(depth2 != kInvalidProtoDepth);
}
- __ IncrementCounter(&Counters::call_const_interceptor, 1,
+ __ IncrementCounter(COUNTERS->call_const_interceptor(), 1,
scratch1, scratch2);
if (can_do_fast_api_call) {
- __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1,
+ __ IncrementCounter(COUNTERS->call_const_interceptor_fast_api(), 1,
scratch1, scratch2);
ReserveSpaceForFastApiCall(masm, scratch1);
}
FreeSpaceForFastApiCall(masm);
}
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
Register fval,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name);
+ MaybeObject* maybe_lookup_result = HEAP->LookupSymbol(name);
Object* lookup_result = NULL; // Initialization to please compiler.
if (!maybe_lookup_result->ToObject(&lookup_result)) {
set_failure(Failure::cast(maybe_lookup_result));
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (Heap::InNewSpace(prototype)) {
+ } else if (HEAP->InNewSpace(prototype)) {
// Get the map of the current object.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
__ b(ne, miss);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
+ LOG(Isolate::Current(), IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
Handle<AccessorInfo> callback_handle(callback);
- if (Heap::InNewSpace(callback_handle->data())) {
+ if (HEAP->InNewSpace(callback_handle->data())) {
__ Move(scratch3, callback_handle);
__ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
+ if (HEAP->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
- kind_);
+ MaybeObject* maybe_obj = Isolate::Current()->stub_cache()->ComputeCallMiss(
+ arguments().immediate(), kind_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
Label miss;
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
Label miss, return_undefined, call_builtin;
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
const int argc = arguments().immediate();
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(VFP3)) return Heap::undefined_value();
+  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    return HEAP->undefined_value();
+  }
+
CpuFeatures::Scope scope_vfp3(VFP3);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss, slow;
GenerateNameCheck(name, &miss);
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return Heap::undefined_value();
- if (cell != NULL) return Heap::undefined_value();
+ if (object->IsGlobalObject()) return HEAP->undefined_value();
+ if (cell != NULL) return HEAP->undefined_value();
int depth = optimization.GetPrototypeDepthOfExpectedType(
JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Heap::undefined_value();
+ if (depth == kInvalidProtoDepth) return HEAP->undefined_value();
Label miss, miss_before_stack_reserved;
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss_before_stack_reserved);
- __ IncrementCounter(&Counters::call_const, 1, r0, r3);
- __ IncrementCounter(&Counters::call_const_fast_api, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->call_const(), 1, r0, r3);
+ __ IncrementCounter(COUNTERS->call_const_fast_api(), 1, r0, r3);
ReserveSpaceForFastApiCall(masm(), r0);
SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(&Counters::call_const, 1, r0, r3);
+ __ IncrementCounter(COUNTERS->call_const(), 1, r0, r3);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->call_global_inline(), 1, r3, r4);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
// Handle call cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
+ __ IncrementCounter(COUNTERS->call_global_inline_miss(), 1, r1, r3);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
r1, r2, r3,
&miss);
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
+ __ IncrementCounter(COUNTERS->named_store_global_inline(), 1, r4, r3);
__ Ret();
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r4, r3);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ IncrementCounter(COUNTERS->named_store_global_inline_miss(), 1, r4, r3);
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, Heap::empty_string());
+ return GetCode(NONEXISTENT, HEAP->empty_string());
}
}
__ mov(r0, r4);
- __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
+ __ IncrementCounter(COUNTERS->named_load_global_stub(), 1, r1, r3);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
+ __ IncrementCounter(COUNTERS->named_load_global_stub_miss(), 1, r1, r3);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
// -- r1 : receiver
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_string_length(), 1, r2, r3);
// Check the key is the cached one.
__ cmp(r0, Operand(Handle<String>(name)));
GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
+ __ DecrementCounter(COUNTERS->keyed_load_string_length(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_function_prototype, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_function_prototype(), 1, r2, r3);
// Check the name hasn't changed.
__ cmp(r0, Operand(Handle<String>(name)));
GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_function_prototype, 1, r2, r3);
+ __ DecrementCounter(COUNTERS->keyed_load_function_prototype(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(CALLBACKS, name);
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4);
+ __ IncrementCounter(COUNTERS->keyed_store_field(), 1, r3, r4);
// Check that the name has not changed.
__ cmp(r1, Operand(Handle<String>(name)));
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ DecrementCounter(COUNTERS->keyed_store_field(), 1, r3, r4);
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(Factory::fixed_array_map())));
+ __ cmp(scratch, Operand(Handle<Map>(FACTORY->fixed_array_map())));
__ b(ne, &miss);
// Check that the key is within bounds.
__ Ret();
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Handle<Code> ic(
+ Isolate::Current()->builtins()->builtin(Builtins::KeyedStoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Remove caller arguments and receiver from the stack and return.
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
__ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
- __ IncrementCounter(&Counters::constructed_objects_stub, 1, r1, r2);
+ __ IncrementCounter(COUNTERS->constructed_objects(), 1, r1, r2);
+ __ IncrementCounter(COUNTERS->constructed_objects_stub(), 1, r1, r2);
__ Jump(lr);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
+ __ IncrementCounter(COUNTERS->keyed_load_external_array_slow(), 1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
}
-void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
PopToR0();
SpillAll();
__ mov(r2, Operand(name));
void VirtualFrame::CallStoreIC(Handle<String> name,
bool is_contextual,
StrictModeFlag strict_mode) {
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
PopToR0();
void VirtualFrame::CallKeyedLoadIC() {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
PopToR1R0();
SpillAll();
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
PopToR1R0();
ASSERT(dropped_args == 0);
break;
case Code::BUILTIN:
- ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
+ ASSERT(*code == Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructCall));
break;
default:
UNREACHABLE();
{ R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
-bool VirtualFrame::SpilledScope::is_spilled_ = false;
-
-
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
class SpilledScope BASE_EMBEDDED {
public:
explicit SpilledScope(VirtualFrame* frame)
- : old_is_spilled_(is_spilled_) {
+ : old_is_spilled_(
+ Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
if (frame != NULL) {
- if (!is_spilled_) {
+ if (!old_is_spilled_) {
frame->SpillAll();
} else {
frame->AssertIsSpilled();
}
}
- is_spilled_ = true;
+ Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
}
~SpilledScope() {
- is_spilled_ = old_is_spilled_;
+ Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
+ old_is_spilled_);
+ }
+ static bool is_spilled() {
+ return Isolate::Current()->is_virtual_frame_in_spilled_scope();
}
- static bool is_spilled() { return is_spilled_; }
private:
- static bool is_spilled_;
int old_is_spilled_;
SpilledScope() { }
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
- void CallRuntime(Runtime::Function* f, int arg_count);
+ void CallRuntime(const Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
#ifdef ENABLE_DEBUGGER_SUPPORT
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
- Counters::reloc_info_count.Increment();
+ COUNTERS->reloc_info_count()->Increment();
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
// Use unsigned delta-encoding for pc.
ASSERT(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = Heap::FindCodeObject(addr);
+ Object* found = HEAP->FindCodeObject(addr);
ASSERT(found->IsCode());
ASSERT(code->address() == HeapObject::cast(found)->address());
break;
ExternalReference::ExternalReference(Builtins::Name name)
- : address_(Builtins::builtin_address(name)) {}
+ : address_(Isolate::Current()->builtins()->builtin_address(name)) {}
ExternalReference::ExternalReference(Runtime::FunctionId id)
: address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
-ExternalReference::ExternalReference(Runtime::Function* f)
+ExternalReference::ExternalReference(const Runtime::Function* f)
: address_(Redirect(f->entry)) {}
+ExternalReference ExternalReference::isolate_address() {
+ return ExternalReference(Isolate::Current());
+}
+
+
ExternalReference::ExternalReference(const IC_Utility& ic_utility)
: address_(Redirect(ic_utility.address())) {}
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address)
- : address_(debug_address.address()) {}
+ : address_(debug_address.address(Isolate::Current())) {}
#endif
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
-ExternalReference::ExternalReference(Top::AddressId id)
- : address_(Top::get_address_from_id(id)) {}
+ExternalReference::ExternalReference(Isolate::AddressId id)
+ : address_(Isolate::Current()->get_address_from_id(id)) {}
ExternalReference::ExternalReference(const SCTableReference& table_ref)
ExternalReference ExternalReference::transcendental_cache_array_address() {
- return ExternalReference(TranscendentalCache::cache_array_address());
+ return ExternalReference(Isolate::Current()->transcendental_cache()->
+ cache_array_address());
}
ExternalReference ExternalReference::global_contexts_list() {
- return ExternalReference(Heap::global_contexts_list_address());
+ return ExternalReference(Isolate::Current()->
+ heap()->global_contexts_list_address());
}
ExternalReference ExternalReference::keyed_lookup_cache_keys() {
- return ExternalReference(KeyedLookupCache::keys_address());
+ return ExternalReference(Isolate::Current()->
+ keyed_lookup_cache()->keys_address());
}
ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
- return ExternalReference(KeyedLookupCache::field_offsets_address());
+ return ExternalReference(Isolate::Current()->
+ keyed_lookup_cache()->field_offsets_address());
}
ExternalReference ExternalReference::the_hole_value_location() {
- return ExternalReference(Factory::the_hole_value().location());
+ return ExternalReference(FACTORY->the_hole_value().location());
}
ExternalReference ExternalReference::arguments_marker_location() {
- return ExternalReference(Factory::arguments_marker().location());
+ return ExternalReference(FACTORY->arguments_marker().location());
}
ExternalReference ExternalReference::roots_address() {
- return ExternalReference(Heap::roots_address());
+ return ExternalReference(HEAP->roots_address());
}
ExternalReference ExternalReference::address_of_stack_limit() {
- return ExternalReference(StackGuard::address_of_jslimit());
+ return ExternalReference(
+ Isolate::Current()->stack_guard()->address_of_jslimit());
}
ExternalReference ExternalReference::address_of_real_stack_limit() {
- return ExternalReference(StackGuard::address_of_real_jslimit());
+ return ExternalReference(
+ Isolate::Current()->stack_guard()->address_of_real_jslimit());
}
ExternalReference ExternalReference::address_of_regexp_stack_limit() {
- return ExternalReference(RegExpStack::limit_address());
+ return ExternalReference(
+ Isolate::Current()->regexp_stack()->limit_address());
}
ExternalReference ExternalReference::new_space_start() {
- return ExternalReference(Heap::NewSpaceStart());
+ return ExternalReference(HEAP->NewSpaceStart());
}
ExternalReference ExternalReference::new_space_mask() {
- return ExternalReference(reinterpret_cast<Address>(Heap::NewSpaceMask()));
+ return ExternalReference(reinterpret_cast<Address>(HEAP->NewSpaceMask()));
}
ExternalReference ExternalReference::new_space_allocation_top_address() {
- return ExternalReference(Heap::NewSpaceAllocationTopAddress());
+ return ExternalReference(HEAP->NewSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
- return ExternalReference(Heap::always_allocate_scope_depth_address());
+ return ExternalReference(HEAP->always_allocate_scope_depth_address());
}
ExternalReference ExternalReference::new_space_allocation_limit_address() {
- return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
+ return ExternalReference(HEAP->NewSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::scheduled_exception_address() {
- return ExternalReference(Top::scheduled_exception_address());
+ return ExternalReference(Isolate::Current()->scheduled_exception_address());
}
}
ExternalReference ExternalReference::address_of_static_offsets_vector() {
- return ExternalReference(OffsetsVector::static_offsets_vector_address());
+ return ExternalReference(OffsetsVector::static_offsets_vector_address(
+ Isolate::Current()));
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_address() {
- return ExternalReference(RegExpStack::memory_address());
+ return ExternalReference(
+ Isolate::Current()->regexp_stack()->memory_address());
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
- return ExternalReference(RegExpStack::memory_size_address());
+ return ExternalReference(
+ Isolate::Current()->regexp_stack()->memory_size_address());
}
#endif // V8_INTERPRETED_REGEXP
}
-ExternalReference::ExternalReferenceRedirector*
- ExternalReference::redirector_ = NULL;
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break() {
return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
ExternalReference ExternalReference::debug_step_in_fp_address() {
- return ExternalReference(Debug::step_in_fp_addr());
+ return ExternalReference(Isolate::Current()->debug()->step_in_fp_addr());
}
#endif
#include "gdb-jit.h"
#include "runtime.h"
-#include "top.h"
#include "token.h"
namespace v8 {
INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address());
- template<typename StaticVisitor> inline void Visit();
+ template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(ObjectVisitor* v);
// Patch the code with some other code.
explicit ExternalReference(Runtime::FunctionId id);
- explicit ExternalReference(Runtime::Function* f);
+ explicit ExternalReference(const Runtime::Function* f);
explicit ExternalReference(const IC_Utility& ic_utility);
explicit ExternalReference(StatsCounter* counter);
- explicit ExternalReference(Top::AddressId id);
+ explicit ExternalReference(Isolate::AddressId id);
explicit ExternalReference(const SCTableReference& table_ref);
+ // Isolate::Current() as an external reference.
+ static ExternalReference isolate_address();
+
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
// This lets you register a function that rewrites all external references.
// Used by the ARM simulator to catch calls to external references.
static void set_redirector(ExternalReferenceRedirector* redirector) {
- ASSERT(redirector_ == NULL); // We can't stack them.
- redirector_ = redirector;
+ // We can't stack them.
+ ASSERT(Isolate::Current()->external_reference_redirector() == NULL);
+ Isolate::Current()->set_external_reference_redirector(
+ reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
private:
explicit ExternalReference(void* address)
: address_(address) {}
- static ExternalReferenceRedirector* redirector_;
-
static void* Redirect(void* address,
Type type = ExternalReference::BUILTIN_CALL) {
- if (redirector_ == NULL) return address;
- void* answer = (*redirector_)(address, type);
+ ExternalReferenceRedirector* redirector =
+ reinterpret_cast<ExternalReferenceRedirector*>(
+ Isolate::Current()->external_reference_redirector());
+ if (redirector == NULL) return address;
+ void* answer = (*redirector)(address, type);
return answer;
}
static void* Redirect(Address address_arg,
Type type = ExternalReference::BUILTIN_CALL) {
+ ExternalReferenceRedirector* redirector =
+ reinterpret_cast<ExternalReferenceRedirector*>(
+ Isolate::Current()->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector_ == NULL) ?
+ void* answer = (redirector == NULL) ?
address :
- (*redirector_)(address, type);
+ (*redirector)(address, type);
return answer;
}
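
For illustration only, a minimal sketch of how an embedder (such as the ARM simulator mentioned above) might use this per-isolate redirection; the exact ExternalReferenceRedirector typedef is assumed from the calls above:

    // Hypothetical redirector: receives every external reference and may
    // substitute a trampoline. This sketch returns the address unchanged.
    static void* TracingRedirector(void* original,
                                   ExternalReference::Type type) {
      // A real simulator would wrap 'original' for BUILTIN_CALL here.
      return original;
    }

    // Registered once per isolate, before any code is generated:
    ExternalReference::set_redirector(TracingRedirector);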
namespace v8 {
namespace internal {
-unsigned AstNode::current_id_ = 0;
-unsigned AstNode::count_ = 0;
-VariableProxySentinel VariableProxySentinel::this_proxy_(true);
-VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
-ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
-Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
-Call Call::sentinel_(NULL, NULL, 0);
+AstSentinels::AstSentinels()
+ : this_proxy_(true),
+ identifier_proxy_(false),
+ valid_left_hand_side_sentinel_(),
+ this_property_(&this_proxy_, NULL, 0),
+ call_sentinel_(NULL, NULL, 0) {
+}
// ----------------------------------------------------------------------------
key_ = key;
value_ = value;
Object* k = *key->handle();
- if (k->IsSymbol() && Heap::Proto_symbol()->Equals(String::cast(k))) {
+ if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
uint32_t hash;
HashMap* table;
void* key;
+ Factory* factory = Isolate::Current()->factory();
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
if (name->AsArrayIndex(&hash)) {
- Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
+ Handle<Object> key_handle = factory->NewNumberFromUint(hash);
key = key_handle.location();
table = &elements;
} else {
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
- Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+ Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
key = name.location();
hash = name->Hash();
table = &properties;
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!Heap::InNewSpace(*candidate) &&
+ if (!HEAP->InNewSpace(*candidate) &&
CanCallWithoutIC(candidate, arguments()->length())) {
target_ = candidate;
return true;
bool AstVisitor::CheckStackOverflow() {
if (stack_overflow_) return true;
- StackLimitCheck check;
+ StackLimitCheck check(Isolate::Current());
if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true);
}
static const int kNoNumber = -1;
- AstNode() : id_(GetNextId()) { count_++; }
+ AstNode() : id_(GetNextId()) {
+ Isolate* isolate = Isolate::Current();
+ isolate->set_ast_node_count(isolate->ast_node_count() + 1);
+ }
virtual ~AstNode() { }
// True if the node is simple enough for us to inline calls containing it.
virtual bool IsInlineable() const { return false; }
- static int Count() { return count_; }
- static void ResetIds() { current_id_ = 0; }
+ static int Count() { return Isolate::Current()->ast_node_count(); }
+ static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
unsigned id() const { return id_; }
protected:
- static unsigned GetNextId() { return current_id_++; }
+ static unsigned GetNextId() {
+ Isolate* isolate = Isolate::Current();
+ unsigned tmp = isolate->ast_node_id();
+ isolate->set_ast_node_id(tmp + 1);
+ return tmp;
+ }
static unsigned ReserveIdRange(int n) {
- unsigned tmp = current_id_;
- current_id_ += n;
+ Isolate* isolate = Isolate::Current();
+ unsigned tmp = isolate->ast_node_id();
+ isolate->set_ast_node_id(tmp + n);
return tmp;
}
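
A brief usage sketch of the reservation above; the caller shown is illustrative (in the tree, friends such as CaseClause reserve id ranges this way):

    // Reserve three consecutive AST ids on the current isolate.
    unsigned base = ReserveIdRange(3);
    unsigned first  = base;      // value of ast_node_id before the call
    unsigned second = base + 1;
    unsigned third  = base + 2;  // the isolate's counter now reads base + 3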
private:
- static unsigned current_id_;
- static unsigned count_;
unsigned id_;
friend class CaseClause; // Generates AST IDs.
public:
virtual bool IsValidLeftHandSide() { return true; }
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- static ValidLeftHandSideSentinel* instance() { return &instance_; }
-
- private:
- static ValidLeftHandSideSentinel instance_;
};
virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
// Identity testers.
- bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
- bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
+ bool IsNull() const {
+ ASSERT(!handle_.is_null());
+ return handle_->IsNull();
+ }
+ bool IsTrue() const {
+ ASSERT(!handle_.is_null());
+ return handle_->IsTrue();
+ }
bool IsFalse() const {
- return handle_.is_identical_to(Factory::false_value());
+ ASSERT(!handle_.is_null());
+ return handle_->IsFalse();
}
Handle<Object> handle() const { return handle_; }
class VariableProxySentinel: public VariableProxy {
public:
virtual bool IsValidLeftHandSide() { return !is_this(); }
- static VariableProxySentinel* this_proxy() { return &this_proxy_; }
- static VariableProxySentinel* identifier_proxy() {
- return &identifier_proxy_;
- }
private:
explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
- static VariableProxySentinel this_proxy_;
- static VariableProxySentinel identifier_proxy_;
+
+ friend class AstSentinels;
};
return monomorphic_receiver_type_;
}
- // Returns a property singleton property access on 'this'. Used
- // during preparsing.
- static Property* this_property() { return &this_property_; }
-
private:
Expression* obj_;
Expression* key_;
bool is_arguments_access_ : 1;
Handle<Map> monomorphic_receiver_type_;
ExternalArrayType array_type_;
-
- // Dummy property used during preparsing.
- static Property this_property_;
};
// Bailout support.
int ReturnId() const { return return_id_; }
- static Call* sentinel() { return &sentinel_; }
-
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
bool return_is_recorded_;
Handle<JSGlobalPropertyCell> cell_;
int return_id_;
-  static Call sentinel_;
+};
+
+class AstSentinels {
+ public:
+ ~AstSentinels() { }
+
+  // Returns a singleton property access on 'this'. Used
+  // during preparsing.
+ Property* this_property() { return &this_property_; }
+ VariableProxySentinel* this_proxy() { return &this_proxy_; }
+ VariableProxySentinel* identifier_proxy() { return &identifier_proxy_; }
+ ValidLeftHandSideSentinel* valid_left_hand_side_sentinel() {
+ return &valid_left_hand_side_sentinel_;
+ }
+ Call* call_sentinel() { return &call_sentinel_; }
+ EmptyStatement* empty_statement() { return &empty_statement_; }
+
+ private:
+ AstSentinels();
+ VariableProxySentinel this_proxy_;
+ VariableProxySentinel identifier_proxy_;
+ ValidLeftHandSideSentinel valid_left_hand_side_sentinel_;
+ Property this_property_;
+ Call call_sentinel_;
+ EmptyStatement empty_statement_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(AstSentinels);
};
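
With the sentinels now living in per-isolate state (note the friend class Isolate above), call sites would reach them through the isolate instead of the removed statics; the accessor name below is an assumption:

    // Before: Call::sentinel()
    // After, assuming Isolate exposes an ast_sentinels() accessor:
    Call* sentinel = Isolate::Current()->ast_sentinels()->call_sentinel();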
class CallRuntime: public Expression {
public:
CallRuntime(Handle<String> name,
- Runtime::Function* function,
+ const Runtime::Function* function,
ZoneList<Expression*>* arguments)
: name_(name), function_(function), arguments_(arguments) { }
virtual bool IsInlineable() const;
Handle<String> name() const { return name_; }
- Runtime::Function* function() const { return function_; }
+ const Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == NULL; }
private:
Handle<String> name_;
- Runtime::Function* function_;
+ const Runtime::Function* function_;
ZoneList<Expression*>* arguments_;
};
is_expression_(is_expression),
contains_loops_(contains_loops),
function_token_position_(RelocInfo::kNoPosition),
- inferred_name_(Heap::empty_string()),
+ inferred_name_(HEAP->empty_string()),
try_full_codegen_(false),
pretenure_(false) { }
bool stack_overflow_;
};
+
} } // namespace v8::internal
#endif // V8_AST_H_
namespace v8 {
namespace internal {
-// A SourceCodeCache uses a FixedArray to store pairs of
-// (AsciiString*, JSFunction*), mapping names of native code files
-// (runtime.js, etc.) to precompiled functions. Instead of mapping
-// names to functions it might make sense to let the JS2C tool
-// generate an index for each native JS file.
-class SourceCodeCache BASE_EMBEDDED {
- public:
- explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
-
- void Initialize(bool create_heap_objects) {
- cache_ = create_heap_objects ? Heap::empty_fixed_array() : NULL;
- }
-
- void Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**>(&cache_));
- }
-
-
- bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
- for (int i = 0; i < cache_->length(); i+=2) {
- SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
- if (str->IsEqualTo(name)) {
- *handle = Handle<SharedFunctionInfo>(
- SharedFunctionInfo::cast(cache_->get(i + 1)));
- return true;
- }
- }
- return false;
- }
-
- void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- HandleScope scope;
- int length = cache_->length();
- Handle<FixedArray> new_array =
- Factory::NewFixedArray(length + 2, TENURED);
- cache_->CopyTo(0, *new_array, 0, cache_->length());
- cache_ = *new_array;
- Handle<String> str = Factory::NewStringFromAscii(name, TENURED);
- cache_->set(length, *str);
- cache_->set(length + 1, *shared);
- Script::cast(shared->script())->set_type(Smi::FromInt(type_));
- }
-
- private:
- Script::Type type_;
- FixedArray* cache_;
- DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
-};
-
-static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
-// This is for delete, not delete[].
-static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
-// This is for delete[]
-static List<char*>* delete_these_arrays_on_tear_down = NULL;
-
-
-NativesExternalStringResource::NativesExternalStringResource(const char* source)
+NativesExternalStringResource::NativesExternalStringResource(
+ Bootstrapper* bootstrapper,
+ const char* source)
: data_(source), length_(StrLength(source)) {
- if (delete_these_non_arrays_on_tear_down == NULL) {
- delete_these_non_arrays_on_tear_down = new List<char*>(2);
+ if (bootstrapper->delete_these_non_arrays_on_tear_down_ == NULL) {
+ bootstrapper->delete_these_non_arrays_on_tear_down_ = new List<char*>(2);
}
// The resources are small objects and we only make a fixed number of
// them, but let's clean them up on exit for neatness.
- delete_these_non_arrays_on_tear_down->
+ bootstrapper->delete_these_non_arrays_on_tear_down_->
Add(reinterpret_cast<char*>(this));
}
+Bootstrapper::Bootstrapper()
+ : nesting_(0),
+ extensions_cache_(Script::TYPE_EXTENSION),
+ delete_these_non_arrays_on_tear_down_(NULL),
+ delete_these_arrays_on_tear_down_(NULL) {
+}
+
+
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
- if (Heap::natives_source_cache()->get(index)->IsUndefined()) {
+ if (HEAP->natives_source_cache()->get(index)->IsUndefined()) {
if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
// We can use external strings for the natives.
NativesExternalStringResource* resource =
- new NativesExternalStringResource(
+ new NativesExternalStringResource(this,
Natives::GetScriptSource(index).start());
Handle<String> source_code =
- Factory::NewExternalStringFromAscii(resource);
- Heap::natives_source_cache()->set(index, *source_code);
+ FACTORY->NewExternalStringFromAscii(resource);
+ HEAP->natives_source_cache()->set(index, *source_code);
} else {
// Old snapshot code can't cope with external strings at all.
Handle<String> source_code =
- Factory::NewStringFromAscii(Natives::GetScriptSource(index));
- Heap::natives_source_cache()->set(index, *source_code);
+ FACTORY->NewStringFromAscii(Natives::GetScriptSource(index));
+ HEAP->natives_source_cache()->set(index, *source_code);
}
}
- Handle<Object> cached_source(Heap::natives_source_cache()->get(index));
+ Handle<Object> cached_source(HEAP->natives_source_cache()->get(index));
return Handle<String>::cast(cached_source);
}
void Bootstrapper::Initialize(bool create_heap_objects) {
- extensions_cache.Initialize(create_heap_objects);
+ extensions_cache_.Initialize(create_heap_objects);
GCExtension::Register();
ExternalizeStringExtension::Register();
}
char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
char* memory = new char[bytes];
if (memory != NULL) {
- if (delete_these_arrays_on_tear_down == NULL) {
- delete_these_arrays_on_tear_down = new List<char*>(2);
+ if (delete_these_arrays_on_tear_down_ == NULL) {
+ delete_these_arrays_on_tear_down_ = new List<char*>(2);
}
- delete_these_arrays_on_tear_down->Add(memory);
+ delete_these_arrays_on_tear_down_->Add(memory);
}
return memory;
}
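
Usage sketch: buffers from this helper are owned by the Bootstrapper and released with delete[] in TearDown() below, so callers must not free them themselves (byte_count is a placeholder):

    Bootstrapper* bootstrapper = Isolate::Current()->bootstrapper();
    char* buffer = bootstrapper->AllocateAutoDeletedArray(byte_count);
    if (buffer != NULL) {
      // ... fill 'buffer'; ownership stays with the bootstrapper ...
    }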
void Bootstrapper::TearDown() {
- if (delete_these_non_arrays_on_tear_down != NULL) {
- int len = delete_these_non_arrays_on_tear_down->length();
+ if (delete_these_non_arrays_on_tear_down_ != NULL) {
+ int len = delete_these_non_arrays_on_tear_down_->length();
ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
- delete delete_these_non_arrays_on_tear_down->at(i);
- delete_these_non_arrays_on_tear_down->at(i) = NULL;
+ delete delete_these_non_arrays_on_tear_down_->at(i);
+ delete_these_non_arrays_on_tear_down_->at(i) = NULL;
}
- delete delete_these_non_arrays_on_tear_down;
- delete_these_non_arrays_on_tear_down = NULL;
+ delete delete_these_non_arrays_on_tear_down_;
+ delete_these_non_arrays_on_tear_down_ = NULL;
}
- if (delete_these_arrays_on_tear_down != NULL) {
- int len = delete_these_arrays_on_tear_down->length();
+ if (delete_these_arrays_on_tear_down_ != NULL) {
+ int len = delete_these_arrays_on_tear_down_->length();
ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
- delete[] delete_these_arrays_on_tear_down->at(i);
- delete_these_arrays_on_tear_down->at(i) = NULL;
+ delete[] delete_these_arrays_on_tear_down_->at(i);
+ delete_these_arrays_on_tear_down_->at(i) = NULL;
}
- delete delete_these_arrays_on_tear_down;
- delete_these_arrays_on_tear_down = NULL;
+ delete delete_these_arrays_on_tear_down_;
+ delete_these_arrays_on_tear_down_ = NULL;
}
- extensions_cache.Initialize(false); // Yes, symmetrical
+ extensions_cache_.Initialize(false); // Yes, symmetrical
}
void Bootstrapper::Iterate(ObjectVisitor* v) {
- extensions_cache.Iterate(v);
+ extensions_cache_.Iterate(v);
v->Synchronize("Extensions");
}
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Handle<Map> old_to_map = Handle<Map>(object->map());
- Handle<Map> new_to_map = Factory::CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
new_to_map->set_prototype(*proto);
object->set_map(*new_to_map);
}
void Bootstrapper::DetachGlobal(Handle<Context> env) {
- JSGlobalProxy::cast(env->global_proxy())->set_context(*Factory::null_value());
+ JSGlobalProxy::cast(env->global_proxy())->set_context(*FACTORY->null_value());
SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
- Factory::null_value());
+ FACTORY->null_value());
env->set_global_proxy(env->global());
env->global()->set_global_receiver(env->global());
}
Handle<JSObject> prototype,
Builtins::Name call,
bool is_ecma_native) {
- Handle<String> symbol = Factory::LookupAsciiSymbol(name);
- Handle<Code> call_code = Handle<Code>(Builtins::builtin(call));
+ Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
+ Handle<Code> call_code = Handle<Code>(
+ Isolate::Current()->builtins()->builtin(call));
Handle<JSFunction> function = prototype.is_null() ?
- Factory::NewFunctionWithoutPrototype(symbol, call_code) :
- Factory::NewFunctionWithPrototype(symbol,
+ FACTORY->NewFunctionWithoutPrototype(symbol, call_code) :
+ FACTORY->NewFunctionWithPrototype(symbol,
type,
instance_size,
prototype,
Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
PrototypePropertyMode prototypeMode) {
Handle<DescriptorArray> descriptors =
- Factory::NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+ FACTORY->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
{ // Add length.
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionLength);
- CallbacksDescriptor d(*Factory::length_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionLength);
+ CallbacksDescriptor d(*FACTORY->length_symbol(), *proxy, attributes);
descriptors->Set(0, &d);
}
{ // Add name.
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionName);
- CallbacksDescriptor d(*Factory::name_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionName);
+ CallbacksDescriptor d(*FACTORY->name_symbol(), *proxy, attributes);
descriptors->Set(1, &d);
}
{ // Add arguments.
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionArguments);
- CallbacksDescriptor d(*Factory::arguments_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionArguments);
+ CallbacksDescriptor d(*FACTORY->arguments_symbol(), *proxy, attributes);
descriptors->Set(2, &d);
}
{ // Add caller.
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionCaller);
- CallbacksDescriptor d(*Factory::caller_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionCaller);
+ CallbacksDescriptor d(*FACTORY->caller_symbol(), *proxy, attributes);
descriptors->Set(3, &d);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
}
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionPrototype);
- CallbacksDescriptor d(*Factory::prototype_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionPrototype);
+ CallbacksDescriptor d(*FACTORY->prototype_symbol(), *proxy, attributes);
descriptors->Set(4, &d);
}
descriptors->Sort();
Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
- Handle<Map> map = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
Handle<DescriptorArray> descriptors =
ComputeFunctionInstanceDescriptor(prototype_mode);
map->set_instance_descriptors(*descriptors);
function_instance_map_writable_prototype_ =
CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+ Handle<String> object_name = Handle<String>(HEAP->Object_symbol());
{ // --- O b j e c t ---
Handle<JSFunction> object_fun =
- Factory::NewFunction(object_name, Factory::null_value());
+ FACTORY->NewFunction(object_name, FACTORY->null_value());
Handle<Map> object_function_map =
- Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
object_fun->set_initial_map(*object_function_map);
object_function_map->set_constructor(*object_fun);
global_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
- Handle<JSObject> prototype = Factory::NewJSObject(Top::object_function(),
- TENURED);
+ Handle<JSObject> prototype = FACTORY->NewJSObject(
+ Isolate::Current()->object_function(),
+ TENURED);
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
object_function_map->
- set_instance_descriptors(Heap::empty_descriptor_array());
+ set_instance_descriptors(HEAP->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
// 262 15.3.4.
- Handle<String> symbol = Factory::LookupAsciiSymbol("Empty");
+ Handle<String> symbol = FACTORY->LookupAsciiSymbol("Empty");
Handle<JSFunction> empty_function =
- Factory::NewFunctionWithoutPrototype(symbol, kNonStrictMode);
+ FACTORY->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
// --- E m p t y ---
Handle<Code> code =
- Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::EmptyFunction));
empty_function->set_code(*code);
empty_function->shared()->set_code(*code);
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
- Handle<Script> script = Factory::NewScript(source);
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("() {}"));
+ Handle<Script> script = FACTORY->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
empty_function->shared()->set_start_position(0);
// Allocate the function map first and then patch the prototype later
Handle<Map> function_without_prototype_map(
global_context()->function_without_prototype_map());
- Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(
+ Handle<Map> empty_fm = FACTORY->CopyMapDropDescriptors(
function_without_prototype_map);
empty_fm->set_instance_descriptors(
function_without_prototype_map->instance_descriptors());
Handle<FixedArray> arguments,
Handle<FixedArray> caller) {
Handle<DescriptorArray> descriptors =
- Factory::NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+ FACTORY->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
PropertyAttributes attributes = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
{ // length
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionLength);
- CallbacksDescriptor d(*Factory::length_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionLength);
+ CallbacksDescriptor d(*FACTORY->length_symbol(), *proxy, attributes);
descriptors->Set(0, &d);
}
{ // name
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionName);
- CallbacksDescriptor d(*Factory::name_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionName);
+ CallbacksDescriptor d(*FACTORY->name_symbol(), *proxy, attributes);
descriptors->Set(1, &d);
}
{ // arguments
- CallbacksDescriptor d(*Factory::arguments_symbol(), *arguments, attributes);
+ CallbacksDescriptor d(*FACTORY->arguments_symbol(), *arguments, attributes);
descriptors->Set(2, &d);
}
{ // caller
- CallbacksDescriptor d(*Factory::caller_symbol(), *caller, attributes);
+ CallbacksDescriptor d(*FACTORY->caller_symbol(), *caller, attributes);
descriptors->Set(3, &d);
}
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
}
- Handle<Proxy> proxy = Factory::NewProxy(&Accessors::FunctionPrototype);
- CallbacksDescriptor d(*Factory::prototype_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionPrototype);
+ CallbacksDescriptor d(*FACTORY->prototype_symbol(), *proxy, attributes);
descriptors->Set(4, &d);
}
// ECMAScript 5th Edition, 13.2.3
Handle<JSFunction> Genesis::CreateThrowTypeErrorFunction(
Builtins::Name builtin) {
- Handle<String> name = Factory::LookupAsciiSymbol("ThrowTypeError");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("ThrowTypeError");
Handle<JSFunction> throw_type_error =
- Factory::NewFunctionWithoutPrototype(name, kStrictMode);
- Handle<Code> code = Handle<Code>(Builtins::builtin(builtin));
+ FACTORY->NewFunctionWithoutPrototype(name, kStrictMode);
+ Handle<Code> code = Handle<Code>(
+ Isolate::Current()->builtins()->builtin(builtin));
throw_type_error->set_map(global_context()->strict_mode_function_map());
throw_type_error->set_code(*code);
Handle<JSFunction> empty_function,
Handle<FixedArray> arguments_callbacks,
Handle<FixedArray> caller_callbacks) {
- Handle<Map> map = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
Handle<DescriptorArray> descriptors =
ComputeStrictFunctionInstanceDescriptor(prototype_mode,
arguments_callbacks,
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Create the callbacks arrays for ThrowTypeError functions.
  // The get/set callbacks are filled in after the maps are created below.
- Handle<FixedArray> arguments = Factory::NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = Factory::NewFixedArray(2, TENURED);
+ Handle<FixedArray> arguments = FACTORY->NewFixedArray(2, TENURED);
+ Handle<FixedArray> caller = FACTORY->NewFixedArray(2, TENURED);
// Allocate map for the strict mode function instances.
global_context()->set_strict_mode_function_instance_map(
static void AddToWeakGlobalContextList(Context* context) {
ASSERT(context->IsGlobalContext());
+ Heap* heap = Isolate::Current()->heap();
#ifdef DEBUG
{ // NOLINT
ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
// Check that context is not in the list yet.
- for (Object* current = Heap::global_contexts_list();
+ for (Object* current = heap->global_contexts_list();
!current->IsUndefined();
current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
ASSERT(current != context);
}
}
#endif
- context->set(Context::NEXT_CONTEXT_LINK, Heap::global_contexts_list());
- Heap::set_global_contexts_list(context);
+ context->set(Context::NEXT_CONTEXT_LINK, heap->global_contexts_list());
+ heap->set_global_contexts_list(context);
}
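
The list is threaded through each context's NEXT_CONTEXT_LINK slot; walking it follows the same pattern as the DEBUG check above:

    // Visit every global context registered on this heap.
    for (Object* current = heap->global_contexts_list();
         !current->IsUndefined();
         current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
      Context* context = Context::cast(current);
      // ... inspect 'context' ...
    }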
void Genesis::CreateRoots() {
+ Isolate* isolate = Isolate::Current();
// Allocate the global context FixedArray first and then patch the
// closure and extension object later (we need the empty function
// and the global object, but in order to create those, we need the
// global context).
- global_context_ =
- Handle<Context>::cast(
- GlobalHandles::Create(*Factory::NewGlobalContext()));
+ global_context_ = Handle<Context>::cast(isolate->global_handles()->Create(
+ *isolate->factory()->NewGlobalContext()));
AddToWeakGlobalContextList(*global_context_);
- Top::set_context(*global_context());
+ isolate->set_context(*global_context());
// Allocate the message listeners object.
{
}
if (js_global_template.is_null()) {
- Handle<String> name = Handle<String>(Heap::empty_symbol());
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<String> name = Handle<String>(HEAP->empty_symbol());
+ Handle<Code> code = Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::Illegal));
js_global_function =
- Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+ FACTORY->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
JSGlobalObject::kSize, code, true);
// Change the constructor property of the prototype of the
// hidden global function to refer to the Object function.
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
SetLocalPropertyNoThrow(
- prototype, Factory::constructor_symbol(), Top::object_function(), NONE);
+ prototype,
+ FACTORY->constructor_symbol(),
+ Isolate::Current()->object_function(),
+ NONE);
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
js_global_function =
- Factory::CreateApiFunction(js_global_constructor,
- Factory::InnerGlobalObject);
+ FACTORY->CreateApiFunction(js_global_constructor,
+ FACTORY->InnerGlobalObject);
}
js_global_function->initial_map()->set_is_hidden_prototype();
Handle<GlobalObject> inner_global =
- Factory::NewGlobalObject(js_global_function);
+ FACTORY->NewGlobalObject(js_global_function);
if (inner_global_out != NULL) {
*inner_global_out = inner_global;
}
// Step 2: create or re-initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_template.IsEmpty()) {
- Handle<String> name = Handle<String>(Heap::empty_symbol());
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<String> name = Handle<String>(HEAP->empty_symbol());
+ Handle<Code> code = Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::Illegal));
global_proxy_function =
- Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+ FACTORY->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
JSGlobalProxy::kSize, code, true);
} else {
Handle<ObjectTemplateInfo> data =
Handle<FunctionTemplateInfo> global_constructor(
FunctionTemplateInfo::cast(data->constructor()));
global_proxy_function =
- Factory::CreateApiFunction(global_constructor,
- Factory::OuterGlobalObject);
+ FACTORY->CreateApiFunction(global_constructor,
+ FACTORY->OuterGlobalObject);
}
- Handle<String> global_name = Factory::LookupAsciiSymbol("global");
+ Handle<String> global_name = FACTORY->LookupAsciiSymbol("global");
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
Handle<JSGlobalProxy>::cast(global_object));
} else {
return Handle<JSGlobalProxy>::cast(
- Factory::NewJSObject(global_proxy_function, TENURED));
+ FACTORY->NewJSObject(global_proxy_function, TENURED));
}
}
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
ForceSetProperty(builtins_global,
- Factory::LookupAsciiSymbol("global"),
+ FACTORY->LookupAsciiSymbol("global"),
inner_global,
attributes);
  // Set up the reference from the global object to the builtins object.
// object reinitialization.
global_context()->set_security_token(*inner_global);
- Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+ Handle<String> object_name = Handle<String>(HEAP->Object_symbol());
SetLocalPropertyNoThrow(inner_global, object_name,
- Top::object_function(), DONT_ENUM);
+ Isolate::Current()->object_function(), DONT_ENUM);
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
- Top::initial_object_prototype(), Builtins::ArrayCode,
- true);
+ Isolate::Current()->initial_object_prototype(),
+ Builtins::ArrayCode, true);
array_function->shared()->set_construct_stub(
- Builtins::builtin(Builtins::ArrayConstructCode));
+ Isolate::Current()->builtins()->builtin(Builtins::ArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
Handle<DescriptorArray> array_descriptors =
- Factory::CopyAppendProxyDescriptor(
- Factory::empty_descriptor_array(),
- Factory::length_symbol(),
- Factory::NewProxy(&Accessors::ArrayLength),
+ FACTORY->CopyAppendProxyDescriptor(
+ FACTORY->empty_descriptor_array(),
+ FACTORY->length_symbol(),
+ FACTORY->NewProxy(&Accessors::ArrayLength),
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
// Cache the fast JavaScript array map
{ // --- N u m b e r ---
Handle<JSFunction> number_fun =
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ Isolate::Current()->initial_object_prototype(),
+ Builtins::Illegal, true);
global_context()->set_number_function(*number_fun);
}
{ // --- B o o l e a n ---
Handle<JSFunction> boolean_fun =
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ Isolate::Current()->initial_object_prototype(),
+ Builtins::Illegal, true);
global_context()->set_boolean_function(*boolean_fun);
}
{ // --- S t r i n g ---
Handle<JSFunction> string_fun =
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ Isolate::Current()->initial_object_prototype(),
+ Builtins::Illegal, true);
string_fun->shared()->set_construct_stub(
- Builtins::builtin(Builtins::StringConstructCode));
+ Isolate::Current()->builtins()->builtin(Builtins::StringConstructCode));
global_context()->set_string_function(*string_fun);
// Add 'length' property to strings.
Handle<DescriptorArray> string_descriptors =
- Factory::CopyAppendProxyDescriptor(
- Factory::empty_descriptor_array(),
- Factory::length_symbol(),
- Factory::NewProxy(&Accessors::StringLength),
+ FACTORY->CopyAppendProxyDescriptor(
+ FACTORY->empty_descriptor_array(),
+ FACTORY->length_symbol(),
+ FACTORY->NewProxy(&Accessors::StringLength),
static_cast<PropertyAttributes>(DONT_ENUM |
DONT_DELETE |
READ_ONLY));
// Builtin functions for Date.prototype.
Handle<JSFunction> date_fun =
InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ Isolate::Current()->initial_object_prototype(),
+ Builtins::Illegal, true);
global_context()->set_date_function(*date_fun);
}
// Builtin functions for RegExp.prototype.
Handle<JSFunction> regexp_fun =
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ Isolate::Current()->initial_object_prototype(),
+ Builtins::Illegal, true);
global_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
ASSERT_EQ(0, initial_map->inobject_properties());
- Handle<DescriptorArray> descriptors = Factory::NewDescriptorArray(5);
+ Handle<DescriptorArray> descriptors = FACTORY->NewDescriptorArray(5);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
int enum_index = 0;
{
// ECMA-262, section 15.10.7.1.
- FieldDescriptor field(Heap::source_symbol(),
+ FieldDescriptor field(HEAP->source_symbol(),
JSRegExp::kSourceFieldIndex,
final,
enum_index++);
}
{
// ECMA-262, section 15.10.7.2.
- FieldDescriptor field(Heap::global_symbol(),
+ FieldDescriptor field(HEAP->global_symbol(),
JSRegExp::kGlobalFieldIndex,
final,
enum_index++);
}
{
// ECMA-262, section 15.10.7.3.
- FieldDescriptor field(Heap::ignore_case_symbol(),
+ FieldDescriptor field(HEAP->ignore_case_symbol(),
JSRegExp::kIgnoreCaseFieldIndex,
final,
enum_index++);
}
{
// ECMA-262, section 15.10.7.4.
- FieldDescriptor field(Heap::multiline_symbol(),
+ FieldDescriptor field(HEAP->multiline_symbol(),
JSRegExp::kMultilineFieldIndex,
final,
enum_index++);
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(Heap::last_index_symbol(),
+ FieldDescriptor field(HEAP->last_index_symbol(),
JSRegExp::kLastIndexFieldIndex,
writable,
enum_index++);
}
{ // -- J S O N
- Handle<String> name = Factory::NewStringFromAscii(CStrVector("JSON"));
- Handle<JSFunction> cons = Factory::NewFunction(
+ Handle<String> name = FACTORY->NewStringFromAscii(CStrVector("JSON"));
+ Handle<JSFunction> cons = FACTORY->NewFunction(
name,
- Factory::the_hole_value());
+ FACTORY->the_hole_value());
cons->SetInstancePrototype(global_context()->initial_object_prototype());
cons->SetInstanceClassName(*name);
- Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
+ Handle<JSObject> json_object = FACTORY->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
- Handle<String> symbol = Factory::LookupAsciiSymbol("Arguments");
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<String> symbol = FACTORY->LookupAsciiSymbol("Arguments");
+ Handle<Code> code = Handle<Code>(
+ Isolate::Current()->builtins()->builtin(Builtins::Illegal));
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(global_context()->object_function()->prototype()));
Handle<JSFunction> function =
- Factory::NewFunctionWithPrototype(symbol,
+ FACTORY->NewFunctionWithPrototype(symbol,
JS_OBJECT_TYPE,
JSObject::kHeaderSize,
prototype,
ASSERT(!function->has_initial_map());
function->shared()->set_instance_class_name(*symbol);
function->shared()->set_expected_nof_properties(2);
- Handle<JSObject> result = Factory::NewJSObject(function);
+ Handle<JSObject> result = FACTORY->NewJSObject(function);
global_context()->set_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
- SetLocalPropertyNoThrow(result, Factory::length_symbol(),
- Factory::undefined_value(),
+ SetLocalPropertyNoThrow(result, FACTORY->length_symbol(),
+ FACTORY->undefined_value(),
DONT_ENUM);
- SetLocalPropertyNoThrow(result, Factory::callee_symbol(),
- Factory::undefined_value(),
+ SetLocalPropertyNoThrow(result, FACTORY->callee_symbol(),
+ FACTORY->undefined_value(),
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
- result->LocalLookup(Heap::callee_symbol(), &lookup);
+ result->LocalLookup(HEAP->callee_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
- result->LocalLookup(Heap::length_symbol(), &lookup);
+ result->LocalLookup(HEAP->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
// Create the ThrowTypeError functions.
- Handle<FixedArray> callee = Factory::NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = Factory::NewFixedArray(2, TENURED);
+ Handle<FixedArray> callee = FACTORY->NewFixedArray(2, TENURED);
+ Handle<FixedArray> caller = FACTORY->NewFixedArray(2, TENURED);
Handle<JSFunction> callee_throw =
CreateThrowTypeErrorFunction(Builtins::StrictArgumentsCallee);
caller->set(1, *caller_throw);
// Create the descriptor array for the arguments object.
- Handle<DescriptorArray> descriptors = Factory::NewDescriptorArray(3);
+ Handle<DescriptorArray> descriptors = FACTORY->NewDescriptorArray(3);
{ // length
- FieldDescriptor d(*Factory::length_symbol(), 0, DONT_ENUM);
+ FieldDescriptor d(*FACTORY->length_symbol(), 0, DONT_ENUM);
descriptors->Set(0, &d);
}
{ // callee
- CallbacksDescriptor d(*Factory::callee_symbol(), *callee, attributes);
+ CallbacksDescriptor d(*FACTORY->callee_symbol(), *callee, attributes);
descriptors->Set(1, &d);
}
{ // caller
- CallbacksDescriptor d(*Factory::caller_symbol(), *caller, attributes);
+ CallbacksDescriptor d(*FACTORY->caller_symbol(), *caller, attributes);
descriptors->Set(2, &d);
}
descriptors->Sort();
// Create the map. Allocate one in-object field for length.
- Handle<Map> map = Factory::NewMap(JS_OBJECT_TYPE,
+ Handle<Map> map = FACTORY->NewMap(JS_OBJECT_TYPE,
Heap::kArgumentsObjectSizeStrict);
map->set_instance_descriptors(*descriptors);
map->set_function_with_prototype(true);
global_context()->arguments_boilerplate()->map()->constructor());
// Allocate the arguments boilerplate object.
- Handle<JSObject> result = Factory::NewJSObjectFromMap(map);
+ Handle<JSObject> result = FACTORY->NewJSObjectFromMap(map);
global_context()->set_strict_mode_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
- SetLocalPropertyNoThrow(result, Factory::length_symbol(),
- Factory::undefined_value(),
+ SetLocalPropertyNoThrow(result, FACTORY->length_symbol(),
+ FACTORY->undefined_value(),
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
- result->LocalLookup(Heap::length_symbol(), &lookup);
+ result->LocalLookup(HEAP->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
{ // --- context extension
// Create a function for the context extension objects.
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<Code> code = Handle<Code>(
+ Isolate::Current()->builtins()->builtin(Builtins::Illegal));
Handle<JSFunction> context_extension_fun =
- Factory::NewFunction(Factory::empty_symbol(),
+ FACTORY->NewFunction(FACTORY->empty_symbol(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JSObject::kHeaderSize,
code,
true);
- Handle<String> name = Factory::LookupAsciiSymbol("context_extension");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("context_extension");
context_extension_fun->shared()->set_instance_class_name(*name);
global_context()->set_context_extension_function(*context_extension_fun);
}
{
    // Set up the call-as-function delegate.
Handle<Code> code =
- Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsFunction));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::HandleApiCallAsFunction));
Handle<JSFunction> delegate =
- Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+ FACTORY->NewFunction(FACTORY->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
global_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
{
    // Set up the call-as-constructor delegate.
Handle<Code> code =
- Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsConstructor));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::HandleApiCallAsConstructor));
Handle<JSFunction> delegate =
- Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+ FACTORY->NewFunction(FACTORY->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
global_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
// Initialize the out of memory slot.
- global_context()->set_out_of_memory(Heap::false_value());
+ global_context()->set_out_of_memory(HEAP->false_value());
// Initialize the data slot.
- global_context()->set_data(Heap::undefined_value());
+ global_context()->set_data(HEAP->undefined_value());
}
bool Genesis::CompileBuiltin(int index) {
Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+ Handle<String> source_code =
+ Isolate::Current()->bootstrapper()->NativesSourceLookup(index);
return CompileNative(name, source_code);
}
bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
HandleScope scope;
+ Isolate* isolate = Isolate::Current();
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger::set_compiling_natives(true);
+ isolate->debugger()->set_compiling_natives(true);
#endif
bool result = CompileScriptCached(name,
source,
NULL,
NULL,
- Handle<Context>(Top::context()),
+ Handle<Context>(isolate->context()),
true);
- ASSERT(Top::has_pending_exception() != result);
- if (!result) Top::clear_pending_exception();
+ ASSERT(isolate->has_pending_exception() != result);
+ if (!result) isolate->clear_pending_exception();
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger::set_compiling_natives(false);
+ isolate->debugger()->set_compiling_natives(false);
#endif
return result;
}
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsAsciiRepresentation());
- Handle<String> script_name = Factory::NewStringFromUtf8(name);
+ Handle<String> script_name = FACTORY->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
script_name,
? Handle<Context>(top_context->runtime_context())
: top_context);
Handle<JSFunction> fun =
- Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
+ FACTORY->NewFunctionFromSharedFunctionInfo(function_info, context);
// Call function using either the runtime object or the global
// object as the receiver. Provide no parameters.
#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = Factory::LookupAsciiSymbol(name); \
+ Handle<String> var##_name = FACTORY->LookupAsciiSymbol(name); \
global_context()->set_##var(Type::cast( \
global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name)));
+
void Genesis::InstallNativeFunctions() {
HandleScope scope;
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
bool Genesis::InstallNatives() {
HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
// Create a function for the builtins object. Allocate space for the
// JavaScript builtins, a reference to the builtins object
// (itself) and a reference to the global_context directly in the object.
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<Code> code = Handle<Code>(
+ isolate->builtins()->builtin(Builtins::Illegal));
Handle<JSFunction> builtins_fun =
- Factory::NewFunction(Factory::empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
JSBuiltinsObject::kSize, code, true);
- Handle<String> name = Factory::LookupAsciiSymbol("builtins");
+ Handle<String> name = factory->LookupAsciiSymbol("builtins");
builtins_fun->shared()->set_instance_class_name(*name);
// Allocate the builtins object.
Handle<JSBuiltinsObject> builtins =
- Handle<JSBuiltinsObject>::cast(Factory::NewGlobalObject(builtins_fun));
+ Handle<JSBuiltinsObject>::cast(factory->NewGlobalObject(builtins_fun));
builtins->set_builtins(*builtins);
builtins->set_global_context(*global_context());
builtins->set_global_receiver(*builtins);
// global object.
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
+ Handle<String> global_symbol = factory->LookupAsciiSymbol("global");
Handle<Object> global_obj(global_context()->global());
SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
// Create a bridge function that has context in the global context.
Handle<JSFunction> bridge =
- Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value());
- ASSERT(bridge->context() == *Top::global_context());
+ factory->NewFunction(factory->empty_symbol(), factory->undefined_value());
+ ASSERT(bridge->context() == *isolate->global_context());
// Allocate the builtins context.
Handle<Context> context =
- Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+ factory->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
context->set_global(*builtins); // override builtins global object
global_context()->set_runtime_context(*context);
// Builtin functions for Script.
Handle<JSFunction> script_fun =
InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- false);
+ isolate->initial_object_prototype(),
+ Builtins::Illegal, false);
Handle<JSObject> prototype =
- Factory::NewJSObject(Top::object_function(), TENURED);
+ factory->NewJSObject(isolate->object_function(), TENURED);
SetPrototype(script_fun, prototype);
global_context()->set_script_function(*script_fun);
// Add 'source' and 'data' property to scripts.
PropertyAttributes common_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<Proxy> proxy_source = Factory::NewProxy(&Accessors::ScriptSource);
+ Handle<Proxy> proxy_source = factory->NewProxy(&Accessors::ScriptSource);
Handle<DescriptorArray> script_descriptors =
- Factory::CopyAppendProxyDescriptor(
- Factory::empty_descriptor_array(),
- Factory::LookupAsciiSymbol("source"),
+ factory->CopyAppendProxyDescriptor(
+ factory->empty_descriptor_array(),
+ factory->LookupAsciiSymbol("source"),
proxy_source,
common_attributes);
- Handle<Proxy> proxy_name = Factory::NewProxy(&Accessors::ScriptName);
+ Handle<Proxy> proxy_name = factory->NewProxy(&Accessors::ScriptName);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("name"),
+ factory->LookupAsciiSymbol("name"),
proxy_name,
common_attributes);
- Handle<Proxy> proxy_id = Factory::NewProxy(&Accessors::ScriptId);
+ Handle<Proxy> proxy_id = factory->NewProxy(&Accessors::ScriptId);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("id"),
+ factory->LookupAsciiSymbol("id"),
proxy_id,
common_attributes);
Handle<Proxy> proxy_line_offset =
- Factory::NewProxy(&Accessors::ScriptLineOffset);
+ factory->NewProxy(&Accessors::ScriptLineOffset);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("line_offset"),
+ factory->LookupAsciiSymbol("line_offset"),
proxy_line_offset,
common_attributes);
Handle<Proxy> proxy_column_offset =
- Factory::NewProxy(&Accessors::ScriptColumnOffset);
+ factory->NewProxy(&Accessors::ScriptColumnOffset);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("column_offset"),
+ factory->LookupAsciiSymbol("column_offset"),
proxy_column_offset,
common_attributes);
- Handle<Proxy> proxy_data = Factory::NewProxy(&Accessors::ScriptData);
+ Handle<Proxy> proxy_data = factory->NewProxy(&Accessors::ScriptData);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("data"),
+ factory->LookupAsciiSymbol("data"),
proxy_data,
common_attributes);
- Handle<Proxy> proxy_type = Factory::NewProxy(&Accessors::ScriptType);
+ Handle<Proxy> proxy_type = factory->NewProxy(&Accessors::ScriptType);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("type"),
+ factory->LookupAsciiSymbol("type"),
proxy_type,
common_attributes);
Handle<Proxy> proxy_compilation_type =
- Factory::NewProxy(&Accessors::ScriptCompilationType);
+ factory->NewProxy(&Accessors::ScriptCompilationType);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("compilation_type"),
+ factory->LookupAsciiSymbol("compilation_type"),
proxy_compilation_type,
common_attributes);
Handle<Proxy> proxy_line_ends =
- Factory::NewProxy(&Accessors::ScriptLineEnds);
+ factory->NewProxy(&Accessors::ScriptLineEnds);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("line_ends"),
+ factory->LookupAsciiSymbol("line_ends"),
proxy_line_ends,
common_attributes);
Handle<Proxy> proxy_context_data =
- Factory::NewProxy(&Accessors::ScriptContextData);
+ factory->NewProxy(&Accessors::ScriptContextData);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("context_data"),
+ factory->LookupAsciiSymbol("context_data"),
proxy_context_data,
common_attributes);
Handle<Proxy> proxy_eval_from_script =
- Factory::NewProxy(&Accessors::ScriptEvalFromScript);
+ factory->NewProxy(&Accessors::ScriptEvalFromScript);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_script"),
+ factory->LookupAsciiSymbol("eval_from_script"),
proxy_eval_from_script,
common_attributes);
Handle<Proxy> proxy_eval_from_script_position =
- Factory::NewProxy(&Accessors::ScriptEvalFromScriptPosition);
+ factory->NewProxy(&Accessors::ScriptEvalFromScriptPosition);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_script_position"),
+ factory->LookupAsciiSymbol("eval_from_script_position"),
proxy_eval_from_script_position,
common_attributes);
Handle<Proxy> proxy_eval_from_function_name =
- Factory::NewProxy(&Accessors::ScriptEvalFromFunctionName);
+ factory->NewProxy(&Accessors::ScriptEvalFromFunctionName);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory->CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_function_name"),
+ factory->LookupAsciiSymbol("eval_from_function_name"),
proxy_eval_from_function_name,
common_attributes);
script_map->set_instance_descriptors(*script_descriptors);
// Allocate the empty script.
- Handle<Script> script = Factory::NewScript(Factory::empty_string());
+ Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- Heap::public_set_empty_script(*script);
+ HEAP->public_set_empty_script(*script);
}
{
// Builtin function for OpaqueReference -- a JSValue-based object,
// objects, that JavaScript code may not access.
Handle<JSFunction> opaque_reference_fun =
InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
- JSValue::kSize, Top::initial_object_prototype(),
+ JSValue::kSize,
+ isolate->initial_object_prototype(),
Builtins::Illegal, false);
Handle<JSObject> prototype =
- Factory::NewJSObject(Top::object_function(), TENURED);
+ factory->NewJSObject(isolate->object_function(), TENURED);
SetPrototype(opaque_reference_fun, prototype);
global_context()->set_opaque_reference_function(*opaque_reference_fun);
}
"InternalArray",
JS_ARRAY_TYPE,
JSArray::kSize,
- Top::initial_object_prototype(),
+ isolate->initial_object_prototype(),
Builtins::ArrayCode,
true);
Handle<JSObject> prototype =
- Factory::NewJSObject(Top::object_function(), TENURED);
+ factory->NewJSObject(isolate->object_function(), TENURED);
SetPrototype(array_function, prototype);
array_function->shared()->set_construct_stub(
- Builtins::builtin(Builtins::ArrayConstructCode));
+ isolate->builtins()->builtin(Builtins::ArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// Make "length" magic on instances.
Handle<DescriptorArray> array_descriptors =
- Factory::CopyAppendProxyDescriptor(
- Factory::empty_descriptor_array(),
- Factory::length_symbol(),
- Factory::NewProxy(&Accessors::ArrayLength),
+ factory->CopyAppendProxyDescriptor(
+ factory->empty_descriptor_array(),
+ factory->length_symbol(),
+ factory->NewProxy(&Accessors::ArrayLength),
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
array_function->initial_map()->set_instance_descriptors(
InstallBuiltinFunctionIds();
// Install Function.prototype.call and apply.
- { Handle<String> key = Factory::function_class_symbol();
+ { Handle<String> key = factory->function_class_symbol();
Handle<JSFunction> function =
- Handle<JSFunction>::cast(GetProperty(Top::global(), key));
+ Handle<JSFunction>::cast(GetProperty(isolate->global(), key));
Handle<JSObject> proto =
Handle<JSObject>(JSObject::cast(function->instance_prototype()));
// Add initial map.
Handle<Map> initial_map =
- Factory::NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
+ factory->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
initial_map->set_constructor(*array_constructor);
// Set prototype on map.
ASSERT_EQ(1, array_descriptors->number_of_descriptors());
Handle<DescriptorArray> reresult_descriptors =
- Factory::NewDescriptorArray(3);
+ factory->NewDescriptorArray(3);
reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
int enum_index = 0;
{
- FieldDescriptor index_field(Heap::index_symbol(),
+ FieldDescriptor index_field(HEAP->index_symbol(),
JSRegExpResult::kIndexIndex,
NONE,
enum_index++);
}
{
- FieldDescriptor input_field(Heap::input_symbol(),
+ FieldDescriptor input_field(HEAP->input_symbol(),
JSRegExpResult::kInputIndex,
NONE,
enum_index++);
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
return Handle<JSObject>::cast(
- GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
+ GetProperty(global, FACTORY->LookupAsciiSymbol(holder_expr)));
}
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(global, Factory::LookupSymbol(property)));
+ GetProperty(global, FACTORY->LookupSymbol(property)));
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
- Handle<String> name = Factory::LookupAsciiSymbol(function_name);
+ Handle<String> name = FACTORY->LookupAsciiSymbol(function_name);
Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
Handle<JSFunction> function(JSFunction::cast(function_object));
function->shared()->set_function_data(Smi::FromInt(id));
int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
// Cannot use cast as object is not fully initialized yet.
JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
- *Factory::NewFixedArrayWithHoles(array_size, TENURED));
+ *FACTORY->NewFixedArrayWithHoles(array_size, TENURED));
cache->set(JSFunctionResultCache::kFactoryIndex, factory);
cache->MakeZeroSize();
return cache;
#undef F
;
- Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED);
+ Handle<FixedArray> caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED);
int index = 0;
void Genesis::InitializeNormalizedMapCaches() {
Handle<FixedArray> array(
- Factory::NewFixedArray(NormalizedMapCache::kEntries, TENURED));
+ FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
}
-int BootstrapperActive::nesting_ = 0;
-
-
bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions) {
+ Isolate* isolate = Isolate::Current();
BootstrapperActive active;
- SaveContext saved_context;
- Top::set_context(*global_context);
+ SaveContext saved_context(isolate);
+ isolate->set_context(*global_context);
if (!Genesis::InstallExtensions(global_context, extensions)) return false;
Genesis::InstallSpecialObjects(global_context);
return true;
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_string =
- Factory::LookupAsciiSymbol(FLAG_expose_natives_as);
+ FACTORY->LookupAsciiSymbol(FLAG_expose_natives_as);
SetLocalPropertyNoThrow(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
Handle<Object> Error = GetProperty(js_global, "Error");
if (Error->IsJSObject()) {
- Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("stackTraceLimit");
SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
+ Debug* debug = Isolate::Current()->debug();
// If loading fails we just bail out without installing the
// debugger, but without tanking the whole context either.
- if (!Debug::Load()) return;
+ if (!debug->Load()) return;
// Set the security token for the debugger context to the same as
// the shell global context to allow calling between these (otherwise
// exposing debug global object doesn't make much sense).
- Debug::debug_context()->set_security_token(
+ debug->debug_context()->set_security_token(
global_context->security_token());
Handle<String> debug_string =
- Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
- Handle<Object> global_proxy(Debug::debug_context()->global_proxy());
+ FACTORY->LookupAsciiSymbol(FLAG_expose_debug_as);
+ Handle<Object> global_proxy(debug->debug_context()->global_proxy());
SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
}
#endif
bool Genesis::InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions) {
+ // TODO(isolates): Extensions on multiple isolates may take a little more
+ // effort. (The external API reads 'ignore'-- does that mean
+ // we can break the interface?)
+
// Clear coloring of extension list
v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
while (current != NULL) {
if (!InstallExtension(extension->dependencies()[i])) return false;
}
Vector<const char> source = CStrVector(extension->source());
- Handle<String> source_code = Factory::NewStringFromAscii(source);
+ Handle<String> source_code = FACTORY->NewStringFromAscii(source);
bool result = CompileScriptCached(CStrVector(extension->name()),
source_code,
- &extensions_cache,
+ Isolate::Current()->bootstrapper()->
+ extensions_cache(),
extension,
- Handle<Context>(Top::context()),
+ Handle<Context>(
+ Isolate::Current()->context()),
false);
- ASSERT(Top::has_pending_exception() != result);
+ ASSERT(Isolate::Current()->has_pending_exception() != result);
if (!result) {
- Top::clear_pending_exception();
+ Isolate::Current()->clear_pending_exception();
}
current->set_state(v8::INSTALLED);
return result;
HandleScope scope;
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
+ Handle<String> name = FACTORY->LookupAsciiSymbol(Builtins::GetName(id));
Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(function_object));
Handle<JSObject> obj =
Execution::InstantiateObject(object_template, &pending_exception);
if (pending_exception) {
- ASSERT(Top::has_pending_exception());
- Top::clear_pending_exception();
+ ASSERT(Isolate::Current()->has_pending_exception());
+ Isolate::Current()->clear_pending_exception();
return false;
}
TransferObject(obj, object);
// Cloning the elements array is sufficient.
Handle<FixedArray> from_elements =
Handle<FixedArray>(FixedArray::cast(from->elements()));
- Handle<FixedArray> to_elements = Factory::CopyFixedArray(from_elements);
+ Handle<FixedArray> to_elements = FACTORY->CopyFixedArray(from_elements);
to->set_elements(*to_elements);
}
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = Factory::CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
Genesis::Genesis(Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions) {
+ Isolate* isolate = Isolate::Current();
result_ = Handle<Context>::null();
// If V8 isn't running and cannot be initialized, just return.
if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
// Before creating the roots we must save the context and restore it
// on all function exits.
HandleScope scope;
- SaveContext saved_context;
+ SaveContext saved_context(isolate);
Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
if (!new_context.is_null()) {
global_context_ =
- Handle<Context>::cast(GlobalHandles::Create(*new_context));
+ Handle<Context>::cast(isolate->global_handles()->Create(*new_context));
AddToWeakGlobalContextList(*global_context_);
- Top::set_context(*global_context_);
- i::Counters::contexts_created_by_snapshot.Increment();
+ isolate->set_context(*global_context_);
+ isolate->counters()->contexts_created_by_snapshot()->Increment();
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =
CreateNewGlobals(global_template,
MakeFunctionInstancePrototypeWritable();
if (!ConfigureGlobalObjects(global_template)) return;
- i::Counters::contexts_created_from_scratch.Increment();
+ isolate->counters()->contexts_created_from_scratch()->Increment();
}
result_ = global_context_;
// Reserve space for statics needing saving and restoring.
int Bootstrapper::ArchiveSpacePerThread() {
- return BootstrapperActive::ArchiveSpacePerThread();
+ return sizeof(NestingCounterType);
}
// Archive statics that are thread local.
char* Bootstrapper::ArchiveState(char* to) {
- return BootstrapperActive::ArchiveState(to);
+ *reinterpret_cast<NestingCounterType*>(to) = nesting_;
+ nesting_ = 0;
+ return to + sizeof(NestingCounterType);
}
// Restore statics that are thread local.
char* Bootstrapper::RestoreState(char* from) {
- return BootstrapperActive::RestoreState(from);
+ nesting_ = *reinterpret_cast<NestingCounterType*>(from);
+ return from + sizeof(NestingCounterType);
}
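Taken together, ArchiveSpacePerThread/ArchiveState/RestoreState let the thread manager spill the bootstrapper's per-thread nesting counter into a flat buffer when a thread is preempted and reload it when the thread resumes. A minimal sketch of that round trip, assuming a caller that owns the buffer (the function names and buffer handling below are illustrative, not V8's actual ThreadManager code):

// Hedged sketch: spill and reload bootstrapper state across a thread
// switch. SwitchOut/SwitchIn are hypothetical helper names.
void SwitchOut(Bootstrapper* bootstrapper, char* buffer) {
  // ArchiveState copies nesting_ into the buffer, zeroes it for the
  // incoming thread, and returns a pointer just past the archived bytes.
  char* end = bootstrapper->ArchiveState(buffer);
  ASSERT(end == buffer + bootstrapper->ArchiveSpacePerThread());
}

void SwitchIn(Bootstrapper* bootstrapper, char* buffer) {
  // RestoreState reads nesting_ back out of the buffer written above.
  bootstrapper->RestoreState(buffer);
}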
// Called when the top-level V8 mutex is destroyed.
void Bootstrapper::FreeThreadResources() {
- ASSERT(!BootstrapperActive::IsActive());
-}
-
-
-// Reserve space for statics needing saving and restoring.
-int BootstrapperActive::ArchiveSpacePerThread() {
- return sizeof(nesting_);
-}
-
-
-// Archive statics that are thread local.
-char* BootstrapperActive::ArchiveState(char* to) {
- *reinterpret_cast<int*>(to) = nesting_;
- nesting_ = 0;
- return to + sizeof(nesting_);
-}
-
-
-// Restore statics that are thread local.
-char* BootstrapperActive::RestoreState(char* from) {
- nesting_ = *reinterpret_cast<int*>(from);
- return from + sizeof(nesting_);
+ ASSERT(!IsActive());
}
} } // namespace v8::internal
namespace internal {
-class BootstrapperActive BASE_EMBEDDED {
+// A SourceCodeCache uses a FixedArray to store pairs of
+// (AsciiString*, JSFunction*), mapping names of native code files
+// (runtime.js, etc.) to precompiled functions. Instead of mapping
+// names to functions it might make sense to let the JS2C tool
+// generate an index for each native JS file.
+class SourceCodeCache BASE_EMBEDDED {
public:
- BootstrapperActive() { nesting_++; }
- ~BootstrapperActive() { nesting_--; }
+ explicit SourceCodeCache(Script::Type type) : type_(type), cache_(NULL) { }
- // Support for thread preemption.
- static int ArchiveSpacePerThread();
- static char* ArchiveState(char* to);
- static char* RestoreState(char* from);
+ void Initialize(bool create_heap_objects) {
+ cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
+ }
+
+ void Iterate(ObjectVisitor* v) {
+ v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+ }
+
+ bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
+ for (int i = 0; i < cache_->length(); i += 2) {
+ SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
+ if (str->IsEqualTo(name)) {
+ *handle = Handle<SharedFunctionInfo>(
+ SharedFunctionInfo::cast(cache_->get(i + 1)));
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
+ HandleScope scope;
+ int length = cache_->length();
+ Handle<FixedArray> new_array =
+ FACTORY->NewFixedArray(length + 2, TENURED);
+ cache_->CopyTo(0, *new_array, 0, cache_->length());
+ cache_ = *new_array;
+ Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
+ cache_->set(length, *str);
+ cache_->set(length + 1, *shared);
+ Script::cast(shared->script())->set_type(Smi::FromInt(type_));
+ }
private:
- static bool IsActive() { return nesting_ != 0; }
- static int nesting_;
- friend class Bootstrapper;
+ Script::Type type_;
+ FixedArray* cache_;
+ DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
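A SourceCodeCache consumer is expected to try Lookup first and fall back to compilation plus Add on a miss. A hedged sketch of that flow, where CompileNative stands in for the actual compilation step and is not part of the cache's interface:

// Illustrative only: look up a native script, compiling it on a miss.
Handle<SharedFunctionInfo> GetCachedNative(SourceCodeCache* cache,
                                           Vector<const char> name,
                                           Vector<const char> source) {
  Handle<SharedFunctionInfo> function_info;
  if (!cache->Lookup(name, &function_info)) {
    function_info = CompileNative(name, source);  // hypothetical helper
    cache->Add(name, function_info);  // remembered for the next request
  }
  return function_info;
}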
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
-class Bootstrapper : public AllStatic {
+class Bootstrapper {
public:
// Requires: Heap::Setup has been called.
- static void Initialize(bool create_heap_objects);
- static void TearDown();
+ void Initialize(bool create_heap_objects);
+ void TearDown();
// Creates a JavaScript Global Context with initial object graph.
// The returned value is a global handle cast to V8Environment*.
- static Handle<Context> CreateEnvironment(
+ Handle<Context> CreateEnvironment(
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions);
// Detach the environment from its outer global object.
- static void DetachGlobal(Handle<Context> env);
+ void DetachGlobal(Handle<Context> env);
// Reattach an outer global object to an environment.
- static void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
+ void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
// Traverses the pointers for memory management.
- static void Iterate(ObjectVisitor* v);
+ void Iterate(ObjectVisitor* v);
// Accessor for the native scripts source code.
- static Handle<String> NativesSourceLookup(int index);
+ Handle<String> NativesSourceLookup(int index);
// Tells whether bootstrapping is active.
- static bool IsActive() { return BootstrapperActive::IsActive(); }
+ bool IsActive() const { return nesting_ != 0; }
// Support for thread preemption.
- static int ArchiveSpacePerThread();
- static char* ArchiveState(char* to);
- static char* RestoreState(char* from);
- static void FreeThreadResources();
+ int ArchiveSpacePerThread();
+ char* ArchiveState(char* to);
+ char* RestoreState(char* from);
+ void FreeThreadResources();
// This will allocate a char array that is deleted when V8 is shut down.
// It should only be used for strictly finite allocations.
- static char* AllocateAutoDeletedArray(int bytes);
+ char* AllocateAutoDeletedArray(int bytes);
// Used for new context creation.
- static bool InstallExtensions(Handle<Context> global_context,
- v8::ExtensionConfiguration* extensions);
+ bool InstallExtensions(Handle<Context> global_context,
+ v8::ExtensionConfiguration* extensions);
+
+ SourceCodeCache* extensions_cache() { return &extensions_cache_; }
+
+ private:
+ typedef int NestingCounterType;
+ NestingCounterType nesting_;
+ SourceCodeCache extensions_cache_;
+ // This is for delete, not delete[].
+ List<char*>* delete_these_non_arrays_on_tear_down_;
+ // This is for delete[].
+ List<char*>* delete_these_arrays_on_tear_down_;
+
+ friend class BootstrapperActive;
+ friend class Isolate;
+ friend class NativesExternalStringResource;
+
+ Bootstrapper();
+
+ DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
+};
+
+
+class BootstrapperActive BASE_EMBEDDED {
+ public:
+ BootstrapperActive() {
+ ++Isolate::Current()->bootstrapper()->nesting_;
+ }
+
+ ~BootstrapperActive() {
+ --Isolate::Current()->bootstrapper()->nesting_;
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
};
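BootstrapperActive is a simple RAII guard over the per-isolate nesting counter, so IsActive() is true exactly while at least one guard is alive on the current isolate. A short usage sketch (the surrounding function is hypothetical):

void SetUpGlobalContextPiece() {  // illustrative caller
  BootstrapperActive active;  // increments nesting_ for this scope
  ASSERT(Isolate::Current()->bootstrapper()->IsActive());
  // ... work that must only happen while bootstrapping ...
}  // destructor decrements nesting_ again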
class NativesExternalStringResource
: public v8::String::ExternalAsciiStringResource {
public:
- explicit NativesExternalStringResource(const char* source);
+ explicit NativesExternalStringResource(Bootstrapper* bootstrapper,
+ const char* source);
const char* data() const {
return data_;
} // namespace
-
// ----------------------------------------------------------------------------
// Support macro for defining builtins in C++.
// ----------------------------------------------------------------------------
#ifdef DEBUG
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args) { \
- args.Verify(); \
- return Builtin_Impl_##name(args); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args)
+#define BUILTIN(name) \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ MUST_USE_RESULT static MaybeObject* Builtin_##name( \
+ name##ArgumentsType args, Isolate* isolate) { \
+ ASSERT(isolate == Isolate::Current()); \
+ args.Verify(); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args)
+#define BUILTIN(name) \
+ static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
#endif
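For illustration, here is what defining a trivial builtin with the updated macro looks like: in release mode it expands to a single function taking (args, isolate), while the debug variant adds a wrapper that verifies the arguments and checks the isolate before delegating. The name below is hypothetical and would additionally need an entry in BUILTIN_LIST_C to get its ArgumentsType typedef and table slot:

BUILTIN(ExampleNoop) {  // hypothetical; not in the real builtin lists
  // The isolate parameter is now threaded through explicitly instead of
  // being fetched through the formerly static Heap/Top interfaces.
  return isolate->heap()->undefined_value();
}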
-static inline bool CalledAsConstructor() {
+static inline bool CalledAsConstructor(Isolate* isolate) {
#ifdef DEBUG
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
StackFrame* frame = it.frame();
bool reference_result = frame->is_construct();
#endif
- Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+ Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
// Because we know fp points to an exit frame we can use the relevant
// part of ExitFrame::ComputeCallerState directly.
const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
// ----------------------------------------------------------------------------
-
BUILTIN(Illegal) {
UNREACHABLE();
- return Heap::undefined_value(); // Make compiler happy.
+ return isolate->heap()->undefined_value(); // Make compiler happy.
}
BUILTIN(EmptyFunction) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
BUILTIN(ArrayCodeGeneric) {
- Counters::array_function_runtime.Increment();
+ Heap* heap = isolate->heap();
+ isolate->counters()->array_function_runtime()->Increment();
JSArray* array;
- if (CalledAsConstructor()) {
+ if (CalledAsConstructor(isolate)) {
array = JSArray::cast(*args.receiver());
} else {
// Allocate the JS Array
JSFunction* constructor =
- Top::context()->global_context()->array_function();
+ isolate->context()->global_context()->array_function();
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateJSObject(constructor);
+ { MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
array = JSArray::cast(obj);
int len = Smi::cast(obj)->value();
if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(len);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
array->SetContent(FixedArray::cast(obj));
int number_of_elements = args.length() - 1;
Smi* len = Smi::FromInt(number_of_elements);
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(len->value());
+ { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
}
-MUST_USE_RESULT static MaybeObject* AllocateJSArray() {
+MUST_USE_RESULT static MaybeObject* AllocateJSArray(Heap* heap) {
JSFunction* array_function =
- Top::context()->global_context()->array_function();
+ heap->isolate()->context()->global_context()->array_function();
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(array_function);
+ { MaybeObject* maybe_result = heap->AllocateJSObject(array_function);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return result;
}
-MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray() {
+MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray(Heap* heap) {
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray* result_array = JSArray::cast(result);
result_array->set_length(Smi::FromInt(0));
- result_array->set_elements(Heap::empty_fixed_array());
+ result_array->set_elements(heap->empty_fixed_array());
return result_array;
}
-static void CopyElements(AssertNoAllocation* no_gc,
+static void CopyElements(Heap* heap,
+ AssertNoAllocation* no_gc,
FixedArray* dst,
int dst_index,
FixedArray* src,
int src_index,
int len) {
ASSERT(dst != src); // Use MoveElements instead.
- ASSERT(dst->map() != Heap::fixed_cow_array_map());
+ ASSERT(dst->map() != heap->fixed_cow_array_map());
ASSERT(len > 0);
CopyWords(dst->data_start() + dst_index,
src->data_start() + src_index,
len);
WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
- Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+ heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
}
-static void MoveElements(AssertNoAllocation* no_gc,
+static void MoveElements(Heap* heap,
+ AssertNoAllocation* no_gc,
FixedArray* dst,
int dst_index,
FixedArray* src,
int src_index,
int len) {
- ASSERT(dst->map() != Heap::fixed_cow_array_map());
+ ASSERT(dst->map() != heap->fixed_cow_array_map());
memmove(dst->data_start() + dst_index,
src->data_start() + src_index,
len * kPointerSize);
WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
- Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+ heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
}
-static void FillWithHoles(FixedArray* dst, int from, int to) {
- ASSERT(dst->map() != Heap::fixed_cow_array_map());
- MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from);
+static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
+ ASSERT(dst->map() != heap->fixed_cow_array_map());
+ MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
}
-static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != Heap::fixed_cow_array_map());
+static FixedArray* LeftTrimFixedArray(Heap* heap,
+ FixedArray* elms,
+ int to_trim) {
+ ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with the chunk
// start, and thus the trick is just not applicable.
- ASSERT(!Heap::lo_space()->Contains(elms));
+ ASSERT(!heap->lo_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
const int len = elms->length();
if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
- !Heap::new_space()->Contains(elms)) {
+ !heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
// remembered set) won't find pointers to new-space there.
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
+ heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
- former_start[to_trim] = Heap::fixed_array_map();
+ former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
return FixedArray::cast(HeapObject::FromAddress(
}
-static bool ArrayPrototypeHasNoElements(Context* global_context,
+static bool ArrayPrototypeHasNoElements(Heap* heap,
+ Context* global_context,
JSObject* array_proto) {
// This method depends on non writability of Object and Array prototype
// fields.
- if (array_proto->elements() != Heap::empty_fixed_array()) return false;
+ if (array_proto->elements() != heap->empty_fixed_array()) return false;
// Hidden prototype
array_proto = JSObject::cast(array_proto->GetPrototype());
- ASSERT(array_proto->elements() == Heap::empty_fixed_array());
+ ASSERT(array_proto->elements() == heap->empty_fixed_array());
// Object.prototype
Object* proto = array_proto->GetPrototype();
- if (proto == Heap::null_value()) return false;
+ if (proto == heap->null_value()) return false;
array_proto = JSObject::cast(proto);
if (array_proto != global_context->initial_object_prototype()) return false;
- if (array_proto->elements() != Heap::empty_fixed_array()) return false;
+ if (array_proto->elements() != heap->empty_fixed_array()) return false;
ASSERT(array_proto->GetPrototype()->IsNull());
return true;
}
MUST_USE_RESULT
static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Object* receiver) {
+ Heap* heap, Object* receiver) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = array->elements();
- if (elms->map() == Heap::fixed_array_map()) return elms;
- if (elms->map() == Heap::fixed_cow_array_map()) {
+ if (elms->map() == heap->fixed_array_map()) return elms;
+ if (elms->map() == heap->fixed_cow_array_map()) {
return array->EnsureWritableFastElements();
}
return NULL;
}
-static inline bool IsJSArrayFastElementMovingAllowed(JSArray* receiver) {
- Context* global_context = Top::context()->global_context();
+static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
+ JSArray* receiver) {
+ Context* global_context = heap->isolate()->context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
return receiver->GetPrototype() == array_proto &&
- ArrayPrototypeHasNoElements(global_context, array_proto);
+ ArrayPrototypeHasNoElements(heap, global_context, array_proto);
}
MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
+ Isolate* isolate,
const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- HandleScope handleScope;
+ HandleScope handleScope(isolate);
Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
- name);
+ GetProperty(Handle<JSObject>(
+ isolate->global_context()->builtins()),
+ name);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
ScopedVector<Object**> argv(args.length() - 1);
BUILTIN(ArrayPush) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayPush", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
+ }
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArray* elms = FixedArray::cast(elms_obj);
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateUninitializedFixedArray(capacity);
+ { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
if (len > 0) {
- CopyElements(&no_gc, new_elms, 0, elms, 0, len);
+ CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
}
- FillWithHoles(new_elms, new_length, capacity);
+ FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
BUILTIN(ArrayPop) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayPop", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
- if (len == 0) return Heap::undefined_value();
+ if (len == 0) return heap->undefined_value();
// Get top element
MaybeObject* top = elms->get(len - 1);
BUILTIN(ArrayShift) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayShift", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayShift", args);
+ }
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
- return CallJsBuiltin("ArrayShift", args);
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ return CallJsBuiltin(isolate, "ArrayShift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
- if (len == 0) return Heap::undefined_value();
+ if (len == 0) return heap->undefined_value();
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
- first = Heap::undefined_value();
+ first = heap->undefined_value();
}
- if (!Heap::lo_space()->Contains(elms)) {
+ if (!heap->lo_space()->Contains(elms)) {
// As elms is still in the same space it was in before,
// there is no need to update the region dirty mark.
- array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
+ array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
AssertNoAllocation no_gc;
- MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
- elms->set(len - 1, Heap::the_hole_value());
+ MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
+ elms->set(len - 1, heap->the_hole_value());
}
// Set the length.
BUILTIN(ArrayUnshift) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayUnshift", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ }
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
- return CallJsBuiltin("ArrayUnshift", args);
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateUninitializedFixedArray(capacity);
+ { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
if (len > 0) {
- CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
+ CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
}
- FillWithHoles(new_elms, new_length, capacity);
+ FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
- MoveElements(&no_gc, elms, to_add, elms, 0, len);
+ MoveElements(heap, &no_gc, elms, to_add, elms, 0, len);
}
// Add the provided values.
BUILTIN(ArraySlice) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArray* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(array)) {
- return CallJsBuiltin("ArraySlice", args);
+ !IsJSArrayFastElementMovingAllowed(heap, array)) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
elms = FixedArray::cast(array->elements());
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
Map* arguments_map =
- Top::context()->global_context()->arguments_boilerplate()->map();
+ isolate->context()->global_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastElements();
if (!is_arguments_object_with_fast_elements) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
elms = FixedArray::cast(JSObject::cast(receiver)->elements());
Object* len_obj = JSObject::cast(receiver)
->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
len = Smi::cast(len_obj)->value();
if (len > elms->length()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
for (int i = 0; i < len; i++) {
- if (elms->get(i) == Heap::the_hole_value()) {
- return CallJsBuiltin("ArraySlice", args);
+ if (elms->get(i) == heap->the_hole_value()) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
}
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
} else if (!arg2->IsUndefined()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
}
// Calculate the length of result array.
int result_len = final - k;
if (result_len <= 0) {
- return AllocateEmptyJSArray();
+ return AllocateEmptyJSArray(heap);
}
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray* result_array = JSArray::cast(result);
{ MaybeObject* maybe_result =
- Heap::AllocateUninitializedFixedArray(result_len);
+ heap->AllocateUninitializedFixedArray(result_len);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* result_elms = FixedArray::cast(result);
AssertNoAllocation no_gc;
- CopyElements(&no_gc, result_elms, 0, elms, k, result_len);
+ CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
// Set elements.
result_array->set_elements(result_elms);
BUILTIN(ArraySplice) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArraySplice", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
- return CallJsBuiltin("ArraySplice", args);
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
- return CallJsBuiltin("ArraySplice", args);
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
}
int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
if (arg2->IsSmi()) {
value = Smi::cast(arg2)->value();
} else {
- return CallJsBuiltin("ArraySplice", args);
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
}
actual_delete_count = Min(Max(value, 0), len - actual_start);
JSArray* result_array = NULL;
if (actual_delete_count == 0) {
Object* result;
- { MaybeObject* maybe_result = AllocateEmptyJSArray();
+ { MaybeObject* maybe_result = AllocateEmptyJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
result_array = JSArray::cast(result);
} else {
// Allocate result array.
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
result_array = JSArray::cast(result);
{ MaybeObject* maybe_result =
- Heap::AllocateUninitializedFixedArray(actual_delete_count);
+ heap->AllocateUninitializedFixedArray(actual_delete_count);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* result_elms = FixedArray::cast(result);
AssertNoAllocation no_gc;
// Fill newly created array.
- CopyElements(&no_gc,
+ CopyElements(heap,
+ &no_gc,
result_elms, 0,
elms, actual_start,
actual_delete_count);
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !Heap::lo_space()->Contains(elms) &&
+ const bool trim_array = !heap->lo_space()->Contains(elms) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
memmove(start + delta, start, actual_start * kPointerSize);
}
- elms = LeftTrimFixedArray(elms, delta);
+ elms = LeftTrimFixedArray(heap, elms, delta);
array->set_elements(elms, SKIP_WRITE_BARRIER);
} else {
AssertNoAllocation no_gc;
- MoveElements(&no_gc,
+ MoveElements(heap, &no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
+ FillWithHoles(heap, elms, new_length, len);
}
} else if (item_count > actual_delete_count) {
// Currently fixed arrays cannot grow too big, so
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
{ MaybeObject* maybe_obj =
- Heap::AllocateUninitializedFixedArray(capacity);
+ heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
if (actual_start > 0) {
- CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
+ CopyElements(heap, &no_gc, new_elms, 0, elms, 0, actual_start);
}
const int to_copy = len - actual_delete_count - actual_start;
if (to_copy > 0) {
- CopyElements(&no_gc,
+ CopyElements(heap, &no_gc,
new_elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
to_copy);
}
- FillWithHoles(new_elms, new_length, capacity);
+ FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
- MoveElements(&no_gc,
+ MoveElements(heap, &no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
BUILTIN(ArrayConcat) {
- Context* global_context = Top::context()->global_context();
+ Heap* heap = isolate->heap();
+ Context* global_context = isolate->context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(global_context, array_proto)) {
- return CallJsBuiltin("ArrayConcat", args);
+ if (!ArrayPrototypeHasNoElements(heap, global_context, array_proto)) {
+ return CallJsBuiltin(isolate, "ArrayConcat", args);
}
// Iterate through all the arguments performing checks
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
- return CallJsBuiltin("ArrayConcat", args);
+ return CallJsBuiltin(isolate, "ArrayConcat", args);
}
int len = Smi::cast(JSArray::cast(arg)->length())->value();
ASSERT(result_len >= 0);
if (result_len > FixedArray::kMaxLength) {
- return CallJsBuiltin("ArrayConcat", args);
+ return CallJsBuiltin(isolate, "ArrayConcat", args);
}
}
if (result_len == 0) {
- return AllocateEmptyJSArray();
+ return AllocateEmptyJSArray(heap);
}
// Allocate result.
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray* result_array = JSArray::cast(result);
{ MaybeObject* maybe_result =
- Heap::AllocateUninitializedFixedArray(result_len);
+ heap->AllocateUninitializedFixedArray(result_len);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* result_elms = FixedArray::cast(result);
int len = Smi::cast(array->length())->value();
if (len > 0) {
FixedArray* elms = FixedArray::cast(array->elements());
- CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
+ CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
start_pos += len;
}
}
BUILTIN(StrictArgumentsCallee) {
HandleScope scope;
- return Top::Throw(*Factory::NewTypeError("strict_arguments_callee",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
BUILTIN(StrictArgumentsCaller) {
HandleScope scope;
- return Top::Throw(*Factory::NewTypeError("strict_arguments_caller",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_arguments_caller", HandleVector<Object>(NULL, 0)));
}
BUILTIN(StrictFunctionCaller) {
HandleScope scope;
- return Top::Throw(*Factory::NewTypeError("strict_function_caller",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_function_caller", HandleVector<Object>(NULL, 0)));
}
BUILTIN(StrictFunctionArguments) {
HandleScope scope;
- return Top::Throw(*Factory::NewTypeError("strict_function_arguments",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_function_arguments", HandleVector<Object>(NULL, 0)));
}
// overwritten with undefined. Arguments that do fit the expected
// type are overwritten with the object in the prototype chain that
// actually has that type.
-static inline Object* TypeCheck(int argc,
+static inline Object* TypeCheck(Heap* heap,
+ int argc,
Object** argv,
FunctionTemplateInfo* info) {
Object* recv = argv[0];
Object* holder = recv;
if (!recv_type->IsUndefined()) {
- for (; holder != Heap::null_value(); holder = holder->GetPrototype()) {
+ for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
break;
}
}
- if (holder == Heap::null_value()) return holder;
+ if (holder == heap->null_value()) return holder;
}
Object* args_obj = sig->args();
// If there is no argument signature we're done
if (argtype->IsUndefined()) continue;
Object** arg = &argv[-1 - i];
Object* current = *arg;
- for (; current != Heap::null_value(); current = current->GetPrototype()) {
+ for (; current != heap->null_value(); current = current->GetPrototype()) {
if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
*arg = current;
break;
}
}
- if (current == Heap::null_value()) *arg = Heap::undefined_value();
+ if (current == heap->null_value()) *arg = heap->undefined_value();
}
return holder;
}
template <bool is_construct>
MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
- BuiltinArguments<NEEDS_CALLED_FUNCTION> args) {
- ASSERT(is_construct == CalledAsConstructor());
+ BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
+ ASSERT(is_construct == CalledAsConstructor(isolate));
+ Heap* heap = isolate->heap();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSFunction> function = args.called_function();
ASSERT(function->shared()->IsApiFunction());
FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
if (is_construct) {
- Handle<FunctionTemplateInfo> desc(fun_data);
+ Handle<FunctionTemplateInfo> desc(fun_data, isolate);
bool pending_exception = false;
- Factory::ConfigureInstance(desc, Handle<JSObject>::cast(args.receiver()),
- &pending_exception);
- ASSERT(Top::has_pending_exception() == pending_exception);
+ isolate->factory()->ConfigureInstance(
+ desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
+ ASSERT(isolate->has_pending_exception() == pending_exception);
if (pending_exception) return Failure::Exception();
fun_data = *desc;
}
- Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
+ Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
Handle<Object> obj =
- Factory::NewTypeError("illegal_invocation", HandleVector(&function, 1));
- return Top::Throw(*obj);
+ isolate->factory()->NewTypeError(
+ "illegal_invocation", HandleVector(&function, 1));
+ return isolate->Throw(*obj);
}
Object* raw_call_data = fun_data->call_code();
Object* data_obj = call_data->data();
Object* result;
- LOG(ApiObjectAccess("call", JSObject::cast(*args.receiver())));
+ LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
ASSERT(raw_holder->IsJSObject());
- CustomArguments custom;
+ CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
data_obj, *function, raw_holder);
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(callback_obj));
value = callback(new_args);
}
if (value.IsEmpty()) {
- result = Heap::undefined_value();
+ result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!is_construct || result->IsJSObject()) return result;
}
BUILTIN(HandleApiCall) {
- return HandleApiCallHelper<false>(args);
+ return HandleApiCallHelper<false>(args, isolate);
}
BUILTIN(HandleApiCallConstruct) {
- return HandleApiCallHelper<true>(args);
+ return HandleApiCallHelper<true>(args, isolate);
}
BUILTIN(FastHandleApiCall) {
- ASSERT(!CalledAsConstructor());
+ ASSERT(!CalledAsConstructor(isolate));
+ Heap* heap = isolate->heap();
const bool is_construct = false;
// We expect four more arguments: callback, function, call data, and holder.
VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
Utils::OpenHandle(*new_args.Callee()));
#endif
- HandleScope scope;
+ HandleScope scope(isolate);
Object* result;
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(callback_obj));
v8::InvocationCallback callback =
v8::ToCData<v8::InvocationCallback>(callback_obj);
value = callback(new_args);
}
if (value.IsEmpty()) {
- result = Heap::undefined_value();
+ result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
+ Isolate* isolate,
bool is_construct_call,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
// Non-functions are never called as constructors. Even if this is an object
// called as a constructor the delegate call is not a construct call.
- ASSERT(!CalledAsConstructor());
+ ASSERT(!CalledAsConstructor(isolate));
+ Heap* heap = isolate->heap();
Handle<Object> receiver = args.at<Object>(0);
// Get the data for the call and perform the callback.
Object* result;
{
- HandleScope scope;
-
- LOG(ApiObjectAccess("call non-function", obj));
+ HandleScope scope(isolate);
+ LOG(isolate, ApiObjectAccess("call non-function", obj));
- CustomArguments custom;
+ CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
call_data->data(), constructor, obj);
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(callback_obj));
value = callback(new_args);
}
if (value.IsEmpty()) {
- result = Heap::undefined_value();
+ result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
}
// Check for exceptions and return result.
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a normal function call.
BUILTIN(HandleApiCallAsFunction) {
- return HandleApiCallAsFunctionOrConstructor(false, args);
+ return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
}
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a construct call.
BUILTIN(HandleApiCallAsConstructor) {
- return HandleApiCallAsFunctionOrConstructor(true, args);
+ return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
}
}
#endif
-Object* Builtins::builtins_[builtin_count] = { NULL, };
-const char* Builtins::names_[builtin_count] = { NULL, };
+
+Builtins::Builtins() : initialized_(false) {
+ memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
+ memset(names_, 0, sizeof(names_[0]) * builtin_count);
+}
+
+
+Builtins::~Builtins() {
+}
+
#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
- Address Builtins::c_functions_[cfunction_count] = {
- BUILTIN_LIST_C(DEF_ENUM_C)
- };
+Address const Builtins::c_functions_[cfunction_count] = {
+ BUILTIN_LIST_C(DEF_ENUM_C)
+};
#undef DEF_ENUM_C
#define DEF_JS_NAME(name, ignore) #name,
#define DEF_JS_ARGC(ignore, argc) argc,
-const char* Builtins::javascript_names_[id_count] = {
+const char* const Builtins::javascript_names_[id_count] = {
BUILTINS_LIST_JS(DEF_JS_NAME)
};
-int Builtins::javascript_argc_[id_count] = {
+int const Builtins::javascript_argc_[id_count] = {
BUILTINS_LIST_JS(DEF_JS_ARGC)
};
#undef DEF_JS_NAME
#undef DEF_JS_ARGC
-static bool is_initialized = false;
-void Builtins::Setup(bool create_heap_objects) {
- ASSERT(!is_initialized);
+struct BuiltinDesc {
+ byte* generator;
+ byte* c_code;
+ const char* s_name; // name is only used for generating log information.
+ int name;
+ Code::Flags flags;
+ BuiltinExtraArguments extra_args;
+};
- // Create a scope for the handles in the builtins.
- HandleScope scope;
+class BuiltinFunctionTable {
+ public:
+ BuiltinFunctionTable() {
+ Builtins::InitBuiltinFunctionTable();
+ }
+
+ static const BuiltinDesc* functions() { return functions_; }
+
+ private:
+ static BuiltinDesc functions_[Builtins::builtin_count + 1];
+
+ friend class Builtins;
+};
- struct BuiltinDesc {
- byte* generator;
- byte* c_code;
- const char* s_name; // name is only used for generating log information.
- int name;
- Code::Flags flags;
- BuiltinExtraArguments extra_args;
- };
-
-#define DEF_FUNCTION_PTR_C(name, extra_args) \
- { FUNCTION_ADDR(Generate_Adaptor), \
- FUNCTION_ADDR(Builtin_##name), \
- #name, \
- c_##name, \
- Code::ComputeFlags(Code::BUILTIN), \
- extra_args \
- },
-
-#define DEF_FUNCTION_PTR_A(name, kind, state, extra) \
- { FUNCTION_ADDR(Generate_##name), \
- NULL, \
- #name, \
- name, \
- Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state, extra), \
- NO_EXTRA_ARGUMENTS \
- },
-
- // Define array of pointers to generators and C builtin functions.
- static BuiltinDesc functions[] = {
- BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
- BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
- BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
- // Terminator:
- { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0),
- NO_EXTRA_ARGUMENTS }
- };
+BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
+
+static const BuiltinFunctionTable builtin_function_table_init;
+
+// Define array of pointers to generators and C builtin functions.
+// We do this in a sort of roundabout way so that we can do the initialization
+// within the lexical scope of Builtins:: and within a context where
+// Code::Flags names a non-abstract type.
+void Builtins::InitBuiltinFunctionTable() {
+ BuiltinDesc* functions = BuiltinFunctionTable::functions_;
+ functions[builtin_count].generator = NULL;
+ functions[builtin_count].c_code = NULL;
+ functions[builtin_count].s_name = NULL;
+ functions[builtin_count].name = builtin_count;
+ functions[builtin_count].flags = static_cast<Code::Flags>(0);
+ functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;
+
+#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
+ functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
+ functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
+ functions->s_name = #aname; \
+ functions->name = c_##aname; \
+ functions->flags = Code::ComputeFlags(Code::BUILTIN); \
+ functions->extra_args = aextra_args; \
+ ++functions;
+
+#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = aname; \
+ functions->flags = Code::ComputeFlags(Code::kind, \
+ NOT_IN_LOOP, \
+ state, \
+ extra); \
+ functions->extra_args = NO_EXTRA_ARGUMENTS; \
+ ++functions;
+
+ BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
+ BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+ BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
#undef DEF_FUNCTION_PTR_C
#undef DEF_FUNCTION_PTR_A
+}
+
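The builtin_function_table_init constant above is what triggers InitBuiltinFunctionTable(): a file-scope object whose constructor runs during static initialization, filling functions_ exactly once before any isolate exists. The same pattern in a self-contained miniature (illustrative names only):

// Minimal sketch of the constructor-side-effect initialization trick.
static int squares[4];
static void FillSquares() {
  for (int i = 0; i < 4; i++) squares[i] = i * i;  // fill once
}
struct SquaresFiller {
  SquaresFiller() { FillSquares(); }  // runs before main()
};
static const SquaresFiller squares_filler;  // forces the fill at startup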
+void Builtins::Setup(bool create_heap_objects) {
+ ASSERT(!initialized_);
+ Heap* heap = Isolate::Current()->heap();
+
+ // Create a scope for the handles in the builtins.
+ HandleScope scope;
+
+ const BuiltinDesc* functions = BuiltinFunctionTable::functions();
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects.
// This simplifies things because we don't need to retry.
AlwaysAllocateScope __scope__;
{ MaybeObject* maybe_code =
- Heap::CreateCode(desc, flags, masm.CodeObject());
+ heap->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_code->ToObject(&code)) {
v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
}
}
}
// Log the event and add the code to the builtins array.
- PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+ PROFILE(ISOLATE,
+ CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code),
functions[i].s_name));
GDBJIT(AddCode(GDBJITInterface::BUILTIN,
}
// Mark as initialized.
- is_initialized = true;
+ initialized_ = true;
}
void Builtins::TearDown() {
- is_initialized = false;
+ initialized_ = false;
}
const char* Builtins::Lookup(byte* pc) {
- if (is_initialized) { // may be called during initialization (disassembler!)
+ // May be called during initialization (disassembler!).
+ if (initialized_) {
for (int i = 0; i < builtin_count; i++) {
Code* entry = Code::cast(builtins_[i]);
if (entry->contains(pc)) {
V(APPLY_OVERFLOW, 1)
+class BuiltinFunctionTable;
class ObjectVisitor;
-class Builtins : public AllStatic {
+class Builtins {
public:
+ ~Builtins();
+
// Generate all builtin code objects. Should be called once during
- // VM initialization.
- static void Setup(bool create_heap_objects);
- static void TearDown();
+ // isolate initialization.
+ void Setup(bool create_heap_objects);
+ void TearDown();
// Garbage collection support.
- static void IterateBuiltins(ObjectVisitor* v);
+ void IterateBuiltins(ObjectVisitor* v);
// Disassembler support.
- static const char* Lookup(byte* pc);
+ const char* Lookup(byte* pc);
enum Name {
#define DEF_ENUM_C(name, ignore) name,
id_count
};
- static Code* builtin(Name name) {
+ Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
return reinterpret_cast<Code*>(builtins_[name]);
}
- static Address builtin_address(Name name) {
+ Address builtin_address(Name name) {
return reinterpret_cast<Address>(&builtins_[name]);
}
static const char* GetName(JavaScript id) { return javascript_names_[id]; }
static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
- static Handle<Code> GetCode(JavaScript id, bool* resolved);
+ Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
+ bool is_initialized() const { return initialized_; }
+
private:
+ Builtins();
+
// The external C++ functions called from the code.
- static Address c_functions_[cfunction_count];
+ static Address const c_functions_[cfunction_count];
// Note: These are always Code objects, but to conform with
// IterateBuiltins() above which assumes Object**'s for the callback
// function f, we use an Object* array here.
- static Object* builtins_[builtin_count];
- static const char* names_[builtin_count];
- static const char* javascript_names_[id_count];
- static int javascript_argc_[id_count];
+ Object* builtins_[builtin_count];
+ const char* names_[builtin_count];
+ static const char* const javascript_names_[id_count];
+ static int const javascript_argc_[id_count];
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
static void Generate_ArrayConstructCode(MacroAssembler* masm);
static void Generate_StringConstructCode(MacroAssembler* masm);
-
static void Generate_OnStackReplacement(MacroAssembler* masm);
+
+ static void InitBuiltinFunctionTable();
+
+ bool initialized_;
+
+ friend class BuiltinFunctionTable;
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(Builtins);
};
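Since Builtins is no longer AllStatic, call sites now reach it through the owning isolate, matching the pattern already visible in the bootstrapper changes above:

// Sketch of a typical post-refactoring call site.
Code* array_construct_stub =
    isolate->builtins()->builtin(Builtins::ArrayConstructCode);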
} } // namespace v8::internal
#include "v8.h"
#include "platform.h"
-#include "top.h"
+// TODO(isolates): is it necessary to lift this?
static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
if (fatal_error_handler_nesting_depth < 3) {
if (i::FLAG_stack_trace_on_abort) {
// Call this one twice on double fault
- i::Top::PrintStack();
+ i::Isolate::Current()->PrintStack();
}
}
i::OS::Abort();
namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out) {
- int index = Heap::code_stubs()->FindEntry(GetKey());
+ int index = HEAP->code_stubs()->FindEntry(GetKey());
if (index != NumberDictionary::kNotFound) {
- *code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
+ *code_out = Code::cast(HEAP->code_stubs()->ValueAt(index));
return true;
}
return false;
void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Counters::code_stubs.Increment();
+ COUNTERS->code_stubs()->Increment();
// Nested stubs are not allowed for leaves.
AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
- PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+ PROFILE(ISOLATE, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
- Counters::total_stubs_code_size.Increment(code->instruction_size());
+ COUNTERS->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Handle<Code> new_object = Factory::NewCode(
+ Handle<Code> new_object = FACTORY->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
RecordCodeGeneration(*new_object, &masm);
FinishCode(*new_object);
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
- Factory::DictionaryAtNumberPut(
- Handle<NumberDictionary>(Heap::code_stubs()),
+ FACTORY->DictionaryAtNumberPut(
+ Handle<NumberDictionary>(HEAP->code_stubs()),
GetKey(),
new_object);
- Heap::public_set_code_stubs(*dict);
+ HEAP->public_set_code_stubs(*dict);
code = *new_object;
}
- ASSERT(!NeedsImmovableCode() || Heap::lo_space()->Contains(code));
+ ASSERT(!NeedsImmovableCode() || HEAP->lo_space()->Contains(code));
return Handle<Code>(code);
}
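A sketch (not part of the patch) of the lookup-or-generate flow that
GetCode() implements, using only the members shown above; "stub" is a
hypothetical concrete CodeStub instance:

  // GetCode() first consults the per-heap cache (FindCodeInCache above);
  // on a miss it assembles the stub, records it via RecordCodeGeneration,
  // and stores it back into HEAP->code_stubs() under GetKey().
  Handle<Code> code = stub.GetCode();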
GetICState());
Object* new_object;
{ MaybeObject* maybe_new_object =
- Heap::CreateCode(desc, flags, masm.CodeObject());
+ HEAP->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
code = Code::cast(new_object);
// Try to update the code cache but do not fail if unable.
MaybeObject* maybe_new_object =
- Heap::code_stubs()->AtNumberPut(GetKey(), code);
+ HEAP->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) {
- Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
+ HEAP->public_set_code_stubs(NumberDictionary::cast(new_object));
}
}
const char* InstanceofStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* args = "";
#undef __
-CodeGenerator* CodeGeneratorScope::top_ = NULL;
-
-
void CodeGenerator::ProcessDeferred() {
while (!deferred_.is_empty()) {
DeferredCode* code = deferred_.RemoveLast();
bool print_json_ast = false;
const char* ftype;
- if (Bootstrapper::IsActive()) {
+ if (Isolate::Current()->bootstrapper()->IsActive()) {
print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast;
print_json_ast = FLAG_print_builtin_json_ast;
// Allocate and install the code.
CodeDesc desc;
masm->GetCode(&desc);
- Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
+ Handle<Code> code = FACTORY->NewCode(desc, flags, masm->CodeObject());
if (!code.is_null()) {
- Counters::total_compiled_code_size.Increment(code->instruction_size());
+ COUNTERS->total_compiled_code_size()->Increment(code->instruction_size());
}
return code;
}
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
- bool print_code = Bootstrapper::IsActive()
+ bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
- Counters::total_old_codegen_source_size.Increment(len);
+ COUNTERS->total_old_codegen_source_size()->Increment(len);
}
if (FLAG_trace_codegen) {
PrintF("Classic Compiler - ");
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
CodeGenerator cgen(&masm);
- CodeGeneratorScope scope(&cgen);
+ CodeGeneratorScope scope(Isolate::Current(), &cgen);
cgen.Generate(info);
if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
+ ASSERT(!Isolate::Current()->has_pending_exception());
return false;
}
#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Vector<const char> kRegexp = CStrVector("regexp");
+
+
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
- if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false;
+ if (!LOGGER->is_logging() && !CpuProfiler::is_profiling()) return false;
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
- static Vector<const char> kRegexp = CStrVector("regexp");
if (name->IsEqualTo(kRegexp))
return true;
}
if (globals == 0) return;
// Compute array of global variable and function declarations.
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+ Handle<FixedArray> array = FACTORY->NewFixedArray(2 * globals, TENURED);
for (int j = 0, i = 0; i < length; i++) {
Declaration* node = declarations->at(i);
Variable* var = node->proxy()->var();
bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments();
Handle<String> name = node->name();
- Runtime::Function* function = node->function();
+ const Runtime::Function* function = node->function();
if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
int lookup_index = static_cast<int>(function->function_id) -
static_cast<int>(Runtime::kFirstInlineFunction);
// of active code generators.
class CodeGeneratorScope BASE_EMBEDDED {
public:
- explicit CodeGeneratorScope(CodeGenerator* cgen) {
- previous_ = top_;
- top_ = cgen;
+ explicit CodeGeneratorScope(Isolate* isolate, CodeGenerator* cgen)
+ : isolate_(isolate) {
+ previous_ = isolate->current_code_generator();
+ isolate->set_current_code_generator(cgen);
}
~CodeGeneratorScope() {
- top_ = previous_;
+ isolate_->set_current_code_generator(previous_);
}
- static CodeGenerator* Current() {
- ASSERT(top_ != NULL);
- return top_;
+ static CodeGenerator* Current(Isolate* isolate) {
+ ASSERT(isolate->current_code_generator() != NULL);
+ return isolate->current_code_generator();
}
private:
- static CodeGenerator* top_;
CodeGenerator* previous_;
+ Isolate* isolate_;
};
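Usage now threads the isolate explicitly, as at the codegen.cc call site
earlier in this patch (a minimal sketch):

  CodeGenerator cgen(&masm);
  CodeGeneratorScope scope(Isolate::Current(), &cgen);
  // ... generate code; nested scopes save and restore the previous one ...
  CodeGenerator* active = CodeGeneratorScope::Current(Isolate::Current());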
-
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
namespace v8 {
namespace internal {
-// The number of sub caches covering the different types to cache.
-static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
// The number of ScriptGenerations is carefully chosen based on histograms.
// Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
-// Index for the first generation in the cache.
-static const int kFirstGeneration = 0;
-
-// The compilation cache consists of several generational sub-caches which uses
-// this class as a base class. A sub-cache contains a compilation cache tables
-// for each generation of the sub-cache. Since the same source code string has
-// different compiled code for scripts and evals, we use separate sub-caches
-// for different compilation modes, to avoid retrieving the wrong result.
-class CompilationSubCache {
- public:
- explicit CompilationSubCache(int generations): generations_(generations) {
- tables_ = NewArray<Object*>(generations);
- }
-
- ~CompilationSubCache() { DeleteArray(tables_); }
-
- // Get the compilation cache tables for a specific generation.
- Handle<CompilationCacheTable> GetTable(int generation);
- // Accessors for first generation.
- Handle<CompilationCacheTable> GetFirstTable() {
- return GetTable(kFirstGeneration);
+CompilationCache::CompilationCache()
+ : script_(kScriptGenerations),
+ eval_global_(kEvalGlobalGenerations),
+ eval_contextual_(kEvalContextualGenerations),
+ reg_exp_(kRegExpGenerations),
+ enabled_(true),
+ eager_optimizing_set_(NULL) {
+ CompilationSubCache* subcaches[kSubCacheCount] =
+ {&script_, &eval_global_, &eval_contextual_, &reg_exp_};
+ for (int i = 0; i < kSubCacheCount; ++i) {
+ subcaches_[i] = subcaches[i];
}
- void SetFirstTable(Handle<CompilationCacheTable> value) {
- ASSERT(kFirstGeneration < generations_);
- tables_[kFirstGeneration] = *value;
- }
-
- // Age the sub-cache by evicting the oldest generation and creating a new
- // young generation.
- void Age();
-
- // GC support.
- void Iterate(ObjectVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
-
- // Clear this sub-cache evicting all its content.
- void Clear();
-
- // Remove given shared function info from sub-cache.
- void Remove(Handle<SharedFunctionInfo> function_info);
-
- // Number of generations in this sub-cache.
- inline int generations() { return generations_; }
-
- private:
- int generations_; // Number of generations.
- Object** tables_; // Compilation cache tables - one for each generation.
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
-};
-
-
-// Sub-cache for scripts.
-class CompilationCacheScript : public CompilationSubCache {
- public:
- explicit CompilationCacheScript(int generations)
- : CompilationSubCache(generations) { }
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset);
- void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- bool HasOrigin(Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
-};
-
-
-// Sub-cache for eval scripts.
-class CompilationCacheEval: public CompilationSubCache {
- public:
- explicit CompilationCacheEval(int generations)
- : CompilationSubCache(generations) { }
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context,
- StrictModeFlag strict_mode);
-
- void Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
-};
-
-
-// Sub-cache for regular expressions.
-class CompilationCacheRegExp: public CompilationSubCache {
- public:
- explicit CompilationCacheRegExp(int generations)
- : CompilationSubCache(generations) { }
-
- Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
-
- void Put(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
-};
-
-
-// Statically allocate all the sub-caches.
-static CompilationCacheScript script(kScriptGenerations);
-static CompilationCacheEval eval_global(kEvalGlobalGenerations);
-static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
-static CompilationCacheRegExp reg_exp(kRegExpGenerations);
-static CompilationSubCache* subcaches[kSubCacheCount] =
- {&script, &eval_global, &eval_contextual, &reg_exp};
+}
-// Current enable state of the compilation cache.
-static bool enabled = true;
-static inline bool IsEnabled() {
- return FLAG_compilation_cache && enabled;
+CompilationCache::~CompilationCache() {
+ delete eager_optimizing_set_;
+ eager_optimizing_set_ = NULL;
}
static Handle<CompilationCacheTable> AllocateTable(int size) {
- CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate,
+ CompilationCacheTable::Allocate(size),
CompilationCacheTable);
}
return result;
}
-
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
}
// Set the first generation as unborn.
- tables_[0] = Heap::undefined_value();
+ tables_[0] = HEAP->undefined_value();
}
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = Heap::raw_unchecked_undefined_value();
+ Object* undefined = HEAP->raw_unchecked_undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
void CompilationSubCache::Clear() {
- MemsetPointer(tables_, Heap::undefined_value(), generations_);
+ MemsetPointer(tables_, HEAP->undefined_value(), generations_);
}
}
+CompilationCacheScript::CompilationCacheScript(int generations)
+ : CompilationSubCache(generations),
+ script_histogram_(NULL),
+ script_histogram_initialized_(false) {
+}
+
+
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
}
}
- static void* script_histogram = StatsTable::CreateHistogram(
- "V8.ScriptCache",
- 0,
- kScriptGenerations,
- kScriptGenerations + 1);
+ Isolate* isolate = Isolate::Current();
+ if (!script_histogram_initialized_) {
+ script_histogram_ = isolate->stats_table()->CreateHistogram(
+ "V8.ScriptCache",
+ 0,
+ kScriptGenerations,
+ kScriptGenerations + 1);
+ script_histogram_initialized_ = true;
+ }
- if (script_histogram != NULL) {
+ if (script_histogram_ != NULL) {
// The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
- StatsTable::AddHistogramSample(script_histogram, generation);
+ isolate->stats_table()->AddHistogramSample(script_histogram_, generation);
}
// Once outside the manacles of the handle scope, we need to recheck
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
if (generation != 0) Put(source, shared);
- Counters::compilation_cache_hits.Increment();
+ isolate->counters()->compilation_cache_hits()->Increment();
return shared;
} else {
- Counters::compilation_cache_misses.Increment();
+ isolate->counters()->compilation_cache_misses()->Increment();
return Handle<SharedFunctionInfo>::null();
}
}
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(TryTablePut(source, function_info), CompilationCacheTable);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate,
+ TryTablePut(source, function_info),
+ CompilationCacheTable);
}
if (generation != 0) {
Put(source, context, function_info);
}
- Counters::compilation_cache_hits.Increment();
+ COUNTERS->compilation_cache_hits()->Increment();
return function_info;
} else {
- Counters::compilation_cache_misses.Increment();
+ COUNTERS->compilation_cache_misses()->Increment();
return Handle<SharedFunctionInfo>::null();
}
}
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(TryTablePut(source, context, function_info),
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate,
+ TryTablePut(source, context, function_info),
CompilationCacheTable);
}
if (generation != 0) {
Put(source, flags, data);
}
- Counters::compilation_cache_hits.Increment();
+ COUNTERS->compilation_cache_hits()->Increment();
return data;
} else {
- Counters::compilation_cache_misses.Increment();
+ COUNTERS->compilation_cache_misses()->Increment();
return Handle<FixedArray>::null();
}
}
Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
- CALL_HEAP_FUNCTION(TryTablePut(source, flags, data), CompilationCacheTable);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate,
+ TryTablePut(source, flags, data),
+ CompilationCacheTable);
}
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
- eval_global.Remove(function_info);
- eval_contextual.Remove(function_info);
- script.Remove(function_info);
+ eval_global_.Remove(function_info);
+ eval_contextual_.Remove(function_info);
+ script_.Remove(function_info);
}
return Handle<SharedFunctionInfo>::null();
}
- return script.Lookup(source, name, line_offset, column_offset);
+ return script_.Lookup(source, name, line_offset, column_offset);
}
Handle<SharedFunctionInfo> result;
if (is_global) {
- result = eval_global.Lookup(source, context, strict_mode);
+ result = eval_global_.Lookup(source, context, strict_mode);
} else {
- result = eval_contextual.Lookup(source, context, strict_mode);
+ result = eval_contextual_.Lookup(source, context, strict_mode);
}
return result;
}
return Handle<FixedArray>::null();
}
- return reg_exp.Lookup(source, flags);
+ return reg_exp_.Lookup(source, flags);
}
return;
}
- script.Put(source, function_info);
+ script_.Put(source, function_info);
}
HandleScope scope;
if (is_global) {
- eval_global.Put(source, context, function_info);
+ eval_global_.Put(source, context, function_info);
} else {
- eval_contextual.Put(source, context, function_info);
+ eval_contextual_.Put(source, context, function_info);
}
}
return;
}
- reg_exp.Put(source, flags, data);
+ reg_exp_.Put(source, flags, data);
}
}
-static HashMap* EagerOptimizingSet() {
- static HashMap map(&SourceHashCompare);
- return &map;
+HashMap* CompilationCache::EagerOptimizingSet() {
+ if (eager_optimizing_set_ == NULL) {
+ eager_optimizing_set_ = new HashMap(&SourceHashCompare);
+ }
+ return eager_optimizing_set_;
}
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->Clear();
+ subcaches_[i]->Clear();
}
}
+
void CompilationCache::Iterate(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->Iterate(v);
+ subcaches_[i]->Iterate(v);
}
}
void CompilationCache::IterateFunctions(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->IterateFunctions(v);
+ subcaches_[i]->IterateFunctions(v);
}
}
void CompilationCache::MarkCompactPrologue() {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->Age();
+ subcaches_[i]->Age();
}
}
void CompilationCache::Enable() {
- enabled = true;
+ enabled_ = true;
}
void CompilationCache::Disable() {
- enabled = false;
+ enabled_ = false;
Clear();
}
namespace v8 {
namespace internal {
+class HashMap;
+
+// The compilation cache consists of several generational sub-caches which use
+// this class as a base class. A sub-cache contains a compilation cache table
+// for each generation of the sub-cache. Since the same source code string has
+// different compiled code for scripts and evals, we use separate sub-caches
+// for different compilation modes, to avoid retrieving the wrong result.
+class CompilationSubCache {
+ public:
+ explicit CompilationSubCache(int generations): generations_(generations) {
+ tables_ = NewArray<Object*>(generations);
+ }
+
+ ~CompilationSubCache() { DeleteArray(tables_); }
+
+ // Index for the first generation in the cache.
+ static const int kFirstGeneration = 0;
+
+ // Get the compilation cache tables for a specific generation.
+ Handle<CompilationCacheTable> GetTable(int generation);
+
+ // Accessors for first generation.
+ Handle<CompilationCacheTable> GetFirstTable() {
+ return GetTable(kFirstGeneration);
+ }
+ void SetFirstTable(Handle<CompilationCacheTable> value) {
+ ASSERT(kFirstGeneration < generations_);
+ tables_[kFirstGeneration] = *value;
+ }
+
+ // Age the sub-cache by evicting the oldest generation and creating a new
+ // young generation.
+ void Age();
+
+ // GC support.
+ void Iterate(ObjectVisitor* v);
+ void IterateFunctions(ObjectVisitor* v);
+
+ // Clear this sub-cache evicting all its content.
+ void Clear();
+
+ // Remove given shared function info from sub-cache.
+ void Remove(Handle<SharedFunctionInfo> function_info);
+
+ // Number of generations in this sub-cache.
+ inline int generations() { return generations_; }
+
+ private:
+ int generations_; // Number of generations.
+ Object** tables_; // Compilation cache tables - one for each generation.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
+};
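To make the generational scheme concrete, a comment-only sketch of what
Age() does (its definition appears earlier in this patch): every table
shifts one generation older and the youngest slot is reset.

  // Conceptually, for a sub-cache with N generations:
  //   tables_[N-1] <- tables_[N-2], ..., tables_[1] <- tables_[0]
  //   tables_[0]   <- undefined   // new, "unborn" young generation
  // An entry is evicted once it survives N agings without being promoted
  // back to generation 0 by a cache hit (see the Lookup methods below).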
+
+
+// Sub-cache for scripts.
+class CompilationCacheScript : public CompilationSubCache {
+ public:
+ explicit CompilationCacheScript(int generations);
+
+ Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+ void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+ private:
+ MUST_USE_RESULT MaybeObject* TryTablePut(
+ Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(
+ Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+ bool HasOrigin(Handle<SharedFunctionInfo> function_info,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+
+ void* script_histogram_;
+ bool script_histogram_initialized_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
+};
+
+
+// Sub-cache for eval scripts.
+class CompilationCacheEval: public CompilationSubCache {
+ public:
+ explicit CompilationCacheEval(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+ Handle<Context> context,
+ StrictModeFlag strict_mode);
+
+ void Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
+
+ private:
+ MUST_USE_RESULT MaybeObject* TryTablePut(
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
+
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
+};
+
+
+// Sub-cache for regular expressions.
+class CompilationCacheRegExp: public CompilationSubCache {
+ public:
+ explicit CompilationCacheRegExp(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
+
+ void Put(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+ private:
+ MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
+};
+
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
- static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset);
+ Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
- static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- StrictModeFlag strict_mode);
+ Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
+ Handle<Context> context,
+ bool is_global,
+ StrictModeFlag strict_mode);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
- static Handle<FixedArray> LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags);
+ Handle<FixedArray> LookupRegExp(Handle<String> source,
+ JSRegExp::Flags flags);
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
- static void PutScript(Handle<String> source,
- Handle<SharedFunctionInfo> function_info);
+ void PutScript(Handle<String> source,
+ Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
// with the shared function info. This may overwrite an existing mapping.
- static void PutEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- Handle<SharedFunctionInfo> function_info);
+ void PutEval(Handle<String> source,
+ Handle<Context> context,
+ bool is_global,
+ Handle<SharedFunctionInfo> function_info);
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
- static void PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
+ void PutRegExp(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
// Support for eager optimization tracking.
- static bool ShouldOptimizeEagerly(Handle<JSFunction> function);
- static void MarkForEagerOptimizing(Handle<JSFunction> function);
- static void MarkForLazyOptimizing(Handle<JSFunction> function);
+ bool ShouldOptimizeEagerly(Handle<JSFunction> function);
+ void MarkForEagerOptimizing(Handle<JSFunction> function);
+ void MarkForLazyOptimizing(Handle<JSFunction> function);
// Reset the eager optimization tracking data.
- static void ResetEagerOptimizingData();
+ void ResetEagerOptimizingData();
// Clear the cache - also used to initialize the cache at startup.
- static void Clear();
+ void Clear();
// Remove given shared function info from all caches.
- static void Remove(Handle<SharedFunctionInfo> function_info);
+ void Remove(Handle<SharedFunctionInfo> function_info);
// GC support.
- static void Iterate(ObjectVisitor* v);
- static void IterateFunctions(ObjectVisitor* v);
+ void Iterate(ObjectVisitor* v);
+ void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to
// avoid keeping them alive too long without using them.
- static void MarkCompactPrologue();
+ void MarkCompactPrologue();
// Enable/disable compilation cache. Used by debugger to disable compilation
// cache during debugging to make sure new scripts are always compiled.
- static void Enable();
- static void Disable();
+ void Enable();
+ void Disable();
+ private:
+ CompilationCache();
+ ~CompilationCache();
+
+ HashMap* EagerOptimizingSet();
+
+ // The number of sub caches covering the different types to cache.
+ static const int kSubCacheCount = 4;
+
+ CompilationCacheScript script_;
+ CompilationCacheEval eval_global_;
+ CompilationCacheEval eval_contextual_;
+ CompilationCacheRegExp reg_exp_;
+ CompilationSubCache* subcaches_[kSubCacheCount];
+
+ // Current enable state of the compilation cache.
+ bool enabled_;
+
+ HashMap* eager_optimizing_set_;
+
+ bool IsEnabled() { return FLAG_compilation_cache && enabled_; }
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationCache);
};
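A condensed sketch of how compiler.cc (later in this patch) drives this
instance API through the isolate; the names match the compile-script path
shown below:

  CompilationCache* cache = isolate->compilation_cache();
  Handle<SharedFunctionInfo> result =
      cache->LookupScript(source, script_name, line_offset, column_offset);
  if (result.is_null()) {
    // ... parse and compile the script ...
    cache->PutScript(source, result);
  }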
CompilationInfo::CompilationInfo(Handle<Script> script)
- : flags_(0),
+ : isolate_(script->GetIsolate()),
+ flags_(0),
function_(NULL),
scope_(NULL),
script_(script),
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
- : flags_(IsLazy::encode(true)),
+ : isolate_(shared_info->GetIsolate()),
+ flags_(IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
shared_info_(shared_info),
CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
- : flags_(IsLazy::encode(true)),
+ : isolate_(closure->GetIsolate()),
+ flags_(IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
closure_(closure),
// break points has actually been set.
static bool AlwaysFullCompiler() {
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Isolate* isolate = Isolate::Current();
if (V8::UseCrankshaft()) {
- return FLAG_always_full_compiler || Debug::has_break_points();
+ return FLAG_always_full_compiler || isolate->debug()->has_break_points();
} else {
- return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+ return FLAG_always_full_compiler || isolate->debugger()->IsDebuggerActive();
}
#else
return FLAG_always_full_compiler;
ASSERT(code->kind() == Code::FUNCTION);
code->set_optimizable(false);
info->SetCode(code);
- CompilationCache::MarkForLazyOptimizing(info->closure());
+ Isolate* isolate = code->GetIsolate();
+ isolate->compilation_cache()->MarkForLazyOptimizing(info->closure());
if (FLAG_trace_opt) {
PrintF("[disabled optimization for: ");
info->closure()->PrintName();
HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();
- if (Top::has_pending_exception()) {
+ if (info->isolate()->has_pending_exception()) {
info->SetCode(Handle<Code>::null());
return false;
}
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- PostponeInterruptsScope postpone;
+ Isolate* isolate = info->isolate();
+ PostponeInterruptsScope postpone(isolate);
- ASSERT(!i::Top::global_context().is_null());
+ ASSERT(!isolate->global_context().is_null());
Handle<Script> script = info->script();
- script->set_context_data((*i::Top::global_context())->data());
+ script->set_context_data((*isolate->global_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
if (!it.done()) {
script->set_eval_from_shared(
JSFunction::cast(it.frame()->function())->shared());
+ Code* code = it.frame()->LookupCode(isolate);
int offset = static_cast<int>(
- it.frame()->pc() - it.frame()->code()->instruction_start());
+ it.frame()->pc() - code->instruction_start());
script->set_eval_from_instructions_offset(Smi::FromInt(offset));
}
}
}
// Notify debugger
- Debugger::OnBeforeCompile(script);
+ isolate->debugger()->OnBeforeCompile(script);
#endif
// Only allow non-global compiles for eval.
// rest of the function into account to avoid overlap with the
// parsing statistics.
HistogramTimer* rate = info->is_eval()
- ? &Counters::compile_eval
- : &Counters::compile;
+ ? COUNTERS->compile_eval()
+ : COUNTERS->compile();
HistogramTimerScope timer(rate);
// Compile the code.
FunctionLiteral* lit = info->function();
- LiveEditFunctionTracker live_edit_tracker(lit);
+ LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
- Top::StackOverflow();
+ isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
ASSERT(!info->code().is_null());
Handle<SharedFunctionInfo> result =
- Factory::NewSharedFunctionInfo(
+ isolate->factory()->NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
info->code(),
Compiler::SetFunctionInfo(result, lit, true, script);
if (script->name()->IsString()) {
- PROFILE(CodeCreateEvent(
+ PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
script,
info->code()));
} else {
- PROFILE(CodeCreateEvent(
+ PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
- Heap::empty_string()));
+ isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
- Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
+ isolate->debugger()->OnAfterCompile(
+ script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
live_edit_tracker.RecordFunctionInfo(result, lit);
ScriptDataImpl* input_pre_data,
Handle<Object> script_data,
NativesFlag natives) {
+ Isolate* isolate = Isolate::Current();
int source_length = source->length();
- Counters::total_load_size.Increment(source_length);
- Counters::total_compile_size.Increment(source_length);
+ COUNTERS->total_load_size()->Increment(source_length);
+ COUNTERS->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
- VMState state(COMPILER);
+ VMState state(isolate, COMPILER);
+
+ CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
Handle<SharedFunctionInfo> result;
if (extension == NULL) {
- result = CompilationCache::LookupScript(source,
- script_name,
- line_offset,
- column_offset);
+ result = compilation_cache->LookupScript(source,
+ script_name,
+ line_offset,
+ column_offset);
}
if (result.is_null()) {
}
// Create a script object describing the script to be compiled.
- Handle<Script> script = Factory::NewScript(source);
+ Handle<Script> script = FACTORY->NewScript(source);
if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
script->set_column_offset(Smi::FromInt(column_offset));
}
- script->set_data(script_data.is_null() ? Heap::undefined_value()
+ script->set_data(script_data.is_null() ? HEAP->undefined_value()
: *script_data);
// Compile the function and add it to the cache.
if (natives == NATIVES_CODE) info.MarkAsAllowingNativesSyntax();
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
- CompilationCache::PutScript(source, result);
+ compilation_cache->PutScript(source, result);
}
// Get rid of the pre-parsing data (if necessary).
}
}
- if (result.is_null()) Top::ReportPendingMessages();
+ if (result.is_null()) isolate->ReportPendingMessages();
return result;
}
Handle<Context> context,
bool is_global,
StrictModeFlag strict_mode) {
+ Isolate* isolate = source->GetIsolate();
int source_length = source->length();
- Counters::total_eval_size.Increment(source_length);
- Counters::total_compile_size.Increment(source_length);
+ isolate->counters()->total_eval_size()->Increment(source_length);
+ isolate->counters()->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
- VMState state(COMPILER);
+ VMState state(isolate, COMPILER);
// Do a lookup in the compilation cache; if the entry is not there, invoke
// the compiler and add the result to the cache.
Handle<SharedFunctionInfo> result;
- result = CompilationCache::LookupEval(source,
- context,
- is_global,
- strict_mode);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ result = compilation_cache->LookupEval(source,
+ context,
+ is_global,
+ strict_mode);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
- Handle<Script> script = Factory::NewScript(source);
+ Handle<Script> script = isolate->factory()->NewScript(source);
CompilationInfo info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
// If caller is strict mode, the result must be strict as well,
// but not the other way around. Consider:
// eval("'use strict'; ...");
ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
- CompilationCache::PutEval(source, context, is_global, result);
+ compilation_cache->PutEval(source, context, is_global, result);
}
}
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
- VMState state(COMPILER);
+ VMState state(info->isolate(), COMPILER);
- PostponeInterruptsScope postpone;
+ Isolate* isolate = info->isolate();
+ PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
- Counters::total_compile_size.Increment(compiled_size);
+ isolate->counters()->total_compile_size()->Increment(compiled_size);
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info)) {
// Measure how long it takes to do the lazy compilation; only take the
// rest of the function into account to avoid overlap with the lazy
// parsing statistics.
- HistogramTimerScope timer(&Counters::compile_lazy);
+ HistogramTimerScope timer(isolate->counters()->compile_lazy());
// Compile the code.
if (!MakeCode(info)) {
- if (!Top::has_pending_exception()) {
- Top::StackOverflow();
+ if (!isolate->has_pending_exception()) {
+ isolate->StackOverflow();
}
} else {
ASSERT(!info->code().is_null());
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt && !Debug::has_break_points()) {
+ if (FLAG_always_opt &&
+ !Isolate::Current()->debug()->has_break_points()) {
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
- } else if (CompilationCache::ShouldOptimizeEagerly(function)) {
- RuntimeProfiler::OptimizeSoon(*function);
+ } else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
+ function)) {
+ isolate->runtime_profiler()->OptimizeSoon(*function);
}
}
}
info.SetFunction(literal);
info.SetScope(literal->scope());
- LiveEditFunctionTracker live_edit_tracker(literal);
+ LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
// if a function uses the special natives syntax, which is something the
// parser records.
bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive();
+ !LiveEditFunctionTracker::IsActive(info.isolate());
Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
// Generate code
if (FLAG_lazy && allow_lazy) {
- Handle<Code> code(Builtins::builtin(Builtins::LazyCompile));
+ Handle<Code> code(
+ info.isolate()->builtins()->builtin(Builtins::LazyCompile));
info.SetCode(code);
} else {
if (V8::UseCrankshaft()) {
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
- Factory::NewSharedFunctionInfo(literal->name(),
+ FACTORY->NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
info.code(),
scope_info);
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
- if (Logger::is_logging() || CpuProfiler::is_profiling()) {
+ if (info->isolate()->logger()->is_logging() || CpuProfiler::is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
- if (*code == Builtins::builtin(Builtins::LazyCompile)) return;
+ if (*code == info->isolate()->builtins()->builtin(Builtins::LazyCompile))
+ return;
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
USE(line_num);
- PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+ PROFILE(info->isolate(),
+ CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
String::cast(script->name()),
line_num));
} else {
- PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+ PROFILE(info->isolate(),
+ CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
shared->DebugName()));
explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
explicit CompilationInfo(Handle<JSFunction> closure);
+ Isolate* isolate() {
+ ASSERT(Isolate::Current() == isolate_);
+ return isolate_;
+ }
bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
}
private:
+ Isolate* isolate_;
+
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
virtual ~CompilationZoneScope() {
if (ShouldDeleteOnExit()) {
- FrameElement::ClearConstantList();
- Result::ClearConstantList();
+ Isolate* isolate = Isolate::Current();
+ isolate->frame_element_constant_list()->Clear();
+ isolate->result_constant_list()->Clear();
}
}
};
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the global context.
- ASSERT(Bootstrapper::IsActive());
+ ASSERT(Isolate::Current()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsGlobalContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
int* index_, PropertyAttributes* attributes) {
- Handle<Context> context(this);
+ Isolate* isolate = GetIsolate();
+ Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
*index_ = -1;
// check extension/with object
if (context->has_extension()) {
- Handle<JSObject> extension = Handle<JSObject>(context->extension());
+ Handle<JSObject> extension = Handle<JSObject>(context->extension(),
+ isolate);
// Context extension objects need to behave as if they have no
// prototype. So even if we want to follow prototype chains, we
// need to only do a local lookup for context extension objects.
// check non-parameter locals in context
Handle<SerializedScopeInfo> scope_info(
- context->closure()->shared()->scope_info());
+ context->closure()->shared()->scope_info(), isolate);
Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
int param_index = scope_info->ParameterIndex(*name);
if (param_index >= 0) {
// slot found.
- int index =
- scope_info->ContextSlotIndex(Heap::arguments_shadow_symbol(), NULL);
+ int index = scope_info->ContextSlotIndex(
+ isolate->heap()->arguments_shadow_symbol(), NULL);
ASSERT(index >= 0); // arguments must exist and be in the heap context
- Handle<JSObject> arguments(JSObject::cast(context->get(index)));
- ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
+ Handle<JSObject> arguments(JSObject::cast(context->get(index)),
+ isolate);
+ ASSERT(arguments->HasLocalProperty(isolate->heap()->length_symbol()));
if (FLAG_trace_contexts) {
PrintF("=> found parameter %d in arguments object\n", param_index);
}
if (context->IsGlobalContext()) {
follow_context_chain = false;
} else if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()));
+ context = Handle<Context>(Context::cast(context->closure()->context()),
+ isolate);
} else {
- context = Handle<Context>(context->previous());
+ context = Handle<Context>(context->previous(), isolate);
}
} while (follow_context_chain);
// Check that the context belongs to the weak global contexts list.
bool found = false;
- Object* context = Heap::global_contexts_list();
+ Object* context = GetHeap()->global_contexts_list();
while (!context->IsUndefined()) {
if (context == this) {
found = true;
} else {
prev->set_next_function_link(element_function->next_function_link());
}
- element_function->set_next_function_link(Heap::undefined_value());
+ element_function->set_next_function_link(GetHeap()->undefined_value());
return;
}
prev = element_function;
void Context::ClearOptimizedFunctions() {
- set(OPTIMIZED_FUNCTIONS_LIST, Heap::undefined_value());
+ set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
}
bool Context::IsBootstrappingOrContext(Object* object) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
- return Bootstrapper::IsActive() || object->IsContext();
+ return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
}
bool Context::IsBootstrappingOrGlobalObject(Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
- return Bootstrapper::IsActive() || object->IsGlobalObject();
+ Isolate* isolate = Isolate::Current();
+ return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
+ isolate->bootstrapper()->IsActive() ||
+ object->IsGlobalObject();
}
#endif
GlobalObject* global() {
Object* result = get(GLOBAL_INDEX);
- ASSERT(Heap::gc_state() != Heap::NOT_IN_GC ||
- IsBootstrappingOrGlobalObject(result));
+ ASSERT(IsBootstrappingOrGlobalObject(result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
bool is_function_context() { return unchecked_previous() == NULL; }
// Tells whether the global context is marked with out of memory.
- bool has_out_of_memory() {
- return global_context()->out_of_memory() == Heap::true_value();
- }
+ inline bool has_out_of_memory();
// Mark the global context with out of memory.
- void mark_out_of_memory() {
- global_context()->set_out_of_memory(Heap::true_value());
- }
+ inline void mark_out_of_memory();
// The exception holder is the object used as a with object in
// the implementation of a catch block.
// Returns true if a nonspace character is found, false if the end is reached.
template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
+static inline bool AdvanceToNonspace(ScannerConstants* scanner_constants,
+ Iterator* current,
+ EndMark end) {
while (*current != end) {
- if (!ScannerConstants::kIsWhiteSpace.get(**current)) return true;
+ if (!scanner_constants->IsWhiteSpace(**current)) return true;
++*current;
}
return false;
// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(Iterator current,
+static double InternalStringToIntDouble(ScannerConstants* scanner_constants,
+ Iterator current,
EndMark end,
bool negative,
bool allow_trailing_junk) {
} else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
digit = static_cast<char>(*current) - 'A' + 10;
} else {
- if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
+ if (allow_trailing_junk ||
+ !AdvanceToNonspace(scanner_constants, &current, end)) {
break;
} else {
return JUNK_STRING_VALUE;
exponent += radix_log_2;
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(scanner_constants, &current, end)) {
return JUNK_STRING_VALUE;
}
template <class Iterator, class EndMark>
-static double InternalStringToInt(Iterator current, EndMark end, int radix) {
+static double InternalStringToInt(ScannerConstants* scanner_constants,
+ Iterator current,
+ EndMark end,
+ int radix) {
const bool allow_trailing_junk = true;
const double empty_string_val = JUNK_STRING_VALUE;
- if (!AdvanceToNonspace(&current, end)) return empty_string_val;
+ if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+ return empty_string_val;
+ }
bool negative = false;
bool leading_zero = false;
if (*current == '+') {
// Ignore leading sign; skip following spaces.
++current;
- if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+ if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+ return JUNK_STRING_VALUE;
+ }
} else if (*current == '-') {
++current;
- if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+ if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+ return JUNK_STRING_VALUE;
+ }
negative = true;
}
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
- current, end, negative, allow_trailing_junk);
+ scanner_constants, current, end, negative, allow_trailing_junk);
case 4:
return InternalStringToIntDouble<2>(
- current, end, negative, allow_trailing_junk);
+ scanner_constants, current, end, negative, allow_trailing_junk);
case 8:
return InternalStringToIntDouble<3>(
- current, end, negative, allow_trailing_junk);
+ scanner_constants, current, end, negative, allow_trailing_junk);
case 16:
return InternalStringToIntDouble<4>(
- current, end, negative, allow_trailing_junk);
+ scanner_constants, current, end, negative, allow_trailing_junk);
case 32:
return InternalStringToIntDouble<5>(
- current, end, negative, allow_trailing_junk);
+ scanner_constants, current, end, negative, allow_trailing_junk);
default:
UNREACHABLE();
}
if (current == end) break;
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(scanner_constants, &current, end)) {
return JUNK_STRING_VALUE;
}
v = v * multiplier + part;
} while (!done);
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(scanner_constants, &current, end)) {
return JUNK_STRING_VALUE;
}
// 2. *current - gets the current character in the sequence.
// 3. ++current (advances the position).
template <class Iterator, class EndMark>
-static double InternalStringToDouble(Iterator current,
+static double InternalStringToDouble(ScannerConstants* scanner_constants,
+ Iterator current,
EndMark end,
int flags,
double empty_string_val) {
// 'parsing_done'.
// 4. 'current' is not dereferenced after the 'parsing_done' label.
// 5. Code before 'parsing_done' may rely on 'current != end'.
- if (!AdvanceToNonspace(&current, end)) return empty_string_val;
+ if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+ return empty_string_val;
+ }
const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
return JUNK_STRING_VALUE;
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(scanner_constants, &current, end)) {
return JUNK_STRING_VALUE;
}
return JUNK_STRING_VALUE; // "0x".
}
- return InternalStringToIntDouble<4>(current,
+ return InternalStringToIntDouble<4>(scanner_constants,
+ current,
end,
negative,
allow_trailing_junk);
exponent += (sign == '-' ? -num : num);
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(scanner_constants, &current, end)) {
return JUNK_STRING_VALUE;
}
exponent += insignificant_digits;
if (octal) {
- return InternalStringToIntDouble<3>(buffer,
+ return InternalStringToIntDouble<3>(scanner_constants,
+ buffer,
buffer + buffer_pos,
negative,
allow_trailing_junk);
double StringToDouble(String* str, int flags, double empty_string_val) {
+ ScannerConstants* scanner_constants =
+ Isolate::Current()->scanner_constants();
StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
- return InternalStringToDouble(begin, end, flags, empty_string_val);
+ return InternalStringToDouble(scanner_constants, begin, end, flags,
+ empty_string_val);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
- return InternalStringToDouble(begin, end, flags, empty_string_val);
+ return InternalStringToDouble(scanner_constants, begin, end, flags,
+ empty_string_val);
} else {
StringInputBuffer buffer(str);
- return InternalStringToDouble(StringInputBufferIterator(&buffer),
+ return InternalStringToDouble(scanner_constants,
+ StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
flags,
empty_string_val);
double StringToInt(String* str, int radix) {
+ ScannerConstants* scanner_constants =
+ Isolate::Current()->scanner_constants();
StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
- return InternalStringToInt(begin, end, radix);
+ return InternalStringToInt(scanner_constants, begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
- return InternalStringToInt(begin, end, radix);
+ return InternalStringToInt(scanner_constants, begin, end, radix);
} else {
StringInputBuffer buffer(str);
- return InternalStringToInt(StringInputBufferIterator(&buffer),
+ return InternalStringToInt(scanner_constants,
+ StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
radix);
}
double StringToDouble(const char* str, int flags, double empty_string_val) {
+ ScannerConstants* scanner_constants =
+ Isolate::Current()->scanner_constants();
const char* end = str + StrLength(str);
- return InternalStringToDouble(str, end, flags, empty_string_val);
+ return InternalStringToDouble(scanner_constants, str, end, flags,
+ empty_string_val);
}
double StringToDouble(Vector<const char> str,
int flags,
double empty_string_val) {
+ ScannerConstants* scanner_constants =
+ Isolate::Current()->scanner_constants();
const char* end = str.start() + str.length();
- return InternalStringToDouble(str.start(), end, flags, empty_string_val);
+ return InternalStringToDouble(scanner_constants, str.start(), end, flags,
+ empty_string_val);
}
}
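For illustration, a minimal sketch of the pattern used throughout the
conversions above: whitespace classification now comes from the isolate's
ScannerConstants instance instead of a static bitmap ("c" is a hypothetical
input character):

  // Before: static lookup table.
  //   ScannerConstants::kIsWhiteSpace.get(c)
  // After: per-isolate instance, fetched once per top-level conversion
  // and threaded through the helpers as an argument.
  ScannerConstants* sc = Isolate::Current()->scanner_constants();
  bool is_ws = sc->IsWhiteSpace(c);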
+static Mutex* dtoa_lock_one = OS::CreateMutex();
+static Mutex* dtoa_lock_zero = OS::CreateMutex();
+
+
} } // namespace v8::internal
+
+
+extern "C" {
+void ACQUIRE_DTOA_LOCK(int n) {
+ ASSERT(n == 0 || n == 1);
+ (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
+}
+
+
+void FREE_DTOA_LOCK(int n) {
+ ASSERT(n == 0 || n == 1);
+ (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
+ Unlock();
+}
+}
#include "v8.h"
#include "counters.h"
+#include "isolate.h"
#include "platform.h"
namespace v8 {
namespace internal {
-CounterLookupCallback StatsTable::lookup_function_ = NULL;
-CreateHistogramCallback StatsTable::create_histogram_function_ = NULL;
-AddHistogramSampleCallback StatsTable::add_histogram_sample_function_ = NULL;
+StatsTable::StatsTable()
+ : lookup_function_(NULL),
+ create_histogram_function_(NULL),
+ add_histogram_sample_function_(NULL) {}
+
+
+int* StatsCounter::FindLocationInStatsTable() const {
+ return Isolate::Current()->stats_table()->FindLocation(name_);
+}
+
// Start the timer.
void StatsCounterTimer::Start() {
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- StatsTable::AddHistogramSample(histogram_, milliseconds);
+ Isolate::Current()->stats_table()->
+ AddHistogramSample(histogram_, milliseconds);
}
}
+
+void* HistogramTimer::CreateHistogram() const {
+ return Isolate::Current()->stats_table()->
+ CreateHistogram(name_, 0, 10000, 50);
+}
+
} } // namespace v8::internal
// counters for monitoring. Counters can be looked up and
// manipulated by name.
-class StatsTable : public AllStatic {
+class StatsTable {
public:
// Register an application-defined function where
// counters can be looked up.
- static void SetCounterFunction(CounterLookupCallback f) {
+ void SetCounterFunction(CounterLookupCallback f) {
lookup_function_ = f;
}
// Register an application-defined function to create
// a histogram for passing to the AddHistogramSample function
- static void SetCreateHistogramFunction(CreateHistogramCallback f) {
+ void SetCreateHistogramFunction(CreateHistogramCallback f) {
create_histogram_function_ = f;
}
// Register an application-defined function to add a sample
// to a histogram created with CreateHistogram function
- static void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
+ void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
add_histogram_sample_function_ = f;
}
- static bool HasCounterFunction() {
+ bool HasCounterFunction() const {
return lookup_function_ != NULL;
}
// may receive a different location to store its counter.
// The return value must not be cached and re-used across
// threads, although a single thread is free to cache it.
- static int* FindLocation(const char* name) {
+ int* FindLocation(const char* name) {
if (!lookup_function_) return NULL;
return lookup_function_(name);
}
// function. min and max define the expected minimum and maximum
// sample values. buckets is the maximum number of buckets
// that the samples will be grouped into.
- static void* CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets) {
+ void* CreateHistogram(const char* name,
+ int min,
+ int max,
+ size_t buckets) {
if (!create_histogram_function_) return NULL;
return create_histogram_function_(name, min, max, buckets);
}
// Add a sample to a histogram created with the CreateHistogram
// function.
- static void AddHistogramSample(void* histogram, int sample) {
+ void AddHistogramSample(void* histogram, int sample) {
if (!add_histogram_sample_function_) return;
return add_histogram_sample_function_(histogram, sample);
}
private:
- static CounterLookupCallback lookup_function_;
- static CreateHistogramCallback create_histogram_function_;
- static AddHistogramSampleCallback add_histogram_sample_function_;
+ StatsTable();
+
+ CounterLookupCallback lookup_function_;
+ CreateHistogramCallback create_histogram_function_;
+ AddHistogramSampleCallback add_histogram_sample_function_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(StatsTable);
};
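
With StatsTable now per-isolate rather than static, the embedder-facing wiring
keeps the same shape. A sketch of the embedder side, assuming the public
v8::V8::SetCounterFunction entry point routes to this table; the std::map
backing store is illustrative (std::map element addresses are stable, which
the FindLocation contract needs), and the sketch ignores thread safety:

  #include <map>
  #include <string>
  #include "v8.h"

  static std::map<std::string, int> counters;  // illustrative storage

  static int* LookupCounter(const char* name) {
    return &counters[name];  // returning NULL would disable the counter
  }

  int main(int argc, char** argv) {
    v8::V8::SetCounterFunction(LookupCounter);
    // ... initialize V8 and run scripts; StatsCounter reads hit the map ...
    return 0;
  }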
// StatsCounters are dynamically created values which can be tracked in
if (lookup_done_)
return ptr_;
lookup_done_ = true;
- ptr_ = StatsTable::FindLocation(name_);
+ ptr_ = FindLocationInStatsTable();
return ptr_;
}
+
+ private:
+ int* FindLocationInStatsTable() const;
};
// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
void* GetHistogram() {
if (!lookup_done_) {
lookup_done_ = true;
- histogram_ = StatsTable::CreateHistogram(name_, 0, 10000, 50);
+ histogram_ = CreateHistogram();
}
return histogram_;
}
+
+ private:
+ void* CreateHistogram() const;
};
// Helper class for scoping a HistogramTimer.
static const int kTickSamplesBufferChunksCount = 16;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
- : Thread("v8:ProfEvntProc"),
+ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
+ ProfileGenerator* generator)
+ : Thread(isolate, "v8:ProfEvntProc"),
generator_(generator),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
void ProfilerEventsProcessor::AddCurrentStack() {
TickSampleEventRecord record;
TickSample* sample = &record.sample;
- sample->state = Top::current_vm_state();
+ sample->state = Isolate::Current()->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
sample->tos = NULL;
sample->frames_count = 0;
}
-CpuProfiler* CpuProfiler::singleton_ = NULL;
-Atomic32 CpuProfiler::is_profiling_ = false;
-
void CpuProfiler::StartProfiling(const char* title) {
- ASSERT(singleton_ != NULL);
- singleton_->StartCollectingProfile(title);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
void CpuProfiler::StartProfiling(String* title) {
- ASSERT(singleton_ != NULL);
- singleton_->StartCollectingProfile(title);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
- return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
+ return is_profiling() ?
+ Isolate::Current()->cpu_profiler()->StopCollectingProfile(title) : NULL;
}
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
return is_profiling() ?
- singleton_->StopCollectingProfile(security_token, title) : NULL;
+ Isolate::Current()->cpu_profiler()->StopCollectingProfile(
+ security_token, title) : NULL;
}
int CpuProfiler::GetProfilesCount() {
- ASSERT(singleton_ != NULL);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
// The count of profiles doesn't depend on a security token.
- return singleton_->profiles_->Profiles(
+ return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
TokenEnumerator::kNoSecurityToken)->length();
}
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
- ASSERT(singleton_ != NULL);
- const int token = singleton_->token_enumerator_->GetTokenId(security_token);
- return singleton_->profiles_->Profiles(token)->at(index);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
+ const int token = profiler->token_enumerator_->GetTokenId(security_token);
+ return profiler->profiles_->Profiles(token)->at(index);
}
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
- ASSERT(singleton_ != NULL);
- const int token = singleton_->token_enumerator_->GetTokenId(security_token);
- return singleton_->profiles_->GetProfile(token, uid);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
+ const int token = profiler->token_enumerator_->GetTokenId(security_token);
+ return profiler->profiles_->GetProfile(token, uid);
}
-TickSample* CpuProfiler::TickSampleEvent() {
- if (CpuProfiler::is_profiling()) {
- return singleton_->processor_->TickSampleEvent();
+TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
+ if (CpuProfiler::is_profiling(isolate)) {
+ return isolate->cpu_profiler()->processor_->TickSampleEvent();
} else {
return NULL;
}
void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
- singleton_->processor_->CallbackCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, const char* comment) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag, comment, code->address(), code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
- Heap::empty_string(),
+ HEAP->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
Code* code,
SharedFunctionInfo* shared,
String* name) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
- Heap::empty_string(),
+ HEAP->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
shared->DebugName(),
source,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
args_count,
code->address(),
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
- singleton_->processor_->CodeMoveEvent(from, to);
+ Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
}
void CpuProfiler::CodeDeleteEvent(Address from) {
- singleton_->processor_->CodeDeleteEvent(from);
+ Isolate::Current()->cpu_profiler()->processor_->CodeDeleteEvent(from);
}
void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
- singleton_->processor_->SharedFunctionInfoMoveEvent(from, to);
+ CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
+ profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
}
void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
- singleton_->processor_->CallbackCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "get ", name, entry_point);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
- singleton_->processor_->RegExpCodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
Logger::REG_EXP_TAG,
"RegExp: ",
source,
void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
- singleton_->processor_->CallbackCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "set ", name, entry_point);
}
next_profile_uid_(1),
token_enumerator_(new TokenEnumerator()),
generator_(NULL),
- processor_(NULL) {
+ processor_(NULL),
+ is_profiling_(false) {
}
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
// Disable logging when using the new implementation.
- saved_logging_nesting_ = Logger::logging_nesting_;
- Logger::logging_nesting_ = 0;
+ saved_logging_nesting_ = LOGGER->logging_nesting_;
+ LOGGER->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
+ processor_ = new ProfilerEventsProcessor(Isolate::Current(), generator_);
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
- if (Heap::HasBeenSetup()) {
+ if (HEAP->HasBeenSetup()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
- Logger::LogCodeObjects();
+ LOGGER->LogCodeObjects();
FLAG_log_code = saved_log_code_flag;
}
- Logger::LogCompiledFunctions();
- Logger::LogAccessorCallbacks();
+ LOGGER->LogCompiledFunctions();
+ LOGGER->LogAccessorCallbacks();
}
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+ Sampler* sampler = reinterpret_cast<Sampler*>(LOGGER->ticker_);
if (!sampler->IsActive()) sampler->Start();
sampler->IncreaseProfilingDepth();
}
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
if (profiles_->IsLastProfile(title)) {
- Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+ Sampler* sampler = reinterpret_cast<Sampler*>(LOGGER->ticker_);
sampler->DecreaseProfilingDepth();
sampler->Stop();
processor_->Stop();
processor_ = NULL;
NoBarrier_Store(&is_profiling_, false);
generator_ = NULL;
- Logger::logging_nesting_ = saved_logging_nesting_;
+ LOGGER->logging_nesting_ = saved_logging_nesting_;
}
}
void CpuProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (singleton_ == NULL) {
- singleton_ = new CpuProfiler();
+ Isolate* isolate = Isolate::Current();
+ if (isolate->cpu_profiler() == NULL) {
+ isolate->set_cpu_profiler(new CpuProfiler());
}
#endif
}
void CpuProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (singleton_ != NULL) {
- delete singleton_;
+ Isolate* isolate = Isolate::Current();
+ if (isolate->cpu_profiler() != NULL) {
+ delete isolate->cpu_profiler();
}
- singleton_ = NULL;
+ isolate->set_cpu_profiler(NULL);
#endif
}
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ explicit ProfilerEventsProcessor(Isolate* isolate,
+ ProfileGenerator* generator);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
} } // namespace v8::internal
-#define PROFILE(Call) \
- LOG(Call); \
+#define PROFILE(isolate, Call) \
+ LOG(isolate, Call); \
do { \
if (v8::internal::CpuProfiler::is_profiling()) { \
v8::internal::CpuProfiler::Call; \
} \
} while (false)
#else
-#define PROFILE(Call) LOG(Call)
+#define PROFILE(isolate, Call) LOG(isolate, Call)
#endif // ENABLE_LOGGING_AND_PROFILING
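
Because the macro now takes the isolate as its first argument, every call site
changes in the same mechanical way, e.g. (the event shown is illustrative):

  // Before:  PROFILE(CodeMoveEvent(from, to));
  // After:   PROFILE(isolate, CodeMoveEvent(from, to));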
namespace v8 {
namespace internal {
+
+// TODO(isolates): isolatify this class.
class CpuProfiler {
public:
static void Setup();
static CpuProfile* FindProfile(Object* security_token, unsigned uid);
// Invoked from stack sampler (thread or signal handler).
- static TickSample* TickSampleEvent();
+ static TickSample* TickSampleEvent(Isolate* isolate);
// Must be called via PROFILE macro, otherwise it will crash when
// profiling is not enabled.
static void SetterCallbackEvent(String* name, Address entry_point);
static void SharedFunctionInfoMoveEvent(Address from, Address to);
+ // TODO(isolates): this doesn't have to use atomics anymore.
+
static INLINE(bool is_profiling()) {
- return NoBarrier_Load(&is_profiling_);
+ return is_profiling(Isolate::Current());
+ }
+
+ static INLINE(bool is_profiling(Isolate* isolate)) {
+ CpuProfiler* profiler = isolate->cpu_profiler();
+ return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
}
private:
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
-
- static CpuProfiler* singleton_;
- static Atomic32 is_profiling_;
+ Atomic32 is_profiling_;
#else
static INLINE(bool is_profiling()) { return false; }
void RunRemoteDebugger(int port) {
- RemoteDebugger debugger(port);
+ RemoteDebugger debugger(i::Isolate::Current(), port);
debugger.Run();
}
}
// Start the receiver thread.
- ReceiverThread receiver(this);
+ ReceiverThread receiver(isolate_, this);
receiver.Start();
// Start the keyboard thread.
- KeyboardThread keyboard(this);
+ KeyboardThread keyboard(isolate_, this);
keyboard.Start();
PrintPrompt();
// Remote debugging class.
class RemoteDebugger {
public:
- explicit RemoteDebugger(int port)
+ RemoteDebugger(i::Isolate* isolate, int port)
: port_(port),
event_access_(i::OS::CreateMutex()),
event_available_(i::OS::CreateSemaphore(0)),
- head_(NULL), tail_(NULL) {}
+ head_(NULL), tail_(NULL), isolate_(isolate) {}
void Run();
// Handle events from the subordinate threads.
i::Semaphore* event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
+ i::Isolate* isolate_;
friend class ReceiverThread;
};
// Thread reading from debugged V8 instance.
class ReceiverThread: public i::Thread {
public:
- explicit ReceiverThread(RemoteDebugger* remote_debugger)
- : Thread("d8:ReceiverThrd"),
+ ReceiverThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
+ : Thread(isolate, "d8:ReceiverThrd"),
remote_debugger_(remote_debugger) {}
~ReceiverThread() {}
// Thread reading keyboard input.
class KeyboardThread: public i::Thread {
public:
- explicit KeyboardThread(RemoteDebugger* remote_debugger)
- : Thread("d8:KeyboardThrd"),
+ explicit KeyboardThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
+ : Thread(isolate, "d8:KeyboardThrd"),
remote_debugger_(remote_debugger) {}
~KeyboardThread() {}
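
Each of these d8 threads follows the same updated convention: i::Thread now
takes the owning isolate first. A sketch of a new subclass under that
convention; WorkerThread and its body are illustrative:

  class WorkerThread : public i::Thread {
   public:
    explicit WorkerThread(i::Isolate* isolate)
        : Thread(isolate, "d8:WorkerThrd") {}  // isolate is now required
    virtual void Run() {
      // Thread body; runs with the isolate association set up by the base.
    }
  };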
#include <stdlib.h>
#include <errno.h>
+#include "v8.h"
+
#include "d8.h"
#include "d8-debug.h"
#include "debug.h"
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
- i::Factory::NewFixedArray(js_args.argc());
+ FACTORY->NewFixedArray(js_args.argc());
for (int j = 0; j < js_args.argc(); j++) {
i::Handle<i::String> arg =
- i::Factory::NewStringFromUtf8(i::CStrVector(js_args[j]));
+ FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
- i::Factory::NewJSArrayWithElements(arguments_array);
+ FACTORY->NewJSArrayWithElements(arguments_array);
global_template->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Install the debugger object in the utility scope
- i::Debug::Load();
- i::Handle<i::JSObject> debug
- = i::Handle<i::JSObject>(i::Debug::debug_context()->global());
+ i::Debug* debug = i::Isolate::Current()->debug();
+ debug->Load();
+ i::Handle<i::JSObject> js_debug
+ = i::Handle<i::JSObject>(debug->debug_context()->global());
utility_context_->Global()->Set(String::New("$debug"),
- Utils::ToLocal(debug));
+ Utils::ToLocal(js_debug));
#endif
// Run the d8 shell utility script in the utility context
#ifdef ENABLE_DEBUGGER_SUPPORT
// Set the security token of the debug context to allow access.
- i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
+ debug->debug_context()->set_security_token(HEAP->undefined_value());
// Start the debugger agent if requested.
if (i::FLAG_debugger_agent) {
class ShellThread : public i::Thread {
public:
- ShellThread(int no, i::Vector<const char> files)
- : Thread("d8:ShellThread"),
+ ShellThread(i::Isolate* isolate, int no, i::Vector<const char> files)
+ : Thread(isolate, "d8:ShellThread"),
no_(no), files_(files) { }
virtual void Run();
private:
const char* files = ReadChars(argv[++i], &size);
if (files == NULL) return 1;
ShellThread* thread =
- new ShellThread(threads.length(),
+ new ShellThread(i::Isolate::Current(),
+ threads.length(),
i::Vector<const char>(files, size));
thread->Start();
threads.Add(thread);
explicit BitVector(int length)
: length_(length),
data_length_(SizeFor(length)),
- data_(Zone::NewArray<uint32_t>(data_length_)) {
+ data_(ZONE->NewArray<uint32_t>(data_length_)) {
ASSERT(length > 0);
Clear();
}
BitVector(const BitVector& other)
: length_(other.length()),
data_length_(SizeFor(length_)),
- data_(Zone::NewArray<uint32_t>(data_length_)) {
+ data_(ZONE->NewArray<uint32_t>(data_length_)) {
CopyFrom(other);
}
explicit SparseSet(int universe_size)
: dense_(4),
- sparse_(Zone::NewArray<int>(universe_size)) {
+ sparse_(ZONE->NewArray<int>(universe_size)) {
#ifdef DEBUG
size_ = universe_size;
iterator_count_ = 0;
explicit InputReader(Vector<Char> s)
: index_(0),
buffer_(s),
- has_read_number_(false) {
+ has_read_number_(false),
+ scanner_constants_(Isolate::Current()->scanner_constants()) {
Next();
}
}
bool SkipWhiteSpace() {
- if (ScannerConstants::kIsWhiteSpace.get(ch_)) {
+ if (scanner_constants_->IsWhiteSpace(ch_)) {
Next();
return true;
}
Vector<Char> buffer_;
bool has_read_number_;
uint32_t ch_;
+ ScannerConstants* scanner_constants_;
};
enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
// Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- DebuggerAgent::instance_->DebuggerMessage(message);
+ DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
+ ASSERT(agent != NULL);
+ agent->DebuggerMessage(message);
}
-// static
-DebuggerAgent* DebuggerAgent::instance_ = NULL;
// Debugger agent main thread.
void DebuggerAgent::Run() {
listening_->Wait();
}
+static const char* kCreateSessionMessage =
+ "Remote debugging session already active\r\n";
+
void DebuggerAgent::CreateSession(Socket* client) {
ScopedLock with(session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
- static const char* message = "Remote debugging session already active\r\n";
-
- client->Send(message, StrLength(message));
+ client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
delete client;
return;
}
// Create a new session and hook up the debug message handler.
- session_ = new DebuggerAgentSession(this, client);
+ session_ = new DebuggerAgentSession(isolate(), this, client);
v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
session_->Start();
}
}
-const char* DebuggerAgentUtil::kContentLength = "Content-Length";
-int DebuggerAgentUtil::kContentLengthSize =
+const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
+const int DebuggerAgentUtil::kContentLengthSize =
StrLength(kContentLength);
// handles connection from a remote debugger.
class DebuggerAgent: public Thread {
public:
- explicit DebuggerAgent(const char* name, int port)
- : Thread(name),
+ DebuggerAgent(Isolate* isolate, const char* name, int port)
+ : Thread(isolate, name),
name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
terminate_now_(OS::CreateSemaphore(0)),
listening_(OS::CreateSemaphore(0)) {
- ASSERT(instance_ == NULL);
- instance_ = this;
+ ASSERT(Isolate::Current()->debugger_agent_instance() == NULL);
+ Isolate::Current()->set_debugger_agent_instance(this);
}
~DebuggerAgent() {
- instance_ = NULL;
+ Isolate::Current()->set_debugger_agent_instance(NULL);
delete server_;
}
Semaphore* terminate_now_; // Semaphore to signal termination.
Semaphore* listening_;
- static DebuggerAgent* instance_;
-
friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
// debugger and sends debugger events/responses to the remote debugger.
class DebuggerAgentSession: public Thread {
public:
- DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
- : Thread("v8:DbgAgntSessn"),
+ DebuggerAgentSession(Isolate* isolate, DebuggerAgent* agent, Socket* client)
+ : Thread(isolate, "v8:DbgAgntSessn"),
agent_(agent), client_(client) {}
void DebuggerMessage(Vector<uint16_t> message);
// Utility methods factored out to be used by the D8 shell as well.
class DebuggerAgentUtil {
public:
- static const char* kContentLength;
- static int kContentLengthSize;
+ static const char* const kContentLength;
+ static const int kContentLengthSize;
static SmartPointer<char> ReceiveMessage(const Socket* conn);
static bool SendConnectMessage(const Socket* conn,
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
+
+
+Debug::Debug(Isolate* isolate)
+ : has_break_points_(false),
+ script_cache_(NULL),
+ debug_info_list_(NULL),
+ disable_break_(false),
+ break_on_exception_(false),
+ break_on_uncaught_exception_(false),
+ debug_break_return_(NULL),
+ debug_break_slot_(NULL),
+ isolate_(isolate) {
+ memset(registers_, 0, sizeof(JSCallerSavedBuffer));
+}
+
+
+Debug::~Debug() {
+}
+
+
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
ScopedVector<char> data(s->Length() + 1);
static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
- CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc, kind), Code);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(
+ isolate,
+ isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
+ Code);
}
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
+static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
+ Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
- StubCache::ComputeCallDebugPrepareStepIn(argc, kind), Code);
+ isolate,
+ isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
+ Code);
}
-static v8::Handle<v8::Context> GetDebugEventContext() {
- Handle<Context> context = Debug::debugger_entry()->GetContext();
- // Top::context() may have been NULL when "script collected" event occured.
- if (*context == NULL) {
- return v8::Local<v8::Context>();
- }
+static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
+ Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
+  // Isolate::context() may have been NULL when the "script collected" event
+  // occurred.
+ if (context.is_null()) return v8::Local<v8::Context>();
Handle<Context> global_context(context->global_context());
return v8::Utils::ToLocal(global_context);
}
}
-bool Debug::has_break_points_ = false;
-ScriptCache* Debug::script_cache_ = NULL;
-DebugInfoListNode* Debug::debug_info_list_ = NULL;
-
-
// Threading support.
void Debug::ThreadInit() {
thread_local_.break_count_ = 0;
thread_local_.step_into_fp_ = 0;
thread_local_.step_out_fp_ = 0;
thread_local_.after_break_target_ = 0;
+ // TODO(isolates): frames_are_dropped_?
thread_local_.debugger_entry_ = NULL;
thread_local_.pending_interrupts_ = 0;
thread_local_.restarter_frame_function_pointer_ = NULL;
}
-JSCallerSavedBuffer Debug::registers_;
-Debug::ThreadLocal Debug::thread_local_;
-
-
char* Debug::ArchiveDebug(char* storage) {
char* to = storage;
memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
int Debug::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal) + sizeof(registers_);
+ return sizeof(ThreadLocal) + sizeof(JSCallerSavedBuffer);
}
const int Debug::kFrameDropperFrameSize = 4;
-
-
-
-// Default break enabled.
-bool Debug::disable_break_ = false;
-
-// Default call debugger on uncaught exception.
-bool Debug::break_on_exception_ = false;
-bool Debug::break_on_uncaught_exception_ = false;
-
-Handle<Context> Debug::debug_context_ = Handle<Context>();
-Code* Debug::debug_break_return_ = NULL;
-Code* Debug::debug_break_slot_ = NULL;
-
-
void ScriptCache::Add(Handle<Script> script) {
+ Isolate* isolate = Isolate::Current();
// Create an entry in the hash map for the script.
int id = Smi::cast(script->id())->value();
HashMap::Entry* entry =
// Globalize the script object, make it weak and use the location of the
// global handle as the value in the hash map.
Handle<Script> script_ =
- Handle<Script>::cast((GlobalHandles::Create(*script)));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this, ScriptCache::HandleWeakScript);
+ Handle<Script>::cast(
+ (isolate->global_handles()->Create(*script)));
+ isolate->global_handles()->MakeWeak(
+ reinterpret_cast<Object**>(script_.location()),
+ this,
+ ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
Handle<FixedArray> ScriptCache::GetScripts() {
- Handle<FixedArray> instances = Factory::NewFixedArray(occupancy());
+ Handle<FixedArray> instances = FACTORY->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry->value != NULL);
void ScriptCache::ProcessCollectedScripts() {
+ Isolate* isolate = Isolate::Current();
for (int i = 0; i < collected_scripts_.length(); i++) {
- Debugger::OnScriptCollected(collected_scripts_[i]);
+ isolate->debugger()->OnScriptCollected(collected_scripts_[i]);
}
collected_scripts_.Clear();
}
void ScriptCache::Clear() {
+ Isolate* isolate = Isolate::Current();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
Object** location = reinterpret_cast<Object**>(entry->value);
ASSERT((*location)->IsScript());
- GlobalHandles::ClearWeakness(location);
- GlobalHandles::Destroy(location);
+ isolate->global_handles()->ClearWeakness(location);
+ isolate->global_handles()->Destroy(location);
}
// Clear the content of the hash map.
HashMap::Clear();
if (create_heap_objects) {
// Get code to handle debug break on return.
debug_break_return_ =
- Builtins::builtin(Builtins::Return_DebugBreak);
+ Isolate::Current()->builtins()->builtin(Builtins::Return_DebugBreak);
ASSERT(debug_break_return_->IsCode());
// Get code to handle debug break in debug break slots.
debug_break_slot_ =
- Builtins::builtin(Builtins::Slot_DebugBreak);
+ Isolate::Current()->builtins()->builtin(Builtins::Slot_DebugBreak);
ASSERT(debug_break_slot_->IsCode());
}
}
void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
+ Debug* debug = Isolate::Current()->debug();
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because of
// Runtime::FindSharedFunctionInfoInScript.
BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
it.ClearAllDebugBreak();
- RemoveDebugInfo(node->debug_info());
+ debug->RemoveDebugInfo(node->debug_info());
#ifdef DEBUG
- node = Debug::debug_info_list_;
+ node = debug->debug_info_list_;
while (node != NULL) {
ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
node = node->next();
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
+ Isolate* isolate = Isolate::Current();
// Globalize the request debug info object and make it weak.
- debug_info_ = Handle<DebugInfo>::cast((GlobalHandles::Create(debug_info)));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
- this, Debug::HandleWeakDebugInfo);
+ debug_info_ = Handle<DebugInfo>::cast(
+ (isolate->global_handles()->Create(debug_info)));
+ isolate->global_handles()->MakeWeak(
+ reinterpret_cast<Object**>(debug_info_.location()),
+ this,
+ Debug::HandleWeakDebugInfo);
}
DebugInfoListNode::~DebugInfoListNode() {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
+ Isolate::Current()->global_handles()->Destroy(
+ reinterpret_cast<Object**>(debug_info_.location()));
}
}
// Find source and name for the requested script.
- Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+ Handle<String> source_code =
+ Isolate::Current()->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> script_name = Factory::NewStringFromAscii(name);
+ Handle<String> script_name = FACTORY->NewStringFromAscii(name);
// Compile the script.
Handle<SharedFunctionInfo> function_info;
// Silently ignore stack overflows during compilation.
if (function_info.is_null()) {
- ASSERT(Top::has_pending_exception());
- Top::clear_pending_exception();
+ ASSERT(Isolate::Current()->has_pending_exception());
+ Isolate::Current()->clear_pending_exception();
return false;
}
// Execute the shared function in the debugger context.
- Handle<Context> context = Top::global_context();
+ Handle<Context> context = Isolate::Current()->global_context();
bool caught_exception = false;
Handle<JSFunction> function =
- Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
+ FACTORY->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> result =
Execution::TryCall(function, Handle<Object>(context->global()),
0, NULL, &caught_exception);
// Return if debugger is already loaded.
if (IsLoaded()) return true;
+ Isolate* isolate = Isolate::Current();
+
// Bail out if we're already in the process of compiling the native
// JavaScript source code for the debugger.
- if (Debugger::compiling_natives() || Debugger::is_loading_debugger())
+ if (isolate->debugger()->compiling_natives() ||
+ isolate->debugger()->is_loading_debugger())
return false;
- Debugger::set_loading_debugger(true);
+ isolate->debugger()->set_loading_debugger(true);
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
DisableBreak disable(true);
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate);
// Create the debugger context.
HandleScope scope;
Handle<Context> context =
- Bootstrapper::CreateEnvironment(Handle<Object>::null(),
- v8::Handle<ObjectTemplate>(),
- NULL);
+ isolate->bootstrapper()->CreateEnvironment(
+ Handle<Object>::null(),
+ v8::Handle<ObjectTemplate>(),
+ NULL);
// Use the debugger context.
- SaveContext save;
- Top::set_context(*context);
+ SaveContext save(isolate);
+ isolate->set_context(*context);
// Expose the builtins object in the debugger context.
- Handle<String> key = Factory::LookupAsciiSymbol("builtins");
+ Handle<String> key = FACTORY->LookupAsciiSymbol("builtins");
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(global, key, Handle<Object>(global->builtins()),
NONE, kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
- Debugger::set_compiling_natives(true);
+ isolate->debugger()->set_compiling_natives(true);
bool caught_exception =
!CompileDebuggerScript(Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(Natives::GetIndex("debug"));
!CompileDebuggerScript(Natives::GetIndex("liveedit"));
}
- Debugger::set_compiling_natives(false);
+ isolate->debugger()->set_compiling_natives(false);
// Make sure we mark the debugger as not loading before we might
// return.
- Debugger::set_loading_debugger(false);
+ isolate->debugger()->set_loading_debugger(false);
// Check for caught exceptions.
if (caught_exception) return false;
DestroyScriptCache();
// Clear debugger context global handle.
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location()));
+ Isolate::Current()->global_handles()->Destroy(
+ reinterpret_cast<Object**>(debug_context_.location()));
debug_context_ = Handle<Context>();
}
}
-Object* Debug::Break(Arguments args) {
+// This remains a static method so that generated code can call it.
+Object* Debug::Break(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+
+ Debug* debug = isolate->debug();
+ Heap* heap = isolate->heap();
HandleScope scope;
ASSERT(args.length() == 0);
- thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
+ debug->thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
// Just continue if breaks are disabled or debugger cannot be loaded.
- if (disable_break() || !Load()) {
- SetAfterBreakTarget(frame);
- return Heap::undefined_value();
+ if (debug->disable_break() || !debug->Load()) {
+ debug->SetAfterBreakTarget(frame);
+ return heap->undefined_value();
}
// Enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
// Postpone interrupt during breakpoint processing.
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate);
// Get the debug info (create it if it does not exist).
Handle<SharedFunctionInfo> shared =
break_location_iterator.FindBreakLocationFromAddress(frame->pc());
// Check whether step next reached a new statement.
- if (!StepNextContinue(&break_location_iterator, frame)) {
+ if (!debug->StepNextContinue(&break_location_iterator, frame)) {
// Decrease steps left if performing multiple steps.
- if (thread_local_.step_count_ > 0) {
- thread_local_.step_count_--;
+ if (debug->thread_local_.step_count_ > 0) {
+ debug->thread_local_.step_count_--;
}
}
// If there is one or more real break points check whether any of these are
// triggered.
- Handle<Object> break_points_hit(Heap::undefined_value());
+ Handle<Object> break_points_hit(heap->undefined_value());
if (break_location_iterator.HasBreakPoint()) {
Handle<Object> break_point_objects =
Handle<Object>(break_location_iterator.BreakPointObjects());
- break_points_hit = CheckBreakPoints(break_point_objects);
+ break_points_hit = debug->CheckBreakPoints(break_point_objects);
}
// If step out is active, skip everything until the frame where we need to
// step out to is reached, unless a real breakpoint is hit.
- if (Debug::StepOutActive() && frame->fp() != Debug::step_out_fp() &&
+ if (debug->StepOutActive() && frame->fp() != debug->step_out_fp() &&
break_points_hit->IsUndefined()) {
// Step count should always be 0 for StepOut.
- ASSERT(thread_local_.step_count_ == 0);
+ ASSERT(debug->thread_local_.step_count_ == 0);
} else if (!break_points_hit->IsUndefined() ||
- (thread_local_.last_step_action_ != StepNone &&
- thread_local_.step_count_ == 0)) {
+ (debug->thread_local_.last_step_action_ != StepNone &&
+ debug->thread_local_.step_count_ == 0)) {
// Notify debugger if a real break point is triggered or if performing
// single stepping with no more steps to perform. Otherwise do another step.
// Clear all current stepping setup.
- ClearStepping();
+ debug->ClearStepping();
// Notify the debug event listeners.
- Debugger::OnDebugBreak(break_points_hit, false);
- } else if (thread_local_.last_step_action_ != StepNone) {
+ isolate->debugger()->OnDebugBreak(break_points_hit, false);
+ } else if (debug->thread_local_.last_step_action_ != StepNone) {
// Hold on to last step action as it is cleared by the call to
// ClearStepping.
- StepAction step_action = thread_local_.last_step_action_;
- int step_count = thread_local_.step_count_;
+ StepAction step_action = debug->thread_local_.last_step_action_;
+ int step_count = debug->thread_local_.step_count_;
// Clear all current stepping setup.
- ClearStepping();
+ debug->ClearStepping();
// Set up for the remaining steps.
- PrepareStep(step_action, step_count);
+ debug->PrepareStep(step_action, step_count);
}
- if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
- SetAfterBreakTarget(frame);
- } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_IC_CALL) {
+ if (debug->thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
+ debug->SetAfterBreakTarget(frame);
+ } else if (debug->thread_local_.frame_drop_mode_ ==
+ FRAME_DROPPED_IN_IC_CALL) {
// We must have been calling IC stub. Do not go there anymore.
- Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ ==
+ Code* plain_return =
+ Isolate::Current()->builtins()->builtin(Builtins::PlainReturn_LiveEdit);
+ debug->thread_local_.after_break_target_ = plain_return->entry();
+ } else if (debug->thread_local_.frame_drop_mode_ ==
FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
// Debug break slot stub does not return normally, instead it manually
// cleans the stack and jumps. We should patch the jump address.
- Code* plain_return = Builtins::builtin(Builtins::FrameDropper_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_DIRECT_CALL) {
+ Code* plain_return = Isolate::Current()->builtins()->builtin(
+ Builtins::FrameDropper_LiveEdit);
+ debug->thread_local_.after_break_target_ = plain_return->entry();
+ } else if (debug->thread_local_.frame_drop_mode_ ==
+ FRAME_DROPPED_IN_DIRECT_CALL) {
// Nothing to do, after_break_target is not used here.
} else {
UNREACHABLE();
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
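
RUNTIME_CALLING_CONVENTION and RUNTIME_GET_ISOLATE are macros introduced with
the isolate split; their definitions are platform-dependent and not shown in
this hunk. One plausible expansion, stated as an assumption only:

  // Assumed expansion (the real macros may instead pass the isolate as an
  // explicit argument on targets where that is cheaper):
  // #define RUNTIME_CALLING_CONVENTION Arguments args
  // #define RUNTIME_GET_ISOLATE Isolate* isolate = Isolate::Current()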
ASSERT(!break_point_objects->IsUndefined());
if (break_point_objects->IsFixedArray()) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
- break_points_hit = Factory::NewFixedArray(array->length());
+ break_points_hit = FACTORY->NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
Handle<Object> o(array->get(i));
if (CheckBreakPoint(o)) {
}
}
} else {
- break_points_hit = Factory::NewFixedArray(1);
+ break_points_hit = FACTORY->NewFixedArray(1);
if (CheckBreakPoint(break_point_objects)) {
break_points_hit->set(break_points_hit_count++, *break_point_objects);
}
// Return undefined if no break points were triggered.
if (break_points_hit_count == 0) {
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
// Return break points hit as a JSArray.
- Handle<JSArray> result = Factory::NewJSArrayWithElements(break_points_hit);
+ Handle<JSArray> result = FACTORY->NewJSArrayWithElements(break_points_hit);
result->set_length(Smi::FromInt(break_points_hit_count));
return result;
}
// Get the function IsBreakPointTriggered (defined in debug-debugger.js).
Handle<String> is_break_point_triggered_symbol =
- Factory::LookupAsciiSymbol("IsBreakPointTriggered");
+ FACTORY->LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
debug_context()->global()->GetPropertyNoExceptionThrown(
*is_break_point_triggered_symbol)));
// Get the break id as an object.
- Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
+ Handle<Object> break_id = FACTORY->NewNumberFromInt(Debug::break_id());
// Call IsBreakPointTriggered.
bool caught_exception = false;
reinterpret_cast<Object**>(break_point_object.location())
};
Handle<Object> result = Execution::TryCall(check_break_point,
- Top::builtins(), argc, argv,
- &caught_exception);
+ Isolate::Current()->js_builtins_object(), argc, argv, &caught_exception);
// If exception or non boolean result handle as not triggered
if (caught_exception || !result->IsBoolean()) {
}
// Return whether the break point is triggered.
- return *result == Heap::true_value();
+ ASSERT(!result.is_null());
+ return (*result)->IsTrue();
}
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
Handle<Object> obj(
- Heap::code_stubs()->SlowReverseLookup(*call_function_stub));
- ASSERT(*obj != Heap::undefined_value());
+ HEAP->code_stubs()->SlowReverseLookup(*call_function_stub));
+ ASSERT(!obj.is_null());
+ ASSERT(!(*obj)->IsUndefined());
ASSERT(obj->IsSmi());
// Get the STUB key and extract major and minor key.
uint32_t key = Smi::cast(*obj)->value();
return ComputeCallDebugBreak(code->arguments_count(), code->kind());
case Code::LOAD_IC:
- return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+ return Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_DebugBreak));
case Code::STORE_IC:
- return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+ return Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_DebugBreak));
case Code::KEYED_LOAD_IC:
return Handle<Code>(
- Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_DebugBreak));
case Code::KEYED_STORE_IC:
return Handle<Code>(
- Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_DebugBreak));
default:
UNREACHABLE();
}
if (RelocInfo::IsConstructCall(mode)) {
Handle<Code> result =
- Handle<Code>(Builtins::builtin(Builtins::ConstructCall_DebugBreak));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::ConstructCall_DebugBreak));
return result;
}
if (code->kind() == Code::STUB) {
ASSERT(code->major_key() == CodeStub::CallFunction);
Handle<Code> result =
- Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::StubNoRegisters_DebugBreak));
return result;
}
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared) {
- if (!HasDebugInfo(shared)) return Handle<Object>(Heap::undefined_value());
+ if (!HasDebugInfo(shared)) return Handle<Object>(HEAP->undefined_value());
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(Heap::undefined_value());
+ return Handle<Object>(HEAP->undefined_value());
}
Handle<FixedArray> locations =
- Factory::NewFixedArray(debug_info->GetBreakPointCount());
+ FACTORY->NewFixedArray(debug_info->GetBreakPointCount());
int count = 0;
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (!debug_info->break_points()->get(i)->IsUndefined()) {
// Flood the function with one-shot break points if it is called from where
// step into was requested.
- if (fp == Debug::step_in_fp()) {
+ if (fp == step_in_fp()) {
// Don't allow step into functions in the native context.
if (!function->IsBuiltin()) {
if (function->shared()->code() ==
- Builtins::builtin(Builtins::FunctionApply) ||
+ Isolate::Current()->builtins()->builtin(Builtins::FunctionApply) ||
function->shared()->code() ==
- Builtins::builtin(Builtins::FunctionCall)) {
+ Isolate::Current()->builtins()->builtin(Builtins::FunctionCall)) {
// Handle function.apply and function.call separately to flood the
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
}
// Create the debug info object.
- Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);
+ Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
// Add debug info to the list.
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
} else {
prev->set_next(current->next());
}
- current->debug_info()->shared()->set_debug_info(Heap::undefined_value());
+ current->debug_info()->shared()->set_debug_info(HEAP->undefined_value());
delete current;
// If there are no more debug info objects there are no more break
Handle<Code> original_code(debug_info->original_code());
#ifdef DEBUG
// Get the code which is actually executing.
- Handle<Code> frame_code(frame->code());
+ Handle<Code> frame_code(frame->LookupCode(Isolate::Current()));
ASSERT(frame_code.is_identical_to(code));
#endif
Handle<Code> code(debug_info->code());
#ifdef DEBUG
// Get the code which is actually executing.
- Handle<Code> frame_code(frame->code());
+ Handle<Code> frame_code(frame->LookupCode(Isolate::Current()));
ASSERT(frame_code.is_identical_to(code));
#endif
bool Debug::IsDebugGlobal(GlobalObject* global) {
- return IsLoaded() && global == Debug::debug_context()->global();
+ return IsLoaded() && global == debug_context()->global();
}
void Debug::ClearMirrorCache() {
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate_);
HandleScope scope;
- ASSERT(Top::context() == *Debug::debug_context());
+ ASSERT(Isolate::Current()->context() == *Debug::debug_context());
// Clear the mirror cache.
Handle<String> function_name =
- Factory::LookupSymbol(CStrVector("ClearMirrorCache"));
- Handle<Object> fun(Top::global()->GetPropertyNoExceptionThrown(
+ FACTORY->LookupSymbol(CStrVector("ClearMirrorCache"));
+ Handle<Object> fun(Isolate::Current()->global()->GetPropertyNoExceptionThrown(
*function_name));
ASSERT(fun->IsJSFunction());
bool caught_exception;
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced.
- Heap::CollectAllGarbage(false);
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
// If the script cache is not active just return an empty array.
ASSERT(script_cache_ != NULL);
if (script_cache_ == NULL) {
- Factory::NewFixedArray(0);
+ FACTORY->NewFixedArray(0);
}
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
// Get the scripts from the cache.
return script_cache_->GetScripts();
}
-Mutex* Debugger::debugger_access_ = OS::CreateMutex();
-Handle<Object> Debugger::event_listener_ = Handle<Object>();
-Handle<Object> Debugger::event_listener_data_ = Handle<Object>();
-bool Debugger::compiling_natives_ = false;
-bool Debugger::is_loading_debugger_ = false;
-bool Debugger::never_unload_debugger_ = false;
-v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
-bool Debugger::debugger_unload_pending_ = false;
-v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
-Mutex* Debugger::dispatch_handler_access_ = OS::CreateMutex();
-v8::Debug::DebugMessageDispatchHandler
- Debugger::debug_message_dispatch_handler_ = NULL;
-MessageDispatchHelperThread* Debugger::message_dispatch_helper_thread_ = NULL;
-int Debugger::host_dispatch_micros_ = 100 * 1000;
-DebuggerAgent* Debugger::agent_ = NULL;
-LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
-Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0);
-LockingCommandMessageQueue Debugger::event_command_queue_(kQueueInitialSize);
+Debugger::Debugger()
+ : debugger_access_(OS::CreateMutex()),
+ event_listener_(Handle<Object>()),
+ event_listener_data_(Handle<Object>()),
+ compiling_natives_(false),
+ is_loading_debugger_(false),
+ never_unload_debugger_(false),
+ message_handler_(NULL),
+ debugger_unload_pending_(false),
+ host_dispatch_handler_(NULL),
+ dispatch_handler_access_(OS::CreateMutex()),
+ debug_message_dispatch_handler_(NULL),
+ message_dispatch_helper_thread_(NULL),
+ host_dispatch_micros_(100 * 1000),
+ agent_(NULL),
+ command_queue_(kQueueInitialSize),
+ command_received_(OS::CreateSemaphore(0)),
+ event_command_queue_(kQueueInitialSize) {
+}
+
+
+Debugger::~Debugger() {
+ delete debugger_access_;
+ debugger_access_ = 0;
+ delete dispatch_handler_access_;
+ dispatch_handler_access_ = 0;
+ delete command_received_;
+ command_received_ = 0;
+}
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
bool* caught_exception) {
- ASSERT(Top::context() == *Debug::debug_context());
+ ASSERT(Isolate::Current() == isolate_);
+ ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Create the execution state object.
- Handle<String> constructor_str = Factory::LookupSymbol(constructor_name);
- Handle<Object> constructor(Top::global()->GetPropertyNoExceptionThrown(
- *constructor_str));
+ Handle<String> constructor_str = FACTORY->LookupSymbol(constructor_name);
+ Handle<Object> constructor(
+ isolate_->global()->GetPropertyNoExceptionThrown(*constructor_str));
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
- Handle<JSObject>(Debug::debug_context()->global()), argc, argv,
- caught_exception);
+ Handle<JSObject>(isolate_->debug()->debug_context()->global()),
+ argc, argv, caught_exception);
return js_object;
}
Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
+ ASSERT(Isolate::Current() == isolate_);
// Create the execution state object.
- Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
+ Handle<Object> break_id = FACTORY->NewNumberFromInt(
+ isolate_->debug()->break_id());
const int argc = 1;
Object** argv[argc] = { break_id.location() };
return MakeJSObject(CStrVector("MakeExecutionState"),
Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
Handle<Object> break_points_hit,
bool* caught_exception) {
+ ASSERT(Isolate::Current() == isolate_);
// Create the new break event object.
const int argc = 2;
Object** argv[argc] = { exec_state.location(),
Handle<Object> exception,
bool uncaught,
bool* caught_exception) {
+ ASSERT(Isolate::Current() == isolate_);
// Create the new exception event object.
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
exception.location(),
- uncaught ? Factory::true_value().location() :
- Factory::false_value().location()};
+ uncaught ? FACTORY->true_value().location() :
+ FACTORY->false_value().location()};
return MakeJSObject(CStrVector("MakeExceptionEvent"),
argc, argv, caught_exception);
}
Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
bool* caught_exception) {
+ ASSERT(Isolate::Current() == isolate_);
// Create the new function event object.
const int argc = 1;
Object** argv[argc] = { function.location() };
Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
bool before,
bool* caught_exception) {
+ ASSERT(Isolate::Current() == isolate_);
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script);
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
script_wrapper.location(),
- before ? Factory::true_value().location() :
- Factory::false_value().location() };
+ before ? FACTORY->true_value().location() :
+ FACTORY->false_value().location() };
return MakeJSObject(CStrVector("MakeCompileEvent"),
argc,
Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
bool* caught_exception) {
+ ASSERT(Isolate::Current() == isolate_);
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
void Debugger::OnException(Handle<Object> exception, bool uncaught) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
// Bail out based on state or if there is no listener for this event
- if (Debug::InDebugger()) return;
+ if (isolate_->debug()->InDebugger()) return;
if (!Debugger::EventActive(v8::Exception)) return;
// Bail out if exception breaks are not active
if (uncaught) {
// Uncaught exceptions are reported if either flag is set.
- if (!(Debug::break_on_uncaught_exception() ||
- Debug::break_on_exception())) return;
+ if (!(isolate_->debug()->break_on_uncaught_exception() ||
+ isolate_->debug()->break_on_exception())) return;
} else {
// Caught exceptions are reported if break-on-exception is activated.
- if (!Debug::break_on_exception()) return;
+ if (!isolate_->debug()->break_on_exception()) return;
}
// Enter the debugger.
if (debugger.FailedToEnter()) return;
// Clear all current stepping setup.
- Debug::ClearStepping();
+ isolate_->debug()->ClearStepping();
// Create the event data object.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
bool auto_continue) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
// Debugger has already been entered by caller.
- ASSERT(Top::context() == *Debug::debug_context());
+ ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Bail out if there is no listener for this event
if (!Debugger::EventActive(v8::Break)) return;
// Debugger must be entered in advance.
- ASSERT(Top::context() == *Debug::debug_context());
+ ASSERT(Isolate::Current()->context() == *isolate_->debug()->debug_context());
// Create the event data object.
bool caught_exception = false;
void Debugger::OnBeforeCompile(Handle<Script> script) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
// Bail out based on state or if there is no listener for this event
- if (Debug::InDebugger()) return;
+ if (isolate_->debug()->InDebugger()) return;
if (compiling_natives()) return;
if (!EventActive(v8::BeforeCompile)) return;
// Handle debugger actions when a new script is compiled.
void Debugger::OnAfterCompile(Handle<Script> script,
AfterCompileFlags after_compile_flags) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
// Add the newly compiled script to the script cache.
- Debug::AddScriptToScriptCache(script);
+ isolate_->debug()->AddScriptToScriptCache(script);
// No more to do if not debugging.
if (!IsDebuggerActive()) return;
if (compiling_natives()) return;
// Store whether in debugger before entering debugger.
- bool in_debugger = Debug::InDebugger();
+ bool in_debugger = isolate_->debug()->InDebugger();
// Enter the debugger.
EnterDebugger debugger;
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
Handle<String> update_script_break_points_symbol =
- Factory::LookupAsciiSymbol("UpdateScriptBreakPoints");
+ FACTORY->LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<Object> update_script_break_points =
- Handle<Object>(Debug::debug_context()->global()->
+ Handle<Object>(isolate_->debug()->debug_context()->global()->
GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
if (!update_script_break_points->IsJSFunction()) {
return;
Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
Handle<Object> result = Execution::TryCall(
Handle<JSFunction>::cast(update_script_break_points),
- Top::builtins(), argc, argv,
+ Isolate::Current()->js_builtins_object(), argc, argv,
&caught_exception);
if (caught_exception) {
return;
void Debugger::OnScriptCollected(int id) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
// No more to do if not debugging.
void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
bool auto_continue) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
// Clear any pending debug break if this is a real break.
if (!auto_continue) {
- Debug::clear_interrupt_pending(DEBUGBREAK);
+ isolate_->debug()->clear_interrupt_pending(DEBUGBREAK);
}
// Create the execution state.
Handle<Object> exec_state,
Handle<Object> event_data) {
ASSERT(event_listener_->IsJSFunction());
+ ASSERT(Isolate::Current() == isolate_);
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
Handle<Object>::cast(event_data).location(),
event_listener_data_.location() };
bool caught_exception = false;
- Execution::TryCall(fun, Top::global(), argc, argv, &caught_exception);
+ Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
// Silently ignore exceptions from debug event listeners.
}
Handle<Context> Debugger::GetDebugContext() {
- never_unload_debugger_ = true;
- EnterDebugger debugger;
- return Debug::debug_context();
+ ASSERT(Isolate::Current() == isolate_);
+ never_unload_debugger_ = true;
+ EnterDebugger debugger;
+ return isolate_->debug()->debug_context();
}
void Debugger::UnloadDebugger() {
+ ASSERT(Isolate::Current() == isolate_);
+
// Make sure that there are no breakpoints left.
- Debug::ClearAllBreakPoints();
+ isolate_->debug()->ClearAllBreakPoints();
// Unload the debugger if feasible.
if (!never_unload_debugger_) {
- Debug::Unload();
+ isolate_->debug()->Unload();
}
// Clear the flag indicating that the debugger should be unloaded.
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
bool auto_continue) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
- if (!Debug::Load()) return;
+ if (!isolate_->debug()->Load()) return;
// Process the individual events.
bool sendEventMessage = false;
// The debug command interrupt flag might have been set when the command was
// added. It should be enough to clear the flag only once while we are in the
// debugger.
- ASSERT(Debug::InDebugger());
- StackGuard::Continue(DEBUGCOMMAND);
+ ASSERT(isolate_->debug()->InDebugger());
+ isolate_->stack_guard()->Continue(DEBUGCOMMAND);
// Notify the debugger that a debug event has occurred unless auto continue is
// active, in which case no event is sent.
// Get the command from the queue.
CommandMessage command = command_queue_.Get();
- Logger::DebugTag("Got request from command queue, in interactive loop.");
+ LOGGER->DebugTag("Got request from command queue, in interactive loop.");
if (!Debugger::IsDebuggerActive()) {
// Delete command text and user data.
command.Dispose();
void Debugger::SetEventListener(Handle<Object> callback,
Handle<Object> data) {
+ ASSERT(Isolate::Current() == isolate_);
HandleScope scope;
// Clear the global handles for the event listener and the event listener data
// object.
if (!event_listener_.is_null()) {
- GlobalHandles::Destroy(
+ isolate_->global_handles()->Destroy(
reinterpret_cast<Object**>(event_listener_.location()));
event_listener_ = Handle<Object>();
}
if (!event_listener_data_.is_null()) {
- GlobalHandles::Destroy(
+ isolate_->global_handles()->Destroy(
reinterpret_cast<Object**>(event_listener_data_.location()));
event_listener_data_ = Handle<Object>();
}
// If there is a new debug event listener register it together with its data
// object.
if (!callback->IsUndefined() && !callback->IsNull()) {
- event_listener_ = Handle<Object>::cast(GlobalHandles::Create(*callback));
+ event_listener_ = Handle<Object>::cast(
+ isolate_->global_handles()->Create(*callback));
if (data.is_null()) {
- data = Factory::undefined_value();
+ data = FACTORY->undefined_value();
}
- event_listener_data_ = Handle<Object>::cast(GlobalHandles::Create(*data));
+ event_listener_data_ = Handle<Object>::cast(
+ isolate_->global_handles()->Create(*data));
}
ListenersChanged();
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
+ ASSERT(Isolate::Current() == isolate_);
ScopedLock with(debugger_access_);
message_handler_ = handler;
if (handler == NULL) {
// Send an empty command to the debugger if in a break to make JavaScript
// run again if the debugger is closed.
- if (Debug::InDebugger()) {
+ if (isolate_->debug()->InDebugger()) {
ProcessCommand(Vector<const uint16_t>::empty());
}
}
void Debugger::ListenersChanged() {
+ Isolate* isolate = Isolate::Current();
if (IsDebuggerActive()) {
// Disable the compilation cache when the debugger is active.
- CompilationCache::Disable();
+ isolate->compilation_cache()->Disable();
debugger_unload_pending_ = false;
} else {
- CompilationCache::Enable();
+ isolate->compilation_cache()->Enable();
// Unload the debugger if event listener and message handler cleared.
// Schedule this for later, because we may be in a non-V8 thread.
debugger_unload_pending_ = true;
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period) {
+ ASSERT(Isolate::Current() == isolate_);
host_dispatch_handler_ = handler;
host_dispatch_micros_ = period * 1000;
}
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
+ ASSERT(Isolate::Current() == isolate_);
ScopedLock with(dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
- message_dispatch_helper_thread_ = new MessageDispatchHelperThread;
+ message_dispatch_helper_thread_ = new MessageDispatchHelperThread(isolate_);
message_dispatch_helper_thread_->Start();
}
}
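SetDebugMessageDispatchHandler starts its helper thread lazily under the dispatch-handler lock, so only the first caller that asks for a Locker-providing handler creates the thread. A compact, self-contained sketch of that lazy, lock-guarded start pattern using standard C++ threads; Worker and Dispatcher are placeholders, not V8's Thread machinery:

#include <mutex>
#include <thread>

class Worker {
 public:
  void Start() { thread_ = std::thread([] { /* dispatch loop */ }); }
  ~Worker() {
    if (thread_.joinable()) thread_.join();
  }
 private:
  std::thread thread_;
};

class Dispatcher {
 public:
  void EnsureWorker() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (worker_ == nullptr) {  // only the first caller creates and starts it
      worker_ = new Worker();  // lives for the process; leaked for brevity
      worker_->Start();
    }
  }
 private:
  std::mutex mutex_;
  Worker* worker_ = nullptr;
};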
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
+ ASSERT(Isolate::Current() == isolate_);
ScopedLock with(debugger_access_);
if (message_handler_ != NULL) {
// by the API client thread.
void Debugger::ProcessCommand(Vector<const uint16_t> command,
v8::Debug::ClientData* client_data) {
+ ASSERT(Isolate::Current() == isolate_);
// Need to cast away const.
CommandMessage message = CommandMessage::New(
Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
command.length()),
client_data);
- Logger::DebugTag("Put command on command_queue.");
+ LOGGER->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
command_received_->Signal();
// Set the debug command break flag to have the command processed.
- if (!Debug::InDebugger()) {
- StackGuard::DebugCommand();
+ if (!isolate_->debug()->InDebugger()) {
+ isolate_->stack_guard()->DebugCommand();
}
MessageDispatchHelperThread* dispatch_thread;
bool Debugger::HasCommands() {
+ ASSERT(Isolate::Current() == isolate_);
return !command_queue_.IsEmpty();
}
void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
+ ASSERT(Isolate::Current() == isolate_);
CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
event_command_queue_.Put(message);
// Set the debug command break flag to have the command processed.
- if (!Debug::InDebugger()) {
- StackGuard::DebugCommand();
+ if (!isolate_->debug()->InDebugger()) {
+ isolate_->stack_guard()->DebugCommand();
}
}
bool Debugger::IsDebuggerActive() {
+ ASSERT(Isolate::Current() == isolate_);
ScopedLock with(debugger_access_);
return message_handler_ != NULL || !event_listener_.is_null();
Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> data,
bool* pending_exception) {
+ ASSERT(Isolate::Current() == isolate_);
// When calling functions in the debugger, prevent it from being unloaded.
Debugger::never_unload_debugger_ = true;
// Enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
// Create the execution state.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
if (caught_exception) {
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
static const int kArgc = 2;
Object** argv[kArgc] = { exec_state.location(), data.location() };
Handle<Object> result = Execution::Call(
fun,
- Handle<Object>(Debug::debug_context_->global_proxy()),
+ Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
kArgc,
argv,
pending_exception);
bool Debugger::StartAgent(const char* name, int port,
bool wait_for_connection) {
+ ASSERT(Isolate::Current() == isolate_);
if (wait_for_connection) {
// Suspend V8 if it is already running or set V8 to suspend whenever
// it starts.
if (Socket::Setup()) {
if (agent_ == NULL) {
- agent_ = new DebuggerAgent(name, port);
+ agent_ = new DebuggerAgent(isolate_, name, port);
agent_->Start();
}
return true;
void Debugger::StopAgent() {
+ ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL) {
agent_->Shutdown();
agent_->Join();
void Debugger::WaitForAgent() {
+ ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL)
agent_->WaitUntilListening();
}
void Debugger::CallMessageDispatchHandler() {
+ ASSERT(Isolate::Current() == isolate_);
v8::Debug::DebugMessageDispatchHandler handler;
{
ScopedLock with(dispatch_handler_access_);
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- v8::Handle<v8::Context> context = GetDebugEventContext();
- // Top::context() may be NULL when "script collected" event occures.
+ Isolate* isolate = Isolate::Current();
+ v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
+ // Isolate::context() may be NULL when "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
- return GetDebugEventContext();
+ return GetDebugEventContext(isolate);
}
v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
- return GetDebugEventContext();
+ return GetDebugEventContext(Isolate::Current());
}
CommandMessage LockingCommandMessageQueue::Get() {
ScopedLock sl(lock_);
CommandMessage result = queue_.Get();
- Logger::DebugEvent("Get", result.text());
+ LOGGER->DebugEvent("Get", result.text());
return result;
}
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
ScopedLock sl(lock_);
queue_.Put(message);
- Logger::DebugEvent("Put", message.text());
+ LOGGER->DebugEvent("Put", message.text());
}
}
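LockingCommandMessageQueue above simply serializes Get and Put on one lock so the embedder thread and the VM thread can exchange commands safely. For comparison, a minimal sketch of the same lock-around-every-operation pattern using standard C++ types; LockedQueue and its string payload are stand-ins for V8's CommandMessage queue:

#include <mutex>
#include <queue>
#include <string>

class LockedQueue {
 public:
  void Put(const std::string& message) {
    std::lock_guard<std::mutex> guard(mutex_);  // held only for the copy
    queue_.push(message);
  }
  bool Get(std::string* out) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (queue_.empty()) return false;  // caller decides how to wait
    *out = queue_.front();
    queue_.pop();
    return true;
  }
 private:
  std::mutex mutex_;
  std::queue<std::string> queue_;
};

int main() {
  LockedQueue q;
  q.Put("step-over");  // producer side (embedder thread)
  std::string command;
  return q.Get(&command) ? 0 : 1;  // consumer side (VM thread)
}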
-MessageDispatchHelperThread::MessageDispatchHelperThread()
- : Thread("v8:MsgDispHelpr"),
+MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
+ : Thread(isolate, "v8:MsgDispHelpr"),
sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
already_signalled_(false) {
}
}
{
Locker locker;
- Debugger::CallMessageDispatchHandler();
+ Isolate::Current()->debugger()->CallMessageDispatchHandler();
}
}
}
#ifndef V8_DEBUG_H_
#define V8_DEBUG_H_
+#include "arguments.h"
#include "assembler.h"
#include "debug-agent.h"
#include "execution.h"
DebugInfoListNode* next_;
};
-
// This class contains the debugger support. The main purpose is to handle
// setting break points in the code.
//
// DebugInfo.
class Debug {
public:
- static void Setup(bool create_heap_objects);
- static bool Load();
- static void Unload();
- static bool IsLoaded() { return !debug_context_.is_null(); }
- static bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
- static void PreemptionWhileInDebugger();
- static void Iterate(ObjectVisitor* v);
-
- static Object* Break(Arguments args);
- static void SetBreakPoint(Handle<SharedFunctionInfo> shared,
- Handle<Object> break_point_object,
- int* source_position);
- static void ClearBreakPoint(Handle<Object> break_point_object);
- static void ClearAllBreakPoints();
- static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
- static void FloodHandlerWithOneShot();
- static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
- static bool IsBreakOnException(ExceptionBreakType type);
- static void PrepareStep(StepAction step_action, int step_count);
- static void ClearStepping();
- static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame);
+ void Setup(bool create_heap_objects);
+ bool Load();
+ void Unload();
+ bool IsLoaded() { return !debug_context_.is_null(); }
+ bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
+ void PreemptionWhileInDebugger();
+ void Iterate(ObjectVisitor* v);
+
+ static Object* Break(RUNTIME_CALLING_CONVENTION);
+ void SetBreakPoint(Handle<SharedFunctionInfo> shared,
+ Handle<Object> break_point_object,
+ int* source_position);
+ void ClearBreakPoint(Handle<Object> break_point_object);
+ void ClearAllBreakPoints();
+ void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+ void FloodHandlerWithOneShot();
+ void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+ bool IsBreakOnException(ExceptionBreakType type);
+ void PrepareStep(StepAction step_action, int step_count);
+ void ClearStepping();
+ bool StepNextContinue(BreakLocationIterator* break_location_iterator,
+ JavaScriptFrame* frame);
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
// Returns whether the operation succeeded.
- static bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
+ bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
// Returns true if the current stub call is patched to call the debugger.
static bool IsDebugBreak(Address addr);
Handle<SharedFunctionInfo> shared);
// Getter for the debug_context.
- inline static Handle<Context> debug_context() { return debug_context_; }
+ inline Handle<Context> debug_context() { return debug_context_; }
// Check whether a global object is the debug global object.
- static bool IsDebugGlobal(GlobalObject* global);
+ bool IsDebugGlobal(GlobalObject* global);
// Check whether this frame is just about to return.
- static bool IsBreakAtReturn(JavaScriptFrame* frame);
+ bool IsBreakAtReturn(JavaScriptFrame* frame);
// Fast check to see if any break points are active.
- inline static bool has_break_points() { return has_break_points_; }
+ inline bool has_break_points() { return has_break_points_; }
- static void NewBreak(StackFrame::Id break_frame_id);
- static void SetBreak(StackFrame::Id break_frame_id, int break_id);
- static StackFrame::Id break_frame_id() {
+ void NewBreak(StackFrame::Id break_frame_id);
+ void SetBreak(StackFrame::Id break_frame_id, int break_id);
+ StackFrame::Id break_frame_id() {
return thread_local_.break_frame_id_;
}
- static int break_id() { return thread_local_.break_id_; }
+ int break_id() { return thread_local_.break_id_; }
- static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- static void HandleStepIn(Handle<JSFunction> function,
- Handle<Object> holder,
- Address fp,
- bool is_constructor);
- static Address step_in_fp() { return thread_local_.step_into_fp_; }
- static Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
+ bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
+ void HandleStepIn(Handle<JSFunction> function,
+ Handle<Object> holder,
+ Address fp,
+ bool is_constructor);
+ Address step_in_fp() { return thread_local_.step_into_fp_; }
+ Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
- static bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
- static Address step_out_fp() { return thread_local_.step_out_fp_; }
+ bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
+ Address step_out_fp() { return thread_local_.step_out_fp_; }
- static EnterDebugger* debugger_entry() {
+ EnterDebugger* debugger_entry() {
return thread_local_.debugger_entry_;
}
- static void set_debugger_entry(EnterDebugger* entry) {
+ void set_debugger_entry(EnterDebugger* entry) {
thread_local_.debugger_entry_ = entry;
}
// Check whether any of the specified interrupts are pending.
- static bool is_interrupt_pending(InterruptFlag what) {
+ bool is_interrupt_pending(InterruptFlag what) {
return (thread_local_.pending_interrupts_ & what) != 0;
}
// Set specified interrupts as pending.
- static void set_interrupts_pending(InterruptFlag what) {
+ void set_interrupts_pending(InterruptFlag what) {
thread_local_.pending_interrupts_ |= what;
}
// Clear specified interrupts from pending.
- static void clear_interrupt_pending(InterruptFlag what) {
+ void clear_interrupt_pending(InterruptFlag what) {
thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
}
// Getter and setter for the disable break state.
- static bool disable_break() { return disable_break_; }
- static void set_disable_break(bool disable_break) {
+ bool disable_break() { return disable_break_; }
+ void set_disable_break(bool disable_break) {
disable_break_ = disable_break;
}
// Getters for the current exception break state.
- static bool break_on_exception() { return break_on_exception_; }
- static bool break_on_uncaught_exception() {
+ bool break_on_exception() { return break_on_exception_; }
+ bool break_on_uncaught_exception() {
return break_on_uncaught_exception_;
}
};
// Support for setting the address to jump to when returning from break point.
- static Address* after_break_target_address() {
+ Address* after_break_target_address() {
return reinterpret_cast<Address*>(&thread_local_.after_break_target_);
}
- static Address* restarter_frame_function_pointer_address() {
+ Address* restarter_frame_function_pointer_address() {
Object*** address = &thread_local_.restarter_frame_function_pointer_;
return reinterpret_cast<Address*>(address);
}
// Support for saving/restoring registers when handling debug break calls.
- static Object** register_address(int r) {
+ Object** register_address(int r) {
return &registers_[r];
}
// Access to the debug break on return code.
- static Code* debug_break_return() { return debug_break_return_; }
- static Code** debug_break_return_address() {
+ Code* debug_break_return() { return debug_break_return_; }
+ Code** debug_break_return_address() {
return &debug_break_return_;
}
// Access to the debug break in debug break slot code.
- static Code* debug_break_slot() { return debug_break_slot_; }
- static Code** debug_break_slot_address() {
+ Code* debug_break_slot() { return debug_break_slot_; }
+ Code** debug_break_slot_address() {
return &debug_break_slot_;
}
static const int kEstimatedNofDebugInfoEntries = 16;
static const int kEstimatedNofBreakPointsInFunction = 16;
+ // Passed to MakeWeak.
static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data);
friend class Debugger;
friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
// Threading support.
- static char* ArchiveDebug(char* to);
- static char* RestoreDebug(char* from);
+ char* ArchiveDebug(char* to);
+ char* RestoreDebug(char* from);
static int ArchiveSpacePerThread();
- static void FreeThreadResources() { }
+ void FreeThreadResources() { }
// Mirror cache handling.
- static void ClearMirrorCache();
+ void ClearMirrorCache();
// Script cache handling.
- static void CreateScriptCache();
- static void DestroyScriptCache();
- static void AddScriptToScriptCache(Handle<Script> script);
- static Handle<FixedArray> GetLoadedScripts();
+ void CreateScriptCache();
+ void DestroyScriptCache();
+ void AddScriptToScriptCache(Handle<Script> script);
+ Handle<FixedArray> GetLoadedScripts();
// Garbage collection notifications.
- static void AfterGarbageCollection();
+ void AfterGarbageCollection();
// Code generator routines.
static void GenerateSlot(MacroAssembler* masm);
FRAME_DROPPED_IN_DIRECT_CALL
};
- static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+ void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
FrameDropMode mode,
Object** restarter_frame_function_pointer);
static const bool kFrameDropperSupported;
private:
+ explicit Debug(Isolate* isolate);
+ ~Debug();
+
static bool CompileDebuggerScript(int index);
- static void ClearOneShot();
- static void ActivateStepIn(StackFrame* frame);
- static void ClearStepIn();
- static void ActivateStepOut(StackFrame* frame);
- static void ClearStepOut();
- static void ClearStepNext();
+ void ClearOneShot();
+ void ActivateStepIn(StackFrame* frame);
+ void ClearStepIn();
+ void ActivateStepOut(StackFrame* frame);
+ void ClearStepOut();
+ void ClearStepNext();
// Remove the debug info from the list of debug info objects.
- static void RemoveDebugInfo(Handle<DebugInfo> debug_info);
- static void SetAfterBreakTarget(JavaScriptFrame* frame);
- static Handle<Object> CheckBreakPoints(Handle<Object> break_point);
- static bool CheckBreakPoint(Handle<Object> break_point_object);
+ void RemoveDebugInfo(Handle<DebugInfo> debug_info);
+ void SetAfterBreakTarget(JavaScriptFrame* frame);
+ Handle<Object> CheckBreakPoints(Handle<Object> break_point);
+ bool CheckBreakPoint(Handle<Object> break_point_object);
// Global handle to debug context where all the debugger JavaScript code is
// loaded.
- static Handle<Context> debug_context_;
+ Handle<Context> debug_context_;
// Boolean state indicating whether any break points are set.
- static bool has_break_points_;
+ bool has_break_points_;
// Cache of all scripts in the heap.
- static ScriptCache* script_cache_;
+ ScriptCache* script_cache_;
// List of active debug info objects.
- static DebugInfoListNode* debug_info_list_;
+ DebugInfoListNode* debug_info_list_;
- static bool disable_break_;
- static bool break_on_exception_;
- static bool break_on_uncaught_exception_;
+ bool disable_break_;
+ bool break_on_exception_;
+ bool break_on_uncaught_exception_;
// Per-thread data.
class ThreadLocal {
};
// Storage location for registers when handling debug break calls
- static JSCallerSavedBuffer registers_;
- static ThreadLocal thread_local_;
- static void ThreadInit();
+ JSCallerSavedBuffer registers_;
+ ThreadLocal thread_local_;
+ void ThreadInit();
// Code to call for handling debug break on return.
- static Code* debug_break_return_;
+ Code* debug_break_return_;
// Code to call for handling debug break in debug break slots.
- static Code* debug_break_slot_;
+ Code* debug_break_slot_;
+
+ Isolate* isolate_;
+
+ friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(Debug);
};
class Debugger {
public:
- static void DebugRequest(const uint16_t* json_request, int length);
-
- static Handle<Object> MakeJSObject(Vector<const char> constructor_name,
- int argc, Object*** argv,
- bool* caught_exception);
- static Handle<Object> MakeExecutionState(bool* caught_exception);
- static Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception);
- static Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception);
- static Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
- bool* caught_exception);
- static Handle<Object> MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception);
- static Handle<Object> MakeScriptCollectedEvent(int id,
- bool* caught_exception);
- static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
- static void OnException(Handle<Object> exception, bool uncaught);
- static void OnBeforeCompile(Handle<Script> script);
+ ~Debugger();
+
+ void DebugRequest(const uint16_t* json_request, int length);
+
+ Handle<Object> MakeJSObject(Vector<const char> constructor_name,
+ int argc, Object*** argv,
+ bool* caught_exception);
+ Handle<Object> MakeExecutionState(bool* caught_exception);
+ Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
+ Handle<Object> break_points_hit,
+ bool* caught_exception);
+ Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
+ Handle<Object> exception,
+ bool uncaught,
+ bool* caught_exception);
+ Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
+ bool* caught_exception);
+ Handle<Object> MakeCompileEvent(Handle<Script> script,
+ bool before,
+ bool* caught_exception);
+ Handle<Object> MakeScriptCollectedEvent(int id,
+ bool* caught_exception);
+ void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
+ void OnException(Handle<Object> exception, bool uncaught);
+ void OnBeforeCompile(Handle<Script> script);
enum AfterCompileFlags {
NO_AFTER_COMPILE_FLAGS,
SEND_WHEN_DEBUGGING
};
- static void OnAfterCompile(Handle<Script> script,
- AfterCompileFlags after_compile_flags);
- static void OnNewFunction(Handle<JSFunction> fun);
- static void OnScriptCollected(int id);
- static void ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue);
- static void NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue);
- static void SetEventListener(Handle<Object> callback, Handle<Object> data);
- static void SetMessageHandler(v8::Debug::MessageHandler2 handler);
- static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period);
- static void SetDebugMessageDispatchHandler(
+ void OnAfterCompile(Handle<Script> script,
+ AfterCompileFlags after_compile_flags);
+ void OnNewFunction(Handle<JSFunction> fun);
+ void OnScriptCollected(int id);
+ void ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data,
+ bool auto_continue);
+ void NotifyMessageHandler(v8::DebugEvent event,
+ Handle<JSObject> exec_state,
+ Handle<JSObject> event_data,
+ bool auto_continue);
+ void SetEventListener(Handle<Object> callback, Handle<Object> data);
+ void SetMessageHandler(v8::Debug::MessageHandler2 handler);
+ void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
+ int period);
+ void SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler,
bool provide_locker);
// Invoke the message handler function.
- static void InvokeMessageHandler(MessageImpl message);
+ void InvokeMessageHandler(MessageImpl message);
// Add a debugger command to the command queue.
- static void ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data = NULL);
+ void ProcessCommand(Vector<const uint16_t> command,
+ v8::Debug::ClientData* client_data = NULL);
// Check whether there are commands in the command queue.
- static bool HasCommands();
+ bool HasCommands();
// Enqueue a debugger command to the command queue for event listeners.
- static void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
+ void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
- static Handle<Object> Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception);
+ Handle<Object> Call(Handle<JSFunction> fun,
+ Handle<Object> data,
+ bool* pending_exception);
// Start the debugger agent listening on the provided port.
- static bool StartAgent(const char* name, int port,
- bool wait_for_connection = false);
+ bool StartAgent(const char* name, int port,
+ bool wait_for_connection = false);
// Stop the debugger agent.
- static void StopAgent();
+ void StopAgent();
// Blocks until the agent has started listening for connections.
- static void WaitForAgent();
+ void WaitForAgent();
- static void CallMessageDispatchHandler();
+ void CallMessageDispatchHandler();
- static Handle<Context> GetDebugContext();
+ Handle<Context> GetDebugContext();
// Unload the debugger if possible. Only called when no debugger is currently
// active.
- static void UnloadDebugger();
+ void UnloadDebugger();
friend void ForceUnloadDebugger(); // In test-debug.cc
- inline static bool EventActive(v8::DebugEvent event) {
+ inline bool EventActive(v8::DebugEvent event) {
ScopedLock with(debugger_access_);
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
- if (Debug::debugger_entry() == NULL) {
+ if (isolate_->debug()->debugger_entry() == NULL) {
UnloadDebugger();
}
}
return !compiling_natives_ && Debugger::IsDebuggerActive();
}
- static void set_compiling_natives(bool compiling_natives) {
+ void set_compiling_natives(bool compiling_natives) {
Debugger::compiling_natives_ = compiling_natives;
}
- static bool compiling_natives() { return Debugger::compiling_natives_; }
- static void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
- static bool is_loading_debugger() { return Debugger::is_loading_debugger_; }
+ bool compiling_natives() const { return compiling_natives_; }
+ void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
+ bool is_loading_debugger() const { return is_loading_debugger_; }
- static bool IsDebuggerActive();
+ bool IsDebuggerActive();
private:
- static void CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- static void CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- static void CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data);
- static void ListenersChanged();
-
- static Mutex* debugger_access_; // Mutex guarding debugger variables.
- static Handle<Object> event_listener_; // Global handle to listener.
- static Handle<Object> event_listener_data_;
- static bool compiling_natives_; // Are we compiling natives?
- static bool is_loading_debugger_; // Are we loading the debugger?
- static bool never_unload_debugger_; // Can we unload the debugger?
- static v8::Debug::MessageHandler2 message_handler_;
- static bool debugger_unload_pending_; // Was message handler cleared?
- static v8::Debug::HostDispatchHandler host_dispatch_handler_;
- static Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
- static v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
- static MessageDispatchHelperThread* message_dispatch_helper_thread_;
- static int host_dispatch_micros_;
-
- static DebuggerAgent* agent_;
+ Debugger();
+
+ void CallEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ void CallCEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ void CallJSEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data);
+ void ListenersChanged();
+
+ Mutex* debugger_access_; // Mutex guarding debugger variables.
+ Handle<Object> event_listener_; // Global handle to listener.
+ Handle<Object> event_listener_data_;
+ bool compiling_natives_; // Are we compiling natives?
+ bool is_loading_debugger_; // Are we loading the debugger?
+ bool never_unload_debugger_; // Can we unload the debugger?
+ v8::Debug::MessageHandler2 message_handler_;
+ bool debugger_unload_pending_; // Was message handler cleared?
+ v8::Debug::HostDispatchHandler host_dispatch_handler_;
+ Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
+ v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
+ MessageDispatchHelperThread* message_dispatch_helper_thread_;
+ int host_dispatch_micros_;
+
+ DebuggerAgent* agent_;
static const int kQueueInitialSize = 4;
- static LockingCommandMessageQueue command_queue_;
- static Semaphore* command_received_; // Signaled for each command received.
+ LockingCommandMessageQueue command_queue_;
+ Semaphore* command_received_; // Signaled for each command received.
+ LockingCommandMessageQueue event_command_queue_;
- static LockingCommandMessageQueue event_command_queue_;
+ Isolate* isolate_;
friend class EnterDebugger;
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(Debugger);
};
class EnterDebugger BASE_EMBEDDED {
public:
EnterDebugger()
- : prev_(Debug::debugger_entry()),
- has_js_frames_(!it_.done()) {
- ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(PREEMPT));
- ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(DEBUGBREAK));
+ : isolate_(Isolate::Current()),
+ prev_(isolate_->debug()->debugger_entry()),
+ has_js_frames_(!it_.done()),
+ save_(isolate_) {
+ Debug* debug = isolate_->debug();
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
- Debug::set_debugger_entry(this);
+ debug->set_debugger_entry(this);
// Store the previous break id and frame id.
- break_id_ = Debug::break_id();
- break_frame_id_ = Debug::break_frame_id();
+ break_id_ = debug->break_id();
+ break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there is no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
- Debug::NewBreak(it_.frame()->id());
+ debug->NewBreak(it_.frame()->id());
} else {
- Debug::NewBreak(StackFrame::NO_ID);
+ debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
- load_failed_ = !Debug::Load();
+ load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE the member variable save which saves the previous context before
// this change.
- Top::set_context(*Debug::debug_context());
+ isolate_->set_context(*debug->debug_context());
}
}
~EnterDebugger() {
+ ASSERT(Isolate::Current() == isolate_);
+ Debug* debug = isolate_->debug();
+
// Restore to the previous break state.
- Debug::SetBreak(break_frame_id_, break_id_);
+ debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
- if (!Top::has_pending_exception()) {
+ if (!isolate_->has_pending_exception()) {
// Try to avoid a pending debug break triggering inside the JavaScript
// code that clears the mirror cache.
- if (StackGuard::IsDebugBreak()) {
- Debug::set_interrupts_pending(DEBUGBREAK);
- StackGuard::Continue(DEBUGBREAK);
+ if (isolate_->stack_guard()->IsDebugBreak()) {
+ debug->set_interrupts_pending(DEBUGBREAK);
+ isolate_->stack_guard()->Continue(DEBUGBREAK);
}
- Debug::ClearMirrorCache();
+ debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these were recorded while debugging.
- if (Debug::is_interrupt_pending(PREEMPT)) {
+ if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
- Debug::clear_interrupt_pending(PREEMPT);
- StackGuard::Preempt();
+ debug->clear_interrupt_pending(PREEMPT);
+ isolate_->stack_guard()->Preempt();
}
- if (Debug::is_interrupt_pending(DEBUGBREAK)) {
- Debug::clear_interrupt_pending(DEBUGBREAK);
- StackGuard::DebugBreak();
+ if (debug->is_interrupt_pending(DEBUGBREAK)) {
+ debug->clear_interrupt_pending(DEBUGBREAK);
+ isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
- if (Debugger::HasCommands()) {
- StackGuard::DebugCommand();
+ if (isolate_->debugger()->HasCommands()) {
+ isolate_->stack_guard()->DebugCommand();
}
// If we are leaving the debugger and it is no longer active, unload it.
- if (!Debugger::IsDebuggerActive()) {
- Debugger::UnloadDebugger();
+ if (!isolate_->debugger()->IsDebuggerActive()) {
+ isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
- Debug::set_debugger_entry(prev_);
+ debug->set_debugger_entry(prev_);
}
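EnterDebugger is an RAII guard: the constructor links the new entry in front of the previous one through set_debugger_entry, and the destructor restores the chain, deferring interrupt replay and unloading to the moment the outermost entry is left (prev_ == NULL). A reduced, self-contained model of just that linking discipline, with invented names:

struct DebugScopeState {
  struct Scope* top = nullptr;  // innermost active entry, if any
};

struct Scope {
  explicit Scope(DebugScopeState* state) : state_(state), prev_(state->top) {
    state_->top = this;  // link this entry in front of the previous one
  }
  ~Scope() {
    // A real implementation would replay deferred interrupts here when
    // prev_ == nullptr, i.e. when leaving the outermost entry.
    state_->top = prev_;  // unlink, restoring the outer entry
  }
  DebugScopeState* state_;
  Scope* prev_;
};

int main() {
  DebugScopeState state;
  {
    Scope outer(&state);
    {
      Scope inner(&state);  // recursive entry links to outer
    }  // inner unlinks; outer is the top entry again
  }  // outer unlinks; state.top is nullptr
  return state.top == nullptr ? 0 : 1;
}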
// Check whether the debugger could be entered.
inline Handle<Context> GetContext() { return save_.context(); }
private:
+ Isolate* isolate_;
EnterDebugger* prev_; // Previous debugger entry if entered recursively.
JavaScriptFrameIterator it_;
const bool has_js_frames_; // Were there any JavaScript frames?
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(bool disable_break) {
- prev_disable_break_ = Debug::disable_break();
- Debug::set_disable_break(disable_break);
+ explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
+ prev_disable_break_ = isolate_->debug()->disable_break();
+ isolate_->debug()->set_disable_break(disable_break);
}
~DisableBreak() {
- Debug::set_disable_break(prev_disable_break_);
+ ASSERT(Isolate::Current() == isolate_);
+ isolate_->debug()->set_disable_break(prev_disable_break_);
}
private:
+ Isolate* isolate_;
// The previous state of the disable break used to restore the value when this
// object is destructed.
bool prev_disable_break_;
return Debug_Address(Debug::k_restarter_frame_function_pointer);
}
- Address address() const {
+ Address address(Isolate* isolate) const {
+ Debug* debug = isolate->debug();
switch (id_) {
case Debug::k_after_break_target_address:
- return reinterpret_cast<Address>(Debug::after_break_target_address());
+ return reinterpret_cast<Address>(debug->after_break_target_address());
case Debug::k_debug_break_return_address:
- return reinterpret_cast<Address>(Debug::debug_break_return_address());
+ return reinterpret_cast<Address>(debug->debug_break_return_address());
case Debug::k_debug_break_slot_address:
- return reinterpret_cast<Address>(Debug::debug_break_slot_address());
+ return reinterpret_cast<Address>(debug->debug_break_slot_address());
case Debug::k_restarter_frame_function_pointer:
return reinterpret_cast<Address>(
- Debug::restarter_frame_function_pointer_address());
+ debug->restarter_frame_function_pointer_address());
default:
UNREACHABLE();
return NULL;
// to do this via v8::Debug::HostDispatchHandler
class MessageDispatchHelperThread: public Thread {
public:
- MessageDispatchHelperThread();
+ explicit MessageDispatchHelperThread(Isolate* isolate);
~MessageDispatchHelperThread();
void Schedule();
namespace v8 {
namespace internal {
-LargeObjectChunk* Deoptimizer::eager_deoptimization_entry_code_ = NULL;
-LargeObjectChunk* Deoptimizer::lazy_deoptimization_entry_code_ = NULL;
-Deoptimizer* Deoptimizer::current_ = NULL;
-DeoptimizingCodeListNode* Deoptimizer::deoptimizing_code_list_ = NULL;
+DeoptimizerData::DeoptimizerData() {
+ eager_deoptimization_entry_code_ = NULL;
+ lazy_deoptimization_entry_code_ = NULL;
+ current_ = NULL;
+ deoptimizing_code_list_ = NULL;
+}
+DeoptimizerData::~DeoptimizerData() {
+ if (eager_deoptimization_entry_code_ != NULL) {
+ eager_deoptimization_entry_code_->Free(EXECUTABLE);
+ eager_deoptimization_entry_code_ = NULL;
+ }
+ if (lazy_deoptimization_entry_code_ != NULL) {
+ lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+ lazy_deoptimization_entry_code_ = NULL;
+ }
+}
+
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta) {
- Deoptimizer* deoptimizer =
- new Deoptimizer(function, type, bailout_id, from, fp_to_sp_delta);
- ASSERT(current_ == NULL);
- current_ = deoptimizer;
+ int fp_to_sp_delta,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ Deoptimizer* deoptimizer = new Deoptimizer(isolate,
+ function,
+ type,
+ bailout_id,
+ from,
+ fp_to_sp_delta);
+ ASSERT(isolate->deoptimizer_data()->current_ == NULL);
+ isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
}
-Deoptimizer* Deoptimizer::Grab() {
- Deoptimizer* result = current_;
+Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ Deoptimizer* result = isolate->deoptimizer_data()->current_;
ASSERT(result != NULL);
result->DeleteFrameDescriptions();
- current_ = NULL;
+ isolate->deoptimizer_data()->current_ = NULL;
return result;
}
AssertNoAllocation no_allocation;
// Run through the list of all global contexts and deoptimize.
- Object* global = Heap::global_contexts_list();
+ Object* global = Isolate::Current()->heap()->global_contexts_list();
while (!global->IsUndefined()) {
VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
visitor);
reinterpret_cast<DeoptimizingCodeListNode*>(data);
RemoveDeoptimizingCode(*node->code());
#ifdef DEBUG
- node = Deoptimizer::deoptimizing_code_list_;
+ node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
while (node != NULL) {
ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
node = node->next();
}
-void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
+void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer,
+ Isolate* isolate) {
deoptimizer->DoComputeOutputFrames();
}
-Deoptimizer::Deoptimizer(JSFunction* function,
+Deoptimizer::Deoptimizer(Isolate* isolate,
+ JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
int fp_to_sp_delta)
- : function_(function),
+ : isolate_(isolate),
+ function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
from_(from),
ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!optimized_code_->contains(from));
}
- ASSERT(Heap::allow_allocation(false));
+ ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
}
delete[] output_;
input_ = NULL;
output_ = NULL;
- ASSERT(!Heap::allow_allocation(true));
+ ASSERT(!HEAP->allow_allocation(true));
}
ASSERT(id >= 0);
if (id >= kNumberOfEntries) return NULL;
LargeObjectChunk* base = NULL;
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
- if (eager_deoptimization_entry_code_ == NULL) {
- eager_deoptimization_entry_code_ = CreateCode(type);
+ if (data->eager_deoptimization_entry_code_ == NULL) {
+ data->eager_deoptimization_entry_code_ = CreateCode(type);
}
- base = eager_deoptimization_entry_code_;
+ base = data->eager_deoptimization_entry_code_;
} else {
- if (lazy_deoptimization_entry_code_ == NULL) {
- lazy_deoptimization_entry_code_ = CreateCode(type);
+ if (data->lazy_deoptimization_entry_code_ == NULL) {
+ data->lazy_deoptimization_entry_code_ = CreateCode(type);
}
- base = lazy_deoptimization_entry_code_;
+ base = data->lazy_deoptimization_entry_code_;
}
return
static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
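Each deoptimization entry occupies one fixed-size slot in a single chunk, so GetDeoptimizationEntry can compute an entry's address with plain arithmetic on the chunk base. A tiny standalone illustration of the same computation; all names and sizes here are hypothetical:

#include <cassert>
#include <cstdint>

static uint8_t* EntryAddress(uint8_t* base, int id, int entry_size,
                             int num_entries) {
  assert(id >= 0 && id < num_entries);
  return base + id * entry_size;  // contiguous, uniformly sized slots
}

int main() {
  static uint8_t table[4096];
  // Entry 7 with 10-byte slots starts 70 bytes past the base.
  return EntryAddress(table, 7, 10, 4096 / 10) == table + 70 ? 0 : 1;
}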
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
LargeObjectChunk* base = NULL;
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
- base = eager_deoptimization_entry_code_;
+ base = data->eager_deoptimization_entry_code_;
} else {
- base = lazy_deoptimization_entry_code_;
+ base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
addr < base->GetStartAddress() ||
}
-void Deoptimizer::Setup() {
- // Do nothing yet.
-}
-
-
-void Deoptimizer::TearDown() {
- if (eager_deoptimization_entry_code_ != NULL) {
- eager_deoptimization_entry_code_->Free(EXECUTABLE);
- eager_deoptimization_entry_code_ = NULL;
- }
- if (lazy_deoptimization_entry_code_ != NULL) {
- lazy_deoptimization_entry_code_->Free(EXECUTABLE);
- lazy_deoptimization_entry_code_ = NULL;
- }
-}
-
-
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
unsigned id,
SharedFunctionInfo* shared) {
}
-int Deoptimizer::GetDeoptimizedCodeCount() {
+int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
- DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+ DeoptimizingCodeListNode* node =
+ isolate->deoptimizer_data()->deoptimizing_code_list_;
while (node != NULL) {
length++;
node = node->next();
int tos_index = stack_index + extra_slot_count;
int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
- Handle<Object> num = Factory::NewNumber(val);
+ Handle<Object> num = isolate_->factory()->NewNumber(val);
frame->SetExpression(index, *num);
}
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- Heap::arguments_marker()->ShortPrint();
+ isolate_->heap()->arguments_marker()->ShortPrint();
PrintF(" ; arguments object\n");
}
- intptr_t value = reinterpret_cast<intptr_t>(Heap::arguments_marker());
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
output_[frame_index]->SetFrameSlot(output_offset, value);
return;
}
Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
- DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+ DeoptimizingCodeListNode* node =
+ Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
while (node != NULL) {
if (node->code()->contains(addr)) return *node->code();
node = node->next();
void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
- ASSERT(deoptimizing_code_list_ != NULL);
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ ASSERT(data->deoptimizing_code_list_ != NULL);
// Run through the code objects to find this one and remove it.
DeoptimizingCodeListNode* prev = NULL;
- DeoptimizingCodeListNode* current = deoptimizing_code_list_;
+ DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
while (current != NULL) {
if (*current->code() == code) {
// Unlink from list. If prev is NULL we are looking at the first element.
if (prev == NULL) {
- deoptimizing_code_list_ = current->next();
+ data->deoptimizing_code_list_ = current->next();
} else {
prev->set_next(current->next());
}
Handle<ByteArray> TranslationBuffer::CreateByteArray() {
int length = contents_.length();
- Handle<ByteArray> result = Factory::NewByteArray(length, TENURED);
+ Handle<ByteArray> result =
+ Isolate::Current()->factory()->NewByteArray(length, TENURED);
memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Globalize the code object and make it weak.
- code_ = Handle<Code>::cast((GlobalHandles::Create(code)));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(code_.location()),
- this,
- Deoptimizer::HandleWeakDeoptimizedCode);
+ code_ = Handle<Code>::cast(global_handles->Create(code));
+ global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
+ this,
+ Deoptimizer::HandleWeakDeoptimizedCode);
}
DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(code_.location()));
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
}
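The node globalizes the code handle and immediately makes it weak, so the GC can reclaim the code object while still notifying the deoptimizer (via HandleWeakDeoptimizedCode) that the node should be unlinked. As a loose analogy only, since V8's GlobalHandles are not std::weak_ptr, the same observe-collection idea in standard C++:

#include <iostream>
#include <memory>

struct Code { int id = 0; };

int main() {
  auto live = std::make_shared<Code>();  // strong reference ("the heap")
  std::weak_ptr<Code> tracked = live;    // the node's weak handle

  live.reset();  // the "GC" reclaims the code object
  if (tracked.expired()) {
    // Equivalent of the weak callback: unlink the node from the list.
    std::cout << "code collected, removing node\n";
  }
  return 0;
}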
};
+class Deoptimizer;
+
+
+class DeoptimizerData {
+ public:
+ DeoptimizerData();
+ ~DeoptimizerData();
+
+ private:
+ LargeObjectChunk* eager_deoptimization_entry_code_;
+ LargeObjectChunk* lazy_deoptimization_entry_code_;
+ Deoptimizer* current_;
+
+ // List of deoptimized code objects which still have references from active
+ // stack frames. These code objects are needed by the deoptimizer when
+ // deoptimizing a frame for which the function's code object has been
+ // changed since the deoptimization was performed.
+ DeoptimizingCodeListNode* deoptimizing_code_list_;
+
+ friend class Deoptimizer;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
+};
+
+
class Deoptimizer : public Malloced {
public:
enum BailoutType {
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta);
- static Deoptimizer* Grab();
+ int fp_to_sp_delta,
+ Isolate* isolate);
+ static Deoptimizer* Grab(Isolate* isolate);
// Deoptimize the function now. Its current optimized code will never be run
// again and any activations of the optimized code will get deoptimized when
void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
- static void ComputeOutputFrames(Deoptimizer* deoptimizer);
+ static void ComputeOutputFrames(Deoptimizer* deoptimizer, Isolate* isolate);
static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
unsigned node_id,
SharedFunctionInfo* shared);
- static void Setup();
- static void TearDown();
-
// Code generation support.
static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
static int output_count_offset() {
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
- static int GetDeoptimizedCodeCount();
+ static int GetDeoptimizedCodeCount(Isolate* isolate);
static const int kNotDeoptimizationEntry = -1;
private:
static const int kNumberOfEntries = 4096;
- Deoptimizer(JSFunction* function,
+ Deoptimizer(Isolate* isolate,
+ JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
static Code* FindDeoptimizingCodeFromAddress(Address addr);
static void RemoveDeoptimizingCode(Code* code);
- static LargeObjectChunk* eager_deoptimization_entry_code_;
- static LargeObjectChunk* lazy_deoptimization_entry_code_;
- static Deoptimizer* current_;
-
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
- // a frame for which the code object for the function function has been
- // changed from the code present when deoptimizing was done.
- static DeoptimizingCodeListNode* deoptimizing_code_list_;
-
+ Isolate* isolate_;
JSFunction* function_;
Code* optimized_code_;
unsigned bailout_id_;
virtual const char* NameOfAddress(byte* addr) const;
virtual const char* NameOfConstant(byte* addr) const;
virtual const char* NameInCode(byte* addr) const;
+
+ protected:
+ v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
};
Code* code() const { return code_; }
private:
Code* code_;
+
+ EmbeddedVector<char, 128> v8_buffer_;
};
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- static v8::internal::EmbeddedVector<char, 128> buffer;
-
- const char* name = Builtins::Lookup(pc);
+ const char* name = Isolate::Current()->builtins()->Lookup(pc);
if (name != NULL) {
- OS::SNPrintF(buffer, "%s (%p)", name, pc);
- return buffer.start();
+ OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
+ return v8_buffer_.start();
}
if (code_ != NULL) {
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
- OS::SNPrintF(buffer, "%d (%p)", offs, pc);
- return buffer.start();
+ OS::SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
+ return v8_buffer_.start();
}
}
NoHandleAllocation ha;
AssertNoAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
+ Heap* heap = HEAP;
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
} else if (kind == Code::STUB) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
- Object* obj = Heap::code_stubs()->SlowReverseLookup(code);
- if (obj != Heap::undefined_value()) {
+ Object* obj = heap->code_stubs()->SlowReverseLookup(code);
+ if (obj != heap->undefined_value()) {
ASSERT(obj->IsSmi());
// Get the STUB key and extract major and minor key.
uint32_t key = Smi::cast(obj)->value();
namespace internal {
+StackGuard::StackGuard()
+ : isolate_(NULL) {
+}
+
+
+void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
+ ASSERT(isolate_ != NULL);
+ // Ignore attempts to interrupt when interrupts are postponed.
+ if (should_postpone_interrupts(lock)) return;
+ thread_local_.jslimit_ = kInterruptLimit;
+ thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+}
+
+
+void StackGuard::reset_limits(const ExecutionAccess& lock) {
+ ASSERT(isolate_ != NULL);
+ thread_local_.jslimit_ = thread_local_.real_jslimit_;
+ thread_local_.climit_ = thread_local_.real_climit_;
+ isolate_->heap()->SetStackLimits();
+}
+
+
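set_interrupt_limits stores a sentinel in both stack limits. Generated code polls by comparing the stack pointer against the current limit, so a limit that no real stack pointer can satisfy guarantees the very next check takes the slow path, where pending interrupts are serviced; reset_limits undoes this by restoring the real limits. A standalone model of that polling trick, with illustrative names and values:

#include <cstdint>
#include <iostream>

static const uintptr_t kSentinelLimit = ~uintptr_t{0};  // always "overflowed"

struct Guard {
  uintptr_t limit;
  // Mirrors a stack check: fail when the stack pointer is below the limit.
  bool StackCheckFails(uintptr_t sp) const { return sp < limit; }
};

int main() {
  Guard guard{0x1000};
  uintptr_t sp = 0x8000;
  std::cout << guard.StackCheckFails(sp) << "\n";  // 0: keep running

  guard.limit = kSentinelLimit;  // an interrupt was requested
  std::cout << guard.StackCheckFails(sp) << "\n";  // 1: take the slow path
  return 0;
}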
static Handle<Object> Invoke(bool construct,
Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
Object*** args,
bool* has_pending_exception) {
+ Isolate* isolate = func->GetIsolate();
+
// Entering JavaScript.
- VMState state(JS);
+ VMState state(isolate, JS);
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
- SaveContext save;
+ SaveContext save(isolate);
NoHandleAllocation na;
JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Update the pending exception flag and return the value.
*has_pending_exception = value->IsException();
- ASSERT(*has_pending_exception == Top::has_pending_exception());
+ ASSERT(*has_pending_exception == Isolate::Current()->has_pending_exception());
if (*has_pending_exception) {
- Top::ReportPendingMessages();
- if (Top::pending_exception() == Failure::OutOfMemoryException()) {
- if (!HandleScopeImplementer::instance()->ignore_out_of_memory()) {
+ isolate->ReportPendingMessages();
+ if (isolate->pending_exception() == Failure::OutOfMemoryException()) {
+ if (!isolate->handle_scope_implementer()->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("JS", true);
}
}
return Handle<Object>();
} else {
- Top::clear_pending_message();
+ isolate->clear_pending_message();
}
- return Handle<Object>(value->ToObjectUnchecked());
+ return Handle<Object>(value->ToObjectUnchecked(), isolate);
}
Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
Object*** args, bool* pending_exception) {
- return Invoke(true, func, Top::global(), argc, args, pending_exception);
+ return Invoke(true, func, Isolate::Current()->global(), argc, args,
+ pending_exception);
}
if (*caught_exception) {
ASSERT(catcher.HasCaught());
- ASSERT(Top::has_pending_exception());
- ASSERT(Top::external_caught_exception());
- if (Top::pending_exception() == Heap::termination_exception()) {
- result = Factory::termination_exception();
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->has_pending_exception());
+ ASSERT(isolate->external_caught_exception());
+ if (isolate->pending_exception() ==
+ isolate->heap()->termination_exception()) {
+ result = isolate->factory()->termination_exception();
} else {
result = v8::Utils::OpenHandle(*catcher.Exception());
}
- Top::OptionalRescheduleException(true);
+ isolate->OptionalRescheduleException(true);
}
- ASSERT(!Top::has_pending_exception());
- ASSERT(!Top::external_caught_exception());
+ ASSERT(!Isolate::Current()->has_pending_exception());
+ ASSERT(!Isolate::Current()->external_caught_exception());
return result;
}
// Regular expressions can be called as functions in both Firefox
// and Safari so we allow it too.
if (object->IsJSRegExp()) {
- Handle<String> exec = Factory::exec_symbol();
+ Handle<String> exec = FACTORY->exec_symbol();
// TODO(lrn): Bug 617. We should use the default function here, not the
// one on the RegExp object.
Object* exec_function;
// This can lose an exception, but the alternative is to put a failure
// object in a handle, which is not GC safe.
if (!maybe_exec_function->ToObject(&exec_function)) {
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
}
return Handle<Object>(exec_function);
if (object->IsHeapObject() &&
HeapObject::cast(*object)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- Top::global_context()->call_as_function_delegate());
+ Isolate::Current()->global_context()->call_as_function_delegate());
}
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
if (object->IsHeapObject() &&
HeapObject::cast(*object)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- Top::global_context()->call_as_constructor_delegate());
+ Isolate::Current()->global_context()->call_as_constructor_delegate());
}
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
-// Static state for stack guards.
-StackGuard::ThreadLocal StackGuard::thread_local_;
-
-
bool StackGuard::IsStackOverflow() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
thread_local_.climit_ != kInterruptLimit);
}
void StackGuard::EnableInterrupts() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
if (has_pending_interrupts(access)) {
set_interrupt_limits(access);
}
void StackGuard::SetStackLimit(uintptr_t limit) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
// If the current limits are special (eg due to a pending interrupt) then
// leave them alone.
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
void StackGuard::DisableInterrupts() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
reset_limits(access);
}
bool StackGuard::IsInterrupted() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & INTERRUPT;
}
void StackGuard::Interrupt() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= INTERRUPT;
set_interrupt_limits(access);
}
bool StackGuard::IsPreempted() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & PREEMPT;
}
void StackGuard::Preempt() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= PREEMPT;
set_interrupt_limits(access);
}
bool StackGuard::IsTerminateExecution() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & TERMINATE;
}
void StackGuard::TerminateExecution() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= TERMINATE;
set_interrupt_limits(access);
}
bool StackGuard::IsRuntimeProfilerTick() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
}
void StackGuard::RequestRuntimeProfilerTick() {
// Ignore calls if we're not optimizing or if we can't get the lock.
- if (FLAG_opt && ExecutionAccess::TryLock()) {
+ if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- Heap::SetStackLimits();
+ isolate_->heap()->SetStackLimits();
}
- ExecutionAccess::Unlock();
+ ExecutionAccess::Unlock(isolate_);
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & DEBUGBREAK;
}
void StackGuard::DebugBreak() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= DEBUGBREAK;
set_interrupt_limits(access);
}
bool StackGuard::IsDebugCommand() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & DEBUGCOMMAND;
}
void StackGuard::DebugCommand() {
if (FLAG_debugger_auto_break) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
set_interrupt_limits(access);
}
#endif
void StackGuard::Continue(InterruptFlag after_what) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
reset_limits(access);
}
-int StackGuard::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal);
-}
-
-
char* StackGuard::ArchiveStackGuard(char* to) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadLocal blank;
+
+ // Set the stack limits using the old thread_local_.
+ // TODO(isolates): This preserves the old semantics of constructing a
+ // ThreadLocal (the ctor called SetStackLimits, which looked at the
+ // current thread_local_ from StackGuard), but is this really what
+ // was intended?
+ isolate_->heap()->SetStackLimits();
thread_local_ = blank;
+
return to + sizeof(ThreadLocal);
}
char* StackGuard::RestoreStackGuard(char* from) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- Heap::SetStackLimits();
+ isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
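ArchiveStackGuard and RestoreStackGuard are a plain byte copy of the per-thread guard state into and out of a caller-provided buffer, which is why ArchiveSpacePerThread can simply report sizeof(ThreadLocal). A standalone sketch of the round trip, with ThreadLocalLike standing in for the real StackGuard::ThreadLocal:

    #include <cstdint>
    #include <cstring>

    struct ThreadLocalLike {
      uintptr_t jslimit_ = 0;
      uintptr_t climit_ = 0;
      int nesting_ = 0;
    };

    // Copy the live state out and blank it, as ArchiveStackGuard does above.
    char* Archive(ThreadLocalLike* state, char* to) {
      std::memcpy(to, state, sizeof(ThreadLocalLike));
      *state = ThreadLocalLike();
      return to + sizeof(ThreadLocalLike);
    }

    // Copy a previously archived state back in.
    char* Restore(ThreadLocalLike* state, char* from) {
      std::memcpy(state, from, sizeof(ThreadLocalLike));
      return from + sizeof(ThreadLocalLike);
    }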
-static internal::Thread::LocalStorageKey stack_limit_key =
- internal::Thread::CreateThreadLocalKey();
-
-
void StackGuard::FreeThreadResources() {
- Thread::SetThreadLocal(
- stack_limit_key,
- reinterpret_cast<void*>(thread_local_.real_climit_));
+ Isolate::CurrentPerIsolateThreadData()->set_stack_limit(
+ thread_local_.real_climit_);
}
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
- Heap::SetStackLimits();
}
-void StackGuard::ThreadLocal::Initialize() {
+bool StackGuard::ThreadLocal::Initialize() {
+ bool should_set_stack_limits = false;
if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
real_climit_ = limit;
climit_ = limit;
- Heap::SetStackLimits();
+ should_set_stack_limits = true;
}
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
+ return should_set_stack_limits;
}
void StackGuard::ClearThread(const ExecutionAccess& lock) {
thread_local_.Clear();
+ isolate_->heap()->SetStackLimits();
}
void StackGuard::InitThread(const ExecutionAccess& lock) {
- thread_local_.Initialize();
- void* stored_limit = Thread::GetThreadLocal(stack_limit_key);
+ if (thread_local_.Initialize()) isolate_->heap()->SetStackLimits();
+ uintptr_t stored_limit =
+ Isolate::CurrentPerIsolateThreadData()->stack_limit();
// You should hold the ExecutionAccess lock when you call this.
- if (stored_limit != NULL) {
- StackGuard::SetStackLimit(reinterpret_cast<intptr_t>(stored_limit));
+ if (stored_limit != 0) {
+ StackGuard::SetStackLimit(stored_limit);
}
}
// --- C a l l s   t o   n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
- do { \
- Object** args[argc] = argv; \
- ASSERT(has_pending_exception != NULL); \
- return Call(Top::name##_fun(), Top::builtins(), argc, args, \
- has_pending_exception); \
+#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
+ do { \
+ Object** args[argc] = argv; \
+ ASSERT(has_pending_exception != NULL); \
+ return Call(Isolate::Current()->name##_fun(), \
+ Isolate::Current()->js_builtins_object(), argc, args, \
+ has_pending_exception); \
} while (false)
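For reference, a use such as RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc) (the to_string call is illustrative) now expands to roughly:

    Object** args[1] = { obj.location() };
    ASSERT(exc != NULL);
    return Call(Isolate::Current()->to_string_fun(),
                Isolate::Current()->js_builtins_object(), 1, args, exc);

so every native call is routed through the current isolate instead of the static Top.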
double value = obj->Number();
result = !((value == 0) || isnan(value));
}
- return Handle<Object>(Heap::ToBoolean(result));
+ return Handle<Object>(HEAP->ToBoolean(result));
}
Handle<Object> Execution::NewDate(double time, bool* exc) {
- Handle<Object> time_obj = Factory::NewNumber(time);
+ Handle<Object> time_obj = FACTORY->NewNumber(time);
RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
}
Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
Handle<String> flags,
bool* exc) {
+ Handle<JSFunction> function = Handle<JSFunction>(
+ pattern->GetIsolate()->global_context()->regexp_function());
Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
- Handle<JSFunction>(Top::global_context()->regexp_function()),
- pattern,
- flags,
- exc);
+ function, pattern, flags, exc);
if (*exc) return Handle<JSRegExp>();
return Handle<JSRegExp>::cast(re_obj);
}
Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
int int_index = static_cast<int>(index);
if (int_index < 0 || int_index >= string->length()) {
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
Handle<Object> char_at =
- GetProperty(Top::builtins(), Factory::char_at_symbol());
+ GetProperty(Isolate::Current()->js_builtins_object(),
+ FACTORY->char_at_symbol());
if (!char_at->IsJSFunction()) {
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
bool caught_exception;
- Handle<Object> index_object = Factory::NewNumberFromInt(int_index);
+ Handle<Object> index_object = FACTORY->NewNumberFromInt(int_index);
Object** index_arg[] = { index_object.location() };
Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
string,
index_arg,
&caught_exception);
if (caught_exception) {
- return Factory::undefined_value();
+ return FACTORY->undefined_value();
}
return result;
}
Handle<FunctionTemplateInfo> data, bool* exc) {
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm = Top::global_context()->function_cache()->
- GetElementNoExceptionThrown(serial_number);
+ Object* elm =
+ Isolate::Current()->global_context()->function_cache()->
+ GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
- Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+ Call(Isolate::Current()->instantiate_fun(),
+ Isolate::Current()->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSFunction>::null();
return Handle<JSFunction>::cast(result);
}
} else {
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
- Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+ Call(Isolate::Current()->instantiate_fun(),
+ Isolate::Current()->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSObject>::null();
return Handle<JSObject>::cast(result);
}
Handle<Object> instance_template,
bool* exc) {
Object** args[2] = { instance.location(), instance_template.location() };
- Execution::Call(Top::configure_instance_fun(), Top::builtins(), 2, args, exc);
+ Execution::Call(Isolate::Current()->configure_instance_fun(),
+ Isolate::Current()->js_builtins_object(), 2, args, exc);
}
pos.location(),
is_global.location() };
bool caught_exception = false;
- Handle<Object> result = TryCall(Top::get_stack_trace_line_fun(),
- Top::builtins(), argc, args,
- &caught_exception);
- if (caught_exception || !result->IsString()) return Factory::empty_symbol();
+ Handle<Object> result =
+ TryCall(Isolate::Current()->get_stack_trace_line_fun(),
+ Isolate::Current()->js_builtins_object(), argc, args,
+ &caught_exception);
+ if (caught_exception || !result->IsString()) return FACTORY->empty_symbol();
return Handle<String>::cast(result);
}
static Object* RuntimePreempt() {
+ Isolate* isolate = Isolate::Current();
+
// Clear the preempt request flag.
- StackGuard::Continue(PREEMPT);
+ isolate->stack_guard()->Continue(PREEMPT);
ContextSwitcher::PreemptionReceived();
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::InDebugger()) {
+ if (isolate->debug()->InDebugger()) {
// If currently in the debugger, don't do any actual preemption, but
// record that preemption occurred while in the debugger.
- Debug::PreemptionWhileInDebugger();
+ isolate->debug()->PreemptionWhileInDebugger();
} else {
// Perform preemption.
v8::Unlocker unlocker;
Thread::YieldCPU();
}
#else
- // Perform preemption.
- v8::Unlocker unlocker;
- Thread::YieldCPU();
+ { // NOLINT
+ // Perform preemption.
+ v8::Unlocker unlocker;
+ Thread::YieldCPU();
+ }
#endif
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
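Note that the extra braces in the #else branch give the v8::Unlocker an explicit scope, so the lock is reacquired before the undefined value is returned, matching the lifetime the Unlocker already had inside the debugger branch above.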
#ifdef ENABLE_DEBUGGER_SUPPORT
Object* Execution::DebugBreakHelper() {
+ Isolate* isolate = Isolate::Current();
+
// Just continue if breaks are disabled.
- if (Debug::disable_break()) {
- return Heap::undefined_value();
+ if (isolate->debug()->disable_break()) {
+ return isolate->heap()->undefined_value();
}
// Ignore debug break during bootstrapping.
- if (Bootstrapper::IsActive()) {
- return Heap::undefined_value();
+ if (isolate->bootstrapper()->IsActive()) {
+ return isolate->heap()->undefined_value();
}
{
if (fun && fun->IsJSFunction()) {
// Don't stop in builtin functions.
if (JSFunction::cast(fun)->IsBuiltin()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
GlobalObject* global = JSFunction::cast(fun)->context()->global();
// Don't stop in debugger functions.
- if (Debug::IsDebugGlobal(global)) {
- return Heap::undefined_value();
+ if (isolate->debug()->IsDebugGlobal(global)) {
+ return isolate->heap()->undefined_value();
}
}
}
// Collect the break state before clearing the flags.
bool debug_command_only =
- StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
+ isolate->stack_guard()->IsDebugCommand() &&
+ !isolate->stack_guard()->IsDebugBreak();
// Clear the debug break request flag.
- StackGuard::Continue(DEBUGBREAK);
+ isolate->stack_guard()->Continue(DEBUGBREAK);
ProcessDebugMesssages(debug_command_only);
// Return to continue execution.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
void Execution::ProcessDebugMesssages(bool debug_command_only) {
// Clear the debug command request flag.
- StackGuard::Continue(DEBUGCOMMAND);
+ Isolate::Current()->stack_guard()->Continue(DEBUGCOMMAND);
HandleScope scope;
// Enter the debugger. Just continue if we fail to enter the debugger.
// Notify the debug event listeners. Indicate auto continue if the break was
// a debug command break.
- Debugger::OnDebugBreak(Factory::undefined_value(), debug_command_only);
+ Isolate::Current()->debugger()->OnDebugBreak(FACTORY->undefined_value(),
+ debug_command_only);
}
#endif
MaybeObject* Execution::HandleStackGuardInterrupt() {
- Counters::stack_interrupts.Increment();
- if (StackGuard::IsRuntimeProfilerTick()) {
- Counters::runtime_profiler_ticks.Increment();
- StackGuard::Continue(RUNTIME_PROFILER_TICK);
- RuntimeProfiler::OptimizeNow();
+ Isolate* isolate = Isolate::Current();
+ StackGuard* stack_guard = isolate->stack_guard();
+ isolate->counters()->stack_interrupts()->Increment();
+ if (stack_guard->IsRuntimeProfilerTick()) {
+ isolate->counters()->runtime_profiler_ticks()->Increment();
+ stack_guard->Continue(RUNTIME_PROFILER_TICK);
+ isolate->runtime_profiler()->OptimizeNow();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
+ if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();
}
#endif
- if (StackGuard::IsPreempted()) RuntimePreempt();
- if (StackGuard::IsTerminateExecution()) {
- StackGuard::Continue(TERMINATE);
- return Top::TerminateExecution();
+ if (stack_guard->IsPreempted()) RuntimePreempt();
+ if (stack_guard->IsTerminateExecution()) {
+ stack_guard->Continue(TERMINATE);
+ return isolate->TerminateExecution();
}
- if (StackGuard::IsInterrupted()) {
- StackGuard::Continue(INTERRUPT);
- return Top::StackOverflow();
+ if (stack_guard->IsInterrupted()) {
+ stack_guard->Continue(INTERRUPT);
+ return isolate->StackOverflow();
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
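HandleStackGuardInterrupt dispatches on bit flags in a fixed order, clearing each handled flag via Continue before acting on it so the same request cannot fire twice. A compact standalone illustration of that pattern, with illustrative names:

    enum InterruptFlagLike {
      INTERRUPT_BIT = 1 << 0,
      TERMINATE_BIT = 1 << 1,
    };

    struct GuardLike {
      int flags = 0;
      bool Is(InterruptFlagLike f) const { return (flags & f) != 0; }
      void Continue(InterruptFlagLike f) { flags &= ~static_cast<int>(f); }
    };

    // Termination is checked before a plain interrupt, as above.
    const char* HandlePending(GuardLike* guard) {
      if (guard->Is(TERMINATE_BIT)) {
        guard->Continue(TERMINATE_BIT);
        return "terminate execution";
      }
      if (guard->Is(INTERRUPT_BIT)) {
        guard->Continue(INTERRUPT_BIT);
        return "stack overflow";
      }
      return "undefined";  // nothing pending
    }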
} } // namespace v8::internal
class ExecutionAccess;
+class Isolate;
// StackGuard contains the handling of the limits that are used to limit the
// number of nested invocations of JavaScript and the stack size used in each
// invocation.
-class StackGuard : public AllStatic {
+class StackGuard {
public:
// Pass the address beyond which the stack should not grow. The stack
// is assumed to grow downwards.
- static void SetStackLimit(uintptr_t limit);
+ void SetStackLimit(uintptr_t limit);
// Threading support.
- static char* ArchiveStackGuard(char* to);
- static char* RestoreStackGuard(char* from);
- static int ArchiveSpacePerThread();
- static void FreeThreadResources();
+ char* ArchiveStackGuard(char* to);
+ char* RestoreStackGuard(char* from);
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
+ void FreeThreadResources();
// Sets up the default stack guard for this thread if it has not
// already been set up.
- static void InitThread(const ExecutionAccess& lock);
+ void InitThread(const ExecutionAccess& lock);
// Clears the stack guard for this thread so it does not look as if
// it has been set up.
- static void ClearThread(const ExecutionAccess& lock);
-
- static bool IsStackOverflow();
- static bool IsPreempted();
- static void Preempt();
- static bool IsInterrupted();
- static void Interrupt();
- static bool IsTerminateExecution();
- static void TerminateExecution();
- static bool IsRuntimeProfilerTick();
- static void RequestRuntimeProfilerTick();
+ void ClearThread(const ExecutionAccess& lock);
+
+ bool IsStackOverflow();
+ bool IsPreempted();
+ void Preempt();
+ bool IsInterrupted();
+ void Interrupt();
+ bool IsTerminateExecution();
+ void TerminateExecution();
+ bool IsRuntimeProfilerTick();
+ void RequestRuntimeProfilerTick();
#ifdef ENABLE_DEBUGGER_SUPPORT
- static bool IsDebugBreak();
- static void DebugBreak();
- static bool IsDebugCommand();
- static void DebugCommand();
+ bool IsDebugBreak();
+ void DebugBreak();
+ bool IsDebugCommand();
+ void DebugCommand();
#endif
- static void Continue(InterruptFlag after_what);
+ void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
// have the global V8 lock if you are using multiple V8 threads.
- static uintptr_t climit() {
+ uintptr_t climit() {
return thread_local_.climit_;
}
- static uintptr_t real_climit() {
+ uintptr_t real_climit() {
return thread_local_.real_climit_;
}
- static uintptr_t jslimit() {
+ uintptr_t jslimit() {
return thread_local_.jslimit_;
}
- static uintptr_t real_jslimit() {
+ uintptr_t real_jslimit() {
return thread_local_.real_jslimit_;
}
- static Address address_of_jslimit() {
+ Address address_of_jslimit() {
return reinterpret_cast<Address>(&thread_local_.jslimit_);
}
- static Address address_of_real_jslimit() {
+ Address address_of_real_jslimit() {
return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
}
private:
+ StackGuard();
+
// You should hold the ExecutionAccess lock when calling this method.
- static bool has_pending_interrupts(const ExecutionAccess& lock) {
+ bool has_pending_interrupts(const ExecutionAccess& lock) {
    // Sanity check: we should not be asking about pending interrupts
    // unless we are no longer postponing them.
ASSERT(!should_postpone_interrupts(lock));
}
// You should hold the ExecutionAccess lock when calling this method.
- static bool should_postpone_interrupts(const ExecutionAccess& lock) {
+ bool should_postpone_interrupts(const ExecutionAccess& lock) {
return thread_local_.postpone_interrupts_nesting_ > 0;
}
// You should hold the ExecutionAccess lock when calling this method.
- static void set_interrupt_limits(const ExecutionAccess& lock) {
- // Ignore attempts to interrupt when interrupts are postponed.
- if (should_postpone_interrupts(lock)) return;
- thread_local_.jslimit_ = kInterruptLimit;
- thread_local_.climit_ = kInterruptLimit;
- Heap::SetStackLimits();
- }
+ inline void set_interrupt_limits(const ExecutionAccess& lock);
// Reset limits to actual values. For example after handling interrupt.
// You should hold the ExecutionAccess lock when calling this method.
- static void reset_limits(const ExecutionAccess& lock) {
- thread_local_.jslimit_ = thread_local_.real_jslimit_;
- thread_local_.climit_ = thread_local_.real_climit_;
- Heap::SetStackLimits();
- }
+ inline void reset_limits(const ExecutionAccess& lock);
// Enable or disable interrupts.
- static void EnableInterrupts();
- static void DisableInterrupts();
+ void EnableInterrupts();
+ void DisableInterrupts();
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
ThreadLocal() { Clear(); }
// You should hold the ExecutionAccess lock when you call Initialize or
// Clear.
- void Initialize();
void Clear();
+ // Returns true if the heap's stack limits should be set, false if not.
+ bool Initialize();
+
// The stack limit is split into a JavaScript and a C++ stack limit. These
// two are the same except when running on a simulator where the C++ and
  // JavaScript stacks are separate. Each of the two stack limits has two
int interrupt_flags_;
};
- static ThreadLocal thread_local_;
+ // TODO(isolates): Technically this could be calculated directly from a
+ // pointer to StackGuard.
+ Isolate* isolate_;
+ ThreadLocal thread_local_;
+ friend class Isolate;
friend class StackLimitCheck;
friend class PostponeInterruptsScope;
-};
-
-// Support for checking for stack-overflows in C++ code.
-class StackLimitCheck BASE_EMBEDDED {
- public:
- bool HasOverflowed() const {
- // Stack has overflowed in C++ code only if stack pointer exceeds the C++
- // stack guard and the limits are not set to interrupt values.
- // TODO(214): Stack overflows are ignored if a interrupt is pending. This
- // code should probably always use the initial C++ limit.
- return (reinterpret_cast<uintptr_t>(this) < StackGuard::climit()) &&
- StackGuard::IsStackOverflow();
- }
+ DISALLOW_COPY_AND_ASSIGN(StackGuard);
};
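With StackGuard no longer AllStatic, former StackGuard::Foo() call sites go through the owning isolate instead, as seen throughout the execution.cc changes above, e.g.:

    isolate->stack_guard()->Interrupt();
    if (isolate->stack_guard()->IsInterrupted()) {
      isolate->stack_guard()->Continue(INTERRUPT);
    }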
-// Support for temporarily postponing interrupts. When the outermost
-// postpone scope is left the interrupts will be re-enabled and any
-// interrupts that occurred while in the scope will be taken into
-// account.
-class PostponeInterruptsScope BASE_EMBEDDED {
- public:
- PostponeInterruptsScope() {
- StackGuard::thread_local_.postpone_interrupts_nesting_++;
- StackGuard::DisableInterrupts();
- }
-
- ~PostponeInterruptsScope() {
- if (--StackGuard::thread_local_.postpone_interrupts_nesting_ == 0) {
- StackGuard::EnableInterrupts();
- }
- }
-};
-
} } // namespace v8::internal
#endif // V8_EXECUTION_H_
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsSymbol()) {
- i::ExternalStringTable::AddString(*string);
+ HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
} else {
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsSymbol()) {
- i::ExternalStringTable::AddString(*string);
+ HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
}
if (args.Length() >= 1 && args[0]->IsBoolean()) {
compact = args[0]->BooleanValue();
}
- Heap::CollectAllGarbage(compact);
+ HEAP->CollectAllGarbage(compact);
return v8::Undefined();
}
Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(Heap::AllocateFixedArray(size, pretenure), FixedArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFixedArray(size, pretenure),
+ FixedArray);
}
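CALL_HEAP_FUNCTION (defined in the heap headers, not shown here) now takes the isolate as its first argument; it performs the raw allocation, retries after a garbage collection if the allocation fails, and wraps the successful result in a Handle. A rough standalone sketch of that allocate-with-retry shape, assuming allocate() returns a pointer that is null on failure; this is not the real macro:

    template <typename AllocFn, typename GcFn>
    auto AllocateWithRetry(AllocFn allocate, GcFn collect_garbage)
        -> decltype(allocate()) {
      if (auto result = allocate()) return result;  // fast path
      collect_garbage();                            // reclaim space, then retry
      return allocate();                            // null here means real OOM
    }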
Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
PretenureFlag pretenure) {
ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(Heap::AllocateFixedArrayWithHoles(size, pretenure),
- FixedArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFixedArrayWithHoles(size, pretenure),
+ FixedArray);
}
Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(StringDictionary::Allocate(at_least_space_for),
+ CALL_HEAP_FUNCTION(isolate(),
+ StringDictionary::Allocate(at_least_space_for),
StringDictionary);
}
Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(NumberDictionary::Allocate(at_least_space_for),
+ CALL_HEAP_FUNCTION(isolate(),
+ NumberDictionary::Allocate(at_least_space_for),
NumberDictionary);
}
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
ASSERT(0 <= number_of_descriptors);
- CALL_HEAP_FUNCTION(DescriptorArray::Allocate(number_of_descriptors),
+ CALL_HEAP_FUNCTION(isolate(),
+ DescriptorArray::Allocate(number_of_descriptors),
DescriptorArray);
}
int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(DeoptimizationInputData::Allocate(deopt_entry_count,
+ CALL_HEAP_FUNCTION(isolate(),
+ DeoptimizationInputData::Allocate(deopt_entry_count,
pretenure),
DeoptimizationInputData);
}
int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(DeoptimizationOutputData::Allocate(deopt_entry_count,
+ CALL_HEAP_FUNCTION(isolate(),
+ DeoptimizationOutputData::Allocate(deopt_entry_count,
pretenure),
DeoptimizationOutputData);
}
// Symbols are created in the old generation (data space).
Handle<String> Factory::LookupSymbol(Vector<const char> string) {
- CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupSymbol(string),
+ String);
}
Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
- CALL_HEAP_FUNCTION(Heap::LookupAsciiSymbol(string), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupAsciiSymbol(string),
+ String);
}
Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
- CALL_HEAP_FUNCTION(Heap::LookupTwoByteSymbol(string), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupTwoByteSymbol(string),
+ String);
}
Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateStringFromAscii(string, pretenure), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStringFromAscii(string, pretenure),
+ String);
}
Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateStringFromUtf8(string, pretenure), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStringFromUtf8(string, pretenure),
+ String);
}
Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateStringFromTwoByte(string, pretenure),
- String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStringFromTwoByte(string, pretenure),
+ String);
}
Handle<String> Factory::NewRawAsciiString(int length,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(length, pretenure), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateRawAsciiString(length, pretenure),
+ String);
}
Handle<String> Factory::NewRawTwoByteString(int length,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(length, pretenure), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
+ String);
}
Handle<String> Factory::NewConsString(Handle<String> first,
Handle<String> second) {
- CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateConsString(*first, *second),
+ String);
}
Handle<String> Factory::NewSubString(Handle<String> str,
int begin,
int end) {
- CALL_HEAP_FUNCTION(str->SubString(begin, end), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ str->SubString(begin, end),
+ String);
}
Handle<String> Factory::NewExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
- CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromAscii(resource), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateExternalStringFromAscii(resource),
+ String);
}
Handle<String> Factory::NewExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource) {
- CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromTwoByte(resource), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
+ String);
}
Handle<Context> Factory::NewGlobalContext() {
- CALL_HEAP_FUNCTION(Heap::AllocateGlobalContext(), Context);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateGlobalContext(),
+ Context);
}
Handle<Context> Factory::NewFunctionContext(int length,
Handle<JSFunction> closure) {
- CALL_HEAP_FUNCTION(Heap::AllocateFunctionContext(length, *closure), Context);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunctionContext(length, *closure),
+ Context);
}
Handle<Context> Factory::NewWithContext(Handle<Context> previous,
Handle<JSObject> extension,
bool is_catch_context) {
- CALL_HEAP_FUNCTION(Heap::AllocateWithContext(*previous,
- *extension,
- is_catch_context),
- Context);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateWithContext(*previous,
+ *extension,
+ is_catch_context),
+ Context);
}
Handle<Struct> Factory::NewStruct(InstanceType type) {
- CALL_HEAP_FUNCTION(Heap::AllocateStruct(type), Struct);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStruct(type),
+ Struct);
}
Handle<Script> Factory::NewScript(Handle<String> source) {
// Generate id for this script.
int id;
- if (Heap::last_script_id()->IsUndefined()) {
+ Heap* heap = isolate()->heap();
+ if (heap->last_script_id()->IsUndefined()) {
// Script ids start from one.
id = 1;
} else {
// Increment id, wrap when positive smi is exhausted.
- id = Smi::cast(Heap::last_script_id())->value();
+ id = Smi::cast(heap->last_script_id())->value();
id++;
if (!Smi::IsValid(id)) {
id = 0;
}
}
- Heap::SetLastScriptId(Smi::FromInt(id));
+ heap->SetLastScriptId(Smi::FromInt(id));
// Create and initialize script object.
- Handle<Proxy> wrapper = Factory::NewProxy(0, TENURED);
+ Handle<Proxy> wrapper = NewProxy(0, TENURED);
Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
script->set_source(*source);
- script->set_name(Heap::undefined_value());
- script->set_id(Heap::last_script_id());
+ script->set_name(heap->undefined_value());
+ script->set_id(heap->last_script_id());
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
- script->set_data(Heap::undefined_value());
- script->set_context_data(Heap::undefined_value());
+ script->set_data(heap->undefined_value());
+ script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
script->set_wrapper(*wrapper);
- script->set_line_ends(Heap::undefined_value());
- script->set_eval_from_shared(Heap::undefined_value());
+ script->set_line_ends(heap->undefined_value());
+ script->set_eval_from_shared(heap->undefined_value());
script->set_eval_from_instructions_offset(Smi::FromInt(0));
return script;
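Script ids are thus allocated from a monotonically increasing Smi: the first script gets id 1, and the counter wraps to 0 once the positive Smi range is exhausted. A compact restatement of the id step (smi_valid stands in for Smi::IsValid):

    int NextScriptId(bool has_last, int last, bool (*smi_valid)(int)) {
      if (!has_last) return 1;        // script ids start from one
      int id = last + 1;
      return smi_valid(id) ? id : 0;  // wrap on Smi overflow
    }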
Handle<Proxy> Factory::NewProxy(Address addr, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateProxy(addr, pretenure), Proxy);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateProxy(addr, pretenure),
+ Proxy);
}
Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(Heap::AllocateByteArray(length, pretenure), ByteArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateByteArray(length, pretenure),
+ ByteArray);
}
void* external_pointer,
PretenureFlag pretenure) {
ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(Heap::AllocateExternalArray(length,
- array_type,
- external_pointer,
- pretenure), ExternalArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateExternalArray(length,
+ array_type,
+ external_pointer,
+ pretenure),
+ ExternalArray);
}
Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
Handle<Object> value) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSGlobalPropertyCell(*value),
- JSGlobalPropertyCell);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
+ JSGlobalPropertyCell);
}
Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
- CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateMap(type, instance_size),
+ Map);
}
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(Heap::AllocateFunctionPrototype(*function), JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunctionPrototype(*function),
+ JSObject);
}
Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->CopyDropDescriptors(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->CopyDropDescriptors(), Map);
}
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map);
}
Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->GetFastElementsMap(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
}
Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->GetSlowElementsMap(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
}
Handle<Map> Factory::NewExternalArrayElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->NewExternalArrayElementsMap(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->NewExternalArrayElementsMap(), Map);
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
}
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
- *function_info,
- Heap::the_hole_value(),
- pretenure),
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunction(*function_map,
+ *function_info,
+ isolate()->heap()->the_hole_value(),
+ pretenure),
JSFunction);
}
Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
function_info,
function_info->strict_mode()
- ? Top::strict_mode_function_map()
- : Top::function_map(),
+ ? isolate()->strict_mode_function_map()
+ : isolate()->function_map(),
pretenure);
result->set_context(*context);
int number_of_literals = function_info->num_literals();
- Handle<FixedArray> literals =
- Factory::NewFixedArray(number_of_literals, pretenure);
+ Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating
context->global_context());
}
result->set_literals(*literals);
- result->set_next_function_link(Heap::undefined_value());
+ result->set_next_function_link(isolate()->heap()->undefined_value());
if (V8::UseCrankshaft() &&
FLAG_always_opt &&
Handle<Object> Factory::NewNumber(double value,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::NumberFromDouble(value, pretenure), Object);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->NumberFromDouble(value, pretenure), Object);
}
Handle<Object> Factory::NewNumberFromInt(int value) {
- CALL_HEAP_FUNCTION(Heap::NumberFromInt32(value), Object);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->NumberFromInt32(value), Object);
}
Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
- CALL_HEAP_FUNCTION(Heap::NumberFromUint32(value), Object);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->NumberFromUint32(value), Object);
}
Handle<JSObject> Factory::NewNeanderObject() {
- CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(Heap::neander_map()),
- JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObjectFromMap(
+ isolate()->heap()->neander_map()),
+ JSObject);
}
Handle<Object> Factory::NewError(const char* maker, const char* type,
Vector< Handle<Object> > args) {
v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
- Handle<FixedArray> array = Factory::NewFixedArray(args.length());
+ Handle<FixedArray> array = NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
array->set(i, *args[i]);
}
- Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
+ Handle<JSArray> object = NewJSArrayWithElements(array);
Handle<Object> result = NewError(maker, type, object);
return result.EscapeFrom(&scope);
}
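The EscapeFrom call matters here: handles allocated inside the local v8::HandleScope would be freed when the scope closes, so the result is promoted out into the enclosing scope before returning.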
Handle<Object> Factory::NewError(const char* maker,
const char* type,
Handle<JSArray> args) {
- Handle<String> make_str = Factory::LookupAsciiSymbol(maker);
- Handle<Object> fun_obj(Top::builtins()->GetPropertyNoExceptionThrown(
- *make_str));
+ Handle<String> make_str = LookupAsciiSymbol(maker);
+ Handle<Object> fun_obj(
+ isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction())
- return Factory::undefined_value();
+ return undefined_value();
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> type_obj = Factory::LookupAsciiSymbol(type);
+ Handle<Object> type_obj = LookupAsciiSymbol(type);
Object** argv[2] = { type_obj.location(),
Handle<Object>::cast(args).location() };
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- Top::builtins(),
- 2,
- argv,
- &caught_exception);
+ isolate()->js_builtins_object(), 2, argv, &caught_exception);
return result;
}
Handle<Object> Factory::NewError(const char* constructor,
Handle<String> message) {
- Handle<String> constr = Factory::LookupAsciiSymbol(constructor);
- Handle<JSFunction> fun =
- Handle<JSFunction>(
- JSFunction::cast(
- Top::builtins()->GetPropertyNoExceptionThrown(*constr)));
+ Handle<String> constr = LookupAsciiSymbol(constructor);
+ Handle<JSFunction> fun = Handle<JSFunction>(
+ JSFunction::cast(isolate()->js_builtins_object()->
+ GetPropertyNoExceptionThrown(*constr)));
Object** argv[1] = { Handle<Object>::cast(message).location() };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- Top::builtins(),
- 1,
- argv,
- &caught_exception);
+ isolate()->js_builtins_object(), 1, argv, &caught_exception);
return result;
}
// property that refers to the function.
SetPrototypeProperty(function, prototype);
// Currently safe because it is only invoked from Genesis.
- SetLocalPropertyNoThrow(
- prototype, Factory::constructor_symbol(), function, DONT_ENUM);
+ SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM);
return function;
}
Code::Flags flags,
Handle<Object> self_ref,
bool immovable) {
- CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref, immovable), Code);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CreateCode(
+ desc, flags, self_ref, immovable),
+ Code);
}
Handle<Code> Factory::CopyCode(Handle<Code> code) {
- CALL_HEAP_FUNCTION(Heap::CopyCode(*code), Code);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyCode(*code),
+ Code);
}
Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
- CALL_HEAP_FUNCTION(Heap::CopyCode(*code, reloc_info), Code);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyCode(*code, reloc_info),
+ Code);
}
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(DoCopyInsert(*array, *key, *value, attributes),
+ CALL_HEAP_FUNCTION(isolate(),
+ DoCopyInsert(*array, *key, *value, attributes),
DescriptorArray);
}
Handle<String> Factory::SymbolFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(Heap::LookupSymbol(*value), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupSymbol(*value), String);
}
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSObject(*constructor, pretenure), JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
}
Handle<GlobalObject> Factory::NewGlobalObject(
Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(Heap::AllocateGlobalObject(*constructor),
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateGlobalObject(*constructor),
GlobalObject);
}
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(*map, NOT_TENURED),
- JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
+ JSObject);
}
Handle<JSArray> Factory::NewJSArray(int capacity,
PretenureFlag pretenure) {
- Handle<JSObject> obj = NewJSObject(Top::array_function(), pretenure);
- CALL_HEAP_FUNCTION(Handle<JSArray>::cast(obj)->Initialize(capacity), JSArray);
+ Handle<JSObject> obj = NewJSObject(isolate()->array_function(), pretenure);
+ CALL_HEAP_FUNCTION(isolate(),
+ Handle<JSArray>::cast(obj)->Initialize(capacity),
+ JSArray);
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
PretenureFlag pretenure) {
Handle<JSArray> result =
- Handle<JSArray>::cast(NewJSObject(Top::array_function(), pretenure));
+ Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
+ pretenure));
result->SetContent(*elements);
return result;
}
Handle<Object> script,
Handle<Object> stack_trace,
Handle<Object> stack_frames) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSMessageObject(*type,
- *arguments,
- start_position,
- end_position,
- *script,
- *stack_trace,
- *stack_frames),
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateJSMessageObject(*type,
+ *arguments,
+ start_position,
+ end_position,
+ *script,
+ *stack_trace,
+ *stack_frames),
JSMessageObject);
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
- CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateSharedFunctionInfo(*name),
SharedFunctionInfo);
}
Handle<String> Factory::NumberToString(Handle<Object> number) {
- CALL_HEAP_FUNCTION(Heap::NumberToString(*number), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->NumberToString(*number), String);
}
Handle<NumberDictionary> dictionary,
uint32_t key,
Handle<Object> value) {
- CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), NumberDictionary);
+ CALL_HEAP_FUNCTION(isolate(),
+ dictionary->AtNumberPut(key, *value),
+ NumberDictionary);
}
Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
Handle<Object> prototype) {
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- CALL_HEAP_FUNCTION(Heap::AllocateFunction(*Top::function_map(),
- *function_share,
- *prototype),
- JSFunction);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunction(*isolate()->function_map(),
+ *function_share,
+ *prototype),
+ JSFunction);
}
Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<Object> prototype) {
Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
- fun->set_context(Top::context()->global_context());
+ fun->set_context(isolate()->context()->global_context());
return fun;
}
StrictModeFlag strict_mode) {
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
Handle<Map> map = strict_mode == kStrictMode
- ? Top::strict_mode_function_without_prototype_map()
- : Top::function_without_prototype_map();
- CALL_HEAP_FUNCTION(Heap::AllocateFunction(
+ ? isolate()->strict_mode_function_without_prototype_map()
+ : isolate()->function_without_prototype_map();
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFunction(
*map,
*function_share,
*the_hole_value()),
Handle<String> name,
StrictModeFlag strict_mode) {
Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
- fun->set_context(Top::context()->global_context());
+ fun->set_context(isolate()->context()->global_context());
return fun;
}
Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(object->ToObject(), Object);
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(), Object);
}
Handle<Object> Factory::ToObject(Handle<Object> object,
Handle<Context> global_context) {
- CALL_HEAP_FUNCTION(object->ToObject(*global_context), Object);
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(*global_context), Object);
}
// debug info object to avoid allocation while setting up the debug info
// object.
Handle<FixedArray> break_points(
- Factory::NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
+ NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
// Create and set up the debug info object. Debug info contains function, a
// copy of the original code, the executing code and initial fixed array for
// active break points.
Handle<DebugInfo> debug_info =
- Handle<DebugInfo>::cast(Factory::NewStruct(DEBUG_INFO_TYPE));
+ Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
debug_info->set_shared(*shared);
debug_info->set_original_code(*original_code);
debug_info->set_code(*code);
Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
int length) {
- CALL_HEAP_FUNCTION(Heap::AllocateArgumentsObject(*callee, length), JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateArgumentsObject(*callee, length), JSObject);
}
Handle<JSFunction> Factory::CreateApiFunction(
Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::HandleApiCall));
+ Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
+ Builtins::HandleApiCall));
Handle<Code> construct_stub =
- Handle<Code>(Builtins::builtin(Builtins::JSConstructStubApi));
+ Handle<Code>(isolate()->builtins()->builtin(
+ Builtins::JSConstructStubApi));
int internal_field_count = 0;
if (!obj->instance_template()->IsUndefined()) {
ASSERT(type != INVALID_TYPE);
Handle<JSFunction> result =
- Factory::NewFunction(Factory::empty_symbol(),
- type,
- instance_size,
- code,
- true);
+ NewFunction(Factory::empty_symbol(),
+ type,
+ instance_size,
+ code,
+ true);
// Set class name.
Handle<Object> class_name = Handle<Object>(obj->class_name());
if (class_name->IsString()) {
while (true) {
Handle<Object> props = Handle<Object>(obj->property_accessors());
if (!props->IsUndefined()) {
- array = Factory::CopyAppendCallbackDescriptors(array, props);
+ array = CopyAppendCallbackDescriptors(array, props);
}
Handle<Object> parent = Handle<Object>(obj->parent_template());
if (parent->IsUndefined()) break;
Handle<MapCache> Factory::NewMapCache(int at_least_space_for) {
- CALL_HEAP_FUNCTION(MapCache::Allocate(at_least_space_for), MapCache);
+ CALL_HEAP_FUNCTION(isolate(),
+ MapCache::Allocate(at_least_space_for), MapCache);
}
Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
Handle<FixedArray> keys,
Handle<Map> map) {
- CALL_HEAP_FUNCTION(UpdateMapCacheWith(*context, *keys, *map), MapCache);
+ CALL_HEAP_FUNCTION(isolate(),
+ UpdateMapCacheWith(*context, *keys, *map), MapCache);
}
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
- store->set(JSRegExp::kIrregexpASCIICodeIndex, Heap::the_hole_value());
- store->set(JSRegExp::kIrregexpUC16CodeIndex, Heap::the_hole_value());
+ store->set(JSRegExp::kIrregexpASCIICodeIndex, HEAP->the_hole_value());
+ store->set(JSRegExp::kIrregexpUC16CodeIndex, HEAP->the_hole_value());
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
store->set(JSRegExp::kIrregexpCaptureCountIndex,
Smi::FromInt(capture_count));
#define V8_FACTORY_H_
#include "globals.h"
+#include "handles.h"
#include "heap.h"
namespace v8 {
// Interface for handle based allocation.
-class Factory : public AllStatic {
+class Factory {
public:
// Allocate a new fixed array with undefined entries.
- static Handle<FixedArray> NewFixedArray(
+ Handle<FixedArray> NewFixedArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
// Allocate a new fixed array with non-existing entries (the hole).
- static Handle<FixedArray> NewFixedArrayWithHoles(
+ Handle<FixedArray> NewFixedArrayWithHoles(
int size,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
+ Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
- static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
+ Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
- static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
- static Handle<DeoptimizationInputData> NewDeoptimizationInputData(
+ Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+ Handle<DeoptimizationInputData> NewDeoptimizationInputData(
int deopt_entry_count,
PretenureFlag pretenure);
- static Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
+ Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
int deopt_entry_count,
PretenureFlag pretenure);
- static Handle<String> LookupSymbol(Vector<const char> str);
- static Handle<String> LookupAsciiSymbol(Vector<const char> str);
- static Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
- static Handle<String> LookupAsciiSymbol(const char* str) {
+ Handle<String> LookupSymbol(Vector<const char> str);
+ Handle<String> LookupAsciiSymbol(Vector<const char> str);
+ Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
+ Handle<String> LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
// two byte.
//
// ASCII strings are pretenured when used as keys in the SourceCodeCache.
- static Handle<String> NewStringFromAscii(
+ Handle<String> NewStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
- static Handle<String> NewStringFromUtf8(
+ Handle<String> NewStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<String> NewStringFromTwoByte(
+ Handle<String> NewStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- static Handle<String> NewRawAsciiString(
+ Handle<String> NewRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<String> NewRawTwoByteString(
+ Handle<String> NewRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
// Create a new cons string object which consists of a pair of strings.
- static Handle<String> NewConsString(Handle<String> first,
- Handle<String> second);
+ Handle<String> NewConsString(Handle<String> first,
+ Handle<String> second);
// Create a new string object which holds a substring of a string.
- static Handle<String> NewSubString(Handle<String> str,
- int begin,
- int end);
+ Handle<String> NewSubString(Handle<String> str,
+ int begin,
+ int end);
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer.
- static Handle<String> NewExternalStringFromAscii(
+ Handle<String> NewExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
- static Handle<String> NewExternalStringFromTwoByte(
+ Handle<String> NewExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
- static Handle<Context> NewGlobalContext();
+ Handle<Context> NewGlobalContext();
// Create a function context.
- static Handle<Context> NewFunctionContext(int length,
- Handle<JSFunction> closure);
+ Handle<Context> NewFunctionContext(int length,
+ Handle<JSFunction> closure);
// Create a 'with' context.
- static Handle<Context> NewWithContext(Handle<Context> previous,
- Handle<JSObject> extension,
- bool is_catch_context);
+ Handle<Context> NewWithContext(Handle<Context> previous,
+ Handle<JSObject> extension,
+ bool is_catch_context);
// Return the Symbol matching the passed in string.
- static Handle<String> SymbolFromString(Handle<String> value);
+ Handle<String> SymbolFromString(Handle<String> value);
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
- static Handle<Struct> NewStruct(InstanceType type);
+ Handle<Struct> NewStruct(InstanceType type);
- static Handle<AccessorInfo> NewAccessorInfo();
+ Handle<AccessorInfo> NewAccessorInfo();
- static Handle<Script> NewScript(Handle<String> source);
+ Handle<Script> NewScript(Handle<String> source);
// Proxies are pretenured when allocated by the bootstrapper.
- static Handle<Proxy> NewProxy(Address addr,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Proxy> NewProxy(Address addr,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocate a new proxy. The proxy is pretenured (allocated directly in
// the old generation).
- static Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
+ Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
- static Handle<ByteArray> NewByteArray(int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<ByteArray> NewByteArray(int length,
+ PretenureFlag pretenure = NOT_TENURED);
- static Handle<ExternalArray> NewExternalArray(
+ Handle<ExternalArray> NewExternalArray(
int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
+ Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
- static Handle<Map> NewMap(InstanceType type, int instance_size);
+ Handle<Map> NewMap(InstanceType type, int instance_size);
- static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
+ Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
- static Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
+ Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
// Copy the map adding more inobject properties if possible without
// overflowing the instance size.
- static Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
+ Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
- static Handle<Map> CopyMapDropTransitions(Handle<Map> map);
+ Handle<Map> CopyMapDropTransitions(Handle<Map> map);
- static Handle<Map> GetFastElementsMap(Handle<Map> map);
+ Handle<Map> GetFastElementsMap(Handle<Map> map);
- static Handle<Map> GetSlowElementsMap(Handle<Map> map);
+ Handle<Map> GetSlowElementsMap(Handle<Map> map);
- static Handle<Map> NewExternalArrayElementsMap(Handle<Map> map);
+ Handle<Map> NewExternalArrayElementsMap(Handle<Map> map);
- static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+ Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// Numbers (e.g. literals) are pretenured by the parser.
- static Handle<Object> NewNumber(double value,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumber(double value,
+ PretenureFlag pretenure = NOT_TENURED);
- static Handle<Object> NewNumberFromInt(int value);
- static Handle<Object> NewNumberFromUint(uint32_t value);
+ Handle<Object> NewNumberFromInt(int value);
+ Handle<Object> NewNumberFromUint(uint32_t value);
// These objects are used by the api to create env-independent data
// structures in the heap.
- static Handle<JSObject> NewNeanderObject();
+ Handle<JSObject> NewNeanderObject();
- static Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
+ Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
+ PretenureFlag pretenure = NOT_TENURED);
// Global objects are pretenured.
- static Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
+ Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- static Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
+ Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
// JS arrays are pretenured when allocated by the parser.
- static Handle<JSArray> NewJSArray(int capacity,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(int capacity,
+ PretenureFlag pretenure = NOT_TENURED);
- static Handle<JSArray> NewJSArrayWithElements(
+ Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArray> elements,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<JSFunction> NewFunction(Handle<String> name,
- Handle<Object> prototype);
+ Handle<JSFunction> NewFunction(Handle<String> name,
+ Handle<Object> prototype);
- static Handle<JSFunction> NewFunctionWithoutPrototype(
+ Handle<JSFunction> NewFunctionWithoutPrototype(
Handle<String> name,
StrictModeFlag strict_mode);
- static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
+ Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
- static Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
+ Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
PretenureFlag pretenure);
- static Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Context> context,
PretenureFlag pretenure = TENURED);
- static Handle<Code> NewCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false);
+ Handle<Code> NewCode(const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference,
+ bool immovable = false);
- static Handle<Code> CopyCode(Handle<Code> code);
+ Handle<Code> CopyCode(Handle<Code> code);
- static Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
+ Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
- static Handle<Object> ToObject(Handle<Object> object);
- static Handle<Object> ToObject(Handle<Object> object,
- Handle<Context> global_context);
+ Handle<Object> ToObject(Handle<Object> object);
+ Handle<Object> ToObject(Handle<Object> object,
+ Handle<Context> global_context);
// Interface for creating error objects.
- static Handle<Object> NewError(const char* maker, const char* type,
- Handle<JSArray> args);
- static Handle<Object> NewError(const char* maker, const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewError(Handle<String> message);
- static Handle<Object> NewError(const char* constructor,
- Handle<String> message);
+ Handle<Object> NewError(const char* maker, const char* type,
+ Handle<JSArray> args);
+ Handle<Object> NewError(const char* maker, const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewError(Handle<String> message);
+ Handle<Object> NewError(const char* constructor,
+ Handle<String> message);
- static Handle<Object> NewTypeError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewTypeError(Handle<String> message);
+ Handle<Object> NewTypeError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewTypeError(Handle<String> message);
- static Handle<Object> NewRangeError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewRangeError(Handle<String> message);
+ Handle<Object> NewRangeError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewRangeError(Handle<String> message);
- static Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
- static Handle<Object> NewSyntaxError(Handle<String> message);
+ Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
+ Handle<Object> NewSyntaxError(Handle<String> message);
- static Handle<Object> NewReferenceError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewReferenceError(Handle<String> message);
+ Handle<Object> NewReferenceError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewReferenceError(Handle<String> message);
- static Handle<Object> NewEvalError(const char* type,
- Vector< Handle<Object> > args);
+ Handle<Object> NewEvalError(const char* type,
+ Vector< Handle<Object> > args);
- static Handle<JSFunction> NewFunction(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<Code> code,
- bool force_initial_map);
+ Handle<JSFunction> NewFunction(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<Code> code,
+ bool force_initial_map);
- static Handle<JSFunction> NewFunction(Handle<Map> function_map,
+ Handle<JSFunction> NewFunction(Handle<Map> function_map,
Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
- static Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map);
+ Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<JSObject> prototype,
+ Handle<Code> code,
+ bool force_initial_map);
- static Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code);
+ Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
+ Handle<Code> code);
- static Handle<DescriptorArray> CopyAppendProxyDescriptor(
+ Handle<DescriptorArray> CopyAppendProxyDescriptor(
Handle<DescriptorArray> array,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes);
- static Handle<String> NumberToString(Handle<Object> number);
+ Handle<String> NumberToString(Handle<Object> number);
enum ApiInstanceType {
JavaScriptObject,
OuterGlobalObject
};
- static Handle<JSFunction> CreateApiFunction(
+ Handle<JSFunction> CreateApiFunction(
Handle<FunctionTemplateInfo> data,
ApiInstanceType type = JavaScriptObject);
- static Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
+ Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
// Installs interceptors on the instance. 'desc' is a function template,
// and instance is an object instance created by the function of this
// function template.
- static void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception);
+ void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
+ Handle<JSObject> instance,
+ bool* pending_exception);
#define ROOT_ACCESSOR(type, name, camel_name) \
- static inline Handle<type> name() { \
+ inline Handle<type> name() { \
return Handle<type>(BitCast<type**>( \
- &Heap::roots_[Heap::k##camel_name##RootIndex])); \
+ &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) \
- static inline Handle<String> name() { \
+#define SYMBOL_ACCESSOR(name, str) \
+ inline Handle<String> name() { \
return Handle<String>(BitCast<String**>( \
- &Heap::roots_[Heap::k##name##RootIndex])); \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
- static Handle<String> hidden_symbol() {
- return Handle<String>(&Heap::hidden_symbol_);
+ Handle<String> hidden_symbol() {
+ return Handle<String>(&isolate()->heap()->hidden_symbol_);
}
- static Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
Handle<Code> code,
Handle<SerializedScopeInfo> scope_info);
- static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
- static Handle<JSMessageObject> NewJSMessageObject(
+ Handle<JSMessageObject> NewJSMessageObject(
Handle<String> type,
Handle<JSArray> arguments,
int start_position,
Handle<Object> stack_trace,
Handle<Object> stack_frames);
- static Handle<NumberDictionary> DictionaryAtNumberPut(
+ Handle<NumberDictionary> DictionaryAtNumberPut(
Handle<NumberDictionary>,
uint32_t key,
Handle<Object> value);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
+ Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
#endif
// Return a map using the map cache in the global context.
 // The key is an ordered set of property names.
- static Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys);
+ Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
+ Handle<FixedArray> keys);
// Creates a new FixedArray that holds the data associated with the
// atom regexp and stores it in the regexp.
- static void SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<Object> match_pattern);
+ void SetRegExpAtomData(Handle<JSRegExp> regexp,
+ JSRegExp::Type type,
+ Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<Object> match_pattern);
// Creates a new FixedArray that holds the data associated with the
// irregexp regexp and stores it in the regexp.
- static void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- int capture_count);
+ void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
+ JSRegExp::Type type,
+ Handle<String> source,
+ JSRegExp::Flags flags,
+ int capture_count);
private:
- static Handle<JSFunction> NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype);
+ Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
- static Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
+ Handle<JSFunction> NewFunctionHelper(Handle<String> name,
+ Handle<Object> prototype);
+
+ Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
Handle<String> name,
StrictModeFlag strict_mode);
- static Handle<DescriptorArray> CopyAppendCallbackDescriptors(
+ Handle<DescriptorArray> CopyAppendCallbackDescriptors(
Handle<DescriptorArray> array,
Handle<Object> descriptors);
// Create a new map cache.
- static Handle<MapCache> NewMapCache(int at_least_space_for);
+ Handle<MapCache> NewMapCache(int at_least_space_for);
// Update the map cache in the global context with (keys, map)
- static Handle<MapCache> AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map);
+ Handle<MapCache> AddToMapCache(Handle<Context> context,
+ Handle<FixedArray> keys,
+ Handle<Map> map);
};
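
The change above is the patch's central mechanical rule: a formerly all-static class (here Factory) becomes a per-Isolate component, and Factory::isolate() recovers its owner by casting its own address, which only works if the Factory sits at a known offset inside the Isolate. A minimal sketch of that layout trick, with hypothetical names and first-member embedding assumed:

#include <cassert>

struct Isolate;

// Stand-in for v8::internal::Factory: it recovers the owning Isolate
// from its own address (this sketch assumes it is embedded at offset zero).
struct Factory {
  Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
};

struct Isolate {
  Factory factory_;  // must stay the first member for the cast to hold
  Factory* factory() { return &factory_; }
};

int main() {
  Isolate isolate;
  assert(isolate.factory()->isolate() == &isolate);
  return 0;
}

The cast is well-defined here because a standard-layout object and its first member share an address; any other layout would need explicit offset arithmetic.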
"report heap spill statistics along with heap_stats "
"(requires heap_stats)")
+DEFINE_bool(trace_isolates, false, "trace isolate state changes")
+
// VM state
DEFINE_bool(log_state_changes, false, "Log state changes.")
namespace v8 {
namespace internal {
-FrameElement::ZoneObjectList* FrameElement::ConstantList() {
- static ZoneObjectList list(10);
- return &list;
-}
-
} } // namespace v8::internal
return result;
}
- // Static indirection table for handles to constants. If a frame
- // element represents a constant, the data contains an index into
- // this table of handles to the actual constants.
- typedef ZoneList<Handle<Object> > ZoneObjectList;
-
- static ZoneObjectList* ConstantList();
-
static bool ConstantPoolOverflowed() {
- return !DataField::is_valid(ConstantList()->length());
- }
-
- // Clear the constants indirection table.
- static void ClearConstantList() {
- ConstantList()->Clear();
+ return !DataField::is_valid(
+ Isolate::Current()->frame_element_constant_list()->length());
}
bool is_synced() const { return SyncedField::decode(value_); }
Handle<Object> handle() const {
ASSERT(is_constant());
- return ConstantList()->at(DataField::decode(value_));
+ return Isolate::Current()->frame_element_constant_list()->
+ at(DataField::decode(value_));
}
int index() const {
// Used to construct constant elements.
FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
+ ZoneObjectList* constant_list =
+ Isolate::Current()->frame_element_constant_list();
value_ = TypeField::encode(CONSTANT)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| TypeInfoField::encode(info.ToInt())
- | DataField::encode(ConstantList()->length());
- ConstantList()->Add(value);
+ | DataField::encode(constant_list->length());
+ constant_list->Add(value);
}
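
The constant FrameElement above no longer touches a function-local static: it encodes only a small index via DataField, while the value itself lives in the isolate's frame_element_constant_list(), and handle() decodes the index back. A toy sketch of that indirection, with std::string standing in for Handle<Object> and every name invented:

#include <cassert>
#include <string>
#include <vector>

// Per-isolate side table; elements store only an index into it.
struct ConstantList {
  std::vector<std::string> entries;
  int Add(const std::string& value) {
    entries.push_back(value);
    return static_cast<int>(entries.size()) - 1;
  }
  const std::string& at(int index) const { return entries[index]; }
};

struct Element {
  int data;  // stands in for DataField::encode(...)
  Element(ConstantList* list, const std::string& value)
      : data(list->Add(value)) {}
  const std::string& handle(const ConstantList& list) const {
    return list.at(data);
  }
};

int main() {
  ConstantList list;  // stands in for frame_element_constant_list()
  Element element(&list, "the-constant");
  assert(element.handle(list) == "the-constant");
  return 0;
}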
Type type() const { return TypeField::decode(value_); }
#include "frames.h"
+#include "isolate.h"
+
#if V8_TARGET_ARCH_IA32
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
}
+inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
+ return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
+}
+
+
inline Object* StandardFrame::GetExpression(int index) const {
return Memory::Object_at(GetExpressionAddress(index));
}
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "string-stream.h"
-#include "top.h"
namespace v8 {
namespace internal {
-PcToCodeCache::PcToCodeCacheEntry
- PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize];
int SafeStackFrameIterator::active_count_ = 0;
#define INITIALIZE_SINGLETON(type, field) field##_(this),
StackFrameIterator::StackFrameIterator()
: STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread_(Top::GetCurrentThread()),
+ frame_(NULL), handler_(NULL),
+ thread_(Isolate::Current()->thread_local_top()),
fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
Reset();
}
fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
Reset();
}
-StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp)
+StackFrameIterator::StackFrameIterator(Isolate* isolate,
+ bool use_top, Address fp, Address sp)
: STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
frame_(NULL), handler_(NULL),
- thread_(use_top ? Top::GetCurrentThread() : NULL),
+ thread_(use_top ? isolate->thread_local_top() : NULL),
fp_(use_top ? NULL : fp), sp_(sp),
advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
&StackFrameIterator::AdvanceWithoutHandler) {
StackFrame::State state;
StackFrame::Type type;
if (thread_ != NULL) {
- type = ExitFrame::GetStateForFramePointer(Top::c_entry_fp(thread_), &state);
- handler_ = StackHandler::FromAddress(Top::handler(thread_));
+ type = ExitFrame::GetStateForFramePointer(
+ Isolate::c_entry_fp(thread_), &state);
+ handler_ = StackHandler::FromAddress(
+ Isolate::handler(thread_));
} else {
ASSERT(fp_ != NULL);
state.fp = fp_;
SafeStackFrameIterator::SafeStackFrameIterator(
+ Isolate* isolate,
Address fp, Address sp, Address low_bound, Address high_bound) :
maintainer_(),
stack_validator_(low_bound, high_bound),
- is_valid_top_(IsValidTop(low_bound, high_bound)),
+ is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
is_working_iterator_(is_valid_top_ || is_valid_fp_),
iteration_done_(!is_working_iterator_),
- iterator_(is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
+ iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
}
-bool SafeStackFrameIterator::IsValidTop(Address low_bound, Address high_bound) {
- Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
+ Address low_bound, Address high_bound) {
+ ThreadLocalTop* top = isolate->thread_local_top();
+ Address fp = Isolate::c_entry_fp(top);
ExitFrameValidator validator(low_bound, high_bound);
if (!validator.IsValidFP(fp)) return false;
- return Top::handler(Top::GetCurrentThread()) != NULL;
+ return Isolate::handler(top) != NULL;
}
#ifdef ENABLE_LOGGING_AND_PROFILING
SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
+ Isolate* isolate,
Address fp, Address sp, Address low_bound, Address high_bound) :
- SafeJavaScriptFrameIterator(fp, sp, low_bound, high_bound) {
+ SafeJavaScriptFrameIterator(isolate, fp, sp, low_bound, high_bound) {
if (!done() && !frame()->is_java_script()) Advance();
}
Code* StackFrame::GetSafepointData(Address pc,
SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
- PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
+ Isolate* isolate = Isolate::Current();
+ PcToCodeCache::PcToCodeCacheEntry* entry =
+ isolate->pc_to_code_cache()->GetCacheEntry(pc);
SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
if (!entry->safepoint_entry.is_valid()) {
entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
// into the heap to determine the state. This is safe as long
// as nobody tries to GC...
if (SafeStackFrameIterator::is_active()) return JAVA_SCRIPT;
- Code::Kind kind = GetContainingCode(*(state->pc_address))->kind();
+ Code::Kind kind = GetContainingCode(Isolate::Current(),
+ *(state->pc_address))->kind();
ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
}
Code* EntryFrame::unchecked_code() const {
- return Heap::raw_unchecked_js_entry_code();
+ return HEAP->raw_unchecked_js_entry_code();
}
Code* EntryConstructFrame::unchecked_code() const {
- return Heap::raw_unchecked_js_construct_entry_code();
+ return HEAP->raw_unchecked_js_construct_entry_code();
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
v->VisitPointer(&code_slot());
}
Address JavaScriptFrame::GetCallerStackPointer() const {
int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC ||
- SafeStackFrameIterator::is_active()) {
+ if (SafeStackFrameIterator::is_active() ||
+ HEAP->gc_state() != Heap::NOT_IN_GC) {
 // If we are currently iterating the safe stack, the
// arguments for frames are traversed as if they were
// expression stack elements of the calling frame. The reason for
void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
ASSERT(functions->length() == 0);
- Code* code_pointer = code();
+ Code* code_pointer = LookupCode(Isolate::Current());
int offset = static_cast<int>(pc() - code_pointer->address());
FrameSummary summary(receiver(),
JSFunction::cast(function()),
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
- code = PcToCodeCache::GcSafeFindCodeForPc(pc());
+ code = Isolate::Current()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
}
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
Code* ArgumentsAdaptorFrame::unchecked_code() const {
- return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::ArgumentsAdaptorTrampoline);
}
ASSERT(!it.done());
StackHandler* handler = it.handler();
ASSERT(handler->is_entry());
- handler->Iterate(v, code());
+ handler->Iterate(v, LookupCode(Isolate::Current()));
#ifdef DEBUG
// Make sure that the entry frame does not contain more than one
// stack handler.
it.Advance();
ASSERT(it.done());
#endif
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
}
v->VisitPointers(base, reinterpret_cast<Object**>(address));
base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
// Traverse the pointers in the handler itself.
- handler->Iterate(v, code());
+ handler->Iterate(v, LookupCode(Isolate::Current()));
}
v->VisitPointers(base, limit);
}
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
IterateArguments(v);
}
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
IterateExpressions(v);
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
}
Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+ Heap* heap = isolate_->heap();
// Check if the pc points into a large object chunk.
- LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc);
+ LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
// Iterate through the 8K page until we reach the end or find an
// object starting after the pc.
Page* page = Page::FromAddress(pc);
- HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction());
+ HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
HeapObject* previous = NULL;
while (true) {
HeapObject* next = iterator.next();
PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
- Counters::pc_to_code.Increment();
+ COUNTERS->pc_to_code()->Increment();
ASSERT(IsPowerOf2(kPcToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
uint32_t index = hash & (kPcToCodeCacheSize - 1);
PcToCodeCacheEntry* entry = cache(index);
if (entry->pc == pc) {
- Counters::pc_to_code_cached.Increment();
+ COUNTERS->pc_to_code_cached()->Increment();
ASSERT(entry->code == GcSafeFindCodeForPc(pc));
} else {
// Because this code may be interrupted by a profiling signal that
}
-int JSCallerSavedCode(int n) {
- static int reg_code[kNumJSCallerSaved];
- static bool initialized = false;
- if (!initialized) {
- initialized = true;
+struct JSCallerSavedCodeData {
+ JSCallerSavedCodeData() {
int i = 0;
for (int r = 0; r < kNumRegs; r++)
 if ((kJSCallerSaved & (1 << r)) != 0)
 reg_code[i++] = r;
ASSERT(i == kNumJSCallerSaved);
}
+ int reg_code[kNumJSCallerSaved];
+};
+
+
+static const JSCallerSavedCodeData kCallerSavedCodeData;
+
+
+int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
- return reg_code[n];
+ return kCallerSavedCodeData.reg_code[n];
}
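
The JSCallerSavedCode rewrite drops a lazily initialized function-local static (whose initialized flag is a race once multiple threads run V8) in favor of a const file-scope object filled in during static initialization, so lookups become plain reads. A self-contained sketch of the pattern, with an invented register mask:

#include <cassert>

static const int kNumRegs = 8;
static const int kCallerSavedMask = 0x0B;  // toy mask: registers 0, 1 and 3

struct CallerSavedCodeData {
  CallerSavedCodeData() : count(0) {
    for (int r = 0; r < kNumRegs; r++)
      if ((kCallerSavedMask & (1 << r)) != 0) reg_code[count++] = r;
  }
  int reg_code[kNumRegs];
  int count;
};

// Constructed during static initialization; immutable afterwards.
static const CallerSavedCodeData kData;

int main() {
  assert(kData.count == 3);
  assert(kData.reg_code[0] == 0 && kData.reg_code[1] == 1 &&
         kData.reg_code[2] == 3);
  return 0;
}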
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
+#include "handles.h"
#include "safepoint-table.h"
namespace v8 {
// Forward declarations.
class StackFrameIterator;
-class Top;
class ThreadLocalTop;
+class Isolate;
-
-class PcToCodeCache : AllStatic {
+class PcToCodeCache {
public:
struct PcToCodeCacheEntry {
 Address pc;
 Code* code;
SafepointEntry safepoint_entry;
};
- static PcToCodeCacheEntry* cache(int index) {
- return &cache_[index];
+ explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
+ Flush();
}
- static Code* GcSafeFindCodeForPc(Address pc);
- static Code* GcSafeCastToCode(HeapObject* object, Address pc);
+ Code* GcSafeFindCodeForPc(Address pc);
+ Code* GcSafeCastToCode(HeapObject* object, Address pc);
- static void FlushPcToCodeCache() {
+ void Flush() {
memset(&cache_[0], 0, sizeof(cache_));
}
- static PcToCodeCacheEntry* GetCacheEntry(Address pc);
+ PcToCodeCacheEntry* GetCacheEntry(Address pc);
private:
+ PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+
+ Isolate* isolate_;
+
static const int kPcToCodeCacheSize = 1024;
- static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+ PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+
+ DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
};
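
PcToCodeCache is a direct-mapped cache: GcSafeFindCodeForPc is the slow path, and GetCacheEntry picks a slot with hash & (kPcToCodeCacheSize - 1), which is why the lookup code asserts the size is a power of two. A minimal sketch of that indexing, with an illustrative mixing function in place of ComputeIntegerHash:

#include <cassert>
#include <cstdint>
#include <cstring>

static const int kCacheSize = 1024;  // must be a power of two

struct Entry { uintptr_t pc; const void* code; };
static Entry cache[kCacheSize];

static uint32_t Hash(uintptr_t pc) {
  pc ^= pc >> 16;  // any mixing function will do for this sketch
  return static_cast<uint32_t>(pc * 0x9E3779B1u);
}

static Entry* GetCacheEntry(uintptr_t pc) {
  uint32_t index = Hash(pc) & (kCacheSize - 1);  // mask instead of modulo
  return &cache[index];
}

int main() {
  std::memset(cache, 0, sizeof(cache));  // mirrors Flush()
  Entry* entry = GetCacheEntry(0x400123);
  entry->pc = 0x400123;
  assert(GetCacheEntry(0x400123) == entry);  // same pc hashes to same slot
  return 0;
}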
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
- Code* code() const { return GetContainingCode(pc()); }
+ Code* LookupCode(Isolate* isolate) const {
+ return GetContainingCode(isolate, pc());
+ }
// Get the code object that contains the given pc.
- static Code* GetContainingCode(Address pc) {
- return PcToCodeCache::GetCacheEntry(pc)->code;
- }
+ static inline Code* GetContainingCode(Isolate* isolate, Address pc);
// Get the code object containing the given pc and fill in the
// safepoint entry and the number of stack slots. The pc must be at
// An iterator that can start from a given FP address.
 // If use_top, then work as usual; if fp isn't NULL, use it;
 // otherwise, do nothing.
- StackFrameIterator(bool use_top, Address fp, Address sp);
+ StackFrameIterator(Isolate* isolate, bool use_top, Address fp, Address sp);
StackFrame* frame() const {
ASSERT(!done());
if (!done()) Advance();
}
+ JavaScriptFrameIteratorTemp(Isolate* isolate,
+ Address fp, Address sp,
+ Address low_bound, Address high_bound) :
+ iterator_(isolate, fp, sp, low_bound, high_bound) {
+ if (!done()) Advance();
+ }
+
inline JavaScriptFrame* frame() const;
bool done() const { return iterator_.done(); }
class SafeStackFrameIterator BASE_EMBEDDED {
public:
- SafeStackFrameIterator(Address fp, Address sp,
+ SafeStackFrameIterator(Isolate* isolate,
+ Address fp, Address sp,
Address low_bound, Address high_bound);
StackFrame* frame() const {
bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
- static bool IsValidTop(Address low_bound, Address high_bound);
+ static bool IsValidTop(Isolate* isolate,
+ Address low_bound, Address high_bound);
// This is a nasty hack to make sure the active count is incremented
// before the constructor for the embedded iterator is invoked. This
};
ActiveCountMaintainer maintainer_;
+ // TODO(isolates): this is dangerous.
static int active_count_;
StackAddressValidator stack_validator_;
const bool is_valid_top_;
class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
public:
- explicit SafeStackTraceFrameIterator(Address fp, Address sp,
+ explicit SafeStackTraceFrameIterator(Isolate* isolate,
+ Address fp, Address sp,
Address low_bound, Address high_bound);
void Advance();
};
#define __ ACCESS_MASM(masm())
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
+ Isolate* isolate = Isolate::Current();
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
- Counters::total_full_codegen_source_size.Increment(len);
+ isolate->counters()->total_full_codegen_source_size()->Increment(len);
}
if (FLAG_trace_codegen) {
PrintF("Full Compiler - ");
FullCodeGenerator cgen(&masm);
cgen.Generate(info);
if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
+ ASSERT(!isolate->has_pending_exception());
return false;
}
unsigned table_offset = cgen.EmitStackCheckTable();
if (!info_->HasDeoptimizationSupport()) return;
int length = bailout_entries_.length();
Handle<DeoptimizationOutputData> data =
- Factory::NewDeoptimizationOutputData(length, TENURED);
+ isolate()->factory()->
+ NewDeoptimizationOutputData(length, TENURED);
for (int i = 0; i < length; i++) {
data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
// Compute array of global variable and function declarations.
// Do nothing in case of no declared global functions or variables.
if (globals > 0) {
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+ Handle<FixedArray> array =
+ isolate()->factory()->NewFixedArray(2 * globals, TENURED);
for (int j = 0, i = 0; i < length; i++) {
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
if (FLAG_debug_info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!Debugger::IsDebuggerActive()) {
+ if (!isolate()->debugger()->IsDebuggerActive()) {
CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
} else {
// Check if the statement will be breakable without adding a debug break
void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
if (FLAG_debug_info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!Debugger::IsDebuggerActive()) {
+ if (!isolate()->debugger()->IsDebuggerActive()) {
CodeGenerator::RecordPositions(masm_, pos);
} else {
// Check if the expression will be breakable without adding a debug break
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments();
Handle<String> name = node->name();
- Runtime::Function* function = node->function();
+ const Runtime::Function* function = node->function();
ASSERT(function != NULL);
ASSERT(function->intrinsic_type == Runtime::INLINE);
InlineFunctionGenerator generator =
};
explicit FullCodeGenerator(MacroAssembler* masm)
- : masm_(masm),
+ : isolate_(Isolate::Current()),
+ masm_(masm),
info_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
loop_depth_--;
}
+ Isolate* isolate() { return isolate_; }
MacroAssembler* masm() { return masm_; }
class ExpressionContext;
codegen_->set_new_context(old_);
}
+ Isolate* isolate() const { return codegen_->isolate(); }
+
// Convert constant control flow (true or false) to the result expected for
// this expression context.
virtual void Plug(bool flag) const = 0;
virtual bool IsEffect() const { return true; }
};
+ Isolate* isolate_;
MacroAssembler* masm_;
CompilationInfo* info_;
Label return_label_;
// Enclosing name is a name of a constructor function. To check
// that it is really a constructor, we check that it is not empty
// and starts with a capital letter.
- if (name->length() > 0 && Runtime::IsUpperCaseChar(name->Get(0))) {
+ if (name->length() > 0 && Runtime::IsUpperCaseChar(
+ Isolate::Current()->runtime_state(), name->Get(0))) {
names_stack_.Add(name);
}
}
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !Heap::prototype_symbol()->Equals(*name)) {
+ if (IsOpen() && !HEAP->prototype_symbol()->Equals(*name)) {
names_stack_.Add(name);
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !Heap::result_symbol()->Equals(*name)) {
+ if (IsOpen() && !HEAP->result_symbol()->Equals(*name)) {
names_stack_.Add(name);
}
}
Handle<String> FuncNameInferrer::MakeNameFromStack() {
if (names_stack_.is_empty()) {
- return Factory::empty_string();
+ return FACTORY->empty_string();
} else {
return MakeNameFromStackHelper(1, names_stack_.at(0));
}
if (pos >= names_stack_.length()) {
return prev;
} else {
- Handle<String> curr = Factory::NewConsString(dot_, names_stack_.at(pos));
- return MakeNameFromStackHelper(pos + 1, Factory::NewConsString(prev, curr));
+ Handle<String> curr = FACTORY->NewConsString(dot_, names_stack_.at(pos));
+ return MakeNameFromStackHelper(pos + 1, FACTORY->NewConsString(prev, curr));
}
}
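
MakeNameFromStackHelper folds the collected names left to right, joining them with the cached dot_ string, so a stack of "window", "app", "handler" infers "window.app.handler". The same recursion sketched with std::string in place of ConsString handles:

#include <cassert>
#include <string>
#include <vector>

static std::string MakeNameFromStackHelper(
    const std::vector<std::string>& names, size_t pos, std::string prev) {
  if (pos >= names.size()) return prev;
  return MakeNameFromStackHelper(names, pos + 1, prev + "." + names[pos]);
}

static std::string MakeNameFromStack(const std::vector<std::string>& names) {
  if (names.empty()) return std::string();
  return MakeNameFromStackHelper(names, 1, names[0]);
}

int main() {
  std::vector<std::string> names;
  names.push_back("window");
  names.push_back("app");
  names.push_back("handler");
  assert(MakeNameFromStack(names) == "window.app.handler");
  return 0;
}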
: entries_stack_(10),
names_stack_(5),
funcs_to_infer_(4),
- dot_(Factory::NewStringFromAscii(CStrVector("."))) {
+ dot_(FACTORY->NewStringFromAscii(CStrVector("."))) {
}
// Returns whether we have entered name collection state.
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_GDB_JIT_INTERFACE
+#include "v8.h"
#include "gdb-jit.h"
#include "bootstrapper.h"
namespace v8 {
namespace internal {
+
+ObjectGroup::~ObjectGroup() {
+ if (info_ != NULL) info_->Dispose();
+}
+
+
class GlobalHandles::Node : public Malloced {
public:
}
~Node() {
- if (state_ != DESTROYED) Destroy();
+ if (state_ != DESTROYED) Destroy(Isolate::Current()->global_handles());
#ifdef DEBUG
// Zap the values for eager trapping.
object_ = NULL;
#endif
}
- void Destroy() {
+ void Destroy(GlobalHandles* global_handles) {
if (state_ == WEAK || IsNearDeath()) {
- GlobalHandles::number_of_weak_handles_--;
+ global_handles->number_of_weak_handles_--;
if (object_->IsJSGlobalObject()) {
- GlobalHandles::number_of_global_object_weak_handles_--;
+ global_handles->number_of_global_object_weak_handles_--;
}
}
state_ = DESTROYED;
Handle<Object> handle() { return Handle<Object>(&object_); }
// Make this handle weak.
- void MakeWeak(void* parameter, WeakReferenceCallback callback) {
- LOG(HandleEvent("GlobalHandle::MakeWeak", handle().location()));
+ void MakeWeak(GlobalHandles* global_handles, void* parameter,
+ WeakReferenceCallback callback) {
+ LOG(global_handles->isolate(),
+ HandleEvent("GlobalHandle::MakeWeak", handle().location()));
ASSERT(state_ != DESTROYED);
if (state_ != WEAK && !IsNearDeath()) {
- GlobalHandles::number_of_weak_handles_++;
+ global_handles->number_of_weak_handles_++;
if (object_->IsJSGlobalObject()) {
- GlobalHandles::number_of_global_object_weak_handles_++;
+ global_handles->number_of_global_object_weak_handles_++;
}
}
state_ = WEAK;
callback_ = callback;
}
- void ClearWeakness() {
- LOG(HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
+ void ClearWeakness(GlobalHandles* global_handles) {
+ LOG(global_handles->isolate(),
+ HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
ASSERT(state_ != DESTROYED);
if (state_ == WEAK || IsNearDeath()) {
- GlobalHandles::number_of_weak_handles_--;
+ global_handles->number_of_weak_handles_--;
if (object_->IsJSGlobalObject()) {
- GlobalHandles::number_of_global_object_weak_handles_--;
+ global_handles->number_of_global_object_weak_handles_--;
}
}
state_ = NORMAL;
// Returns the callback for this weak handle.
WeakReferenceCallback callback() { return callback_; }
- bool PostGarbageCollectionProcessing() {
+ bool PostGarbageCollectionProcessing(Isolate* isolate,
+ GlobalHandles* global_handles) {
if (state_ != Node::PENDING) return false;
- LOG(HandleEvent("GlobalHandle::Processing", handle().location()));
+ LOG(isolate, HandleEvent("GlobalHandle::Processing", handle().location()));
WeakReferenceCallback func = callback();
if (func == NULL) {
- Destroy();
+ Destroy(global_handles);
return false;
}
void* par = parameter();
// Forbid reuse of destroyed nodes as they might be already deallocated.
// It's fine though to reuse nodes that were destroyed in weak callback
// as those cannot be deallocated until we are back from the callback.
- set_first_free(NULL);
- if (first_deallocated()) {
- first_deallocated()->set_next(head());
+ global_handles->set_first_free(NULL);
+ if (global_handles->first_deallocated()) {
+ global_handles->first_deallocated()->set_next(global_handles->head());
}
// Check that we are not passing a finalized external string to
// the callback.
ASSERT(!object_->IsExternalTwoByteString() ||
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
func(object, par);
}
 // Absence of explicit cleanup or revival of weak handle
};
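
Node's weak-handle bookkeeping now updates counters on the owning GlobalHandles instance rather than on static fields, and every MakeWeak/ClearWeakness pair must leave those counters balanced. A reduced model of that state machine, keeping only the counting logic (state names mirror the diff, everything else is invented):

#include <cassert>

enum State { NORMAL, WEAK, PENDING, NEAR_DEATH, DESTROYED };

struct GlobalHandles {
  int number_of_weak_handles;
  GlobalHandles() : number_of_weak_handles(0) {}
};

struct Node {
  State state;
  Node() : state(NORMAL) {}
  bool IsNearDeath() const { return state == PENDING || state == NEAR_DEATH; }
  void MakeWeak(GlobalHandles* gh) {
    if (state != WEAK && !IsNearDeath()) gh->number_of_weak_handles++;
    state = WEAK;
  }
  void ClearWeakness(GlobalHandles* gh) {
    if (state == WEAK || IsNearDeath()) gh->number_of_weak_handles--;
    state = NORMAL;
  }
};

int main() {
  GlobalHandles gh;
  Node node;
  node.MakeWeak(&gh);
  assert(gh.number_of_weak_handles == 1);
  node.MakeWeak(&gh);  // already weak: must not count twice
  assert(gh.number_of_weak_handles == 1);
  node.ClearWeakness(&gh);
  assert(gh.number_of_weak_handles == 0);
  return 0;
}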
-class GlobalHandles::Pool BASE_EMBEDDED {
+class GlobalHandles::Pool {
public:
Pool() {
current_ = new Chunk();
};
-static GlobalHandles::Pool pool_;
+GlobalHandles::GlobalHandles(Isolate* isolate)
+ : isolate_(isolate),
+ number_of_weak_handles_(0),
+ number_of_global_object_weak_handles_(0),
+ head_(NULL),
+ first_free_(NULL),
+ first_deallocated_(NULL),
+ pool_(new Pool()),
+ post_gc_processing_count_(0),
+ object_groups_(4) {
+}
+
+
+GlobalHandles::~GlobalHandles() {
+ delete pool_;
+ pool_ = 0;
+}
Handle<Object> GlobalHandles::Create(Object* value) {
- Counters::global_handles.Increment();
+ isolate_->counters()->global_handles()->Increment();
Node* result;
if (first_free()) {
// Take the first node in the free list.
set_head(result);
} else {
// Allocate a new node.
- result = pool_.Allocate();
+ result = pool_->Allocate();
result->set_next(head());
set_head(result);
}
void GlobalHandles::Destroy(Object** location) {
- Counters::global_handles.Decrement();
+ isolate_->counters()->global_handles()->Decrement();
if (location == NULL) return;
Node* node = Node::FromLocation(location);
- node->Destroy();
+ node->Destroy(this);
// Link the destroyed.
node->set_next_free(first_free());
set_first_free(node);
void GlobalHandles::MakeWeak(Object** location, void* parameter,
WeakReferenceCallback callback) {
ASSERT(callback != NULL);
- Node::FromLocation(location)->MakeWeak(parameter, callback);
+ Node::FromLocation(location)->MakeWeak(this, parameter, callback);
}
void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness();
+ Node::FromLocation(location)->ClearWeakness(this);
}
if (current->state_ == Node::WEAK) {
 if (f(&current->object_)) {
current->state_ = Node::PENDING;
- LOG(HandleEvent("GlobalHandle::Pending", current->handle().location()));
+ LOG(isolate_,
+ HandleEvent("GlobalHandle::Pending", current->handle().location()));
}
}
}
}
-int post_gc_processing_count = 0;
-
bool GlobalHandles::PostGarbageCollectionProcessing() {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
// At the same time deallocate all DESTROYED nodes.
- ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
- const int initial_post_gc_processing_count = ++post_gc_processing_count;
+ ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
+ const int initial_post_gc_processing_count = ++post_gc_processing_count_;
bool next_gc_likely_to_collect_more = false;
Node** p = &head_;
while (*p != NULL) {
- if ((*p)->PostGarbageCollectionProcessing()) {
- if (initial_post_gc_processing_count != post_gc_processing_count) {
+ if ((*p)->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
// PostGarbageCollection processing. The current node might
// have been deleted in that round, so we need to bail out (or
set_head(NULL);
set_first_free(NULL);
set_first_deallocated(NULL);
- pool_.Release();
+ pool_->Release();
}
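
PostGarbageCollectionProcessing guards itself with post_gc_processing_count_: a weak callback may trigger another GC and a nested processing round, and if the counter has moved, the node list being walked is stale. The generation-counter check sketched in isolation, with invented names:

#include <cassert>

struct Processor {
  int post_gc_processing_count;
  Processor() : post_gc_processing_count(0) {}
  // Returns true when a nested round ran and iteration must bail out.
  bool Process(bool callback_triggers_gc) {
    const int initial = ++post_gc_processing_count;
    if (callback_triggers_gc) Process(false);  // nested round bumps counter
    return initial != post_gc_processing_count;
  }
};

int main() {
  Processor p;
  assert(p.Process(true) == true);    // nested processing was detected
  assert(p.Process(false) == false);  // no nesting: safe to continue
  return 0;
}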
-int GlobalHandles::number_of_weak_handles_ = 0;
-int GlobalHandles::number_of_global_object_weak_handles_ = 0;
-
-GlobalHandles::Node* GlobalHandles::head_ = NULL;
-GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
-GlobalHandles::Node* GlobalHandles::first_deallocated_ = NULL;
-
void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->global_handle_count = 0;
*stats->weak_global_handle_count = 0;
#endif
-List<ObjectGroup*>* GlobalHandles::ObjectGroups() {
- // Lazily initialize the list to avoid startup time static constructors.
- static List<ObjectGroup*> groups(4);
- return &groups;
-}
void GlobalHandles::AddObjectGroup(Object*** handles,
for (size_t i = 0; i < length; ++i) {
new_entry->objects_.Add(handles[i]);
}
- ObjectGroups()->Add(new_entry);
-}
-
-
-List<ImplicitRefGroup*>* GlobalHandles::ImplicitRefGroups() {
- // Lazily initialize the list to avoid startup time static constructors.
- static List<ImplicitRefGroup*> groups(4);
- return &groups;
+ object_groups_.Add(new_entry);
}
for (size_t i = 0; i < length; ++i) {
new_entry->children_.Add(children[i]);
}
- ImplicitRefGroups()->Add(new_entry);
+ implicit_ref_groups_.Add(new_entry);
}
void GlobalHandles::RemoveObjectGroups() {
- List<ObjectGroup*>* object_groups = ObjectGroups();
- for (int i = 0; i< object_groups->length(); i++) {
- delete object_groups->at(i);
+ for (int i = 0; i < object_groups_.length(); i++) {
+ delete object_groups_.at(i);
}
- object_groups->Clear();
+ object_groups_.Clear();
}
void GlobalHandles::RemoveImplicitRefGroups() {
- List<ImplicitRefGroup*>* ref_groups = ImplicitRefGroups();
- for (int i = 0; i< ref_groups->length(); i++) {
- delete ref_groups->at(i);
+ for (int i = 0; i < implicit_ref_groups_.length(); i++) {
+ delete implicit_ref_groups_.at(i);
}
- ref_groups->Clear();
+ implicit_ref_groups_.Clear();
}
ObjectGroup(size_t capacity, v8::RetainedObjectInfo* info)
: objects_(static_cast<int>(capacity)),
info_(info) { }
- ~ObjectGroup() { if (info_ != NULL) info_->Dispose(); }
+ ~ObjectGroup();
List<Object**> objects_;
v8::RetainedObjectInfo* info_;
typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
-class GlobalHandles : public AllStatic {
+class GlobalHandles {
public:
+ ~GlobalHandles();
+
// Creates a new global handle that is alive until Destroy is called.
- static Handle<Object> Create(Object* value);
+ Handle<Object> Create(Object* value);
// Destroy a global handle.
- static void Destroy(Object** location);
+ void Destroy(Object** location);
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
// function is invoked (for each handle) with the handle and corresponding
// parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
 // reason is that Smi::FromInt(0) does not change during garbage collection.
- static void MakeWeak(Object** location,
- void* parameter,
- WeakReferenceCallback callback);
+ void MakeWeak(Object** location,
+ void* parameter,
+ WeakReferenceCallback callback);
static void SetWrapperClassId(Object** location, uint16_t class_id);
// Returns the current number of weak handles.
- static int NumberOfWeakHandles() { return number_of_weak_handles_; }
+ int NumberOfWeakHandles() { return number_of_weak_handles_; }
- static void RecordStats(HeapStats* stats);
+ void RecordStats(HeapStats* stats);
// Returns the current number of weak handles to global objects.
// These handles are also included in NumberOfWeakHandles().
- static int NumberOfGlobalObjectWeakHandles() {
+ int NumberOfGlobalObjectWeakHandles() {
return number_of_global_object_weak_handles_;
}
// Clear the weakness of a global handle.
- static void ClearWeakness(Object** location);
+ void ClearWeakness(Object** location);
// Tells whether global handle is near death.
static bool IsNearDeath(Object** location);
// Process pending weak handles.
// Returns true if next major GC is likely to collect more garbage.
- static bool PostGarbageCollectionProcessing();
+ bool PostGarbageCollectionProcessing();
// Iterates over all strong handles.
- static void IterateStrongRoots(ObjectVisitor* v);
+ void IterateStrongRoots(ObjectVisitor* v);
// Iterates over all handles.
- static void IterateAllRoots(ObjectVisitor* v);
+ void IterateAllRoots(ObjectVisitor* v);
// Iterates over all handles that have embedder-assigned class ID.
- static void IterateAllRootsWithClassIds(ObjectVisitor* v);
+ void IterateAllRootsWithClassIds(ObjectVisitor* v);
// Iterates over all weak roots in heap.
- static void IterateWeakRoots(ObjectVisitor* v);
+ void IterateWeakRoots(ObjectVisitor* v);
// Iterates over weak roots that are bound to a given callback.
- static void IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback);
+ void IterateWeakRoots(WeakReferenceGuest f,
+ WeakReferenceCallback callback);
// Find all weak handles satisfying the callback predicate, mark
// them as pending.
- static void IdentifyWeakHandles(WeakSlotCallback f);
+ void IdentifyWeakHandles(WeakSlotCallback f);
// Add an object group.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a mark-compact collection.
- static void AddObjectGroup(Object*** handles,
- size_t length,
- v8::RetainedObjectInfo* info);
+ void AddObjectGroup(Object*** handles,
+ size_t length,
+ v8::RetainedObjectInfo* info);
// Add an implicit references' group.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a mark-compact collection.
- static void AddImplicitReferences(HeapObject* parent,
- Object*** children,
- size_t length);
+ void AddImplicitReferences(HeapObject* parent,
+ Object*** children,
+ size_t length);
// Returns the object groups.
- static List<ObjectGroup*>* ObjectGroups();
+ List<ObjectGroup*>* object_groups() { return &object_groups_; }
// Returns the implicit references' groups.
- static List<ImplicitRefGroup*>* ImplicitRefGroups();
+ List<ImplicitRefGroup*>* implicit_ref_groups() {
+ return &implicit_ref_groups_;
+ }
// Remove bags, this should only happen after GC.
- static void RemoveObjectGroups();
- static void RemoveImplicitRefGroups();
+ void RemoveObjectGroups();
+ void RemoveImplicitRefGroups();
// Tear down the global handle structure.
- static void TearDown();
+ void TearDown();
+
+ Isolate* isolate() { return isolate_; }
#ifdef DEBUG
- static void PrintStats();
- static void Print();
+ void PrintStats();
+ void Print();
#endif
class Pool;
private:
+ explicit GlobalHandles(Isolate* isolate);
+
// Internal node structure, one for each global handle.
class Node;
+ Isolate* isolate_;
+
// Field always containing the number of weak and near-death handles.
- static int number_of_weak_handles_;
+ int number_of_weak_handles_;
// Field always containing the number of weak and near-death handles
// to global objects. These objects are also included in
// number_of_weak_handles_.
- static int number_of_global_object_weak_handles_;
+ int number_of_global_object_weak_handles_;
// Global handles are kept in a single linked list pointed to by head_.
- static Node* head_;
- static Node* head() { return head_; }
- static void set_head(Node* value) { head_ = value; }
+ Node* head_;
+ Node* head() { return head_; }
+ void set_head(Node* value) { head_ = value; }
// Free list for DESTROYED global handles not yet deallocated.
- static Node* first_free_;
- static Node* first_free() { return first_free_; }
- static void set_first_free(Node* value) { first_free_ = value; }
+ Node* first_free_;
+ Node* first_free() { return first_free_; }
+ void set_first_free(Node* value) { first_free_ = value; }
// List of deallocated nodes.
// Deallocated nodes form a prefix of all the nodes and
// node node ... node node
// .next -> .next -> .next ->
// <- .next_free <- .next_free <- .next_free
- static Node* first_deallocated_;
- static Node* first_deallocated() { return first_deallocated_; }
- static void set_first_deallocated(Node* value) {
+ Node* first_deallocated_;
+ Node* first_deallocated() { return first_deallocated_; }
+ void set_first_deallocated(Node* value) {
first_deallocated_ = value;
}
+
+ Pool* pool_;
+ int post_gc_processing_count_;
+ List<ObjectGroup*> object_groups_;
+ List<ImplicitRefGroup*> implicit_ref_groups_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(GlobalHandles);
};
#ifndef V8_HANDLES_INL_H_
#define V8_HANDLES_INL_H_
+#include "api.h"
#include "apiutils.h"
#include "handles.h"
-#include "api.h"
+#include "isolate.h"
namespace v8 {
namespace internal {
+inline Isolate* GetIsolateForHandle(Object* obj) {
+ return Isolate::Current();
+}
+
+inline Isolate* GetIsolateForHandle(HeapObject* obj) {
+ return obj->GetIsolate();
+}
+
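+
The two GetIsolateForHandle overloads let overload resolution pick the lookup statically: when a handle's type is known to be a HeapObject, the cheaper per-object path wins over Isolate::Current(). A toy demonstration of the dispatch, with ints standing in for the two lookup paths:

#include <cassert>

struct Object {};
struct HeapObject : Object {};

// The Object* overload stands in for the Isolate::Current() slow path,
// the HeapObject* overload for the per-object fast path.
static int GetIsolateForHandle(Object*) { return 0; }
static int GetIsolateForHandle(HeapObject*) { return 1; }

int main() {
  Object object;
  HeapObject heap_object;
  assert(GetIsolateForHandle(&object) == 0);       // exact match: slow path
  assert(GetIsolateForHandle(&heap_object) == 1);  // exact match: fast path
  return 0;
}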
template<typename T>
Handle<T>::Handle(T* obj) {
ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj);
+ location_ = HandleScope::CreateHandle(obj, GetIsolateForHandle(obj));
+}
+
+
+template<typename T>
+Handle<T>::Handle(T* obj, Isolate* isolate) {
+ ASSERT(!obj->IsFailure());
+ location_ = HandleScope::CreateHandle(obj, isolate);
}
}
+HandleScope::HandleScope() {
+ Isolate* isolate = Isolate::Current();
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate_ = isolate;
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ current->level++;
+}
+
+
+HandleScope::HandleScope(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate_ = isolate;
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ current->level++;
+}
+
+
+HandleScope::~HandleScope() {
+ CloseScope();
+}
+
+void HandleScope::CloseScope() {
+ ASSERT(isolate_ == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ current->next = prev_next_;
+ current->level--;
+ if (current->limit != prev_limit_) {
+ current->limit = prev_limit_;
+ DeleteExtensions(isolate_);
+ }
+#ifdef DEBUG
+ ZapRange(prev_next_, prev_limit_);
+#endif
+}
+
+
+template <typename T>
+Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
+ T* value = *handle_value;
+ // Throw away all handles in the current scope.
+ CloseScope();
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ // Allocate one handle in the parent scope.
+ ASSERT(current->level > 0);
+ Handle<T> result(CreateHandle<T>(value, isolate_));
+ // Reinitialize the current scope (so that it's ready
+ // to be used or closed again).
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ current->level++;
+ return result;
+}
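
CloseAndEscape pops the whole scope, allocates exactly one handle in the parent scope for the escaping value, and then re-enters an empty scope, so only that one slot survives. A toy model of the dance, with plain ints instead of handles and all names invented:

#include <cassert>

// Toy handle area: slots are handed out linearly and freed by resetting
// the cursor, mirroring HandleScopeData's next/level fields.
struct ScopeData { int* next; int level; };

struct Scope {
  ScopeData* data;
  int* prev_next;
  explicit Scope(ScopeData* d) : data(d), prev_next(d->next) { d->level++; }
  ~Scope() { Close(); }
  void Close() { data->next = prev_next; data->level--; }
  int* Escape(int value) {
    Close();                    // throw away all handles in this scope
    assert(data->level > 0);    // a parent scope must exist
    int* slot = data->next++;   // one handle in the parent scope
    *slot = value;
    prev_next = data->next;     // reinitialize this scope, now empty
    data->level++;
    return slot;
  }
};

int main() {
  int slots[8];
  ScopeData data = { slots, 1 };  // pretend an outer scope is open
  int* escaped;
  {
    Scope inner(&data);
    int* temp = data.next++;      // a handle that will be thrown away
    *temp = 1;
    escaped = inner.Escape(42);
  }
  assert(*escaped == 42);
  assert(data.next == slots + 1);  // only the escaped slot survived
  return 0;
}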
+
+
+template <typename T>
+T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+
+ internal::Object** cur = current->next;
+ if (cur == current->limit) cur = Extend();
+ // Update the current next field, set the value in the created
+ // handle, and return the result.
+ ASSERT(cur < current->limit);
+ current->next = cur + 1;
+
+ T** result = reinterpret_cast<T**>(cur);
+ *result = value;
+ return result;
+}
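
CreateHandle is a bump-pointer allocator: it hands out the next slot in the current block and only calls Extend() for a fresh block when the cursor hits the limit. A self-contained sketch of that allocation scheme (names invented):

#include <cassert>
#include <cstddef>
#include <vector>

static const int kBlockSize = 256;

struct HandleArea {
  std::vector<int*> blocks;
  int* next;
  int* limit;
  HandleArea() : next(NULL), limit(NULL) {}
  ~HandleArea() {
    for (size_t i = 0; i < blocks.size(); i++) delete[] blocks[i];
  }
  int* CreateHandle(int value) {
    if (next == limit) Extend();  // cur == current->limit in the diff
    int* slot = next++;           // bump the cursor
    *slot = value;
    return slot;
  }
  void Extend() {
    blocks.push_back(new int[kBlockSize]);
    next = blocks.back();
    limit = next + kBlockSize;
  }
};

int main() {
  HandleArea area;
  int* a = area.CreateHandle(1);
  int* b = area.CreateHandle(2);
  assert(*a == 1 && *b == 2 && b == a + 1);
  return 0;
}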
+
+
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
+ Isolate::Current()->handle_scope_data();
+
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
current->limit = current->next;
inline NoHandleAllocation::~NoHandleAllocation() {
// Restore state in current handle scope to re-enable handle
// allocations.
- v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
- ASSERT_EQ(0, current->level);
- current->level = level_;
+ v8::ImplementationUtilities::HandleScopeData* data =
+ Isolate::Current()->handle_scope_data();
+ ASSERT_EQ(0, data->level);
+ data->level = level_;
}
#endif
namespace internal {
-v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
- { NULL, NULL, 0 };
-
-
int HandleScope::NumberOfHandles() {
- int n = HandleScopeImplementer::instance()->blocks()->length();
+ Isolate* isolate = Isolate::Current();
+ HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ int n = impl->blocks()->length();
if (n == 0) return 0;
return ((n - 1) * kHandleBlockSize) + static_cast<int>(
- (current_.next - HandleScopeImplementer::instance()->blocks()->last()));
+ (isolate->handle_scope_data()->next - impl->blocks()->last()));
}
Object** HandleScope::Extend() {
- Object** result = current_.next;
+ Isolate* isolate = Isolate::Current();
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
- ASSERT(result == current_.limit);
+ Object** result = current->next;
+
+ ASSERT(result == current->limit);
// Make sure there's at least one scope on the stack and that the
// top of the scope stack isn't a barrier.
- if (current_.level == 0) {
+ if (current->level == 0) {
Utils::ReportApiFailure("v8::HandleScope::CreateHandle()",
"Cannot create a handle without a HandleScope");
return NULL;
}
- HandleScopeImplementer* impl = HandleScopeImplementer::instance();
+ HandleScopeImplementer* impl = isolate->handle_scope_implementer();
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
if (!impl->blocks()->is_empty()) {
Object** limit = &impl->blocks()->last()[kHandleBlockSize];
- if (current_.limit != limit) {
- current_.limit = limit;
- ASSERT(limit - current_.next < kHandleBlockSize);
+ if (current->limit != limit) {
+ current->limit = limit;
+ ASSERT(limit - current->next < kHandleBlockSize);
}
}
// If we still haven't found a slot for the handle, we extend the
// current handle scope by allocating a new handle block.
- if (result == current_.limit) {
+ if (result == current->limit) {
// If there's a spare block, use it for growing the current scope.
result = impl->GetSpareOrNewBlock();
// Add the extension to the global list of blocks, but count the
// extension as part of the current scope.
impl->blocks()->Add(result);
- current_.limit = &result[kHandleBlockSize];
+ current->limit = &result[kHandleBlockSize];
}
return result;
}
-void HandleScope::DeleteExtensions() {
- HandleScopeImplementer::instance()->DeleteExtensions(current_.limit);
+void HandleScope::DeleteExtensions(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}
Address HandleScope::current_level_address() {
- return reinterpret_cast<Address>(&current_.level);
+ return reinterpret_cast<Address>(
+ &Isolate::Current()->handle_scope_data()->level);
}
Address HandleScope::current_next_address() {
- return reinterpret_cast<Address>(&current_.next);
+ return reinterpret_cast<Address>(
+ &Isolate::Current()->handle_scope_data()->next);
}
Address HandleScope::current_limit_address() {
- return reinterpret_cast<Address>(&current_.limit);
+ return reinterpret_cast<Address>(
+ &Isolate::Current()->handle_scope_data()->limit);
}
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
Handle<JSArray> array) {
- CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
+ CALL_HEAP_FUNCTION(content->GetHeap()->isolate(),
+ content->AddKeysFromJSArray(*array), FixedArray);
}
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second) {
- CALL_HEAP_FUNCTION(first->UnionOfKeys(*second), FixedArray);
+ CALL_HEAP_FUNCTION(first->GetHeap()->isolate(),
+ first->UnionOfKeys(*second), FixedArray);
}
Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global) {
- CALL_HEAP_FUNCTION(Heap::ReinitializeJSGlobalProxy(*constructor, *global),
- JSGlobalProxy);
+ CALL_HEAP_FUNCTION(
+ constructor->GetHeap()->isolate(),
+ constructor->GetHeap()->ReinitializeJSGlobalProxy(*constructor, *global),
+ JSGlobalProxy);
}
func->shared()->set_expected_nof_properties(nof);
if (func->has_initial_map()) {
Handle<Map> new_initial_map =
- Factory::CopyMapDropTransitions(Handle<Map>(func->initial_map()));
+ func->GetIsolate()->factory()->CopyMapDropTransitions(
+ Handle<Map>(func->initial_map()));
new_initial_map->set_unused_property_fields(nof);
func->set_initial_map(*new_initial_map);
}
void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
- CALL_HEAP_FUNCTION_VOID(func->SetPrototype(*value));
+ CALL_HEAP_FUNCTION_VOID(func->GetHeap()->isolate(),
+ func->SetPrototype(*value));
}
void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(
- mode,
- expected_additional_properties));
+ CALL_HEAP_FUNCTION_VOID(object->GetHeap()->isolate(),
+ object->NormalizeProperties(
+ mode,
+ expected_additional_properties));
}
void NormalizeElements(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(object->NormalizeElements());
+ CALL_HEAP_FUNCTION_VOID(object->GetHeap()->isolate(),
+ object->NormalizeElements());
}
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
CALL_HEAP_FUNCTION_VOID(
+ object->GetHeap()->isolate(),
object->TransformToFastProperties(unused_property_fields));
}
uint32_t index,
Handle<Object> value,
PropertyDetails details) {
- CALL_HEAP_FUNCTION_VOID(dictionary->Set(index, *value, details));
+ CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
+ dictionary->Set(index, *value, details));
}
void FlattenString(Handle<String> string) {
- CALL_HEAP_FUNCTION_VOID(string->TryFlatten());
+ CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
}
Handle<String> FlattenGetString(Handle<String> string) {
- CALL_HEAP_FUNCTION(string->TryFlatten(), String);
+ CALL_HEAP_FUNCTION(string->GetIsolate(), string->TryFlatten(), String);
}
Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(Accessors::FunctionSetPrototype(*function,
+ CALL_HEAP_FUNCTION(function->GetHeap()->isolate(),
+ Accessors::FunctionSetPrototype(*function,
*prototype,
NULL),
Object);
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes, strict_mode),
+ CALL_HEAP_FUNCTION(object->GetHeap()->isolate(),
+ object->SetProperty(*key, *value, attributes, strict_mode),
Object);
}
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
+ Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
- Runtime::SetObjectProperty(object, key, value, attributes, strict_mode),
+ isolate,
+ Runtime::SetObjectProperty(
+ isolate, object, key, value, attributes, strict_mode),
Object);
}
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes) {
+ Isolate* isolate = object->GetIsolate();
CALL_HEAP_FUNCTION(
+ isolate,
Runtime::ForceSetObjectProperty(
- object, key, value, attributes),
+ isolate, object, key, value, attributes),
Object);
}
Handle<String> key,
Handle<Object> value,
PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->SetNormalizedProperty(*key, *value, details),
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetNormalizedProperty(*key, *value, details),
Object);
}
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key) {
- CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::ForceDeleteObjectProperty(isolate, object, key),
+ Object);
}
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(object->
- SetLocalPropertyIgnoreAttributes(*key, *value, attributes), Object);
+ CALL_HEAP_FUNCTION(
+ object->GetIsolate(),
+ object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
+ Object);
}
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
- ASSERT(!Top::has_pending_exception());
+ ASSERT(!object->GetIsolate()->has_pending_exception());
CHECK(!SetLocalPropertyIgnoreAttributes(
object, key, value, attributes).is_null());
- CHECK(!Top::has_pending_exception());
+ CHECK(!object->GetIsolate()->has_pending_exception());
}
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->SetPropertyWithInterceptor(*key,
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetPropertyWithInterceptor(*key,
*value,
attributes,
strict_mode),
Handle<Object> GetProperty(Handle<JSObject> obj,
const char* name) {
- Handle<String> str = Factory::LookupAsciiSymbol(name);
- CALL_HEAP_FUNCTION(obj->GetProperty(*str), Object);
+ Isolate* isolate = obj->GetIsolate();
+ Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
+ CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
}
Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key) {
- CALL_HEAP_FUNCTION(Runtime::GetObjectProperty(obj, key), Object);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::GetObjectProperty(isolate, obj, key), Object);
}
Handle<Object> GetElement(Handle<Object> obj,
uint32_t index) {
- CALL_HEAP_FUNCTION(Runtime::GetElement(obj, index), Object);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
}
Handle<JSObject> holder,
Handle<String> name,
PropertyAttributes* attributes) {
- CALL_HEAP_FUNCTION(holder->GetPropertyWithInterceptor(*receiver,
+ Isolate* isolate = receiver->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ holder->GetPropertyWithInterceptor(*receiver,
*name,
attributes),
Object);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
const bool skip_hidden_prototypes = false;
- CALL_HEAP_FUNCTION(obj->SetPrototype(*value, skip_hidden_prototypes), Object);
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->SetPrototype(*value, skip_hidden_prototypes), Object);
}
Handle<Object> PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->PreventExtensions(), Object);
+ CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
}
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) {
+ Isolate* isolate = obj->GetIsolate();
Object* holder = obj->BypassGlobalProxy();
- if (holder->IsUndefined()) return Factory::undefined_value();
+ if (holder->IsUndefined()) return isolate->factory()->undefined_value();
obj = Handle<JSObject>(JSObject::cast(holder));
if (obj->HasFastProperties()) {
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = obj->map()->instance_descriptors();
if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == Heap::hidden_symbol()) &&
+ (descriptors->GetKey(0) == isolate->heap()->hidden_symbol()) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
// Hidden properties object not found. Allocate a new hidden properties
// object if requested. Otherwise return the undefined value.
if (create_if_needed) {
- Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function());
- CALL_HEAP_FUNCTION(obj->SetHiddenPropertiesObject(*hidden_obj), Object);
+ Handle<Object> hidden_obj =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ CALL_HEAP_FUNCTION(isolate,
+ obj->SetHiddenPropertiesObject(*hidden_obj), Object);
} else {
- return Factory::undefined_value();
+ return isolate->factory()->undefined_value();
}
}
- return Handle<Object>(obj->GetHiddenPropertiesObject());
+ return Handle<Object>(obj->GetHiddenPropertiesObject(), isolate);
}
Handle<Object> DeleteElement(Handle<JSObject> obj,
uint32_t index) {
- CALL_HEAP_FUNCTION(obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->DeleteElement(index, JSObject::NORMAL_DELETION),
Object);
}
Handle<Object> DeleteProperty(Handle<JSObject> obj,
Handle<String> prop) {
- CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
Object);
}
Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
- CALL_HEAP_FUNCTION(Heap::LookupSingleCharacterStringFromCode(index), Object);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(
+ isolate,
+ isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
}
int start,
int end,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(str->SubString(start, end, pretenure), String);
+ CALL_HEAP_FUNCTION(str->GetIsolate(),
+ str->SubString(start, end, pretenure), String);
}
value = number;
}
}
- CALL_HEAP_FUNCTION(object->SetElement(index, *value, strict_mode), Object);
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetElement(index, *value, strict_mode), Object);
}
Handle<Object> value,
StrictModeFlag strict_mode) {
ASSERT(!object->HasExternalArrayElements());
- CALL_HEAP_FUNCTION(object->SetElement(index, *value, strict_mode, false),
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetElement(index, *value, strict_mode, false),
Object);
}
Handle<JSObject> Copy(Handle<JSObject> obj) {
- CALL_HEAP_FUNCTION(Heap::CopyJSObject(*obj), JSObject);
+ Isolate* isolate = obj->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*obj), JSObject);
}
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
- CALL_HEAP_FUNCTION(obj->DefineAccessor(*info), Object);
+ CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
}
Proxy* proxy = Script::cast(wrapper->value())->wrapper();
ASSERT(proxy->proxy() == reinterpret_cast<Address>(cache.location()));
proxy->set_proxy(0);
- GlobalHandles::Destroy(cache.location());
- Counters::script_wrappers.Decrement();
+ Isolate::Current()->global_handles()->Destroy(cache.location());
+ COUNTERS->script_wrappers()->Decrement();
}
Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
+ Isolate* isolate = Isolate::Current();
if (script->wrapper()->proxy() != NULL) {
// Return the script wrapper directly from the cache.
return Handle<JSValue>(
}
// Construct a new script wrapper.
- Counters::script_wrappers.Increment();
- Handle<JSFunction> constructor = Top::script_function();
+ isolate->counters()->script_wrappers()->Increment();
+ Handle<JSFunction> constructor = isolate->script_function();
Handle<JSValue> result =
- Handle<JSValue>::cast(Factory::NewJSObject(constructor));
+ Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
result->set_value(*script);
// Create a new weak global handle and use it to cache the wrapper
// for future use. The cache will automatically be cleared by the
// garbage collector when it is not used anymore.
- Handle<Object> handle = GlobalHandles::Create(*result);
- GlobalHandles::MakeWeak(handle.location(), NULL, &ClearWrapperCache);
+ Handle<Object> handle = isolate->global_handles()->Create(*result);
+ isolate->global_handles()->MakeWeak(handle.location(), NULL,
+ &ClearWrapperCache);
script->wrapper()->set_proxy(reinterpret_cast<Address>(handle.location()));
return result;
}
if (!script->source()->IsString()) {
ASSERT(script->source()->IsUndefined());
- Handle<FixedArray> empty = Factory::NewFixedArray(0);
+ Handle<FixedArray> empty =
+ script->GetIsolate()->factory()->NewFixedArray(0);
script->set_line_ends(*empty);
ASSERT(script->line_ends()->IsFixedArray());
return;
Handle<FixedArray> array = CalculateLineEnds(src, true);
- if (*array != Heap::empty_fixed_array()) {
- array->set_map(Heap::fixed_cow_array_map());
+ if (*array != HEAP->empty_fixed_array()) {
+ array->set_map(HEAP->fixed_cow_array_map());
}
script->set_line_ends(*array);
template <typename SourceChar>
-static void CalculateLineEnds(List<int>* line_ends,
+static void CalculateLineEnds(Isolate* isolate,
+ List<int>* line_ends,
Vector<const SourceChar> src,
bool with_last_line) {
const int src_len = src.length();
- StringSearch<char, SourceChar> search(CStrVector("\n"));
+ StringSearch<char, SourceChar> search(isolate, CStrVector("\n"));
// Find and record line ends.
int position = 0;
List<int> line_ends(line_count_estimate);
{
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
+ Isolate* isolate = src->GetIsolate();
// Dispatch on type of strings.
if (src->IsAsciiRepresentation()) {
- CalculateLineEnds(&line_ends, src->ToAsciiVector(), with_last_line);
+ CalculateLineEnds(isolate,
+ &line_ends,
+ src->ToAsciiVector(),
+ with_last_line);
} else {
- CalculateLineEnds(&line_ends, src->ToUC16Vector(), with_last_line);
+ CalculateLineEnds(isolate,
+ &line_ends,
+ src->ToUC16Vector(),
+ with_last_line);
}
}
int line_count = line_ends.length();
- Handle<FixedArray> array = Factory::NewFixedArray(line_count);
+ Handle<FixedArray> array = FACTORY->NewFixedArray(line_count);
for (int i = 0; i < line_count; i++) {
array->set(i, Smi::FromInt(line_ends[i]));
}
// Compute the property keys from the interceptor.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
+ Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- CustomArguments args(interceptor->data(), *receiver, *object);
+ CustomArguments args(isolate, interceptor->data(), *receiver, *object);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::NamedPropertyEnumerator enum_fun =
v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
- LOG(ApiObjectAccess("interceptor-named-enum", *object));
+ LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = enum_fun(info);
}
}
// Compute the element keys from the interceptor.
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
+ Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- CustomArguments args(interceptor->data(), *receiver, *object);
+ CustomArguments args(isolate, interceptor->data(), *receiver, *object);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumerator enum_fun =
v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
- LOG(ApiObjectAccess("interceptor-indexed-enum", *object));
+ LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = enum_fun(info);
}
}
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type) {
USE(ContainsOnlyValidKeys);
- Handle<FixedArray> content = Factory::empty_fixed_array();
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
Handle<JSObject> arguments_boilerplate =
Handle<JSObject>(
- Top::context()->global_context()->arguments_boilerplate());
+ isolate->context()->global_context()->arguments_boilerplate());
Handle<JSFunction> arguments_function =
Handle<JSFunction>(
JSFunction::cast(arguments_boilerplate->map()->constructor()));
// Only collect keys if access is permitted.
for (Handle<Object> p = object;
- *p != Heap::null_value();
+ *p != isolate->heap()->null_value();
p = Handle<Object>(p->GetPrototype())) {
Handle<JSObject> current(JSObject::cast(*p));
// Check access rights if required.
if (current->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*current, Heap::undefined_value(),
- v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccess(*current,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
break;
}
// Compute the element keys.
Handle<FixedArray> element_keys =
- Factory::NewFixedArray(current->NumberOfEnumElements());
+ isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
current->GetEnumElementKeys(*element_keys);
content = UnionOfKeys(content, element_keys);
ASSERT(ContainsOnlyValidKeys(content));
Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
- Counters::for_in.Increment();
+ Isolate* isolate = object->GetIsolate();
+ isolate->counters()->for_in()->Increment();
Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
INCLUDE_PROTOS);
- return Factory::NewJSArrayWithElements(elements);
+ return isolate->factory()->NewJSArrayWithElements(elements);
}
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result) {
int index = 0;
+ Isolate* isolate = object->GetIsolate();
if (object->HasFastProperties()) {
if (object->map()->instance_descriptors()->HasEnumCache()) {
- Counters::enum_cache_hits.Increment();
+ isolate->counters()->enum_cache_hits()->Increment();
DescriptorArray* desc = object->map()->instance_descriptors();
return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()));
}
- Counters::enum_cache_misses.Increment();
+ isolate->counters()->enum_cache_misses()->Increment();
int num_enum = object->NumberOfEnumProperties();
- Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
+ Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(object->map()->instance_descriptors());
for (int i = 0; i < descs->number_of_descriptors(); i++) {
(*storage)->SortPairs(*sort_array, sort_array->length());
if (cache_result) {
Handle<FixedArray> bridge_storage =
- Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
+ isolate->factory()->NewFixedArray(
+ DescriptorArray::kEnumCacheBridgeLength);
DescriptorArray* desc = object->map()->instance_descriptors();
desc->SetEnumCache(*bridge_storage, *storage);
}
return storage;
} else {
int num_enum = object->NumberOfEnumProperties();
- Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
+ Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
return storage;
}
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
- ASSERT(!Top::has_pending_exception());
+ ASSERT(!info->isolate()->has_pending_exception());
bool result = Compiler::CompileLazy(info);
- ASSERT(result != Top::has_pending_exception());
- if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
+ ASSERT(result != Isolate::Current()->has_pending_exception());
+ if (!result && flag == CLEAR_EXCEPTION) {
+ info->isolate()->clear_pending_exception();
+ }
return result;
}
public:
INLINE(explicit Handle(T** location)) { location_ = location; }
INLINE(explicit Handle(T* obj));
+ INLINE(Handle(T* obj, Isolate* isolate));
INLINE(Handle()) : location_(NULL) {}
}
static Handle<T> null() { return Handle<T>(); }
- bool is_null() { return location_ == NULL; }
+ bool is_null() const { return location_ == NULL; }
// Closes the given scope, but lets this handle escape. See
// implementation in api.h.
// for which the handle scope has been deleted is undefined.
class HandleScope {
public:
- HandleScope() : prev_next_(current_.next), prev_limit_(current_.limit) {
- current_.level++;
- }
+ inline HandleScope();
+ explicit inline HandleScope(Isolate* isolate);
- ~HandleScope() {
- CloseScope();
- }
+ inline ~HandleScope();
// Counts the number of allocated handles.
static int NumberOfHandles();
// Creates a new handle with the given value.
template <typename T>
- static inline T** CreateHandle(T* value) {
- internal::Object** cur = current_.next;
- if (cur == current_.limit) cur = Extend();
- // Update the current next field, set the value in the created
- // handle, and return the result.
- ASSERT(cur < current_.limit);
- current_.next = cur + 1;
-
- T** result = reinterpret_cast<T**>(cur);
- *result = value;
- return result;
- }
+ static inline T** CreateHandle(T* value, Isolate* isolate);
// Deallocates any extensions used by the current scope.
- static void DeleteExtensions();
+ static void DeleteExtensions(Isolate* isolate);
static Address current_next_address();
static Address current_limit_address();
// a Handle backed by the parent scope holding the
// value of the argument handle.
template <typename T>
- Handle<T> CloseAndEscape(Handle<T> handle_value) {
- T* value = *handle_value;
- // Throw away all handles in the current scope.
- CloseScope();
- // Allocate one handle in the parent scope.
- ASSERT(current_.level > 0);
- Handle<T> result(CreateHandle<T>(value));
- // Reinitialize the current scope (so that it's ready
- // to be used or closed again).
- prev_next_ = current_.next;
- prev_limit_ = current_.limit;
- current_.level++;
- return result;
- }
+ Handle<T> CloseAndEscape(Handle<T> handle_value);
+
+ Isolate* isolate() { return isolate_; }
private:
// Prevent heap allocation or illegal handle scopes.
void* operator new(size_t size);
void operator delete(void* size_t);
- inline void CloseScope() {
- current_.next = prev_next_;
- current_.level--;
- if (current_.limit != prev_limit_) {
- current_.limit = prev_limit_;
- DeleteExtensions();
- }
-#ifdef DEBUG
- ZapRange(prev_next_, prev_limit_);
-#endif
- }
+ inline void CloseScope();
- static v8::ImplementationUtilities::HandleScopeData current_;
- // Holds values on entry. The prev_next_ value is never NULL
- // on_entry, but is set to NULL when this scope is closed.
+ Isolate* isolate_;
Object** prev_next_;
Object** prev_limit_;
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
- HashMap(MatchFun match,
- Allocator* allocator = &DefaultAllocator,
- uint32_t initial_capacity = 8);
+ explicit HashMap(MatchFun match,
+ Allocator* allocator = &DefaultAllocator,
+ uint32_t initial_capacity = 8);
~HashMap();
#include "heap.h"
#include "objects.h"
+#include "isolate.h"
#include "v8-counters.h"
namespace v8 {
namespace internal {
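+// A short orientation note on the queue discipline below: the promotion
+// queue lives at the high end of to-space and grows downwards. insert()
+// pushes a (target, size) pair by pre-decrementing rear_; the matching
+// remove() pops by pre-decrementing front_, which always trails above
+// rear_, so pairs come out in insertion order. The ASSERT in insert()
+// guards against the queue growing down into objects the scavenger has
+// allocated up from the bottom of to-space.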
+void PromotionQueue::insert(HeapObject* target, int size) {
+ *(--rear_) = reinterpret_cast<intptr_t>(target);
+ *(--rear_) = size;
+ // Assert no overflow into live objects.
+ ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
+}
+
+
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
Heap::allocation_timeout_-- <= 0) {
return Failure::RetryAfterGC(space);
}
- Counters::objs_since_last_full.Increment();
- Counters::objs_since_last_young.Increment();
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
#endif
MaybeObject* result;
if (NEW_SPACE == space) {
MaybeObject* Heap::AllocateRawMap() {
#ifdef DEBUG
- Counters::objs_since_last_full.Increment();
- Counters::objs_since_last_young.Increment();
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
#endif
MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
MaybeObject* Heap::AllocateRawCell() {
#ifdef DEBUG
- Counters::objs_since_last_full.Increment();
- Counters::objs_since_last_young.Increment();
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
#endif
MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
- if (Heap::InNewSpace(Memory::Object_at(dst))) {
+ if (InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
}
+void Heap::ScavengePointer(HeapObject** p) {
+ ScavengeObject(p, *p);
+}
+
+
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
+ ASSERT(HEAP->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
roots_[kLastScriptIdRootIndex] = last_script_id;
}
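+// Derives the owning Isolate from a Heap* without storing a back
+// pointer: a fake Isolate* at address 4 (non-null, to avoid
+// null-pointer arithmetic) is used to read the byte offset of the heap_
+// field inside Isolate, which is then subtracted from 'this'. In
+// effect:
+//   offset  = reinterpret_cast<size_t>(((Isolate*)4)->heap()) - 4;
+//   isolate = reinterpret_cast<Isolate*>((intptr_t)this - offset);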
+Isolate* Heap::isolate() {
+ return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
+ reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
+}
+
#ifdef DEBUG
#define GC_GREEDY_CHECK() \
- if (FLAG_gc_greedy) v8::internal::Heap::GarbageCollectionGreedyCheck()
+ if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
#else
#define GC_GREEDY_CHECK() { }
#endif
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.
-#define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\
do { \
GC_GREEDY_CHECK(); \
MaybeObject* __maybe_object__ = FUNCTION_CALL; \
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- Heap::CollectGarbage( \
- Failure::cast(__maybe_object__)->allocation_space()); \
+ ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
+ allocation_space()); \
__maybe_object__ = FUNCTION_CALL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
if (__maybe_object__->IsOutOfMemory()) { \
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- Counters::gc_last_resort_from_handles.Increment(); \
- Heap::CollectAllAvailableGarbage(); \
+ ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
+ ISOLATE->heap()->CollectAllAvailableGarbage(); \
{ \
AlwaysAllocateScope __scope__; \
__maybe_object__ = FUNCTION_CALL; \
} while (false)
-#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY(FUNCTION_CALL, \
- return Handle<TYPE>(TYPE::cast(__object__)), \
+// TODO(isolates): cache isolate: either accept as a parameter or
+// set to some known symbol (__CUR_ISOLATE__?)
+#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
+ CALL_AND_RETRY(ISOLATE, \
+ FUNCTION_CALL, \
+ return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
return Handle<TYPE>())
-#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
- CALL_AND_RETRY(FUNCTION_CALL, return, return)
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
+ CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return)
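+// The retry protocol is unchanged apart from the explicit ISOLATE: on a
+// RetryAfterGC failure the failing space is collected and the call is
+// retried; if that also fails, gc_last_resort_from_handles is bumped,
+// all available garbage is collected, and one last attempt is made
+// under AlwaysAllocateScope. Genuine out-of-memory is fatal at every
+// stage.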
#ifdef DEBUG
void ExternalStringTable::AddString(String* string) {
ASSERT(string->IsExternalString());
- if (Heap::InNewSpace(string)) {
+ if (heap_->InNewSpace(string)) {
new_space_strings_.Add(string);
} else {
old_space_strings_.Add(string);
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
- ASSERT(Heap::InNewSpace(new_space_strings_[i]));
- ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value());
+ ASSERT(heap_->InNewSpace(new_space_strings_[i]));
+ ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
- ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
- ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value());
+ ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
+ ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
}
#endif
}
void ExternalStringTable::AddOldString(String* string) {
ASSERT(string->IsExternalString());
- ASSERT(!Heap::InNewSpace(string));
+ ASSERT(!heap_->InNewSpace(string));
old_space_strings_.Add(string);
}
Verify();
}
+
+void Heap::ClearInstanceofCache() {
+ set_instanceof_cache_function(the_hole_value());
+}
+
+
+Object* Heap::ToBoolean(bool condition) {
+ return condition ? true_value() : false_value();
+}
+
+
+void Heap::CompletelyClearInstanceofCache() {
+ set_instanceof_cache_map(the_hole_value());
+ set_instanceof_cache_function(the_hole_value());
+}
+
+
+MaybeObject* TranscendentalCache::Get(Type type, double input) {
+ SubCache* cache = caches_[type];
+ if (cache == NULL) {
+ caches_[type] = cache = new SubCache(type);
+ }
+ return cache->Get(input);
+}
+
+
+Address TranscendentalCache::cache_array_address() {
+ return reinterpret_cast<Address>(caches_);
+}
+
+
+double TranscendentalCache::SubCache::Calculate(double input) {
+ switch (type_) {
+ case ACOS:
+ return acos(input);
+ case ASIN:
+ return asin(input);
+ case ATAN:
+ return atan(input);
+ case COS:
+ return cos(input);
+ case EXP:
+ return exp(input);
+ case LOG:
+ return log(input);
+ case SIN:
+ return sin(input);
+ case TAN:
+ return tan(input);
+ default:
+ return 0.0; // Never happens.
+ }
+}
+
+
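+// Direct-mapped lookup: Converter reinterprets the 64-bit input double
+// as two 32-bit integers, which both feed the hash and serve as the tag
+// compared on lookup. A colliding input simply overwrites its bucket,
+// so the cache never grows and needs no eviction policy. Note that the
+// allocation of the result HeapNumber can fail; the failure is
+// propagated as a MaybeObject for the caller to retry.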
+MaybeObject* TranscendentalCache::SubCache::Get(double input) {
+ Converter c;
+ c.dbl = input;
+ int hash = Hash(c);
+ Element e = elements_[hash];
+ if (e.in[0] == c.integers[0] &&
+ e.in[1] == c.integers[1]) {
+ ASSERT(e.output != NULL);
+ isolate_->counters()->transcendental_cache_hit()->Increment();
+ return e.output;
+ }
+ double answer = Calculate(input);
+ isolate_->counters()->transcendental_cache_miss()->Increment();
+ Object* heap_number;
+ { MaybeObject* maybe_heap_number =
+ isolate_->heap()->AllocateHeapNumber(answer);
+ if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
+ }
+ elements_[hash].in[0] = c.integers[0];
+ elements_[hash].in[1] = c.integers[1];
+ elements_[hash].output = heap_number;
+ return heap_number;
+}
+
+
+Heap* _inline_get_heap_() {
+ return HEAP;
+}
+
+
+void MarkCompactCollector::SetMark(HeapObject* obj) {
+ tracer_->increment_marked_count();
+#ifdef DEBUG
+ UpdateLiveObjectCount(obj);
+#endif
+ obj->SetMark();
+}
+
+
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
String* constructor = GetConstructorNameForHeapProfile(
JSObject::cast(js_obj));
// Differentiate Object and Array instances.
- if (fine_grain && (constructor == Heap::Object_symbol() ||
- constructor == Heap::Array_symbol())) {
+ if (fine_grain && (constructor == HEAP->Object_symbol() ||
+ constructor == HEAP->Array_symbol())) {
return JSObjectsCluster(constructor, obj);
} else {
return JSObjectsCluster(constructor);
}
} else if (obj->IsString()) {
- return JSObjectsCluster(Heap::String_symbol());
+ return JSObjectsCluster(HEAP->String_symbol());
} else if (obj->IsJSGlobalPropertyCell()) {
return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
} else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
int size = obj->Size();
// If 'properties' and 'elements' are non-empty (thus, non-shared),
// take their size into account.
- if (obj->properties() != Heap::empty_fixed_array()) {
+ if (obj->properties() != HEAP->empty_fixed_array()) {
size += obj->properties()->Size();
}
- if (obj->elements() != Heap::empty_fixed_array()) {
+ if (obj->elements() != HEAP->empty_fixed_array()) {
size += obj->elements()->Size();
}
// For functions, also account non-empty context and literals sizes.
HeapStringAllocator allocator;
StringStream stream(&allocator);
cluster.Print(&stream);
- LOG(HeapSampleJSRetainersEvent(
+ LOG(ISOLATE,
+ HeapSampleJSRetainersEvent(
*(stream.ToCString()), *(retainers.ToCString())));
}
};
}
-HeapProfiler* HeapProfiler::singleton_ = NULL;
-
HeapProfiler::HeapProfiler()
: snapshots_(new HeapSnapshotsCollection()),
next_snapshot_uid_(1) {
void HeapProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (singleton_ == NULL) {
- singleton_ = new HeapProfiler();
+ Isolate* isolate = Isolate::Current();
+ if (isolate->heap_profiler() == NULL) {
+ isolate->set_heap_profiler(new HeapProfiler());
}
#endif
}
void HeapProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- delete singleton_;
- singleton_ = NULL;
+ Isolate* isolate = Isolate::Current();
+ delete isolate->heap_profiler();
+ isolate->set_heap_profiler(NULL);
#endif
}
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control) {
- ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name, type, control);
+ ASSERT(Isolate::Current()->heap_profiler() != NULL);
+ return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
+ type,
+ control);
}
HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
int type,
v8::ActivityControl* control) {
- ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name, type, control);
+ ASSERT(Isolate::Current()->heap_profiler() != NULL);
+ return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
+ type,
+ control);
}
void HeapProfiler::DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
- ASSERT(singleton_ != NULL);
ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
- if (singleton_->wrapper_callbacks_.length() <= class_id) {
- singleton_->wrapper_callbacks_.AddBlock(
- NULL, class_id - singleton_->wrapper_callbacks_.length() + 1);
+ if (wrapper_callbacks_.length() <= class_id) {
+ wrapper_callbacks_.AddBlock(
+ NULL, class_id - wrapper_callbacks_.length() + 1);
}
- singleton_->wrapper_callbacks_[class_id] = callback;
+ wrapper_callbacks_[class_id] = callback;
}
v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
uint16_t class_id, Object** wrapper) {
- ASSERT(singleton_ != NULL);
- if (singleton_->wrapper_callbacks_.length() <= class_id) return NULL;
- return singleton_->wrapper_callbacks_[class_id](
+ if (wrapper_callbacks_.length() <= class_id) return NULL;
+ return wrapper_callbacks_[class_id](
class_id, Utils::ToLocal(Handle<Object>(wrapper)));
}
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
}
case HeapSnapshot::kAggregated: {
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
AggregatedHeapSnapshot agg_snapshot;
AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
generator.GenerateSnapshot();
int HeapProfiler::GetSnapshotsCount() {
- ASSERT(singleton_ != NULL);
- return singleton_->snapshots_->snapshots()->length();
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ return profiler->snapshots_->snapshots()->length();
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- ASSERT(singleton_ != NULL);
- return singleton_->snapshots_->snapshots()->at(index);
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ return profiler->snapshots_->snapshots()->at(index);
}
HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- ASSERT(singleton_ != NULL);
- return singleton_->snapshots_->GetSnapshot(uid);
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ return profiler->snapshots_->GetSnapshot(uid);
}
void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- ASSERT(singleton_ != NULL);
- singleton_->snapshots_->ObjectMoveEvent(from, to);
+ snapshots_->ObjectMoveEvent(from, to);
}
HeapStringAllocator allocator;
StringStream stream(&allocator);
cluster.Print(&stream);
- LOG(HeapSampleJSConstructorEvent(*(stream.ToCString()),
+ LOG(ISOLATE,
+ HeapSampleJSConstructorEvent(*(stream.ToCString()),
number_and_size.number(),
number_and_size.bytes()));
}
aggregator_(NULL) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
- Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ HEAP->IterateRoots(&extractor, VISIT_ONLY_STRONG);
}
String* constructor = GetConstructorNameForHeapProfile(JSObject::cast(obj));
SmartPointer<char> s_name(
constructor->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- LOG(HeapSampleJSProducerEvent(GetConstructorName(*s_name),
+ LOG(ISOLATE,
+ HeapSampleJSProducerEvent(GetConstructorName(*s_name),
reinterpret_cast<Address*>(trace)));
}
void HeapProfiler::WriteSample() {
- LOG(HeapSampleBeginEvent("Heap", "allocated"));
- LOG(HeapSampleStats(
- "Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));
+ Isolate* isolate = Isolate::Current();
+ LOG(isolate, HeapSampleBeginEvent("Heap", "allocated"));
+ LOG(isolate,
+ HeapSampleStats(
+ "Heap", "allocated", HEAP->CommittedMemory(), HEAP->SizeOfObjects()));
AggregatedHeapSnapshot snapshot;
AggregatedHeapSnapshotGenerator generator(&snapshot);
i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
++i) {
if (info[i].bytes() > 0) {
- LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+ LOG(isolate,
+ HeapSampleItemEvent(info[i].name(), info[i].number(),
info[i].bytes()));
}
}
snapshot.js_cons_profile()->PrintStats();
snapshot.js_retainer_profile()->PrintStats();
- GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
- StackWeakReferenceCallback);
+ isolate->global_handles()->IterateWeakRoots(PrintProducerStackTrace,
+ StackWeakReferenceCallback);
- LOG(HeapSampleEndEvent("Heap", "allocated"));
+ LOG(isolate, HeapSampleEndEvent("Heap", "allocated"));
}
}
-bool ProducerHeapProfile::can_log_ = false;
-
void ProducerHeapProfile::Setup() {
can_log_ = true;
}
stack[i++] = it.frame()->pc();
}
stack[i] = NULL;
- Handle<Object> handle = GlobalHandles::Create(obj);
- GlobalHandles::MakeWeak(handle.location(),
- static_cast<void*>(stack.start()),
- StackWeakReferenceCallback);
+ Handle<Object> handle = isolate_->global_handles()->Create(obj);
+ isolate_->global_handles()->MakeWeak(handle.location(),
+ static_cast<void*>(stack.start()),
+ StackWeakReferenceCallback);
}
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
+#include "isolate.h"
#include "zone-inl.h"
namespace v8 {
class HeapSnapshot;
class HeapSnapshotsCollection;
-#define HEAP_PROFILE(Call) \
- do { \
- if (v8::internal::HeapProfiler::is_profiling()) { \
- v8::internal::HeapProfiler::Call; \
- } \
+#define HEAP_PROFILE(heap, call) \
+ do { \
+ v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
+ if (profiler != NULL && profiler->is_profiling()) { \
+ profiler->call; \
+ } \
} while (false)
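+// With the extra 'heap' argument the macro reaches the per-isolate
+// profiler via heap->isolate()->heap_profiler() instead of a
+// process-wide singleton. A typical call site:
+//   HEAP_PROFILE(heap, ObjectMoveEvent(source->address(),
+//                                      target->address()));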
#else
-#define HEAP_PROFILE(Call) ((void) 0)
+#define HEAP_PROFILE(heap, call) ((void) 0)
#endif // ENABLE_LOGGING_AND_PROFILING
// The HeapProfiler writes data to the log files, which can be postprocessed
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
- static void ObjectMoveEvent(Address from, Address to);
+ void ObjectMoveEvent(Address from, Address to);
- static void DefineWrapperClass(
+ void DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
- static v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
- Object** wrapper);
- static INLINE(bool is_profiling()) {
- return singleton_ != NULL && singleton_->snapshots_->is_tracking_objects();
+ v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
+ Object** wrapper);
+ INLINE(bool is_profiling()) {
+ return snapshots_->is_tracking_objects();
}
// Obsolete interface.
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
- static HeapProfiler* singleton_;
#endif // ENABLE_LOGGING_AND_PROFILING
};
// We use symbols that are illegal JS identifiers to identify special cases.
// Their actual value is irrelevant for us.
switch (special) {
- case ROOTS: return Heap::result_symbol();
- case GLOBAL_PROPERTY: return Heap::code_symbol();
- case CODE: return Heap::arguments_shadow_symbol();
- case SELF: return Heap::catch_var_symbol();
+ case ROOTS: return HEAP->result_symbol();
+ case GLOBAL_PROPERTY: return HEAP->code_symbol();
+ case CODE: return HEAP->arguments_shadow_symbol();
+ case SELF: return HEAP->catch_var_symbol();
default:
UNREACHABLE();
return NULL;
class HeapEntriesMap;
class HeapEntriesAllocator;
-class HeapSnapshot;
class AggregatedHeapSnapshotGenerator {
public:
};
-class ProducerHeapProfile : public AllStatic {
+class ProducerHeapProfile {
public:
- static void Setup();
- static void RecordJSObjectAllocation(Object* obj) {
+ void Setup();
+ void RecordJSObjectAllocation(Object* obj) {
if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
}
private:
- static void DoRecordJSObjectAllocation(Object* obj);
- static bool can_log_;
+ ProducerHeapProfile() : can_log_(false) { }
+
+ void DoRecordJSObjectAllocation(Object* obj);
+ Isolate* isolate_;
+ bool can_log_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(ProducerHeapProfile);
};
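+// Note on the class above: a ProducerHeapProfile can only be
+// constructed by its owning Isolate (private constructor plus the
+// friend declaration); isolate_ is left to be wired up by Isolate
+// itself, presumably during isolate initialization.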
#endif // ENABLE_LOGGING_AND_PROFILING
#include "arm/regexp-macro-assembler-arm.h"
#endif
-
namespace v8 {
namespace internal {
-String* Heap::hidden_symbol_;
-Object* Heap::roots_[Heap::kRootListLength];
-Object* Heap::global_contexts_list_;
-
-
-NewSpace Heap::new_space_;
-OldSpace* Heap::old_pointer_space_ = NULL;
-OldSpace* Heap::old_data_space_ = NULL;
-OldSpace* Heap::code_space_ = NULL;
-MapSpace* Heap::map_space_ = NULL;
-CellSpace* Heap::cell_space_ = NULL;
-LargeObjectSpace* Heap::lo_space_ = NULL;
-
static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;
-intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
-intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
-int Heap::old_gen_exhausted_ = false;
+static Mutex* gc_initializer_mutex = OS::CreateMutex();
-int Heap::amount_of_external_allocated_memory_ = 0;
-int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
+Heap::Heap()
+ : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
-static const int default_max_semispace_size_ = 2*MB;
-intptr_t Heap::max_old_generation_size_ = 192*MB;
-int Heap::initial_semispace_size_ = 128*KB;
-intptr_t Heap::code_range_size_ = 0;
-intptr_t Heap::max_executable_size_ = max_old_generation_size_;
+ reserved_semispace_size_(2*MB),
+ max_semispace_size_(2*MB),
+ initial_semispace_size_(128*KB),
+ max_old_generation_size_(192*MB),
+ max_executable_size_(max_old_generation_size_),
+ code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
-static const int default_max_semispace_size_ = 16*MB;
-intptr_t Heap::max_old_generation_size_ = 1*GB;
-int Heap::initial_semispace_size_ = 1*MB;
-intptr_t Heap::code_range_size_ = 512*MB;
-intptr_t Heap::max_executable_size_ = 256*MB;
-#else
-static const int default_max_semispace_size_ = 8*MB;
-intptr_t Heap::max_old_generation_size_ = 512*MB;
-int Heap::initial_semispace_size_ = 512*KB;
-intptr_t Heap::code_range_size_ = 0;
-intptr_t Heap::max_executable_size_ = 128*MB;
-#endif
-
-// Allow build-time customization of the max semispace size. Building
-// V8 with snapshots and a non-default max semispace size is much
-// easier if you can define it as part of the build environment.
-#if defined(V8_MAX_SEMISPACE_SIZE)
-int Heap::max_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
+ reserved_semispace_size_(16*MB),
+ max_semispace_size_(16*MB),
+ initial_semispace_size_(1*MB),
+ max_old_generation_size_(1*GB),
+ max_executable_size_(256*MB),
+ code_range_size_(512*MB),
#else
-int Heap::max_semispace_size_ = default_max_semispace_size_;
+ reserved_semispace_size_(8*MB),
+ max_semispace_size_(8*MB),
+ initial_semispace_size_(512*KB),
+ max_old_generation_size_(512*MB),
+ max_executable_size_(128*MB),
+ code_range_size_(0),
#endif
-
-// The snapshot semispace size will be the default semispace size if
-// snapshotting is used and will be the requested semispace size as
-// set up by ConfigureHeap otherwise.
-int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
-
-List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
-List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
-
-GCCallback Heap::global_gc_prologue_callback_ = NULL;
-GCCallback Heap::global_gc_epilogue_callback_ = NULL;
-HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
-
// Variables set based on semispace_size_ and old_generation_size_ in
-// ConfigureHeap.
-
+// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
-int Heap::survived_since_last_expansion_ = 0;
-intptr_t Heap::external_allocation_limit_ = 0;
-
-Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
-
-int Heap::mc_count_ = 0;
-int Heap::ms_count_ = 0;
-unsigned int Heap::gc_count_ = 0;
-
-GCTracer* Heap::tracer_ = NULL;
-
-int Heap::unflattened_strings_length_ = 0;
-
-int Heap::always_allocate_scope_depth_ = 0;
-int Heap::linear_allocation_scope_depth_ = 0;
-int Heap::contexts_disposed_ = 0;
-
-int Heap::young_survivors_after_last_gc_ = 0;
-int Heap::high_survival_rate_period_length_ = 0;
-double Heap::survival_rate_ = 0;
-Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
-Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
-
+ survived_since_last_expansion_(0),
+ always_allocate_scope_depth_(0),
+ linear_allocation_scope_depth_(0),
+ contexts_disposed_(0),
+ new_space_(this),
+ old_pointer_space_(NULL),
+ old_data_space_(NULL),
+ code_space_(NULL),
+ map_space_(NULL),
+ cell_space_(NULL),
+ lo_space_(NULL),
+ gc_state_(NOT_IN_GC),
+ mc_count_(0),
+ ms_count_(0),
+ gc_count_(0),
+ unflattened_strings_length_(0),
#ifdef DEBUG
-bool Heap::allocation_allowed_ = true;
-
-int Heap::allocation_timeout_ = 0;
-bool Heap::disallow_allocation_failure_ = false;
+ allocation_allowed_(true),
+ allocation_timeout_(0),
+ disallow_allocation_failure_(false),
+ debug_utils_(NULL),
#endif // DEBUG
+ old_gen_promotion_limit_(kMinimumPromotionLimit),
+ old_gen_allocation_limit_(kMinimumAllocationLimit),
+ external_allocation_limit_(0),
+ amount_of_external_allocated_memory_(0),
+ amount_of_external_allocated_memory_at_last_global_gc_(0),
+ old_gen_exhausted_(false),
+ hidden_symbol_(NULL),
+ global_gc_prologue_callback_(NULL),
+ global_gc_epilogue_callback_(NULL),
+ gc_safe_size_of_old_object_(NULL),
+ tracer_(NULL),
+ young_survivors_after_last_gc_(0),
+ high_survival_rate_period_length_(0),
+ survival_rate_(0),
+ previous_survival_rate_trend_(Heap::STABLE),
+ survival_rate_trend_(Heap::STABLE),
+ max_gc_pause_(0),
+ max_alive_after_gc_(0),
+ min_in_mutator_(kMaxInt),
+ alive_after_last_gc_(0),
+ last_gc_end_timestamp_(0.0),
+ page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
+ number_idle_notifications_(0),
+ last_idle_notification_gc_count_(0),
+ last_idle_notification_gc_count_init_(false),
+ configured_(false),
+ is_safe_to_read_maps_(true) {
+ // Allow build-time customization of the max semispace size. Building
+ // V8 with snapshots and a non-default max semispace size is much
+ // easier if you can define it as part of the build environment.
+#if defined(V8_MAX_SEMISPACE_SIZE)
+ max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
+#endif
+
+ memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
+ global_contexts_list_ = NULL;
+ mark_compact_collector_.heap_ = this;
+ external_string_table_.heap_ = this;
+}
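+
+// Note the two explicit back pointers at the end of the constructor:
+// MarkCompactCollector and ExternalStringTable are embedded by value in
+// Heap, so they learn their owning heap here rather than through a
+// constructor argument.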
-intptr_t GCTracer::alive_after_last_gc_ = 0;
-double GCTracer::last_gc_end_timestamp_ = 0.0;
-int GCTracer::max_gc_pause_ = 0;
-intptr_t GCTracer::max_alive_after_gc_ = 0;
-int GCTracer::min_in_mutator_ = kMaxInt;
intptr_t Heap::Capacity() {
if (!HasBeenSetup()) return 0;
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetup()) return 0;
- return MemoryAllocator::SizeExecutable();
+ return isolate()->memory_allocator()->SizeExecutable();
}
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
- ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
MapWord map_word = object->map_word();
map_word.ClearMark();
map_word.ClearOverflow();
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
- ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
- ASSERT(MarkCompactCollector::are_map_pointers_encoded());
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
uint32_t marker = Memory::uint32_at(object->address());
if (marker == MarkCompactCollector::kSingleFreeEncoding) {
return kIntSize;
return Memory::int_at(object->address() + kIntSize);
} else {
MapWord map_word = object->map_word();
- Address map_address = map_word.DecodeMapAddress(Heap::map_space());
+ Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
return object->SizeFromMap(map);
}
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
- Counters::gc_compactor_caused_by_request.Increment();
+ isolate_->counters()->gc_compactor_caused_by_request()->Increment();
return MARK_COMPACTOR;
}
// Is enough data promoted to justify a global GC?
if (OldGenerationPromotionLimitReached()) {
- Counters::gc_compactor_caused_by_promoted_data.Increment();
+ isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
return MARK_COMPACTOR;
}
// Have allocation in OLD and LO failed?
if (old_gen_exhausted_) {
- Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+ isolate_->counters()->
+ gc_compactor_caused_by_oldspace_exhaustion()->Increment();
return MARK_COMPACTOR;
}
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
- if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
- Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+ if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+ isolate_->counters()->
+ gc_compactor_caused_by_oldspace_exhaustion()->Increment();
return MARK_COMPACTOR;
}
if (!FLAG_trace_gc_verbose) return;
PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
", available: %8" V8_PTR_PREFIX "d\n",
- MemoryAllocator::Size(),
- MemoryAllocator::Available());
+ isolate_->memory_allocator()->Size(),
+ isolate_->memory_allocator()->Available());
PrintF("New space, used: %8" V8_PTR_PREFIX "d"
", available: %8" V8_PTR_PREFIX "d\n",
Heap::new_space_.Size(),
void Heap::GarbageCollectionPrologue() {
- TranscendentalCache::Clear();
+ isolate_->transcendental_cache()->Clear();
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
Verify();
}
- if (FLAG_print_global_handles) GlobalHandles::Print();
+ if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
- Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));
+ isolate_->counters()->alive_after_last_gc()->Set(
+ static_cast<int>(SizeOfObjects()));
- Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
- Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
+ isolate_->counters()->symbol_table_capacity()->Set(
+ symbol_table()->Capacity());
+ isolate_->counters()->number_of_symbols()->Set(
+ symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::AfterGarbageCollection();
+ isolate_->debug()->AfterGarbageCollection();
#endif
}
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- MarkCompactCollector::SetForceCompaction(force_compaction);
+ mark_compact_collector_.SetForceCompaction(force_compaction);
CollectGarbage(OLD_POINTER_SPACE);
- MarkCompactCollector::SetForceCompaction(false);
+ mark_compact_collector_.SetForceCompaction(false);
}
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- MarkCompactCollector::SetForceCompaction(true);
+ mark_compact_collector()->SetForceCompaction(true);
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until next
break;
}
}
- MarkCompactCollector::SetForceCompaction(false);
+ mark_compact_collector()->SetForceCompaction(false);
}
bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
// The VM is in the GC state until exiting this function.
- VMState state(GC);
+ VMState state(isolate_, GC);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
bool next_gc_likely_to_collect_more = false;
- { GCTracer tracer;
+ { GCTracer tracer(this);
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
tracer.set_collector(collector);
HistogramTimer* rate = (collector == SCAVENGER)
- ? &Counters::gc_scavenger
- : &Counters::gc_compactor;
+ ? isolate_->counters()->gc_scavenger()
+ : isolate_->counters()->gc_compactor();
rate->Start();
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, &tracer);
void Heap::PerformScavenge() {
- GCTracer tracer;
+ GCTracer tracer(this);
PerformGarbageCollection(SCAVENGER, &tracer);
}
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
public:
- SymbolTableVerifier() { }
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
static void VerifySymbolTable() {
#ifdef DEBUG
SymbolTableVerifier verifier;
- Heap::symbol_table()->IterateElements(&verifier);
+ HEAP->symbol_table()->IterateElements(&verifier);
#endif // DEBUG
}
void Heap::ClearJSFunctionResultCaches() {
- if (Bootstrapper::IsActive()) return;
+ if (isolate_->bootstrapper()->IsActive()) return;
Object* context = global_contexts_list_;
while (!context->IsUndefined()) {
}
+
void Heap::ClearNormalizedMapCaches() {
- if (Bootstrapper::IsActive()) return;
+ if (isolate_->bootstrapper()->IsActive()) return;
Object* context = global_contexts_list_;
while (!context->IsUndefined()) {
bool next_gc_likely_to_collect_more = false;
if (collector != SCAVENGER) {
- PROFILE(CodeMovingGCEvent());
+ PROFILE(isolate_, CodeMovingGCEvent());
}
VerifySymbolTable();
UpdateSurvivalRateTrend(start_new_space_size);
}
- Counters::objs_since_last_young.Set(0);
+ isolate_->counters()->objs_since_last_young()->Set(0);
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
- GlobalHandles::PostGarbageCollectionProcessing();
+ isolate_->global_handles()->PostGarbageCollectionProcessing();
}
// Update relocatables.
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
- LOG(ResourceEvent("markcompact", "begin"));
+ LOG(isolate_, ResourceEvent("markcompact", "begin"));
- MarkCompactCollector::Prepare(tracer);
+ mark_compact_collector_.Prepare(tracer);
- bool is_compacting = MarkCompactCollector::IsCompacting();
+ bool is_compacting = mark_compact_collector_.IsCompacting();
if (is_compacting) {
mc_count_++;
MarkCompactPrologue(is_compacting);
- MarkCompactCollector::CollectGarbage();
+ is_safe_to_read_maps_ = false;
+ mark_compact_collector_.CollectGarbage();
+ is_safe_to_read_maps_ = true;
- LOG(ResourceEvent("markcompact", "end"));
+ LOG(isolate_, ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
Shrink();
- Counters::objs_since_last_full.Set(0);
+ isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
}
void Heap::MarkCompactPrologue(bool is_compacting) {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
- KeyedLookupCache::Clear();
- ContextSlotCache::Clear();
- DescriptorLookupCache::Clear();
+ isolate_->keyed_lookup_cache()->Clear();
+ isolate_->context_slot_cache()->Clear();
+ isolate_->descriptor_lookup_cache()->Clear();
- CompilationCache::MarkCompactPrologue();
+ isolate_->compilation_cache()->MarkCompactPrologue();
CompletelyClearInstanceofCache();
// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
public:
+ explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
void VisitPointer(Object** p) { ScavengePointer(p); }
private:
void ScavengePointer(Object** p) {
Object* object = *p;
- if (!Heap::InNewSpace(object)) return;
+ if (!heap_->InNewSpace(object)) return;
Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
-};
-
-// A queue of objects promoted during scavenge. Each object is accompanied
-// by it's size to avoid dereferencing a map pointer for scanning.
-class PromotionQueue {
- public:
- void Initialize(Address start_address) {
- front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
- }
-
- bool is_empty() { return front_ <= rear_; }
-
- void insert(HeapObject* target, int size) {
- *(--rear_) = reinterpret_cast<intptr_t>(target);
- *(--rear_) = size;
- // Assert no overflow into live objects.
- ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
- }
-
- void remove(HeapObject** target, int* size) {
- *target = reinterpret_cast<HeapObject*>(*(--front_));
- *size = static_cast<int>(*(--front_));
- // Assert no underflow.
- ASSERT(front_ >= rear_);
- }
-
- private:
- // The front of the queue is higher in memory than the rear.
- intptr_t* front_;
- intptr_t* rear_;
+ Heap* heap_;
};
-// Shared state read by the scavenge collector and set by ScavengeObject.
-static PromotionQueue promotion_queue;
-
-
#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
void VisitPointers(Object** start, Object**end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
- ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
+ ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
}
}
}
// Verify that there are no pointers to new space in spaces where we
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(Heap::code_space());
+ HeapObjectIterator code_it(HEAP->code_space());
for (HeapObject* object = code_it.next();
object != NULL; object = code_it.next())
object->Iterate(&v);
- HeapObjectIterator data_it(Heap::old_data_space());
+ HeapObjectIterator data_it(HEAP->old_data_space());
for (HeapObject* object = data_it.next();
object != NULL; object = data_it.next())
object->Iterate(&v);
gc_state_ = SCAVENGE;
- Page::FlipMeaningOfInvalidatedWatermarkFlag();
+ Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
VerifyPageWatermarkValidity(map_space_, ALL_VALID);
map_space_->FlushTopPageWatermark();
// Implements Cheney's copying algorithm
- LOG(ResourceEvent("scavenge", "begin"));
+ LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Clear descriptor cache.
- DescriptorLookupCache::Clear();
+ isolate_->descriptor_lookup_cache()->Clear();
// Used for updating survived_since_last_expansion_ at function end.
intptr_t survived_watermark = PromotedSpaceSize();
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceLow();
- promotion_queue.Initialize(new_space_.ToSpaceHigh());
+ promotion_queue_.Initialize(new_space_.ToSpaceHigh());
- ScavengeVisitor scavenge_visitor;
+ is_safe_to_read_maps_ = false;
+ ScavengeVisitor scavenge_visitor(this);
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateDirtyRegions(old_pointer_space_,
- &IteratePointersInDirtyRegion,
+ &Heap::IteratePointersInDirtyRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);
&UpdateNewSpaceReferenceInExternalStringTableEntry);
LiveObjectList::UpdateReferencesForScavengeGC();
- RuntimeProfiler::UpdateSamplesAfterScavenge();
+ isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
ASSERT(new_space_front == new_space_.top());
+ is_safe_to_read_maps_ = true;
+
// Set age mark.
new_space_.set_age_mark(new_space_.top());
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
- LOG(ResourceEvent("scavenge", "end"));
+ LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
}
-String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
- FinalizeExternalString(String::cast(*p));
+ heap->FinalizeExternalString(String::cast(*p));
return NULL;
}
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
- ExternalStringTable::Verify();
+ external_string_table_.Verify();
- if (ExternalStringTable::new_space_strings_.is_empty()) return;
+ if (external_string_table_.new_space_strings_.is_empty()) return;
- Object** start = &ExternalStringTable::new_space_strings_[0];
- Object** end = start + ExternalStringTable::new_space_strings_.length();
+ Object** start = &external_string_table_.new_space_strings_[0];
+ Object** end = start + external_string_table_.new_space_strings_.length();
Object** last = start;
for (Object** p = start; p < end; ++p) {
- ASSERT(Heap::InFromSpace(*p));
- String* target = updater_func(p);
+ ASSERT(InFromSpace(*p));
+ String* target = updater_func(this, p);
if (target == NULL) continue;
ASSERT(target->IsExternalString());
- if (Heap::InNewSpace(target)) {
+ if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
*last = target;
++last;
} else {
// String got promoted. Move it to the old string list.
- ExternalStringTable::AddOldString(target);
+ external_string_table_.AddOldString(target);
}
}
ASSERT(last <= end);
- ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
+ external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}
-static Object* ProcessFunctionWeakReferences(Object* function,
+static Object* ProcessFunctionWeakReferences(Heap* heap,
+ Object* function,
WeakObjectRetainer* retainer) {
- Object* head = Heap::undefined_value();
+ Object* head = heap->undefined_value();
JSFunction* tail = NULL;
Object* candidate = function;
- while (!candidate->IsUndefined()) {
+ while (candidate != heap->undefined_value()) {
// Check whether to keep the candidate in the list.
JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head->IsUndefined()) {
+ if (head == heap->undefined_value()) {
// First element in the list.
head = candidate_function;
} else {
// Terminate the list if there is one or more elements.
if (tail != NULL) {
- tail->set_next_function_link(Heap::undefined_value());
+ tail->set_next_function_link(heap->undefined_value());
}
return head;
Object* head = undefined_value();
Context* tail = NULL;
Object* candidate = global_contexts_list_;
- while (!candidate->IsUndefined()) {
+ while (candidate != undefined_value()) {
// Check whether to keep the candidate in the list.
Context* candidate_context = reinterpret_cast<Context*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head->IsUndefined()) {
+ if (head == undefined_value()) {
// First element in the list.
head = candidate_context;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
- tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+ tail->set_unchecked(this,
+ Context::NEXT_CONTEXT_LINK,
candidate_context,
UPDATE_WRITE_BARRIER);
}
// Process the weak list of optimized functions for the context.
Object* function_list_head =
ProcessFunctionWeakReferences(
+ this,
candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
retainer);
- candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
+ candidate_context->set_unchecked(this,
+ Context::OPTIMIZED_FUNCTIONS_LIST,
function_list_head,
UPDATE_WRITE_BARRIER);
}
// Terminate the list if there is one or more elements.
if (tail != NULL) {
- tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+ tail->set_unchecked(this,
+ Context::NEXT_CONTEXT_LINK,
Heap::undefined_value(),
UPDATE_WRITE_BARRIER);
}
// Update the head of the list of contexts.
- Heap::global_contexts_list_ = head;
+ global_contexts_list_ = head;
}
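Both weak-reference walks above follow one pattern: traverse an intrusive singly linked list, ask the retainer whether to keep each node, and relink the survivors through head/tail. A hedged standalone sketch of that pattern (the node type and retainer predicate are illustrative, not V8's):

#include <cassert>
#include <cstddef>

struct Node {
  int value;
  Node* next;
};

// Keep nodes accepted by `retain`; return the new head. Mirrors the
// head/tail relinking in ProcessFunctionWeakReferences above.
static Node* FilterList(Node* list, bool (*retain)(Node*)) {
  Node* head = NULL;
  Node* tail = NULL;
  for (Node* candidate = list; candidate != NULL; candidate = candidate->next) {
    if (retain(candidate)) {
      if (head == NULL) {
        head = candidate;        // First element in the list.
      } else {
        tail->next = candidate;  // Subsequent elements in the list.
      }
      tail = candidate;
    }
  }
  if (tail != NULL) tail->next = NULL;  // Terminate the list.
  return head;
}

int main() {
  Node c = {3, NULL}, b = {2, &c}, a = {1, &b};
  Node* head = FilterList(&a, [](Node* n) { return n->value != 2; });
  assert(head == &a && a.next == &c && c.next == NULL);
  return 0;
}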
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
public:
- static inline void VisitPointer(Object** p) {
+ static inline void VisitPointer(Heap* heap, Object** p) {
Object* object = *p;
- if (!Heap::InNewSpace(object)) return;
+ if (!heap->InNewSpace(object)) return;
Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
}
// Promote and process all the to-be-promoted objects.
- while (!promotion_queue.is_empty()) {
+ while (!promotion_queue_.is_empty()) {
HeapObject* target;
int size;
- promotion_queue.remove(&target, &size);
+ promotion_queue_.remove(&target, &size);
// Promoted object might already be partially visited
// during dirty regions iteration. Thus we search specifically
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- static void RecordCopiedObject(HeapObject* obj) {
+ static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
should_record = should_record || FLAG_log_gc;
#endif
if (should_record) {
- if (Heap::new_space()->Contains(obj)) {
- Heap::new_space()->RecordAllocation(obj);
+ if (heap->new_space()->Contains(obj)) {
+ heap->new_space()->RecordAllocation(obj);
} else {
- Heap::new_space()->RecordPromotion(obj);
+ heap->new_space()->RecordPromotion(obj);
}
}
}
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
- INLINE(static HeapObject* MigrateObject(HeapObject* source,
+ INLINE(static HeapObject* MigrateObject(Heap* heap,
+ HeapObject* source,
HeapObject* target,
int size)) {
// Copy the content of source to target.
- Heap::CopyBlock(target->address(), source->address(), size);
+ heap->CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Update NewSpace stats if necessary.
- RecordCopiedObject(target);
+ RecordCopiedObject(heap, target);
#endif
- HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
+ HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
- if (Logger::is_logging() || CpuProfiler::is_profiling()) {
+ Isolate* isolate = heap->isolate();
+ if (isolate->logger()->is_logging() ||
+ isolate->cpu_profiler()->is_profiling()) {
if (target->IsSharedFunctionInfo()) {
- PROFILE(SharedFunctionInfoMoveEvent(
+ PROFILE(isolate, SharedFunctionInfoMoveEvent(
source->address(), target->address()));
}
}
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
- if (Heap::ShouldBePromoted(object->address(), object_size)) {
+ Heap* heap = map->heap();
+ if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
- maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
+ maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
} else {
if (object_contents == DATA_OBJECT) {
- maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
+ maybe_result = heap->old_data_space()->AllocateRaw(object_size);
} else {
- maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
+ maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
}
}
Object* result = NULL; // Initialization to please compiler.
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- *slot = MigrateObject(object, target, object_size);
+ *slot = MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
- promotion_queue.insert(target, object_size);
+ heap->promotion_queue()->insert(target, object_size);
}
- Heap::tracer()->increment_promoted_objects_size(object_size);
+ heap->tracer()->increment_promoted_objects_size(object_size);
return;
}
}
Object* result =
- Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
- *slot = MigrateObject(object, HeapObject::cast(result), object_size);
+ heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
+ *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
return;
}
HeapObject* object) {
ASSERT(IsShortcutCandidate(map->instance_type()));
- if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
+ if (ConsString::cast(object)->unchecked_second() ==
+ map->heap()->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
*slot = first;
- if (!Heap::InNewSpace(first)) {
+ if (!map->heap()->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
+ ASSERT(HEAP->InFromSpace(object));
MapWord first_word = object->map_word();
ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
}
-void Heap::ScavengePointer(HeapObject** p) {
- ScavengeObject(p, *p);
-}
-
-
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
- reinterpret_cast<Map*>(result)->
- set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+ reinterpret_cast<Map*>(result)->set_visitor_id(
+ StaticVisitorBase::GetVisitorId(instance_type, instance_size));
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_null_value(obj);
+ Oddball::cast(obj)->set_kind(Oddball::kNull);
// Allocate the empty descriptor array.
{ MaybeObject* maybe_obj = AllocateEmptyFixedArray();
}
set_message_object_map(Map::cast(obj));
- ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+ ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
MaybeObject* Heap::CreateOddball(const char* to_string,
- Object* to_number) {
+ Object* to_number,
+ byte kind) {
Object* result;
{ MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- return Oddball::cast(result)->Initialize(to_string, to_number);
+ return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}
}
set_neander_map(Map::cast(obj));
- { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
+ { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
if (!maybe_obj->ToObject(&obj)) return false;
}
Object* elements;
if (!maybe_obj->ToObject(&obj)) return false;
}
set_undefined_value(obj);
+ Oddball::cast(obj)->set_kind(Oddball::kUndefined);
ASSERT(!InNewSpace(undefined_value()));
// Allocate initial symbol table.
// Allocate the null_value
{ MaybeObject* maybe_obj =
- Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
+ Oddball::cast(null_value())->Initialize("null",
+ Smi::FromInt(0),
+ Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
- { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
+ { MaybeObject* maybe_obj = CreateOddball("true",
+ Smi::FromInt(1),
+ Oddball::kTrue);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_true_value(obj);
- { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
+ { MaybeObject* maybe_obj = CreateOddball("false",
+ Smi::FromInt(0),
+ Oddball::kFalse);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_false_value(obj);
- { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
+ { MaybeObject* maybe_obj = CreateOddball("hole",
+ Smi::FromInt(-1),
+ Oddball::kTheHole);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_the_hole_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-4));
+ Smi::FromInt(-4),
+ Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_arguments_marker(obj);
- { MaybeObject* maybe_obj =
- CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
+ { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
+ Smi::FromInt(-2),
+ Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
- { MaybeObject* maybe_obj =
- CreateOddball("termination_exception", Smi::FromInt(-3));
+ { MaybeObject* maybe_obj = CreateOddball("termination_exception",
+ Smi::FromInt(-3),
+ Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
{ MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
if (!maybe_obj->ToObject(&obj)) return false;
}
- { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
+ { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
+ obj);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_intrinsic_function_names(StringDictionary::cast(obj));
}
set_natives_source_cache(FixedArray::cast(obj));
- // Handling of script id generation is in Factory::NewScript.
+ // Handling of script id generation is in FACTORY->NewScript.
set_last_script_id(undefined_value());
// Initialize keyed lookup cache.
- KeyedLookupCache::Clear();
+ isolate_->keyed_lookup_cache()->Clear();
// Initialize context slot cache.
- ContextSlotCache::Clear();
+ isolate_->context_slot_cache()->Clear();
// Initialize descriptor cache.
- DescriptorLookupCache::Clear();
+ isolate_->descriptor_lookup_cache()->Clear();
// Initialize compilation cache.
- CompilationCache::Clear();
+ isolate_->compilation_cache()->Clear();
return true;
}
// Flush the number to string cache.
int len = number_string_cache()->length();
for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(i);
+ number_string_cache()->set_undefined(this, i);
}
}
MaybeObject* Heap::NumberToString(Object* number,
bool check_number_string_cache) {
- Counters::number_to_string_runtime.Increment();
+ isolate_->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
if (cached != undefined_value()) {
SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
share->set_name(name);
- Code* illegal = Builtins::builtin(Builtins::Illegal);
+ Code* illegal = isolate_->builtins()->builtin(Builtins::Illegal);
share->set_code(illegal);
share->set_scope_info(SerializedScopeInfo::Empty());
- Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* construct_stub = isolate_->builtins()->builtin(
+ Builtins::JSConstructStubGeneric);
share->set_construct_stub(construct_stub);
share->set_expected_nof_properties(0);
share->set_length(0);
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
+ Heap* heap,
uint32_t c1,
uint32_t c2) {
String* symbol;
// Numeric strings have a different hash algorithm not known by
// LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
+ heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
return symbol;
// Now we know the length is 2, so we might as well make use of that fact
// when building the new string.
} else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
+ { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
char* dest = SeqAsciiString::cast(result)->GetChars();
return result;
} else {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
+ { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
uc16* dest = SeqTwoByteString::cast(result)->GetChars();
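The `(c1 | c2) <= String::kMaxAsciiCharCodeU` test above, together with the `IsPowerOf2(String::kMaxAsciiCharCodeU + 1)` assertion, checks both character codes with a single compare: when the limit has the form 2^n - 1, a value exceeds it exactly when some bit at or above bit n is set, and OR-ing cannot hide such a bit. A small illustration (the constant's value here is an assumption):

#include <cassert>
#include <cstdint>

static const uint32_t kMaxAsciiCharCodeU = 0x7f;  // assumed: 2^7 - 1

// Both codes fit in 7 bits iff their OR does; valid only because
// kMaxAsciiCharCodeU + 1 is a power of two.
static bool BothAscii(uint32_t c1, uint32_t c2) {
  return (c1 | c2) <= kMaxAsciiCharCodeU;
}

int main() {
  assert(BothAscii('a', 'z'));
  assert(!BothAscii('a', 0x100));  // A high bit in either operand shows up.
  assert(!BothAscii(0x80, 0));
  return 0;
}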
if (length == 2) {
unsigned c1 = first->Get(0);
unsigned c2 = second->Get(0);
- return MakeOrFindTwoCharacterString(c1, c2);
+ return MakeOrFindTwoCharacterString(this, c1, c2);
}
bool first_is_ascii = first->IsAsciiRepresentation();
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
- Top::context()->mark_out_of_memory();
+ isolate()->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
is_ascii_data_in_two_byte_string =
first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
if (is_ascii_data_in_two_byte_string) {
- Counters::string_add_runtime_ext_to_ascii.Increment();
+ isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
}
}
char* dest = SeqAsciiString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
+ isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
return result;
}
int length = end - start;
if (length == 1) {
- return Heap::LookupSingleCharacterStringFromCode(
- buffer->Get(start));
+ return LookupSingleCharacterStringFromCode(buffer->Get(start));
} else if (length == 2) {
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. Check whether we already have the string in the symbol
// table to prevent creation of many unnecessary strings.
unsigned c1 = buffer->Get(start);
unsigned c2 = buffer->Get(start + 1);
- return MakeOrFindTwoCharacterString(c1, c2);
+ return MakeOrFindTwoCharacterString(this, c1, c2);
}
// Make an attempt to flatten the buffer to reduce access time.
ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- Top::context()->mark_out_of_memory();
+ isolate()->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- Top::context()->mark_out_of_memory();
+ isolate()->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
bool is_ascii = length <= kAsciiCheckLengthLimit &&
String::IsAscii(resource->data(), static_cast<int>(length));
Map* map = is_ascii ?
- Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
+ external_string_with_ascii_data_map() : external_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
if (code <= String::kMaxAsciiCharCode) {
- Object* value = Heap::single_character_string_cache()->get(code);
- if (value != Heap::undefined_value()) return value;
+ Object* value = single_character_string_cache()->get(code);
+ if (value != undefined_value()) return value;
char buffer[1];
buffer[0] = static_cast<char>(code);
MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
if (!maybe_result->ToObject(&result)) return maybe_result;
- Heap::single_character_string_cache()->set(code, result);
+ single_character_string_cache()->set(code, result);
return result;
}
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
+ { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* answer = String::cast(result);
// Initialize the object
HeapObject::cast(result)->set_map(code_map());
Code* code = Code::cast(result);
- ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+ ASSERT(!isolate_->code_range()->exists() ||
+ isolate_->code_range()->contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
code->set_flags(flags);
CopyBlock(new_addr, old_addr, obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
- ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+ ASSERT(!isolate_->code_range()->exists() ||
+ isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
return new_code;
}
memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
// Relocate the copy.
- ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+ ASSERT(!isolate_->code_range()->exists() ||
+ isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
#ifdef DEBUG
}
HeapObject::cast(result)->set_map(map);
#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile::RecordJSObjectAllocation(result);
+ isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
#endif
return result;
}
JSFunction::cast(callee)->shared()->strict_mode();
if (strict_mode_callee) {
boilerplate =
- Top::context()->global_context()->strict_mode_arguments_boilerplate();
+ isolate()->context()->global_context()->
+ strict_mode_arguments_boilerplate();
arguments_object_size = kArgumentsObjectSizeStrict;
} else {
- boilerplate = Top::context()->global_context()->arguments_boilerplate();
+ boilerplate =
+ isolate()->context()->global_context()->arguments_boilerplate();
arguments_object_size = kArgumentsObjectSize;
}
int instance_size = fun->shared()->CalculateInstanceSize();
int in_object_properties = fun->shared()->CalculateInObjectProperties();
Object* map_obj;
- { MaybeObject* maybe_map_obj =
- Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
+ { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
}
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
Object* value = descs->GetCallbacksObject(i);
- { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
+ { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
if (!maybe_value->ToObject(&value)) return maybe_value;
}
// Setup the global object as a normalized object.
global->set_map(new_map);
- global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
+ global->map()->set_instance_descriptors(empty_descriptor_array());
global->set_properties(dictionary);
// Make sure result is a global object with properties in dictionary.
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
- ASSERT(Heap::InNewSpace(clone));
+ ASSERT(InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
CopyBlock(HeapObject::cast(clone)->address(),
}
// Return the new clone.
#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile::RecordJSObjectAllocation(clone);
+ isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
#endif
return clone;
}
// Count the number of characters in the UTF-8 string and check if
// it is an ASCII string.
Access<ScannerConstants::Utf8Decoder>
- decoder(ScannerConstants::utf8_decoder());
+ decoder(isolate_->scanner_constants()->utf8_decoder());
decoder->Reset(string.start(), string.length());
int chars = 0;
while (decoder->has_more()) {
// Find the corresponding symbol map for strings.
Map* map = string->map();
- if (map == ascii_string_map()) return ascii_symbol_map();
- if (map == string_map()) return symbol_map();
- if (map == cons_string_map()) return cons_symbol_map();
- if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
- if (map == external_string_map()) return external_symbol_map();
- if (map == external_ascii_string_map()) return external_ascii_symbol_map();
+ if (map == ascii_string_map()) {
+ return ascii_symbol_map();
+ }
+ if (map == string_map()) {
+ return symbol_map();
+ }
+ if (map == cons_string_map()) {
+ return cons_symbol_map();
+ }
+ if (map == cons_ascii_string_map()) {
+ return cons_ascii_symbol_map();
+ }
+ if (map == external_string_map()) {
+ return external_symbol_map();
+ }
+ if (map == external_ascii_string_map()) {
+ return external_ascii_symbol_map();
+ }
if (map == external_string_with_ascii_data_map()) {
return external_symbol_with_ascii_data_map();
}
{ MaybeObject* maybe_obj = AllocateRawFixedArray(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- if (Heap::InNewSpace(obj)) {
+ if (InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
dst->set_map(map);
CopyBlock(dst->address() + kPointerSize,
array->set_map(fixed_array_map());
array->set_length(length);
// Initialize body.
- ASSERT(!Heap::InNewSpace(undefined_value()));
+ ASSERT(!InNewSpace(undefined_value()));
MemsetPointer(array->data_start(), undefined_value(), length);
return result;
}
MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
+ Heap* heap,
int length,
PretenureFlag pretenure,
Object* filler) {
ASSERT(length >= 0);
- ASSERT(Heap::empty_fixed_array()->IsFixedArray());
- if (length == 0) return Heap::empty_fixed_array();
+ ASSERT(heap->empty_fixed_array()->IsFixedArray());
+ if (length == 0) return heap->empty_fixed_array();
- ASSERT(!Heap::InNewSpace(filler));
+ ASSERT(!heap->InNewSpace(filler));
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map(Heap::fixed_array_map());
+ HeapObject::cast(result)->set_map(heap->fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+ return AllocateFixedArrayWithFiller(this,
+ length,
+ pretenure,
+ undefined_value());
}
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
+ return AllocateFixedArrayWithFiller(this,
+ length,
+ pretenure,
+ the_hole_value());
}
MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
MaybeObject* Heap::AllocateGlobalContext() {
Object* result;
{ MaybeObject* maybe_result =
- Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
+ AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
+ { MaybeObject* maybe_result = AllocateFixedArray(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
JSObject* extension,
bool is_catch_context) {
Object* result;
- { MaybeObject* maybe_result =
- Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
+ { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(is_catch_context ? catch_context_map() : context_map());
+ context->set_map(is_catch_context ? catch_context_map() :
+ context_map());
context->set_closure(previous->closure());
context->set_fcontext(previous->fcontext());
context->set_previous(previous);
MaybeObject* Heap::AllocateStruct(InstanceType type) {
Map* map;
switch (type) {
-#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
+#define MAKE_CASE(NAME, Name, name) \
+ case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
default:
AllocationSpace space =
(size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
Object* result;
- { MaybeObject* maybe_result = Heap::Allocate(map, space);
+ { MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Struct::cast(result)->InitializeBody(size);
static const int kIdlesBeforeMarkCompact = 8;
static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
static const unsigned int kGCsBetweenCleanup = 4;
- static int number_idle_notifications = 0;
- static unsigned int last_gc_count = gc_count_;
+
+ if (!last_idle_notification_gc_count_init_) {
+ last_idle_notification_gc_count_ = gc_count_;
+ last_idle_notification_gc_count_init_ = true;
+ }
bool uncommit = true;
bool finished = false;
// GCs have taken place. This allows another round of cleanup based
// on idle notifications if enough work has been carried out to
// provoke a number of garbage collections.
- if (gc_count_ - last_gc_count < kGCsBetweenCleanup) {
- number_idle_notifications =
- Min(number_idle_notifications + 1, kMaxIdleCount);
+ if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
+ number_idle_notifications_ =
+ Min(number_idle_notifications_ + 1, kMaxIdleCount);
} else {
- number_idle_notifications = 0;
- last_gc_count = gc_count_;
+ number_idle_notifications_ = 0;
+ last_idle_notification_gc_count_ = gc_count_;
}
- if (number_idle_notifications == kIdlesBeforeScavenge) {
+ if (number_idle_notifications_ == kIdlesBeforeScavenge) {
if (contexts_disposed_ > 0) {
- HistogramTimerScope scope(&Counters::gc_context);
+ HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(false);
} else {
CollectGarbage(NEW_SPACE);
}
new_space_.Shrink();
- last_gc_count = gc_count_;
- } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
+ last_idle_notification_gc_count_ = gc_count_;
+ } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
// Before doing the mark-sweep collections we clear the
// compilation cache to avoid hanging on to source code and
// generated code for cached functions.
- CompilationCache::Clear();
+ isolate_->compilation_cache()->Clear();
CollectAllGarbage(false);
new_space_.Shrink();
- last_gc_count = gc_count_;
+ last_idle_notification_gc_count_ = gc_count_;
- } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
+ } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
CollectAllGarbage(true);
new_space_.Shrink();
- last_gc_count = gc_count_;
+ last_idle_notification_gc_count_ = gc_count_;
+ number_idle_notifications_ = 0;
finished = true;
-
} else if (contexts_disposed_ > 0) {
if (FLAG_expose_gc) {
contexts_disposed_ = 0;
} else {
- HistogramTimerScope scope(&Counters::gc_context);
+ HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(false);
- last_gc_count = gc_count_;
+ last_idle_notification_gc_count_ = gc_count_;
}
// If this is the first idle notification, we reset the
// notification count to avoid letting idle notifications for
// context disposal garbage collections start a potentially too
// aggressive idle GC cycle.
- if (number_idle_notifications <= 1) {
- number_idle_notifications = 0;
+ if (number_idle_notifications_ <= 1) {
+ number_idle_notifications_ = 0;
uncommit = false;
}
- } else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
+ } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
// If we have received more than kIdlesBeforeMarkCompact idle
// notifications we do not perform any cleanup because we don't
// expect to gain much by doing so.
// Make sure that we have no pending context disposals and
// conditionally uncommit from space.
ASSERT(contexts_disposed_ == 0);
- if (uncommit) Heap::UncommitFromSpace();
+ if (uncommit) UncommitFromSpace();
return finished;
}
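The idle-notification logic above escalates from scavenges to mark-sweep to mark-compact as consecutive idle rounds accumulate, and resets the counter whenever enough collections have happened on their own. A compressed sketch of that state machine; kIdlesBeforeMarkCompact and kGCsBetweenCleanup match the constants above, while the scavenge and mark-sweep thresholds are assumed values and the GC calls are stubs:

#include <algorithm>
#include <cstdio>

static const int kIdlesBeforeScavenge = 4;     // assumed; not shown above
static const int kIdlesBeforeMarkSweep = 7;    // assumed; not shown above
static const int kIdlesBeforeMarkCompact = 8;
static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
static const unsigned kGCsBetweenCleanup = 4;

struct IdlePolicy {
  int idles;
  unsigned last_gc_count;
  IdlePolicy() : idles(0), last_gc_count(0) {}

  // Returns true once the most aggressive cleanup has been performed.
  bool Notify(unsigned gc_count) {
    if (gc_count - last_gc_count < kGCsBetweenCleanup) {
      idles = std::min(idles + 1, kMaxIdleCount);
    } else {
      idles = 0;  // Enough GCs happened on their own; start over.
      last_gc_count = gc_count;
    }
    if (idles == kIdlesBeforeScavenge) {
      std::puts("scavenge");
      last_gc_count = gc_count;
    } else if (idles == kIdlesBeforeMarkSweep) {
      std::puts("mark-sweep");
      last_gc_count = gc_count;
    } else if (idles == kIdlesBeforeMarkCompact) {
      std::puts("mark-compact");
      idles = 0;
      return true;
    }
    return false;
  }
};

int main() {
  IdlePolicy policy;
  for (int i = 0; i < 10; i++) {
    if (policy.Notify(0)) break;  // Constant gc_count: pure idle escalation.
  }
  return 0;
}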
void Heap::Print() {
if (!HasBeenSetup()) return;
- Top::PrintStack();
+ isolate()->PrintStack();
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next())
space->Print();
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
- GlobalHandles::PrintStats();
+ isolate_->global_handles()->PrintStats();
PrintF("\n");
PrintF("Heap statistics : ");
- MemoryAllocator::ReportStatistics();
+ isolate_->memory_allocator()->ReportStatistics();
PrintF("To space : ");
new_space_.ReportStatistics();
PrintF("Old pointer space : ");
Address start = page->ObjectAreaStart();
Address end = page->AllocationWatermark();
- Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
start,
end,
visit_dirty_region,
// When we are not in GC the Heap::InNewSpace() predicate
// checks that pointers which satisfy the predicate point into
// the active semispace.
- Heap::InNewSpace(*slot);
+ HEAP->InNewSpace(*slot);
slot_address += kPointerSize;
}
}
#endif // DEBUG
-bool Heap::IteratePointersInDirtyRegion(Address start,
+bool Heap::IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address slot_address = start;
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (Heap::InNewSpace(*slot)) {
+ if (heap->InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
- if (Heap::InNewSpace(*slot)) {
+ if (heap->InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
pointers_to_new_space_found = true;
}
Address map_address = start;
bool pointers_to_new_space_found = false;
+ Heap* heap = HEAP;
while (map_address < end) {
- ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
ASSERT(Memory::Object_at(map_address)->IsMap());
Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
- if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
+ if (Heap::IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)) {
pointers_to_new_space_found = true;
bool Heap::IteratePointersInDirtyMapsRegion(
+ Heap* heap,
Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Min(prev_map + Map::kPointerFieldsEndOffset, end);
contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(pointer_fields_start,
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(pointer_fields_start,
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (Heap::InFromSpace(*slot)) {
+ if (InFromSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
callback(reinterpret_cast<HeapObject**>(slot));
- if (Heap::InNewSpace(*slot)) {
+ if (InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
marks |= page->GetRegionMaskForAddress(slot_address);
}
Address region_end = Min(second_region, area_end);
if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ if (visit_dirty_region(this,
+ region_start,
+ region_end,
+ copy_object_func)) {
newmarks |= mask;
}
}
while (region_end <= area_end) {
if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ if (visit_dirty_region(this,
+ region_start,
+ region_end,
+ copy_object_func)) {
newmarks |= mask;
}
}
// with region end. Check whether region covering last part of area is
// dirty.
if (marks & mask) {
- if (visit_dirty_region(region_start, area_end, copy_object_func)) {
+ if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
newmarks |= mask;
}
}
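Page dirtiness above is a 32-bit word with one bit per fixed-size region; the loops shift a single-bit mask across the word, call the visitor only for dirty regions, and rebuild `newmarks` from the regions that still hold new-space pointers. A toy version of that mask walk:

#include <cassert>
#include <cstdint>

// Visit set bits of `marks`; keep a bit only if the visitor returns true.
// Mirrors the marks/mask/newmarks walk in IterateDirtyRegions above.
template <typename Visitor>
uint32_t WalkDirtyRegions(uint32_t marks, Visitor visit) {
  uint32_t newmarks = 0;
  for (uint32_t mask = 1; mask != 0; mask <<= 1) {
    if (marks & mask) {
      if (visit(mask)) newmarks |= mask;
    }
  }
  return newmarks;
}

int main() {
  // Regions 0, 1, and 3 are dirty; pretend only region 0 still has
  // new-space pointers (the visitor accepts masks overlapping 0x5).
  uint32_t out = WalkDirtyRegions(0xBu, [](uint32_t m) {
    return (m & 0x5u) != 0;
  });
  assert(out == 0x1u);
  return 0;
}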
v->Synchronize("symbol_table");
if (mode != VISIT_ALL_IN_SCAVENGE) {
// Scavenge collections have special processing for this.
- ExternalStringTable::Iterate(v);
+ external_string_table_.Iterate(v);
}
v->Synchronize("external_string_table");
}
v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
v->Synchronize("symbol");
- Bootstrapper::Iterate(v);
+ isolate_->bootstrapper()->Iterate(v);
v->Synchronize("bootstrapper");
- Top::Iterate(v);
+ isolate_->Iterate(v);
v->Synchronize("top");
Relocatable::Iterate(v);
v->Synchronize("relocatable");
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::Iterate(v);
+ isolate_->debug()->Iterate(v);
#endif
v->Synchronize("debug");
- CompilationCache::Iterate(v);
+ isolate_->compilation_cache()->Iterate(v);
v->Synchronize("compilationcache");
// Iterate over local handles in handle scopes.
- HandleScopeImplementer::Iterate(v);
+ isolate_->handle_scope_implementer()->Iterate(v);
v->Synchronize("handlescope");
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
if (mode != VISIT_ALL_IN_SCAVENGE) {
- Builtins::IterateBuiltins(v);
+ isolate_->builtins()->IterateBuiltins(v);
}
v->Synchronize("builtins");
// Iterate over global handles.
if (mode == VISIT_ONLY_STRONG) {
- GlobalHandles::IterateStrongRoots(v);
+ isolate_->global_handles()->IterateStrongRoots(v);
} else {
- GlobalHandles::IterateAllRoots(v);
+ isolate_->global_handles()->IterateAllRoots(v);
}
v->Synchronize("globalhandles");
// Iterate over pointers being held by inactive threads.
- ThreadManager::Iterate(v);
+ isolate_->thread_manager()->Iterate(v);
v->Synchronize("threadmanager");
// Iterate over the pointers the Serialization/Deserialization code is
}
-// Flag is set when the heap has been configured. The heap can be repeatedly
-// configured through the API until it is setup.
-static bool heap_configured = false;
-
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
// The old generation is paged.
max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
- heap_configured = true;
+ configured_ = true;
return true;
}
*stats->cell_space_size = cell_space_->Size();
*stats->cell_space_capacity = cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
- GlobalHandles::RecordStats(stats);
- *stats->memory_allocator_size = MemoryAllocator::Size();
+ isolate_->global_handles()->RecordStats(stats);
+ *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
*stats->memory_allocator_capacity =
- MemoryAllocator::Size() + MemoryAllocator::Available();
+ isolate()->memory_allocator()->Size() +
+ isolate()->memory_allocator()->Available();
*stats->os_error = OS::GetLastError();
if (take_snapshot) {
HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
for (HeapObject* obj = iterator.next();
- amount_of_external_allocated_memory_at_last_global_gc_;
}
+#ifdef DEBUG
+
+// Tags 0, 1, and 3 are used. Use 2 for marking a visited HeapObject.
+static const int kMarkTag = 2;
+
+
+class HeapDebugUtils {
+ public:
+ explicit HeapDebugUtils(Heap* heap)
+ : search_for_any_global_(false),
+ search_target_(NULL),
+ found_target_(false),
+ object_stack_(20),
+ heap_(heap) {
+ }
+
+ class MarkObjectVisitor : public ObjectVisitor {
+ public:
+ explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Mark all HeapObject pointers in [start, end).
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->MarkObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+ void MarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (!map->IsHeapObject()) return; // visited before
+
+ if (found_target_) return; // stop if target found
+ object_stack_.Add(obj);
+ if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
+ (!search_for_any_global_ && (obj == search_target_))) {
+ found_target_ = true;
+ return;
+ }
+
+ // not visited yet
+ Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
+
+ Address map_addr = map_p->address();
+
+ obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+
+ MarkObjectRecursively(&map);
+
+ MarkObjectVisitor mark_visitor(this);
+
+ obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
+ &mark_visitor);
+
+ if (!found_target_) // don't pop if we found the target
+ object_stack_.RemoveLast();
+ }
+
+
+ class UnmarkObjectVisitor : public ObjectVisitor {
+ public:
+ explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Unmark all HeapObject pointers in [start, end).
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->UnmarkObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+
+ void UnmarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (map->IsHeapObject()) return; // unmarked already
+
+ Address map_addr = reinterpret_cast<Address>(map);
+
+ map_addr -= kMarkTag;
+
+ ASSERT_TAG_ALIGNED(map_addr);
+
+ HeapObject* map_p = HeapObject::FromAddress(map_addr);
+
+ obj->set_map(reinterpret_cast<Map*>(map_p));
+
+ UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
+
+ UnmarkObjectVisitor unmark_visitor(this);
+
+ obj->IterateBody(Map::cast(map_p)->instance_type(),
+ obj->SizeFromMap(Map::cast(map_p)),
+ &unmark_visitor);
+ }
+
+
+ void MarkRootObjectRecursively(Object** root) {
+ if (search_for_any_global_) {
+ ASSERT(search_target_ == NULL);
+ } else {
+ ASSERT(search_target_->IsHeapObject());
+ }
+ found_target_ = false;
+ object_stack_.Clear();
+
+ MarkObjectRecursively(root);
+ UnmarkObjectRecursively(root);
+
+ if (found_target_) {
+ PrintF("=====================================\n");
+ PrintF("==== Path to object ====\n");
+ PrintF("=====================================\n\n");
+
+ ASSERT(!object_stack_.is_empty());
+ for (int i = 0; i < object_stack_.length(); i++) {
+ if (i > 0) PrintF("\n |\n |\n V\n\n");
+ Object* obj = object_stack_[i];
+ obj->Print();
+ }
+ PrintF("=====================================\n");
+ }
+ }
+
+ // Helper class for visiting HeapObjects recursively.
+ class MarkRootVisitor: public ObjectVisitor {
+ public:
+ explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Visit all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->MarkRootObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+ bool search_for_any_global_;
+ Object* search_target_;
+ bool found_target_;
+ List<Object*> object_stack_;
+ Heap* heap_;
+
+ friend class Heap;
+};
+
+#endif
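HeapDebugUtils above marks a visited object by adding kMarkTag to its map word, so a map field that no longer looks like a valid HeapObject pointer means "seen before", and unmarking subtracts the tag back out. A hedged sketch of that low-bit tagging trick, assuming pointers aligned to at least four bytes:

#include <cassert>
#include <cstdint>

static const uintptr_t kMarkTag = 2;  // A bit pattern unused by aligned ptrs.

static uintptr_t Mark(uintptr_t map_word)   { return map_word + kMarkTag; }
static uintptr_t Unmark(uintptr_t map_word) { return map_word - kMarkTag; }
static bool IsMarked(uintptr_t map_word)    { return (map_word & kMarkTag) != 0; }

int main() {
  alignas(8) static int dummy_map = 0;
  uintptr_t word = reinterpret_cast<uintptr_t>(&dummy_map);
  assert(!IsMarked(word));
  word = Mark(word);
  assert(IsMarked(word));  // The "map" no longer looks like a pointer.
  word = Unmark(word);
  assert(word == reinterpret_cast<uintptr_t>(&dummy_map));
  return 0;
}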
bool Heap::Setup(bool create_heap_objects) {
+#ifdef DEBUG
+ debug_utils_ = new HeapDebugUtils(this);
+#endif
+
// Initialize heap spaces and initial maps and objects. Whenever something
// goes wrong, just return false. The caller should check the results and
// call Heap::TearDown() to release allocated memory.
// Configuration is based on the flags new-space-size (really the semispace
// size) and old-space-size if set or the initial values of semispace_size_
// and old_generation_size_ otherwise.
- if (!heap_configured) {
+ if (!configured_) {
if (!ConfigureHeapDefault()) return false;
}
- ScavengingVisitor::Initialize();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
+ gc_initializer_mutex->Lock();
+ static bool initialized_gc = false;
+ if (!initialized_gc) {
+ initialized_gc = true;
+ ScavengingVisitor::Initialize();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
+ }
+ gc_initializer_mutex->Unlock();
MarkMapPointersAsEncoded(false);
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
// are contiguous and aligned to their size.
- if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
+ if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
+ return false;
void* chunk =
- MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
+ isolate_->memory_allocator()->ReserveInitialChunk(
+ 4 * reserved_semispace_size_);
if (chunk == NULL) return false;
// Align the pair of semispaces to their size, which must be a power
// Initialize old pointer space.
old_pointer_space_ =
- new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+ new OldSpace(this,
+ max_old_generation_size_,
+ OLD_POINTER_SPACE,
+ NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
if (!old_pointer_space_->Setup(NULL, 0)) return false;
// Initialize old data space.
old_data_space_ =
- new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+ new OldSpace(this,
+ max_old_generation_size_,
+ OLD_DATA_SPACE,
+ NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
if (!old_data_space_->Setup(NULL, 0)) return false;
// On 64-bit platform(s), we put all code objects in a 2 GB range of
// virtual address space, so that they can call each other with near calls.
if (code_range_size_ > 0) {
- if (!CodeRange::Setup(code_range_size_)) {
+ if (!isolate_->code_range()->Setup(code_range_size_)) {
return false;
}
}
code_space_ =
- new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+ new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
- map_space_ = new MapSpace(FLAG_use_big_map_space
+ map_space_ = new MapSpace(this, FLAG_use_big_map_space
? max_old_generation_size_
: MapSpace::kMaxMapPageIndex * Page::kPageSize,
FLAG_max_map_space_pages,
if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
- cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
+ cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(LO_SPACE);
+ lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
global_contexts_list_ = undefined_value();
}
- LOG(IntPtrTEvent("heap-capacity", Capacity()));
- LOG(IntPtrTEvent("heap-available", Available()));
+ LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
+ LOG(isolate_, IntPtrTEvent("heap-available", Available()));
#ifdef ENABLE_LOGGING_AND_PROFILING
// This should be called only after initial objects have been created.
- ProducerHeapProfile::Setup();
+ isolate_->producer_heap_profile()->Setup();
#endif
return true;
void Heap::SetStackLimits() {
+ ASSERT(isolate_ != NULL);
+ ASSERT(isolate_ == isolate());
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
// These are actually addresses, but the tag makes the GC ignore it.
roots_[kStackLimitRootIndex] =
reinterpret_cast<Object*>(
- (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
+ (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
roots_[kRealStackLimitRootIndex] =
reinterpret_cast<Object*>(
- (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
+ (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
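SetStackLimits above stores raw stack-limit addresses in the roots array disguised as Smis: clearing the tag bits and or-ing in kSmiTag yields a word the GC classifies as a small integer and therefore never dereferences. An illustration with assumed tag constants (V8's actual encoding varies by word size):

#include <cassert>
#include <cstdint>

// Assumed encoding: a low bit of 0 marks a Smi, 1 marks a heap pointer.
static const uintptr_t kSmiTag = 0;
static const uintptr_t kSmiTagMask = 1;

// Disguise an address as a Smi so a tag-checking GC ignores it.
static uintptr_t DisguiseAsSmi(uintptr_t address) {
  return (address & ~kSmiTagMask) | kSmiTag;
}

int main() {
  uintptr_t fake_limit = 0x7fff0001;  // Odd address: would look heap-tagged.
  uintptr_t disguised = DisguiseAsSmi(fake_limit);
  assert((disguised & kSmiTagMask) == kSmiTag);  // GC sees a Smi, skips it.
  return 0;
}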
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
PrintF("mark_compact_count=%d ", mc_count_);
- PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
- PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
+ PrintF("max_gc_pause=%d ", get_max_gc_pause());
+ PrintF("min_in_mutator=%d ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
- GCTracer::get_max_alive_after_gc());
+ get_max_alive_after_gc());
PrintF("\n\n");
}
- GlobalHandles::TearDown();
+ isolate_->global_handles()->TearDown();
- ExternalStringTable::TearDown();
+ external_string_table_.TearDown();
new_space_.TearDown();
lo_space_ = NULL;
}
- MemoryAllocator::TearDown();
+ isolate_->memory_allocator()->TearDown();
+
+#ifdef DEBUG
+ delete debug_utils_;
+ debug_utils_ = NULL;
+#endif
}
void Heap::PrintHandles() {
PrintF("Handles:\n");
PrintHandleVisitor v;
- HandleScopeImplementer::Iterate(&v);
+ isolate_->handle_scope_implementer()->Iterate(&v);
}
#endif
Space* AllSpaces::next() {
switch (counter_++) {
case NEW_SPACE:
- return Heap::new_space();
+ return HEAP->new_space();
case OLD_POINTER_SPACE:
- return Heap::old_pointer_space();
+ return HEAP->old_pointer_space();
case OLD_DATA_SPACE:
- return Heap::old_data_space();
+ return HEAP->old_data_space();
case CODE_SPACE:
- return Heap::code_space();
+ return HEAP->code_space();
case MAP_SPACE:
- return Heap::map_space();
+ return HEAP->map_space();
case CELL_SPACE:
- return Heap::cell_space();
+ return HEAP->cell_space();
case LO_SPACE:
- return Heap::lo_space();
+ return HEAP->lo_space();
default:
return NULL;
}
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return Heap::old_pointer_space();
+ return HEAP->old_pointer_space();
case OLD_DATA_SPACE:
- return Heap::old_data_space();
+ return HEAP->old_data_space();
case CODE_SPACE:
- return Heap::code_space();
+ return HEAP->code_space();
case MAP_SPACE:
- return Heap::map_space();
+ return HEAP->map_space();
case CELL_SPACE:
- return Heap::cell_space();
+ return HEAP->cell_space();
default:
return NULL;
}
OldSpace* OldSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return Heap::old_pointer_space();
+ return HEAP->old_pointer_space();
case OLD_DATA_SPACE:
- return Heap::old_data_space();
+ return HEAP->old_data_space();
case CODE_SPACE:
- return Heap::code_space();
+ return HEAP->code_space();
default:
return NULL;
}
switch (current_space_) {
case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(Heap::new_space(), size_func_);
+ iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
break;
case OLD_POINTER_SPACE:
- iterator_ = new HeapObjectIterator(Heap::old_pointer_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
break;
case OLD_DATA_SPACE:
- iterator_ = new HeapObjectIterator(Heap::old_data_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
break;
case CODE_SPACE:
- iterator_ = new HeapObjectIterator(Heap::code_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
break;
case MAP_SPACE:
- iterator_ = new HeapObjectIterator(Heap::map_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
break;
case CELL_SPACE:
- iterator_ = new HeapObjectIterator(Heap::cell_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
break;
case LO_SPACE:
- iterator_ = new LargeObjectIterator(Heap::lo_space(), size_func_);
+ iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
break;
}
private:
void MarkFreeListNodes() {
- Heap::old_pointer_space()->MarkFreeListNodes();
- Heap::old_data_space()->MarkFreeListNodes();
- MarkCodeSpaceFreeListNodes();
- Heap::map_space()->MarkFreeListNodes();
- Heap::cell_space()->MarkFreeListNodes();
+ Heap* heap = HEAP;
+ heap->old_pointer_space()->MarkFreeListNodes();
+ heap->old_data_space()->MarkFreeListNodes();
+ MarkCodeSpaceFreeListNodes(heap);
+ heap->map_space()->MarkFreeListNodes();
+ heap->cell_space()->MarkFreeListNodes();
}
- void MarkCodeSpaceFreeListNodes() {
+ void MarkCodeSpaceFreeListNodes(Heap* heap) {
// For code space, using FreeListNode::IsFreeListNode is OK.
- HeapObjectIterator iter(Heap::code_space());
+ HeapObjectIterator iter(heap->code_space());
for (HeapObject* obj = iter.next_object();
obj != NULL;
obj = iter.next_object()) {
obj->SetMark();
}
UnmarkingVisitor visitor;
- Heap::IterateRoots(&visitor, VISIT_ALL);
+ HEAP->IterateRoots(&visitor, VISIT_ALL);
while (visitor.can_process())
visitor.ProcessNext();
}
}
-GCTracer::GCTracer()
+GCTracer::GCTracer(Heap* heap)
: start_time_(0.0),
start_size_(0),
gc_count_(0),
marked_count_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
- promoted_objects_size_(0) {
+ promoted_objects_size_(0),
+ heap_(heap) {
// These two fields reflect the state of the previous full collection.
// Set them before they are changed by the collector.
- previous_has_compacted_ = MarkCompactCollector::HasCompacted();
- previous_marked_count_ = MarkCompactCollector::previous_marked_count();
+ previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
+ previous_marked_count_ =
+ heap_->mark_compact_collector_.previous_marked_count();
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
- start_size_ = Heap::SizeOfObjects();
+ start_size_ = heap_->SizeOfObjects();
for (int i = 0; i < Scope::kNumberOfScopes; i++) {
scopes_[i] = 0;
in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
- allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
+ allocated_since_last_gc_ =
+ heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
- if (last_gc_end_timestamp_ > 0) {
- spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
+ if (heap_->last_gc_end_timestamp_ > 0) {
+ spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
}
}
// Printf ONE line iff flag is set.
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
- bool first_gc = (last_gc_end_timestamp_ == 0);
+ bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
- alive_after_last_gc_ = Heap::SizeOfObjects();
- last_gc_end_timestamp_ = OS::TimeCurrentMillis();
+ heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
+ heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
- int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
+ int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
// Update cumulative GC statistics if required.
if (FLAG_print_cumulative_gc_stat) {
- max_gc_pause_ = Max(max_gc_pause_, time);
- max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
+ heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
+ heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
+ heap_->alive_after_last_gc_);
if (!first_gc) {
- min_in_mutator_ = Min(min_in_mutator_,
- static_cast<int>(spent_in_mutator_));
+ heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
+ static_cast<int>(spent_in_mutator_));
}
}
PrintF("s");
break;
case MARK_COMPACTOR:
- PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
+ PrintF("%s",
+ heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
break;
default:
UNREACHABLE();
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
- PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
+ PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
in_free_list_or_wasted_before_gc_);
PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
}
#if defined(ENABLE_LOGGING_AND_PROFILING)
- Heap::PrintShortHeapStatistics();
+ heap_->PrintShortHeapStatistics();
#endif
}
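GCTracer above timestamps a collection in its constructor and folds pause time and time spent in the mutator into per-heap cumulative statistics when it is destroyed. A minimal RAII analogue; the stats holder and clock are illustrative, not V8's:

#include <algorithm>
#include <chrono>

struct GCStats {
  double max_pause_ms = 0;
  double min_in_mutator_ms = 1e300;
  double last_gc_end = 0;  // 0 means no GC has finished yet.
};

// Measures one GC pause and the mutator time since the previous GC,
// in the spirit of GCTracer's constructor/destructor pair above.
class ScopedGCTracer {
 public:
  explicit ScopedGCTracer(GCStats* stats) : stats_(stats), start_(NowMs()) {
    if (stats_->last_gc_end > 0) {
      double in_mutator = std::max(start_ - stats_->last_gc_end, 0.0);
      stats_->min_in_mutator_ms =
          std::min(stats_->min_in_mutator_ms, in_mutator);
    }
  }
  ~ScopedGCTracer() {
    stats_->last_gc_end = NowMs();
    stats_->max_pause_ms =
        std::max(stats_->max_pause_ms, stats_->last_gc_end - start_);
  }

 private:
  static double NowMs() {
    using namespace std::chrono;
    return duration<double, std::milli>(
        steady_clock::now().time_since_epoch()).count();
  }
  GCStats* stats_;
  double start_;
};

int main() {
  GCStats stats;
  { ScopedGCTracer trace(&stats); /* pretend to collect */ }
  { ScopedGCTracer trace(&stats); }
  return 0;
}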
case SCAVENGER:
return "Scavenge";
case MARK_COMPACTOR:
- return MarkCompactCollector::HasCompacted() ? "Mark-compact"
- : "Mark-sweep";
+ return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
+ : "Mark-sweep";
}
return "Unknown GC";
}
if ((key.map == map) && key.name->Equals(name)) {
return field_offsets_[index];
}
- return -1;
+ return kNotFound;
}
void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
String* symbol;
- if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ if (HEAP->LookupSymbolIfExists(name, &symbol)) {
int index = Hash(map, symbol);
Key& key = keys_[index];
key.map = map;
}
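KeyedLookupCache is a direct-mapped cache: a (map, name) pair hashes to exactly one slot, Lookup compares both keys against that slot, and Update overwrites it unconditionally, so collisions simply evict. A generic standalone sketch with a placeholder hash function and size:

#include <cassert>
#include <cstdint>

static const int kLength = 64;   // placeholder size, power of two
static const int kNotFound = -1;

// Direct-mapped (key1, key2) -> value cache; collisions just evict.
class LookupCache {
 public:
  LookupCache() {
    for (int i = 0; i < kLength; i++) keys_[i].k1 = keys_[i].k2 = 0;
  }

  int Lookup(uintptr_t k1, uintptr_t k2) {
    int index = Hash(k1, k2);
    if (keys_[index].k1 == k1 && keys_[index].k2 == k2) {
      return values_[index];
    }
    return kNotFound;
  }

  void Update(uintptr_t k1, uintptr_t k2, int value) {
    int index = Hash(k1, k2);
    keys_[index].k1 = k1;
    keys_[index].k2 = k2;
    values_[index] = value;
  }

 private:
  static int Hash(uintptr_t k1, uintptr_t k2) {
    return static_cast<int>((k1 ^ (k2 >> 2)) & (kLength - 1));
  }
  struct Key { uintptr_t k1, k2; } keys_[kLength];
  int values_[kLength];
};

int main() {
  LookupCache cache;
  assert(cache.Lookup(0x10, 0x20) == kNotFound);
  cache.Update(0x10, 0x20, 42);
  assert(cache.Lookup(0x10, 0x20) == 42);
  return 0;
}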
-KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
-
-
-int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
-
-
void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}
-DescriptorLookupCache::Key
-DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
-
-int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
-
-
#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy);
- if (Bootstrapper::IsActive()) return;
+ if (isolate_->bootstrapper()->IsActive()) return;
if (disallow_allocation_failure()) return;
CollectGarbage(NEW_SPACE);
}
#endif
-TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
- : type_(t) {
+TranscendentalCache::SubCache::SubCache(Type t)
+ : type_(t),
+ isolate_(Isolate::Current()) {
uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
uint32_t in1 = 0xffffffffu; // generated by the FPU.
for (int i = 0; i < kCacheSize; i++) {
}
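Each TranscendentalCache element above stores its input double as two raw 32-bit halves, pre-initialized to 0xffffffff/0xffffffff, a NaN bit pattern the FPU never produces, so an untouched slot can never match a real query. A hedged single-entry version of the idea:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// One cache line: input double as two raw 32-bit halves plus the output.
struct Element {
  uint32_t in0, in1;
  double output;
};

static Element cache = {0xffffffffu, 0xffffffffu, 0.0};  // impossible key

static double CachedSin(double x) {
  uint32_t halves[2];
  std::memcpy(halves, &x, sizeof(x));  // reinterpret the bits, no UB
  if (cache.in0 == halves[0] && cache.in1 == halves[1]) {
    return cache.output;               // hit
  }
  double result = std::sin(x);
  cache.in0 = halves[0];
  cache.in1 = halves[1];
  cache.output = result;
  return result;
}

int main() {
  double a = CachedSin(1.25);
  double b = CachedSin(1.25);  // served from the cache
  assert(a == b);
  return 0;
}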
-TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
-
-
void TranscendentalCache::Clear() {
for (int i = 0; i < kNumberOfCaches; i++) {
if (caches_[i] != NULL) {
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
- if (Heap::InNewSpace(new_space_strings_[i])) {
+ if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+ if (heap_->InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
old_space_strings_.Add(new_space_strings_[i]);
new_space_strings_.Rewind(last);
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
- ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+ if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+ ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
}
-List<Object*> ExternalStringTable::new_space_strings_;
-List<Object*> ExternalStringTable::old_space_strings_;
-
} } // namespace v8::internal
#include "globals.h"
#include "list.h"
+#include "mark-compact.h"
#include "spaces.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
namespace v8 {
namespace internal {
+// TODO(isolates): remove HEAP here
+#define HEAP (_inline_get_heap_())
+class Heap;
+inline Heap* _inline_get_heap_();
+
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
// Forward declarations.
class GCTracer;
class HeapStats;
+class Isolate;
class WeakObjectRetainer;
-typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
+typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+ Object** pointer);
-typedef bool (*DirtyRegionCallback)(Address start,
+typedef bool (*DirtyRegionCallback)(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback copy_object_func);
-// The all static Heap captures the interface to the global object heap.
-// All JavaScript contexts by this process share the same object heap.
+// Heap captures the interface to the object heap of a single isolate.
+// All JavaScript contexts within an isolate share the same object heap.
-class Heap : public AllStatic {
+#ifdef DEBUG
+class HeapDebugUtils;
+#endif
+
+
+// A queue of objects promoted during scavenge. Each object is accompanied
+// by its size to avoid dereferencing a map pointer for scanning.
+class PromotionQueue {
+ public:
+ PromotionQueue() : front_(NULL), rear_(NULL) { }
+
+ void Initialize(Address start_address) {
+ front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+ }
+
+ bool is_empty() { return front_ <= rear_; }
+
+ inline void insert(HeapObject* target, int size);
+
+ void remove(HeapObject** target, int* size) {
+ *target = reinterpret_cast<HeapObject*>(*(--front_));
+ *size = static_cast<int>(*(--front_));
+ // Assert no underflow.
+ ASSERT(front_ >= rear_);
+ }
+
+ private:
+ // The front of the queue is higher in memory than the rear.
+ intptr_t* front_;
+ intptr_t* rear_;
+
+ DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
+};
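insert() is declared inline above and presumably mirrors remove(), pushing the target and then its size with pre-decrements of rear_ so that remove() pops them back in reverse. A sketch of the drain loop a scavenger would run against this queue (illustrative only):

    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);
      // Rescan [target, target + size) for pointers into new space,
      // without touching target's map to learn the size.
    }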
+
+
+// The external string table is the place where all external strings are
+// registered. We need to keep track of such strings to properly
+// finalize them.
+class ExternalStringTable {
+ public:
+ // Registers an external string.
+ inline void AddString(String* string);
+
+ inline void Iterate(ObjectVisitor* v);
+
+ // Restores internal invariant and gets rid of collected strings.
+ // Must be called after each Iterate() that modified the strings.
+ void CleanUp();
+
+ // Destroys all allocated memory.
+ void TearDown();
+
+ private:
+ ExternalStringTable() { }
+
+ friend class Heap;
+
+ inline void Verify();
+
+ inline void AddOldString(String* string);
+
+ // Notifies the table that only a prefix of the new list is valid.
+ inline void ShrinkNewStrings(int position);
+
+ // To speed up scavenge collections, new space strings are kept
+ // separate from old space strings.
+ List<Object*> new_space_strings_;
+ List<Object*> old_space_strings_;
+
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
+};
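AddString is declared inline above; a definition consistent with the generation split that CleanUp restores would route strings by InNewSpace (a sketch of the expected behavior, not the actual inline body):

    void ExternalStringTable::AddString(String* string) {
      ASSERT(string->IsExternalString());
      if (heap_->InNewSpace(string)) {
        new_space_strings_.Add(string);
      } else {
        old_space_strings_.Add(string);
      }
    }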
+
+
+class Heap {
public:
// Configure heap size before setup. Return false if the heap has been
// setup already.
- static bool ConfigureHeap(int max_semispace_size,
- int max_old_gen_size,
- int max_executable_size);
- static bool ConfigureHeapDefault();
+ bool ConfigureHeap(int max_semispace_size,
+ int max_old_gen_size,
+ int max_executable_size);
+ bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
// also creates the basic non-mutable objects.
// Returns whether it succeeded.
- static bool Setup(bool create_heap_objects);
+ bool Setup(bool create_heap_objects);
// Destroys all memory allocated by the heap.
- static void TearDown();
+ void TearDown();
// Set the stack limit in the roots_ array. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
- static void SetStackLimits();
+ void SetStackLimits();
// Returns whether Setup has been called.
- static bool HasBeenSetup();
+ bool HasBeenSetup();
// Returns the maximum amount of memory reserved for the heap. For
// the young generation, we reserve 4 times the amount needed for a
// semi space. The young generation consists of two semi spaces and
// we reserve twice the amount needed for those in order to ensure
// that new space can be aligned to its size.
- static intptr_t MaxReserved() {
+ intptr_t MaxReserved() {
return 4 * reserved_semispace_size_ + max_old_generation_size_;
}
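  // For example, assuming an 8 MB reserved semispace and a 512 MB old
  // generation (illustrative figures, not defaults from this patch),
  // MaxReserved() is 4 * 8 MB + 512 MB = 544 MB.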
- static int MaxSemiSpaceSize() { return max_semispace_size_; }
- static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
- static int InitialSemiSpaceSize() { return initial_semispace_size_; }
- static intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- static intptr_t MaxExecutableSize() { return max_executable_size_; }
+ int MaxSemiSpaceSize() { return max_semispace_size_; }
+ int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
+ int InitialSemiSpaceSize() { return initial_semispace_size_; }
+ intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ intptr_t MaxExecutableSize() { return max_executable_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
- static intptr_t Capacity();
+ intptr_t Capacity();
// Returns the amount of memory currently committed for the heap.
- static intptr_t CommittedMemory();
+ intptr_t CommittedMemory();
// Returns the amount of executable memory currently committed for the heap.
- static intptr_t CommittedMemoryExecutable();
+ intptr_t CommittedMemoryExecutable();
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
- static intptr_t Available();
+ intptr_t Available();
// Returns the maximum object size in paged space.
- static inline int MaxObjectSizeInPagedSpace();
+ inline int MaxObjectSizeInPagedSpace();
// Returns the size of all objects residing in the heap.
- static intptr_t SizeOfObjects();
+ intptr_t SizeOfObjects();
// Return the starting address and a mask for the new space. And-masking an
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
- static Address NewSpaceStart() { return new_space_.start(); }
- static uintptr_t NewSpaceMask() { return new_space_.mask(); }
- static Address NewSpaceTop() { return new_space_.top(); }
-
- static NewSpace* new_space() { return &new_space_; }
- static OldSpace* old_pointer_space() { return old_pointer_space_; }
- static OldSpace* old_data_space() { return old_data_space_; }
- static OldSpace* code_space() { return code_space_; }
- static MapSpace* map_space() { return map_space_; }
- static CellSpace* cell_space() { return cell_space_; }
- static LargeObjectSpace* lo_space() { return lo_space_; }
-
- static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
- static Address always_allocate_scope_depth_address() {
+ Address NewSpaceStart() { return new_space_.start(); }
+ uintptr_t NewSpaceMask() { return new_space_.mask(); }
+ Address NewSpaceTop() { return new_space_.top(); }
+
+ NewSpace* new_space() { return &new_space_; }
+ OldSpace* old_pointer_space() { return old_pointer_space_; }
+ OldSpace* old_data_space() { return old_data_space_; }
+ OldSpace* code_space() { return code_space_; }
+ MapSpace* map_space() { return map_space_; }
+ CellSpace* cell_space() { return cell_space_; }
+ LargeObjectSpace* lo_space() { return lo_space_; }
+
+ bool always_allocate() { return always_allocate_scope_depth_ != 0; }
+ Address always_allocate_scope_depth_address() {
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
- static bool linear_allocation() {
+ bool linear_allocation() {
return linear_allocation_scope_depth_ != 0;
}
- static Address* NewSpaceAllocationTopAddress() {
+ Address* NewSpaceAllocationTopAddress() {
return new_space_.allocation_top_address();
}
- static Address* NewSpaceAllocationLimitAddress() {
+ Address* NewSpaceAllocationLimitAddress() {
return new_space_.allocation_limit_address();
}
// Uncommit unused semi space.
- static bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+ bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the heap by marking all spaces read-only/writable.
- static void Protect();
- static void Unprotect();
+ void Protect();
+ void Unprotect();
#endif
// Allocates and initializes a new JavaScript object based on a
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSObject(
+ MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateGlobalObject(
- JSFunction* constructor);
+ MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- MUST_USE_RESULT static MaybeObject* CopyJSObject(JSObject* source);
+ MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFunctionPrototype(
- JSFunction* function);
+ MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
// Reinitialize a JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an
// object that has been freshly allocated using the constructor.
- MUST_USE_RESULT static MaybeObject* ReinitializeJSGlobalProxy(
- JSFunction* constructor,
- JSGlobalProxy* global);
+ MUST_USE_RESULT MaybeObject* ReinitializeJSGlobalProxy(
+ JSFunction* constructor, JSGlobalProxy* global);
// Allocates and initializes a new JavaScript object based on a map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSObjectFromMap(
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
Map* map, PretenureFlag pretenure = NOT_TENURED);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* Allocate(Map* map, AllocationSpace space);
+ MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateMap(InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
+ int instance_size);
// Allocates a partial map for bootstrapping.
- MUST_USE_RESULT static MaybeObject* AllocatePartialMap(
- InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
+ int instance_size);
// Allocate a map for the specified function
- MUST_USE_RESULT static MaybeObject* AllocateInitialMap(JSFunction* fun);
+ MUST_USE_RESULT MaybeObject* AllocateInitialMap(JSFunction* fun);
// Allocates an empty code cache.
- MUST_USE_RESULT static MaybeObject* AllocateCodeCache();
+ MUST_USE_RESULT MaybeObject* AllocateCodeCache();
// Clear the Instanceof cache (used when a prototype changes).
- static void ClearInstanceofCache() {
- set_instanceof_cache_function(the_hole_value());
- }
+ inline void ClearInstanceofCache();
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateStringFromAscii(
+ MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static inline MaybeObject* AllocateStringFromUtf8(
+ MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static MaybeObject* AllocateStringFromUtf8Slow(
+ MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static MaybeObject* AllocateStringFromTwoByte(
+ MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* AllocateSymbol(
- Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT inline MaybeObject* AllocateSymbol(Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
- MUST_USE_RESULT static inline MaybeObject* AllocateAsciiSymbol(
+ MUST_USE_RESULT inline MaybeObject* AllocateAsciiSymbol(
Vector<const char> str,
uint32_t hash_field);
- MUST_USE_RESULT static inline MaybeObject* AllocateTwoByteSymbol(
+ MUST_USE_RESULT inline MaybeObject* AllocateTwoByteSymbol(
Vector<const uc16> str,
uint32_t hash_field);
- MUST_USE_RESULT static MaybeObject* AllocateInternalSymbol(
+ MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
- MUST_USE_RESULT static MaybeObject* AllocateExternalSymbol(
+ MUST_USE_RESULT MaybeObject* AllocateExternalSymbol(
Vector<const char> str,
int chars);
-
// Allocates and partially initializes a String. There are two String
// encodings: ASCII and two byte. These functions allocate a string of the
// given length and set its map and length fields. The characters of the
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateRawAsciiString(
+ MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static MaybeObject* AllocateRawTwoByteString(
+ MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
// A cache is used for ascii codes.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* LookupSingleCharacterStringFromCode(
+ MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
uint16_t code);
// Allocate a byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
+ PretenureFlag pretenure);
// Allocate a non-tenured byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateByteArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateExternalArray(
+ MUST_USE_RESULT MaybeObject* AllocateExternalArray(
int length,
ExternalArrayType array_type,
void* external_pointer,
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSGlobalPropertyCell(
- Object* value);
+ MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFixedArray(
- int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
+ PretenureFlag pretenure);
// Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT static MaybeObject* AllocateFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateUninitializedFixedArray(
- int length);
+ MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT static inline MaybeObject* CopyFixedArray(FixedArray* src);
+ MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
// Make a copy of src, set the map, and return the copy. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT static MaybeObject* CopyFixedArrayWithMap(FixedArray* src,
- Map* map);
+ MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithHoles(
+ MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithHoles(
int length,
PretenureFlag pretenure = NOT_TENURED);
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
- MUST_USE_RESULT static MaybeObject* AllocateHashTable(
+ MUST_USE_RESULT MaybeObject* AllocateHashTable(
int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate a global (but otherwise uninitialized) context.
- MUST_USE_RESULT static MaybeObject* AllocateGlobalContext();
+ MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
// Allocate a function context.
- MUST_USE_RESULT static MaybeObject* AllocateFunctionContext(
- int length,
- JSFunction* closure);
+ MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
+ JSFunction* closure);
// Allocate a 'with' context.
- MUST_USE_RESULT static MaybeObject* AllocateWithContext(
- Context* previous,
- JSObject* extension,
- bool is_catch_context);
+ MUST_USE_RESULT MaybeObject* AllocateWithContext(Context* previous,
+ JSObject* extension,
+ bool is_catch_context);
// Allocates a new utility object in the old generation.
- MUST_USE_RESULT static MaybeObject* AllocateStruct(InstanceType type);
+ MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
// Allocates a function initialized with a shared part.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFunction(
+ MUST_USE_RESULT MaybeObject* AllocateFunction(
Map* function_map,
SharedFunctionInfo* shared,
Object* prototype,
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateArgumentsObject(Object* callee,
- int length);
+ MUST_USE_RESULT MaybeObject* AllocateArgumentsObject(
+ Object* callee, int length);
// Same as NewNumberFromDouble, but may return a preallocated/immutable
// number object (e.g., minus_zero_value_, nan_value_)
- MUST_USE_RESULT static MaybeObject* NumberFromDouble(
+ MUST_USE_RESULT MaybeObject* NumberFromDouble(
double value, PretenureFlag pretenure = NOT_TENURED);
// Allocates a HeapNumber from value.
- MUST_USE_RESULT static MaybeObject* AllocateHeapNumber(
+ MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
double value,
PretenureFlag pretenure);
- // pretenure = NOT_TENURED.
- MUST_USE_RESULT static MaybeObject* AllocateHeapNumber(double value);
+ // pretenure = NOT_TENURED
+ MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* NumberFromInt32(int32_t value);
+ MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* NumberFromUint32(uint32_t value);
+ MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
// Allocates a new proxy object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateProxy(
- Address proxy,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateProxy(
+ Address proxy, PretenureFlag pretenure = NOT_TENURED);
// Allocates a new SharedFunctionInfo object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateSharedFunctionInfo(Object* name);
+ MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name);
// Allocates a new JSMessageObject object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note that this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSMessageObject(
+ MUST_USE_RESULT MaybeObject* AllocateJSMessageObject(
String* type,
JSArray* arguments,
int start_position,
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateConsString(String* first,
- String* second);
+ MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
+ String* second);
// Allocates a new sub string object which is a substring of an underlying
// string buffer stretching from the index start (inclusive) to the index
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateSubString(
+ MUST_USE_RESULT MaybeObject* AllocateSubString(
String* buffer,
int start,
int end,
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateExternalStringFromAscii(
+ MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
- MUST_USE_RESULT static MaybeObject* AllocateExternalStringFromTwoByte(
+ MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
- static inline void FinalizeExternalString(String* string);
+ inline void FinalizeExternalString(String* string);
// Allocates an uninitialized object. The memory is non-executable if the
// hardware and OS allow.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* AllocateRaw(
- int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
+ MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes,
+ AllocationSpace space,
+ AllocationSpace retry_space);
// Initialize a filler object to keep the ability to iterate over the heap
// when shortening objects.
- static void CreateFillerObjectAt(Address addr, int size);
+ void CreateFillerObjectAt(Address addr, int size);
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// self_reference. This allows generated code to reference its own Code
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false);
+ MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference,
+ bool immovable = false);
- MUST_USE_RESULT static MaybeObject* CopyCode(Code* code);
+ MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
// Copy the code and scope info part of the code object, but insert
// the provided data as the relocation information.
- MUST_USE_RESULT static MaybeObject* CopyCode(Code* code,
- Vector<byte> reloc_info);
+ MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);
// Finds the symbol for string in the symbol table.
// If not found, a new symbol is added to the table and returned.
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* LookupSymbol(Vector<const char> str);
- MUST_USE_RESULT static MaybeObject* LookupAsciiSymbol(Vector<const char> str);
- MUST_USE_RESULT static MaybeObject* LookupTwoByteSymbol(
+ MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
+ MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
+ MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(
Vector<const uc16> str);
- MUST_USE_RESULT static MaybeObject* LookupAsciiSymbol(const char* str) {
+ MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
- MUST_USE_RESULT static MaybeObject* LookupSymbol(String* str);
- static bool LookupSymbolIfExists(String* str, String** symbol);
- static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
+ MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
+ bool LookupSymbolIfExists(String* str, String** symbol);
+ bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
// Compute the matching symbol map for a string if possible.
// NULL is returned if string is in new space or not flattened.
- static Map* SymbolMapForString(String* str);
+ Map* SymbolMapForString(String* str);
// Tries to flatten a string before compare operation.
//
// string might stay non-flat even when no failure is returned.
//
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* PrepareForCompare(String* str);
+ MUST_USE_RESULT inline MaybeObject* PrepareForCompare(String* str);
// Converts the given boolean condition to JavaScript boolean value.
- static Object* ToBoolean(bool condition) {
- return condition ? true_value() : false_value();
- }
+ inline Object* ToBoolean(bool condition);
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
- static void GarbageCollectionPrologue();
- static void GarbageCollectionEpilogue();
+ void GarbageCollectionPrologue();
+ void GarbageCollectionEpilogue();
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- static bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
+ bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- inline static bool CollectGarbage(AllocationSpace space);
+ inline bool CollectGarbage(AllocationSpace space);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
- static void CollectAllGarbage(bool force_compaction);
+ void CollectAllGarbage(bool force_compaction);
// Last hope GC, should try to squeeze as much as possible.
- static void CollectAllAvailableGarbage();
+ void CollectAllAvailableGarbage();
// Notify the heap that a context has been disposed.
- static int NotifyContextDisposed() { return ++contexts_disposed_; }
+ int NotifyContextDisposed() { return ++contexts_disposed_; }
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
- static void PerformScavenge();
+ void PerformScavenge();
+
+ PromotionQueue* promotion_queue() { return &promotion_queue_; }
#ifdef DEBUG
// Utility used with flag gc-greedy.
- static void GarbageCollectionGreedyCheck();
+ void GarbageCollectionGreedyCheck();
#endif
- static void AddGCPrologueCallback(
+ void AddGCPrologueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
- static void RemoveGCPrologueCallback(GCEpilogueCallback callback);
+ void RemoveGCPrologueCallback(GCEpilogueCallback callback);
- static void AddGCEpilogueCallback(
+ void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
- static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
- static void SetGlobalGCPrologueCallback(GCCallback callback) {
+ void SetGlobalGCPrologueCallback(GCCallback callback) {
ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
global_gc_prologue_callback_ = callback;
}
- static void SetGlobalGCEpilogueCallback(GCCallback callback) {
+ void SetGlobalGCEpilogueCallback(GCCallback callback) {
ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
global_gc_epilogue_callback_ = callback;
}
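  // The XOR asserts enforce strict pairing: a global callback can only be
  // installed while none is set, and is cleared by passing NULL. For
  // example (MyPrologue is any function matching GCCallback; the name is
  // illustrative):
  //   heap->SetGlobalGCPrologueCallback(&MyPrologue);  // install
  //   heap->SetGlobalGCPrologueCallback(NULL);         // clear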
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
#define ROOT_ACCESSOR(type, name, camel_name) \
- static inline type* name() { \
+ type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
} \
- static inline type* raw_unchecked_##name() { \
+ type* raw_unchecked_##name() { \
return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
}
ROOT_LIST(ROOT_ACCESSOR)
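  // For instance, for the (String, empty_string, EmptyString) entry of the
  // root list, ROOT_ACCESSOR expands to (reconstructed for illustration):
  //   String* empty_string() {
  //     return String::cast(roots_[kEmptyStringRootIndex]);
  //   }
  //   String* raw_unchecked_empty_string() {
  //     return reinterpret_cast<String*>(roots_[kEmptyStringRootIndex]);
  //   }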
// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- static inline Map* name##_map() { \
+ Map* name##_map() { \
return Map::cast(roots_[k##Name##MapRootIndex]); \
}
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) static inline String* name() { \
+#define SYMBOL_ACCESSOR(name, str) String* name() { \
return String::cast(roots_[k##name##RootIndex]); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
// The hidden_symbol is special because it is the empty string, but does
// not match the empty string.
- static String* hidden_symbol() { return hidden_symbol_; }
+ String* hidden_symbol() { return hidden_symbol_; }
- static void set_global_contexts_list(Object* object) {
+ void set_global_contexts_list(Object* object) {
global_contexts_list_ = object;
}
- static Object* global_contexts_list() { return global_contexts_list_; }
+ Object* global_contexts_list() { return global_contexts_list_; }
// Iterates over all roots in the heap.
- static void IterateRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
- static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all the other roots in the heap.
- static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having invalid watermark upon
// iteration completion.
- static void IterateDirtyRegions(
+ void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// memory interval from start to top. For each dirty region call a
// visit_dirty_region callback. Return updated bitvector of dirty marks.
- static uint32_t IterateDirtyRegions(uint32_t marks,
- Address start,
- Address end,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback callback);
+ uint32_t IterateDirtyRegions(uint32_t marks,
+ Address start,
+ Address end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback);
// Iterate pointers to from semispace of new space found in memory interval
// from start to end.
// Update dirty marks for page containing start address.
- static void IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback);
+ void IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space were found.
- static bool IteratePointersInDirtyRegion(Address start,
+ static bool IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space were found.
- static bool IteratePointersInDirtyMapsRegion(Address start,
+ static bool IteratePointersInDirtyMapsRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
- static inline bool InNewSpace(Object* object);
- static inline bool InFromSpace(Object* object);
- static inline bool InToSpace(Object* object);
+ inline bool InNewSpace(Object* object);
+ inline bool InFromSpace(Object* object);
+ inline bool InToSpace(Object* object);
// Checks whether an address/object is in the heap (including the auxiliary
// area and unused area).
- static bool Contains(Address addr);
- static bool Contains(HeapObject* value);
+ bool Contains(Address addr);
+ bool Contains(HeapObject* value);
// Checks whether an address/object is in a space.
// Currently used by tests, serialization and heap verification only.
- static bool InSpace(Address addr, AllocationSpace space);
- static bool InSpace(HeapObject* value, AllocationSpace space);
+ bool InSpace(Address addr, AllocationSpace space);
+ bool InSpace(HeapObject* value, AllocationSpace space);
// Finds out which space an object should get promoted to based on its type.
- static inline OldSpace* TargetSpace(HeapObject* object);
- static inline AllocationSpace TargetSpaceId(InstanceType type);
+ inline OldSpace* TargetSpace(HeapObject* object);
+ inline AllocationSpace TargetSpaceId(InstanceType type);
// Sets the stub_cache_ (only used when expanding the dictionary).
- static void public_set_code_stubs(NumberDictionary* value) {
+ void public_set_code_stubs(NumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
// Support for computing object sizes for old objects during GCs. Returns
// a function that is guaranteed to be safe for computing object sizes in
// the current GC phase.
- static HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+ HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
return gc_safe_size_of_old_object_;
}
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- static void public_set_non_monomorphic_cache(NumberDictionary* value) {
+ void public_set_non_monomorphic_cache(NumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
}
- static void public_set_empty_script(Script* script) {
+ void public_set_empty_script(Script* script) {
roots_[kEmptyScriptRootIndex] = script;
}
// Update the next script id.
- static inline void SetLastScriptId(Object* last_script_id);
+ inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots.
- static Object** roots_address() { return roots_; }
+ Object** roots_address() { return roots_; }
// Get address of global contexts list for serialization support.
- static Object** global_contexts_list_address() {
+ Object** global_contexts_list_address() {
return &global_contexts_list_;
}
#ifdef DEBUG
- static void Print();
- static void PrintHandles();
+ void Print();
+ void PrintHandles();
// Verify the heap is in its normal state before or after a GC.
- static void Verify();
+ void Verify();
// Report heap statistics.
- static void ReportHeapStatistics(const char* title);
- static void ReportCodeStatistics(const char* title);
+ void ReportHeapStatistics(const char* title);
+ void ReportCodeStatistics(const char* title);
// Fill in bogus values in from space
- static void ZapFromSpace();
+ void ZapFromSpace();
#endif
#if defined(ENABLE_LOGGING_AND_PROFILING)
// Print short heap statistics.
- static void PrintShortHeapStatistics();
+ void PrintShortHeapStatistics();
#endif
// Makes a new symbol object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* CreateSymbol(const char* str,
- int length,
- int hash);
- MUST_USE_RESULT static MaybeObject* CreateSymbol(String* str);
+ MUST_USE_RESULT MaybeObject* CreateSymbol(
+ const char* str, int length, int hash);
+ MUST_USE_RESULT MaybeObject* CreateSymbol(String* str);
// Write barrier support for address[offset] = o.
- static inline void RecordWrite(Address address, int offset);
+ inline void RecordWrite(Address address, int offset);
// Write barrier support for address[start : start + len[ = o.
- static inline void RecordWrites(Address address, int start, int len);
+ inline void RecordWrites(Address address, int start, int len);
// Given an address occupied by a live code object, return that object.
- static Object* FindCodeObject(Address a);
+ Object* FindCodeObject(Address a);
// Invoke Shrink on shrinkable spaces.
- static void Shrink();
+ void Shrink();
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- static inline HeapState gc_state() { return gc_state_; }
+ inline HeapState gc_state() { return gc_state_; }
#ifdef DEBUG
- static bool IsAllocationAllowed() { return allocation_allowed_; }
- static inline bool allow_allocation(bool enable);
+ bool IsAllocationAllowed() { return allocation_allowed_; }
+ inline bool allow_allocation(bool enable);
- static bool disallow_allocation_failure() {
+ bool disallow_allocation_failure() {
return disallow_allocation_failure_;
}
- static void TracePathToObject(Object* target);
- static void TracePathToGlobal();
+ void TracePathToObject(Object* target);
+ void TracePathToGlobal();
#endif
// Callback function passed to Heap::Iterate etc. Copies an object if
// necessary; the object might be promoted to an old space. The caller must
// ensure the precondition that the object is (a) a heap object and (b) in
// the heap's from space.
- static void ScavengePointer(HeapObject** p);
+ static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
// Commits from space if it is uncommitted.
- static void EnsureFromSpaceIsCommitted();
+ void EnsureFromSpaceIsCommitted();
// Support for partial snapshots. After calling this we can allocate a
// certain number of bytes using only linear allocation (with a
// or causing a GC. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
- static void ReserveSpace(
+ void ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
// Support for the API.
//
- static bool CreateApiObjects();
+ bool CreateApiObjects();
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
- static Object* GetNumberStringCache(Object* number);
+ Object* GetNumberStringCache(Object* number);
// Update the cache with a new number-string pair.
- static void SetNumberStringCache(Object* number, String* str);
+ void SetNumberStringCache(Object* number, String* str);
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
- static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+ inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
// Allocate uninitialized fixed array.
- MUST_USE_RESULT static MaybeObject* AllocateRawFixedArray(int length);
- MUST_USE_RESULT static MaybeObject* AllocateRawFixedArray(
- int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
+ PretenureFlag pretenure);
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
- static bool OldGenerationPromotionLimitReached() {
+ bool OldGenerationPromotionLimitReached() {
return (PromotedSpaceSize() + PromotedExternalMemorySize())
> old_gen_promotion_limit_;
}
- static intptr_t OldGenerationSpaceAvailable() {
+ intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ -
(PromotedSpaceSize() + PromotedExternalMemorySize());
}
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
- static bool OldGenerationAllocationLimitReached() {
+ bool OldGenerationAllocationLimitReached() {
return OldGenerationSpaceAvailable() < 0;
}
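  // Worked example with illustrative figures: if old_gen_allocation_limit_
  // is 8 MB and PromotedSpaceSize() + PromotedExternalMemorySize() already
  // totals 9 MB, then OldGenerationSpaceAvailable() is -1 MB, so
  // OldGenerationAllocationLimitReached() returns true and the next GC is
  // forced.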
// Can be called when the embedding application is idle.
- static bool IdleNotification();
+ bool IdleNotification();
// Declare all the root indices.
enum RootListIndex {
kRootListLength
};
- MUST_USE_RESULT static MaybeObject* NumberToString(
- Object* number,
- bool check_number_string_cache = true);
+ MUST_USE_RESULT MaybeObject* NumberToString(
+ Object* number, bool check_number_string_cache = true);
- static Map* MapForExternalArrayType(ExternalArrayType array_type);
- static RootListIndex RootIndexForExternalArrayType(
+ Map* MapForExternalArrayType(ExternalArrayType array_type);
+ RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
- static void RecordStats(HeapStats* stats, bool take_snapshot = false);
+ void RecordStats(HeapStats* stats, bool take_snapshot = false);
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
+ inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);
- static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
+ inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Check new space expansion criteria and expand semispaces if they were hit.
- static void CheckNewSpaceExpansionCriteria();
+ void CheckNewSpaceExpansionCriteria();
- static inline void IncrementYoungSurvivorsCounter(int survived) {
+ inline void IncrementYoungSurvivorsCounter(int survived) {
young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived;
}
- static void UpdateNewSpaceReferencesInExternalStringTable(
+ void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
- static void ProcessWeakReferences(WeakObjectRetainer* retainer);
+ void ProcessWeakReferences(WeakObjectRetainer* retainer);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
// mark or if we've already filled the bottom 1/16th of the to space,
// we try to promote this object.
- static inline bool ShouldBePromoted(Address old_address, int object_size);
+ inline bool ShouldBePromoted(Address old_address, int object_size);
+
+ int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
+
+ void ClearJSFunctionResultCaches();
+
+ void ClearNormalizedMapCaches();
+
+ GCTracer* tracer() { return tracer_; }
+
+ // Returns maximum GC pause.
+ int get_max_gc_pause() { return max_gc_pause_; }
+
+ // Returns maximum size of objects alive after GC.
+ intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
- static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
+ // Returns minimal interval between two subsequent collections.
+ int get_min_in_mutator() { return min_in_mutator_; }
- static void ClearJSFunctionResultCaches();
+ MarkCompactCollector* mark_compact_collector() {
+ return &mark_compact_collector_;
+ }
- static void ClearNormalizedMapCaches();
+ ExternalStringTable* external_string_table() {
+ return &external_string_table_;
+ }
- static GCTracer* tracer() { return tracer_; }
+ inline Isolate* isolate();
+ bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
- static void CallGlobalGCPrologueCallback() {
+ void CallGlobalGCPrologueCallback() {
if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
}
- static void CallGlobalGCEpilogueCallback() {
+ void CallGlobalGCEpilogueCallback() {
if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
}
private:
- static int reserved_semispace_size_;
- static int max_semispace_size_;
- static int initial_semispace_size_;
- static intptr_t max_old_generation_size_;
- static intptr_t max_executable_size_;
- static intptr_t code_range_size_;
+ Heap();
+
+ // This can be calculated directly from a pointer to the heap; however, it is
+ // more expedient to get at the isolate directly from within Heap methods.
+ Isolate* isolate_;
+
+ int reserved_semispace_size_;
+ int max_semispace_size_;
+ int initial_semispace_size_;
+ intptr_t max_old_generation_size_;
+ intptr_t max_executable_size_;
+ intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
- static int survived_since_last_expansion_;
+ int survived_since_last_expansion_;
- static int always_allocate_scope_depth_;
- static int linear_allocation_scope_depth_;
+ int always_allocate_scope_depth_;
+ int linear_allocation_scope_depth_;
// For keeping track of context disposals.
- static int contexts_disposed_;
+ int contexts_disposed_;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 1024*KB;
static const int kMaxObjectSizeInNewSpace = 512*KB;
#endif
- static NewSpace new_space_;
- static OldSpace* old_pointer_space_;
- static OldSpace* old_data_space_;
- static OldSpace* code_space_;
- static MapSpace* map_space_;
- static CellSpace* cell_space_;
- static LargeObjectSpace* lo_space_;
- static HeapState gc_state_;
+ NewSpace new_space_;
+ OldSpace* old_pointer_space_;
+ OldSpace* old_data_space_;
+ OldSpace* code_space_;
+ MapSpace* map_space_;
+ CellSpace* cell_space_;
+ LargeObjectSpace* lo_space_;
+ HeapState gc_state_;
// Returns the size of object residing in non new spaces.
- static intptr_t PromotedSpaceSize();
+ intptr_t PromotedSpaceSize();
// Returns the amount of external memory registered since last global gc.
- static int PromotedExternalMemorySize();
+ int PromotedExternalMemorySize();
- static int mc_count_; // how many mark-compact collections happened
- static int ms_count_; // how many mark-sweep collections happened
- static unsigned int gc_count_; // how many gc happened
+ int mc_count_; // how many mark-compact collections happened
+ int ms_count_; // how many mark-sweep collections happened
+ unsigned int gc_count_; // how many gc happened
// Total length of the strings we failed to flatten since the last GC.
- static int unflattened_strings_length_;
+ int unflattened_strings_length_;
#define ROOT_ACCESSOR(type, name, camel_name) \
- static inline void set_##name(type* value) { \
+ inline void set_##name(type* value) { \
roots_[k##camel_name##RootIndex] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
#ifdef DEBUG
- static bool allocation_allowed_;
+ bool allocation_allowed_;
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
- static int allocation_timeout_;
+ int allocation_timeout_;
// Do we expect to be able to handle allocation failure at this
// time?
- static bool disallow_allocation_failure_;
+ bool disallow_allocation_failure_;
+
+ HeapDebugUtils* debug_utils_;
#endif // DEBUG
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke.
- static intptr_t old_gen_promotion_limit_;
+ intptr_t old_gen_promotion_limit_;
// Limit that triggers a global GC as soon as is reasonable. This is
// checked before expanding a paged space in the old generation and on
// every allocation in large object space.
- static intptr_t old_gen_allocation_limit_;
+ intptr_t old_gen_allocation_limit_;
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
- static intptr_t external_allocation_limit_;
+ intptr_t external_allocation_limit_;
// The amount of external memory registered through the API kept alive
// by global handles
- static int amount_of_external_allocated_memory_;
+ int amount_of_external_allocated_memory_;
// Caches the amount of external memory registered at the last global gc.
- static int amount_of_external_allocated_memory_at_last_global_gc_;
+ int amount_of_external_allocated_memory_at_last_global_gc_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
- static int old_gen_exhausted_;
+ int old_gen_exhausted_;
- static Object* roots_[kRootListLength];
+ Object* roots_[kRootListLength];
- static Object* global_contexts_list_;
+ Object* global_contexts_list_;
struct StringTypeTable {
InstanceType type;
// The special hidden symbol which is an empty string, but does not match
// any string when looked up in properties.
- static String* hidden_symbol_;
+ String* hidden_symbol_;
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
GCPrologueCallback callback;
GCType gc_type;
};
- static List<GCPrologueCallbackPair> gc_prologue_callbacks_;
+ List<GCPrologueCallbackPair> gc_prologue_callbacks_;
struct GCEpilogueCallbackPair {
GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
GCEpilogueCallback callback;
GCType gc_type;
};
- static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
+ List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
- static GCCallback global_gc_prologue_callback_;
- static GCCallback global_gc_epilogue_callback_;
+ GCCallback global_gc_prologue_callback_;
+ GCCallback global_gc_epilogue_callback_;
// Support for computing object sizes during GC.
- static HeapObjectCallback gc_safe_size_of_old_object_;
+ HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector.
- static void MarkMapPointersAsEncoded(bool encoded) {
+ void MarkMapPointersAsEncoded(bool encoded) {
gc_safe_size_of_old_object_ = encoded
? &GcSafeSizeOfOldObjectWithEncodedMap
: &GcSafeSizeOfOldObject;
}
// Checks whether a global GC is necessary
- static GarbageCollector SelectGarbageCollector(AllocationSpace space);
+ GarbageCollector SelectGarbageCollector(AllocationSpace space);
// Performs garbage collection
// Returns whether there is a chance another major GC could
// collect more garbage.
- static bool PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer);
+ bool PerformGarbageCollection(GarbageCollector collector,
+ GCTracer* tracer);
+
+ static const intptr_t kMinimumPromotionLimit = 2 * MB;
+ static const intptr_t kMinimumAllocationLimit = 8 * MB;
+
+ inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
// (since both AllocateRaw and AllocateRawMap are inlined).
- MUST_USE_RESULT static inline MaybeObject* AllocateRawMap();
+ MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
// Allocate an uninitialized object in the global property cell space.
- MUST_USE_RESULT static inline MaybeObject* AllocateRawCell();
+ MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
// Initializes a JSObject based on its map.
- static void InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
- Map* map);
+ void InitializeJSObjectFromMap(JSObject* obj,
+ FixedArray* properties,
+ Map* map);
- static bool CreateInitialMaps();
- static bool CreateInitialObjects();
+ bool CreateInitialMaps();
+ bool CreateInitialObjects();
- // These two Create*EntryStub functions are here and forced to not be inlined
+ // These five Create*EntryStub functions are here and forced to not be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
- NO_INLINE(static void CreateJSEntryStub());
- NO_INLINE(static void CreateJSConstructEntryStub());
+ NO_INLINE(void CreateJSEntryStub());
+ NO_INLINE(void CreateJSConstructEntryStub());
- static void CreateFixedStubs();
+ void CreateFixedStubs();
- MUST_USE_RESULT static MaybeObject* CreateOddball(const char* to_string,
- Object* to_number);
+ MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
+ Object* to_number,
+ byte kind);
// Allocate empty fixed array.
- MUST_USE_RESULT static MaybeObject* AllocateEmptyFixedArray();
+ MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
// Performs a minor collection in new generation.
- static void Scavenge();
+ void Scavenge();
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap,
Object** pointer);
- static Address DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front);
+ Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
// Performs a major collection in the whole heap.
- static void MarkCompact(GCTracer* tracer);
+ void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
- static void MarkCompactPrologue(bool is_compacting);
+ void MarkCompactPrologue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
- static void CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(the_hole_value());
- set_instanceof_cache_function(the_hole_value());
- }
+ inline void CompletelyClearInstanceofCache();
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record statistics before and after garbage collection.
- static void ReportStatisticsBeforeGC();
- static void ReportStatisticsAfterGC();
+ void ReportStatisticsBeforeGC();
+ void ReportStatisticsAfterGC();
#endif
// Slow part of scavenge object.
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefits from the use of this function.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* InitializeFunction(
+ MUST_USE_RESULT inline MaybeObject* InitializeFunction(
JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
- static GCTracer* tracer_;
+ GCTracer* tracer_;
// Initializes the number to string cache based on the max semispace size.
- MUST_USE_RESULT static MaybeObject* InitializeNumberStringCache();
+ MUST_USE_RESULT MaybeObject* InitializeNumberStringCache();
// Flush the number to string cache.
- static void FlushNumberStringCache();
+ void FlushNumberStringCache();
- static void UpdateSurvivalRateTrend(int start_new_space_size);
+ void UpdateSurvivalRateTrend(int start_new_space_size);
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
static const int kYoungSurvivalRateThreshold = 90;
static const int kYoungSurvivalRateAllowedDeviation = 15;
- static int young_survivors_after_last_gc_;
- static int high_survival_rate_period_length_;
- static double survival_rate_;
- static SurvivalRateTrend previous_survival_rate_trend_;
- static SurvivalRateTrend survival_rate_trend_;
+ int young_survivors_after_last_gc_;
+ int high_survival_rate_period_length_;
+ double survival_rate_;
+ SurvivalRateTrend previous_survival_rate_trend_;
+ SurvivalRateTrend survival_rate_trend_;
- static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
+ void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
ASSERT(survival_rate_trend != FLUCTUATING);
previous_survival_rate_trend_ = survival_rate_trend_;
survival_rate_trend_ = survival_rate_trend;
}
- static SurvivalRateTrend survival_rate_trend() {
+ SurvivalRateTrend survival_rate_trend() {
if (survival_rate_trend_ == STABLE) {
return STABLE;
} else if (previous_survival_rate_trend_ == STABLE) {
}
}
- static bool IsStableOrIncreasingSurvivalTrend() {
+ bool IsStableOrIncreasingSurvivalTrend() {
switch (survival_rate_trend()) {
case STABLE:
case INCREASING:
}
}
- static bool IsIncreasingSurvivalTrend() {
+ bool IsIncreasingSurvivalTrend() {
return survival_rate_trend() == INCREASING;
}
- static bool IsHighSurvivalRate() {
+ bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
+ // Maximum GC pause.
+ int max_gc_pause_;
+
+ // Maximum size of objects alive after GC.
+ intptr_t max_alive_after_gc_;
+
+ // Minimal interval between two subsequent collections.
+ int min_in_mutator_;
+
+ // Size of objects alive after last GC.
+ intptr_t alive_after_last_gc_;
+
+ double last_gc_end_timestamp_;
+
+ MarkCompactCollector mark_compact_collector_;
+
+ // This field contains the meaning of the WATERMARK_INVALIDATED flag.
+ // Instead of clearing this flag from all pages, we just flip
+ // its meaning at the beginning of a scavenge.
+ intptr_t page_watermark_invalidated_mark_;
+
+ int number_idle_notifications_;
+ unsigned int last_idle_notification_gc_count_;
+ bool last_idle_notification_gc_count_init_;
+
+ // Shared state read by the scavenge collector and set by ScavengeObject.
+ PromotionQueue promotion_queue_;
+
+ // Flag is set when the heap has been configured. The heap can be repeatedly
+ // configured through the API until it is set up.
+ bool configured_;
+
+ ExternalStringTable external_string_table_;
+
+ bool is_safe_to_read_maps_;
+
friend class Factory;
+ friend class GCTracer;
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
+ friend class Page;
+ friend class Isolate;
friend class MarkCompactCollector;
+ friend class MapCompact;
+
+ DISALLOW_COPY_AND_ASSIGN(Heap);
};
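The hunk above is the heart of the multi-isolate change: Heap stops being a bag of class statics and becomes a per-isolate instance. A minimal sketch of what that means at a call site (hypothetical caller; HEAP is assumed to be shorthand for the current isolate's heap, as used throughout this patch):

// Before: one process-wide heap, reached through class statics.
//   MaybeObject* maybe = Heap::AllocateEmptyFixedArray();
//   Heap::Scavenge();
//
// After: every operation targets an explicit Heap instance.
//   Heap* heap = isolate->heap();                 // or the HEAP shorthand
//   MaybeObject* maybe = heap->AllocateEmptyFixedArray();
//   heap->Scavenge();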
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- ASSERT(Heap::always_allocate_scope_depth_ == 0);
- Heap::always_allocate_scope_depth_++;
+ ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+ HEAP->always_allocate_scope_depth_++;
}
~AlwaysAllocateScope() {
- Heap::always_allocate_scope_depth_--;
- ASSERT(Heap::always_allocate_scope_depth_ == 0);
+ HEAP->always_allocate_scope_depth_--;
+ ASSERT(HEAP->always_allocate_scope_depth_ == 0);
}
};
class LinearAllocationScope {
public:
LinearAllocationScope() {
- Heap::linear_allocation_scope_depth_++;
+ HEAP->linear_allocation_scope_depth_++;
}
~LinearAllocationScope() {
- Heap::linear_allocation_scope_depth_--;
- ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
+ HEAP->linear_allocation_scope_depth_--;
+ ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
}
};
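Both scopes above are plain RAII guards around a per-heap counter, so they unwind correctly across nesting and early returns. A usage sketch (hypothetical call site):

{
  AlwaysAllocateScope always_allocate;  // bumps HEAP->always_allocate_scope_depth_
  // ... allocation that must succeed without falling back to a GC retry ...
}                                       // destructor restores the depth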
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(Heap::Contains(object));
+ ASSERT(HEAP->Contains(object));
ASSERT(object->map()->IsMap());
}
}
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(Heap::Contains(object));
+ ASSERT(HEAP->Contains(object));
ASSERT(object->map()->IsMap());
- if (Heap::InNewSpace(object)) {
- ASSERT(Heap::InToSpace(object));
+ if (HEAP->InNewSpace(object)) {
+ ASSERT(HEAP->InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
class KeyedLookupCache {
public:
// Lookup field offset for (map, name). If absent, -1 is returned.
- static int Lookup(Map* map, String* name);
+ int Lookup(Map* map, String* name);
// Update an element in the cache.
- static void Update(Map* map, String* name, int field_offset);
+ void Update(Map* map, String* name, int field_offset);
// Clear the cache.
- static void Clear();
+ void Clear();
static const int kLength = 64;
static const int kCapacityMask = kLength - 1;
static const int kMapHashShift = 2;
+ static const int kNotFound = -1;
private:
+ KeyedLookupCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].map = NULL;
+ keys_[i].name = NULL;
+ field_offsets_[i] = kNotFound;
+ }
+ }
+
static inline int Hash(Map* map, String* name);
// Get the address of the keys and field_offsets arrays. Used in
// generated code to perform cache lookups.
- static Address keys_address() {
+ Address keys_address() {
return reinterpret_cast<Address>(&keys_);
}
- static Address field_offsets_address() {
+ Address field_offsets_address() {
return reinterpret_cast<Address>(&field_offsets_);
}
Map* map;
String* name;
};
- static Key keys_[kLength];
- static int field_offsets_[kLength];
+
+ Key keys_[kLength];
+ int field_offsets_[kLength];
friend class ExternalReference;
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
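The Lookup and Update bodies live outside this hunk; a plausible direct-mapped implementation, consistent with the kLength/kCapacityMask constants and the new kNotFound sentinel introduced above, would look like:

int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);              // hashes into [0, kLength)
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];           // hit: cached field offset
  }
  return kNotFound;                         // miss: caller takes the slow path
}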
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
- static int Lookup(DescriptorArray* array, String* name) {
+ int Lookup(DescriptorArray* array, String* name) {
if (!StringShape(name).IsSymbol()) return kAbsent;
int index = Hash(array, name);
Key& key = keys_[index];
}
// Update an element in the cache.
- static void Update(DescriptorArray* array, String* name, int result) {
+ void Update(DescriptorArray* array, String* name, int result) {
ASSERT(result != kAbsent);
if (StringShape(name).IsSymbol()) {
int index = Hash(array, name);
}
// Clear the cache.
- static void Clear();
+ void Clear();
static const int kAbsent = -2;
private:
+ DescriptorLookupCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].array = NULL;
+ keys_[i].name = NULL;
+ results_[i] = kAbsent;
+ }
+ }
+
static int Hash(DescriptorArray* array, String* name) {
// Uses only lower 32 bits if pointers are larger.
uint32_t array_hash =
String* name;
};
- static Key keys_[kLength];
- static int results_[kLength];
-};
-
-
-// ----------------------------------------------------------------------------
-// Marking stack for tracing live objects.
-
-class MarkingStack {
- public:
- void Initialize(Address low, Address high) {
- top_ = low_ = reinterpret_cast<HeapObject**>(low);
- high_ = reinterpret_cast<HeapObject**>(high);
- overflowed_ = false;
- }
-
- bool is_full() { return top_ >= high_; }
-
- bool is_empty() { return top_ <= low_; }
+ Key keys_[kLength];
+ int results_[kLength];
- bool overflowed() { return overflowed_; }
-
- void clear_overflowed() { overflowed_ = false; }
-
- // Push the (marked) object on the marking stack if there is room,
- // otherwise mark the object as overflowed and wait for a rescan of the
- // heap.
- void Push(HeapObject* object) {
- CHECK(object->IsHeapObject());
- if (is_full()) {
- object->SetOverflow();
- overflowed_ = true;
- } else {
- *(top_++) = object;
- }
- }
-
- HeapObject* Pop() {
- ASSERT(!is_empty());
- HeapObject* object = *(--top_);
- CHECK(object->IsHeapObject());
- return object;
- }
-
- private:
- HeapObject** low_;
- HeapObject** top_;
- HeapObject** high_;
- bool overflowed_;
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
class DisallowAllocationFailure {
public:
DisallowAllocationFailure() {
- old_state_ = Heap::disallow_allocation_failure_;
- Heap::disallow_allocation_failure_ = true;
+ old_state_ = HEAP->disallow_allocation_failure_;
+ HEAP->disallow_allocation_failure_ = true;
}
~DisallowAllocationFailure() {
- Heap::disallow_allocation_failure_ = old_state_;
+ HEAP->disallow_allocation_failure_ = old_state_;
}
private:
bool old_state_;
class AssertNoAllocation {
public:
AssertNoAllocation() {
- old_state_ = Heap::allow_allocation(false);
+ old_state_ = HEAP->allow_allocation(false);
}
~AssertNoAllocation() {
- Heap::allow_allocation(old_state_);
+ HEAP->allow_allocation(old_state_);
}
private:
class DisableAssertNoAllocation {
public:
DisableAssertNoAllocation() {
- old_state_ = Heap::allow_allocation(true);
+ old_state_ = HEAP->allow_allocation(true);
}
~DisableAssertNoAllocation() {
- Heap::allow_allocation(old_state_);
+ HEAP->allow_allocation(old_state_);
}
private:
double start_time_;
};
- GCTracer();
+ explicit GCTracer(Heap* heap);
~GCTracer();
// Sets the collector.
promoted_objects_size_ += object_size;
}
- // Returns maximum GC pause.
- static int get_max_gc_pause() { return max_gc_pause_; }
-
- // Returns maximum size of objects alive after GC.
- static intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
- // Returns minimal interval between two subsequent collections.
- static int get_min_in_mutator() { return min_in_mutator_; }
-
private:
// Returns a string matching the collector.
const char* CollectorString();
// Returns size of object in heap (in MB).
double SizeOfHeapObjects() {
- return (static_cast<double>(Heap::SizeOfObjects())) / MB;
+ return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
}
double start_time_; // Timestamp set in the constructor.
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
- // Maximum GC pause.
- static int max_gc_pause_;
-
- // Maximum size of objects alive after GC.
- static intptr_t max_alive_after_gc_;
-
- // Minimal interval between two subsequent collections.
- static int min_in_mutator_;
-
- // Size of objects alive after last GC.
- static intptr_t alive_after_last_gc_;
-
- static double last_gc_end_timestamp_;
+ Heap* heap_;
};
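With the aggregate statistics moved onto Heap (and GCTracer made a friend of Heap above), each tracer now carries a heap_ pointer and folds its measurements into that heap when it dies. A hedged sketch of the pattern; the actual destructor body is not part of this diff:

GCTracer::~GCTracer() {
  // Fold this collection's pause into the per-heap maxima; these fields
  // were previously static members of GCTracer itself.
  int pause = static_cast<int>(OS::TimeCurrentMillis() - start_time_);
  heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, pause);
  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
}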
static const int kTranscendentalTypeBits = 3;
STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
- explicit TranscendentalCache(Type t);
-
// Returns a heap number with f(input), where f is a math function specified
// by the 'type' argument.
- MUST_USE_RESULT static inline MaybeObject* Get(Type type, double input) {
- TranscendentalCache* cache = caches_[type];
- if (cache == NULL) {
- caches_[type] = cache = new TranscendentalCache(type);
- }
- return cache->Get(input);
- }
+ MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);
// The cache contains raw Object pointers. This method disposes of
// them before a garbage collection.
- static void Clear();
+ void Clear();
private:
- MUST_USE_RESULT inline MaybeObject* Get(double input) {
- Converter c;
- c.dbl = input;
- int hash = Hash(c);
- Element e = elements_[hash];
- if (e.in[0] == c.integers[0] &&
- e.in[1] == c.integers[1]) {
- ASSERT(e.output != NULL);
- Counters::transcendental_cache_hit.Increment();
- return e.output;
- }
- double answer = Calculate(input);
- Counters::transcendental_cache_miss.Increment();
- Object* heap_number;
- { MaybeObject* maybe_heap_number = Heap::AllocateHeapNumber(answer);
- if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
- }
- elements_[hash].in[0] = c.integers[0];
- elements_[hash].in[1] = c.integers[1];
- elements_[hash].output = heap_number;
- return heap_number;
- }
+ class SubCache {
+ static const int kCacheSize = 512;
- inline double Calculate(double input) {
- switch (type_) {
- case ACOS:
- return acos(input);
- case ASIN:
- return asin(input);
- case ATAN:
- return atan(input);
- case COS:
- return cos(input);
- case EXP:
- return exp(input);
- case LOG:
- return log(input);
- case SIN:
- return sin(input);
- case TAN:
- return tan(input);
- default:
- return 0.0; // Never happens.
- }
- }
- static const int kCacheSize = 512;
- struct Element {
- uint32_t in[2];
- Object* output;
- };
- union Converter {
- double dbl;
- uint32_t integers[2];
- };
- inline static int Hash(const Converter& c) {
- uint32_t hash = (c.integers[0] ^ c.integers[1]);
- hash ^= static_cast<int32_t>(hash) >> 16;
- hash ^= static_cast<int32_t>(hash) >> 8;
- return (hash & (kCacheSize - 1));
- }
-
- static Address cache_array_address() {
- // Used to create an external reference.
- return reinterpret_cast<Address>(caches_);
- }
+ explicit SubCache(Type t);
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
- // Inline implementation of the cache.
- friend class TranscendentalCacheStub;
+ MUST_USE_RESULT inline MaybeObject* Get(double input);
- static TranscendentalCache* caches_[kNumberOfCaches];
- Element elements_[kCacheSize];
- Type type_;
-};
+ inline double Calculate(double input);
+ struct Element {
+ uint32_t in[2];
+ Object* output;
+ };
-// External strings table is a place where all external strings are
-// registered. We need to keep track of such strings to properly
-// finalize them.
-class ExternalStringTable : public AllStatic {
- public:
- // Registers an external string.
- inline static void AddString(String* string);
+ union Converter {
+ double dbl;
+ uint32_t integers[2];
+ };
- inline static void Iterate(ObjectVisitor* v);
+ inline static int Hash(const Converter& c) {
+ uint32_t hash = (c.integers[0] ^ c.integers[1]);
+ hash ^= static_cast<int32_t>(hash) >> 16;
+ hash ^= static_cast<int32_t>(hash) >> 8;
+ return (hash & (kCacheSize - 1));
+ }
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- static void CleanUp();
+ Element elements_[kCacheSize];
+ Type type_;
+ Isolate* isolate_;
- // Destroys all allocated memory.
- static void TearDown();
+ // Allow access to the caches_ array as an ExternalReference.
+ friend class ExternalReference;
+ // Inline implementation of the cache.
+ friend class TranscendentalCacheStub;
+ // For evaluating value.
+ friend class TranscendentalCache;
- private:
- friend class Heap;
+ DISALLOW_COPY_AND_ASSIGN(SubCache);
+ };
- inline static void Verify();
+ TranscendentalCache() {
+ for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
+ }
- inline static void AddOldString(String* string);
+ // Used to create an external reference.
+ inline Address cache_array_address();
- // Notifies the table that only a prefix of the new list is valid.
- inline static void ShrinkNewStrings(int position);
+ // Instantiation is restricted to the isolate.
+ friend class Isolate;
+ // Inline implementation of the caching.
+ friend class TranscendentalCacheStub;
+ // Allow access to the caches_ array as an ExternalReference.
+ friend class ExternalReference;
- // To speed up scavenge collections new space string are kept
- // separate from old space strings.
- static List<Object*> new_space_strings_;
- static List<Object*> old_space_strings_;
+ SubCache* caches_[kNumberOfCaches];
+ DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
};
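The public Get above replaces the old static entry point; its inline body (declared here, defined elsewhere) presumably keeps the same lazy-creation shape as the deleted static version, just against per-instance state:

MaybeObject* TranscendentalCache::Get(Type type, double input) {
  SubCache* cache = caches_[type];
  if (cache == NULL) {
    caches_[type] = cache = new SubCache(type);  // one sub-cache per math function
  }
  return cache->Get(input);
}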
} } // namespace v8::internal
+#undef HEAP
+
#endif // V8_HEAP_H_
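For readers of the rest of this patch: the upper-case shorthands (ISOLATE, HEAP, FACTORY, COUNTERS, ZONE) are not defined in these hunks. They are assumed to be thin convenience macros that resolve to the current isolate and its sub-objects, roughly:

// Assumed definitions (the real ones live outside this hunk):
// #define ISOLATE  (v8::internal::Isolate::Current())
// #define HEAP     (v8::internal::Isolate::Current()->heap())
// #define FACTORY  (v8::internal::Isolate::Current()->factory())
// #define COUNTERS (v8::internal::Isolate::Current()->counters())
// The #undef HEAP just above keeps heap.h's shorthand from leaking
// into every translation unit that includes this header.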
HConstant* HConstant::CopyToTruncatedInt32() const {
if (!has_double_value_) return NULL;
int32_t truncated = NumberToInt32(*handle_);
- return new HConstant(Factory::NewNumberFromInt(truncated),
+ return new HConstant(FACTORY->NewNumberFromInt(truncated),
Representation::Integer32());
}
bool HArrayLiteral::IsCopyOnWrite() const {
- return constant_elements()->map() == Heap::fixed_cow_array_map();
+ return constant_elements()->map() == HEAP->fixed_cow_array_map();
}
Handle<JSFunction> function() const { return function_; }
bool IsApplyFunction() const {
- return function_->code() == Builtins::builtin(Builtins::FunctionApply);
+ return function_->code() ==
+ Isolate::Current()->builtins()->builtin(Builtins::FunctionApply);
}
virtual void PrintDataTo(StringStream* stream);
class HCallRuntime: public HCall<0> {
public:
HCallRuntime(Handle<String> name,
- Runtime::Function* c_function,
+ const Runtime::Function* c_function,
int argument_count)
: HCall<0>(argument_count), c_function_(c_function), name_(name) { }
virtual void PrintDataTo(StringStream* stream);
- Runtime::Function* function() const { return c_function_; }
+ const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
virtual Representation RequiredInputRepresentation(int index) const {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
private:
- Runtime::Function* c_function_;
+ const Runtime::Function* c_function_;
Handle<String> name_;
};
}
virtual intptr_t Hashcode() {
- ASSERT(!Heap::IsAllocationAllowed());
+ ASSERT(!HEAP->IsAllocationAllowed());
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
return hash;
Handle<Object> handle() const { return handle_; }
- bool InOldSpace() const { return !Heap::InNewSpace(*handle_); }
+ bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
bool HasStringValue() const { return handle_->IsString(); }
virtual intptr_t Hashcode() {
- ASSERT(!Heap::allow_allocation(false));
+ ASSERT(!HEAP->allow_allocation(false));
return reinterpret_cast<intptr_t>(*handle());
}
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT(!Heap::allow_allocation(false));
+ ASSERT(!HEAP->allow_allocation(false));
return reinterpret_cast<intptr_t>(*cell_);
}
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
#include "hydrogen.h"
#include "codegen.h"
HConstant* HGraph::GetConstantTrue() {
- return GetConstant(&constant_true_, Heap::true_value());
+ return GetConstant(&constant_true_, HEAP->true_value());
}
HConstant* HGraph::GetConstantFalse() {
- return GetConstant(&constant_false_, Heap::false_value());
+ return GetConstant(&constant_false_, HEAP->false_value());
}
lists_size_(other->lists_size_),
count_(other->count_),
present_flags_(other->present_flags_),
- array_(Zone::NewArray<HValueMapListElement>(other->array_size_)),
- lists_(Zone::NewArray<HValueMapListElement>(other->lists_size_)),
+ array_(ZONE->NewArray<HValueMapListElement>(other->array_size_)),
+ lists_(ZONE->NewArray<HValueMapListElement>(other->lists_size_)),
free_list_head_(other->free_list_head_) {
memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
}
HValueMapListElement* new_array =
- Zone::NewArray<HValueMapListElement>(new_size);
+ ZONE->NewArray<HValueMapListElement>(new_size);
memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_array = array_;
ASSERT(new_size > lists_size_);
HValueMapListElement* new_lists =
- Zone::NewArray<HValueMapListElement>(new_size);
+ ZONE->NewArray<HValueMapListElement>(new_size);
memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_lists = lists_;
info_(info),
block_side_effects_(graph_->blocks()->length()),
loop_side_effects_(graph_->blocks()->length()) {
- ASSERT(Heap::allow_allocation(false));
+ ASSERT(HEAP->allow_allocation(false));
block_side_effects_.AddBlock(0, graph_->blocks()->length());
loop_side_effects_.AddBlock(0, graph_->blocks()->length());
}
~HGlobalValueNumberer() {
- ASSERT(!Heap::allow_allocation(true));
+ ASSERT(!HEAP->allow_allocation(true));
}
void Analyze();
if (scope->function() != NULL) BAILOUT("named function expression");
HConstant* undefined_constant =
- new HConstant(Factory::undefined_value(), Representation::Tagged());
+ new HConstant(FACTORY->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
graph_->set_undefined_constant(undefined_constant);
ASSERT(map->has_fast_elements());
AddInstruction(new HCheckMap(object, map));
HInstruction* elements = AddInstruction(new HLoadElements(object));
- AddInstruction(new HCheckMap(elements, Factory::fixed_array_map()));
+ AddInstruction(new HCheckMap(elements, FACTORY->fixed_array_map()));
bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
HInstruction* length = NULL;
if (is_array) {
CompilationInfo target_info(target);
if (!ParserApi::Parse(&target_info) ||
!Scope::Analyze(&target_info)) {
- if (Top::has_pending_exception()) {
+ if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
target->shared()->set_optimization_disabled(true);
BAILOUT("call to a JavaScript runtime function");
}
- Runtime::Function* function = expr->function();
+ const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
if (function->intrinsic_type == Runtime::INLINE) {
ASSERT(expr->name()->length() > 0);
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!Heap::InNewSpace(*candidate)) {
+ if (!Isolate::Current()->heap()->InNewSpace(*candidate)) {
target = candidate;
}
}
visitor->VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
template<typename StaticVisitor>
-void RelocInfo::Visit() {
+void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(target_object_address());
+ StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
StaticVisitor::VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
+ } else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
Immediate::Immediate(Handle<Object> handle) {
// Verify all Objects referred to by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
void Assembler::emit(Handle<Object> handle) {
// Verify all Objects referred to by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
emit(reinterpret_cast<intptr_t>(handle.location()),
RelocInfo::EMBEDDED_OBJECT);
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-// Safe default is no features.
-uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::enabled_ = 0;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+CpuFeatures::CpuFeatures()
+ : supported_(0),
+ enabled_(0),
+ found_by_runtime_probing_(0) {
+}
// The Probe method needs executable memory, so it uses HEAP->CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe(bool portable) {
- ASSERT(Heap::HasBeenSetup());
+ ASSERT(HEAP->HasBeenSetup());
ASSERT(supported_ == 0);
if (portable && Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
CodeDesc desc;
assm.GetCode(&desc);
-
Object* code;
- { MaybeObject* maybe_code = Heap::CreateCode(desc,
+ { MaybeObject* maybe_code = HEAP->CreateCode(desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>::null());
if (!maybe_code->ToObject(&code)) return;
}
if (!code->IsCode()) return;
- PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+ PROFILE(ISOLATE,
+ CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
static void InitCoverageLog();
#endif
-// Spare buffer.
-byte* Assembler::spare_buffer_ = NULL;
-
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
+ Isolate* isolate = Isolate::Current();
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate->assembler_spare_buffer() != NULL) {
+ buffer = isolate->assembler_spare_buffer();
+ isolate->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
Assembler::~Assembler() {
+ Isolate* isolate = Isolate::Current();
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
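Design note on the two hunks above: the one-deep spare-buffer cache moves from a file-level static onto the Isolate, which makes it safe by construction, since an isolate is entered by at most one thread at a time. The recycling protocol the code follows, summarized:

// Acquire (constructor, and GrowBuffer when reallocating):
//   buffer = isolate->assembler_spare_buffer();
//   isolate->set_assembler_spare_buffer(NULL);      // take ownership
// Release (destructor, and GrowBuffer's old buffer):
//   if (isolate->assembler_spare_buffer() == NULL &&
//       buffer_size_ == kMinimalBufferSize) {
//     isolate->set_assembler_spare_buffer(buffer_);  // park for reuse
//   } else {
//     DeleteArray(buffer_);                          // cache full or odd size
//   }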
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
- Counters::reloc_info_size.Increment(desc->reloc_size);
+ COUNTERS->reloc_info_size()->Increment(desc->reloc_size);
}
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
void Assembler::rdtsc() {
- ASSERT(CpuFeatures::IsEnabled(RDTSC));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(RDTSC));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDB);
void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDD);
void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::movmskpd(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::movdqu(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
void Assembler::movdqu(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::movsd(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
void Assembler::movd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::movd(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::pand(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::por(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::psllq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
void Assembler::GrowBuffer() {
+ Isolate* isolate = Isolate::Current();
ASSERT(overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > Heap::MaxOldGenerationSize())) {
+ (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
+#include "isolate.h"
#include "serialize.h"
namespace v8 {
// } else {
// // Generate standard x87 floating point code.
// }
-class CpuFeatures : public AllStatic {
+class CpuFeatures {
public:
// Detect features of the target CPU. If the portable flag is set,
// the method sets safe defaults if the serializer is enabled
// (snapshots must be portable).
- static void Probe(bool portable);
- static void Clear() { supported_ = 0; }
+ void Probe(bool portable);
+ void Clear() { supported_ = 0; }
// Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
+ bool IsSupported(CpuFeature f) const {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
+ bool IsEnabled(CpuFeature f) const {
return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(CpuFeature f) {
+ explicit Scope(CpuFeature f)
+ : cpu_features_(Isolate::Current()->cpu_features()),
+ isolate_(Isolate::Current()) {
uint64_t mask = static_cast<uint64_t>(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= mask;
+ ASSERT(cpu_features_->IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (cpu_features_->found_by_runtime_probing_ & mask) == 0);
+ old_enabled_ = cpu_features_->enabled_;
+ cpu_features_->enabled_ |= mask;
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::Current(), isolate_);
+ cpu_features_->enabled_ = old_enabled_;
}
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
uint64_t old_enabled_;
+ CpuFeatures* cpu_features_;
+ Isolate* isolate_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
+
private:
- static uint64_t supported_;
- static uint64_t enabled_;
- static uint64_t found_by_runtime_probing_;
+ CpuFeatures();
+
+ uint64_t supported_;
+ uint64_t enabled_;
+ uint64_t found_by_runtime_probing_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
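Taken together with the call-site changes later in this patch, the converted class is used roughly like this (a sketch mirroring those call sites; the Probe argument and timing are assumptions):

CpuFeatures* features = Isolate::Current()->cpu_features();
features->Probe(true);                   // presumably once, at isolate setup
if (features->IsSupported(SSE2)) {
  CpuFeatures::Scope use_sse2(SSE2);     // debug-only RAII enable
  // ... emit SSE2 instructions; IsEnabled(SSE2) now holds ...
}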
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
- // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
- static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ __ jmp(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET);
}
// ebx: JSObject
// edi: start of next object
__ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, Factory::empty_fixed_array());
+ __ mov(ecx, FACTORY->empty_fixed_array());
__ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
__ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
// Set extra fields in the newly allocated object.
{ Label loop, entry;
// To allow for truncation.
if (count_constructions) {
- __ mov(edx, Factory::one_pointer_filler_map());
+ __ mov(edx, FACTORY->one_pointer_filler_map());
} else {
- __ mov(edx, Factory::undefined_value());
+ __ mov(edx, FACTORY->undefined_value());
}
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
__ jmp(&entry);
// edi: FixedArray
// edx: number of elements
// ecx: start of next object
- __ mov(eax, Factory::fixed_array_map());
+ __ mov(eax, FACTORY->fixed_array_map());
__ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
__ SmiTag(edx);
__ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
// edi: FixedArray
// ecx: start of next object
{ Label loop, entry;
- __ mov(edx, Factory::undefined_value());
+ __ mov(edx, FACTORY->undefined_value());
__ lea(eax, Operand(edi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
if (is_api_function) {
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::HandleApiCallConstruct));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::HandleApiCallConstruct));
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, CALL_FUNCTION);
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
+ __ IncrementCounter(COUNTERS->constructed_objects(), 1);
__ ret(0);
}
// Invoke the code.
if (is_construct) {
- __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
- RelocInfo::CODE_TARGET);
+ __ call(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructCall)), RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION);
__ test(eax, Operand(eax));
__ j(not_zero, &done, taken);
__ pop(ebx);
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(FACTORY->undefined_value()));
__ push(ebx);
__ inc(eax);
__ bind(&done);
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &convert_to_object);
- __ cmp(ebx, Factory::null_value());
+ __ cmp(ebx, FACTORY->null_value());
__ j(equal, &use_global_receiver);
- __ cmp(ebx, Factory::undefined_value());
+ __ cmp(ebx, FACTORY->undefined_value());
__ j(equal, &use_global_receiver);
// We don't use IsObjectJSObjectType here because we jump on success.
__ j(not_zero, &function, taken);
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ __ jmp(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET);
__ bind(&function);
}
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
__ cmp(eax, Operand(ebx));
- __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+ __ j(not_equal, Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)));
ParameterCount expected(0);
__ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
// Compute the receiver in non-strict mode.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
- __ cmp(ebx, Factory::null_value());
+ __ cmp(ebx, FACTORY->null_value());
__ j(equal, &use_global_receiver);
- __ cmp(ebx, Factory::undefined_value());
+ __ cmp(ebx, FACTORY->undefined_value());
__ j(equal, &use_global_receiver);
// If given receiver is already a JavaScript object then there's no
__ mov(edx, Operand(ebp, 2 * kPointerSize)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
// scratch2: start of next object
__ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ mov(FieldOperand(result, JSArray::kPropertiesOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
// fixed array.
if (initial_capacity == 0) {
__ mov(FieldOperand(result, JSArray::kElementsOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
return;
}
// scratch1: elements array
// scratch2: start of next object
__ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
__ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(initial_capacity)));
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
- __ mov(scratch3, Factory::the_hole_value());
+ __ mov(scratch3, FACTORY->the_hole_value());
for (int i = 0; i < initial_capacity; i++) {
__ mov(FieldOperand(scratch1,
FixedArray::kHeaderSize + i * kPointerSize),
Label loop, entry;
__ jmp(&entry);
__ bind(&loop);
- __ mov(Operand(scratch1, 0), Factory::the_hole_value());
+ __ mov(Operand(scratch1, 0), FACTORY->the_hole_value());
__ add(Operand(scratch1), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(scratch1, Operand(scratch2));
// elements_array_end: start of next object
// array_size: size of array (smi)
__ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ mov(elements_array, Factory::empty_fixed_array());
+ __ mov(elements_array, FACTORY->empty_fixed_array());
__ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
// elements_array_end: start of next object
// array_size: size of array (smi)
__ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
__ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
- __ mov(eax, Factory::the_hole_value());
+ __ mov(eax, FACTORY->the_hole_value());
__ cld();
// Do not use rep stos when filling less than kRepStosThreshold
// words.
edi,
kPreallocatedArrayElements,
&prepare_generic_code_call);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1);
__ pop(ebx);
if (construct_call) {
__ pop(edi);
edi,
true,
&prepare_generic_code_call);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1);
__ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
edi,
false,
&prepare_generic_code_call);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1);
__ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ jmp(array_code, RelocInfo::CODE_TARGET);
}
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
}
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- __ IncrementCounter(&Counters::string_ctor_calls, 1);
+ __ IncrementCounter(COUNTERS->string_ctor_calls(), 1);
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
edx, // Scratch 2.
false, // Input is known to be smi?
¬_cached);
- __ IncrementCounter(&Counters::string_ctor_cached_number, 1);
+ __ IncrementCounter(COUNTERS->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
// -- ebx : argument converted to string
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
// Set properties and elements.
- __ Set(ecx, Immediate(Factory::empty_fixed_array()));
+ __ Set(ecx, Immediate(FACTORY->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
__ j(NegateCondition(is_string), &convert_argument);
__ mov(ebx, eax);
- __ IncrementCounter(&Counters::string_ctor_string_value, 1);
+ __ IncrementCounter(COUNTERS->string_ctor_string_value(), 1);
__ jmp(&argument_is_string);
// Invoke the conversion builtin and put the result into ebx.
__ bind(&convert_argument);
- __ IncrementCounter(&Counters::string_ctor_conversions, 1);
+ __ IncrementCounter(COUNTERS->string_ctor_conversions(), 1);
__ EnterInternalFrame();
__ push(edi); // Preserve the function.
__ push(eax);
// Load the empty string into ebx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ Set(ebx, Immediate(Factory::empty_string()));
+ __ Set(ebx, Immediate(FACTORY->empty_string()));
__ pop(ecx);
__ lea(esp, Operand(esp, kPointerSize));
__ push(ecx);
// At this point the argument is already a string. Call runtime to
// create a string wrapper.
__ bind(&gc_required);
- __ IncrementCounter(&Counters::string_ctor_gc_required, 1);
+ __ IncrementCounter(COUNTERS->string_ctor_gc_required(), 1);
__ EnterInternalFrame();
__ push(ebx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
// -----------------------------------
Label invoke, dont_adapt_arguments;
- __ IncrementCounter(&Counters::arguments_adaptors, 1);
+ __ IncrementCounter(COUNTERS->arguments_adaptors(), 1);
Label enough, too_few;
__ cmp(eax, Operand(ebx));
Label fill;
__ bind(&fill);
__ inc(ecx);
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(FACTORY->undefined_value()));
__ cmp(ecx, Operand(ebx));
__ j(less, &fill);
// We shouldn't be performing on-stack replacement in the first
// place if the CPU features we need for the optimized Crankshaft
// code aren't supported.
- CpuFeatures::Probe(false);
- if (!CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
+ cpu_features->Probe(false);
+ if (!cpu_features->IsSupported(SSE2)) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
}
#include "code-stubs.h"
#include "bootstrapper.h"
#include "jsregexp.h"
+#include "isolate.h"
#include "regexp-macro-assembler.h"
namespace v8 {
__ bind(&check_heap_number);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ cmp(Operand(ebx), Immediate(FACTORY->heap_number_map()));
__ j(not_equal, &call_builtin);
__ ret(0);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
- __ mov(ebx, Immediate(Factory::empty_fixed_array()));
+ __ mov(ebx, Immediate(FACTORY->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
__ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(Factory::the_hole_value()));
+ Immediate(FACTORY->the_hole_value()));
__ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
__ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
__ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
__ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(Factory::undefined_value()));
+ Immediate(FACTORY->undefined_value()));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ pop(edx);
__ push(esi);
__ push(edx);
- __ push(Immediate(Factory::false_value()));
+ __ push(Immediate(FACTORY->false_value()));
__ push(ecx); // Restore return address.
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Setup the object header.
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), FACTORY->context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
- __ mov(ebx, Factory::undefined_value());
+ __ mov(ebx, FACTORY->undefined_value());
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ mov(Operand(eax, Context::SlotOffset(i)), ebx);
}
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(ecx, Factory::undefined_value());
+ __ cmp(ecx, FACTORY->undefined_value());
__ j(equal, &slow_case);
if (FLAG_debug_code) {
Handle<Map> expected_map;
if (mode_ == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
- expected_map = Factory::fixed_array_map();
+ expected_map = FACTORY->fixed_array_map();
} else {
ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
- expected_map = Factory::fixed_cow_array_map();
+ expected_map = FACTORY->fixed_cow_array_map();
}
__ push(ecx);
__ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
__ mov(eax, Operand(esp, 1 * kPointerSize));
// 'null' => false.
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, FACTORY->null_value());
__ j(equal, &false_result);
// Get the map and type of the heap object.
__ bind(¬_string);
// HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, Factory::heap_number_map());
+ __ cmp(edx, FACTORY->heap_number_map());
__ j(not_equal, &true_result);
__ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls_regs(), 1);
}
// Call the stub.
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls_regs(), 1);
}
// Call the stub.
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls_regs(), 1);
}
// Call the stub.
// number in eax.
__ AllocateHeapNumber(eax, ecx, ebx, slow);
// Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(left));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
}
if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
__ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
switch (op_) {
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
- __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls(), 1);
if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
Label slow;
}
Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
const char* TypeRecordingBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
// number in eax.
__ AllocateHeapNumber(eax, ecx, ebx, slow);
// Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(left));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
break;
}
__ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
switch (op_) {
case Token::DIV: {
Label not_floats;
Label not_int32;
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
case Token::MUL:
case Token::DIV: {
Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime;
- __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls(), 1);
switch (op_) {
case Token::ADD:
case Token::MUL:
case Token::DIV: {
Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ cmp(Operand(ebx), Immediate(FACTORY->heap_number_map()));
__ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its
// low and high words into ebx, edx.
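FACTORY here, and the HEAP spelling further down, read like companions of COUNTERS: macros resolving through the current isolate rather than static classes. A sketch under that assumption:

    #define HEAP (v8::internal::Isolate::Current()->heap())
    #define FACTORY (v8::internal::Isolate::Current()->factory())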
__ bind(&loaded);
} else { // UNTAGGED.
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
__ mov(eax, ecx);
__ sar(eax, 8);
__ xor_(ecx, Operand(eax));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ and_(Operand(ecx),
+ Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] or xmm1 == double value.
// ebx = low 32 bits of double value.
__ mov(eax,
Immediate(ExternalReference::transcendental_cache_array_address()));
// Eax points to cache array.
- __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ __ mov(eax, Operand(eax, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
// Eax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ test(eax, Operand(eax));
__ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
+ { TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
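The transcendental cache moves into the isolate, with the per-type caches and their Element layout nested in a SubCache. A rough sketch of the shape these hunks rely on (kCacheSize and the two-word key are assumptions inferred from the masked hash above):

    class TranscendentalCache {
      class SubCache {
        static const int kCacheSize = 512;               // assumed
        struct Element { uint32_t in[2]; Object* output; };
        // ...
      };
      SubCache* caches_[kNumberOfCaches];  // one table per isolate
    };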
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
+ if (type_info.IsInteger32() &&
+ Isolate::Current()->cpu_features()->IsEnabled(SSE2)) {
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
return;
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg1);
- __ cmp(edx, Factory::undefined_value());
+ __ cmp(edx, FACTORY->undefined_value());
__ j(not_equal, conversion_failure);
__ mov(edx, Immediate(0));
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
+ __ cmp(ebx, FACTORY->heap_number_map());
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in ecx.
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg2);
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, FACTORY->undefined_value());
__ j(not_equal, conversion_failure);
__ mov(ecx, Immediate(0));
__ jmp(&done);
__ bind(&arg2_is_object);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
+ __ cmp(ebx, FACTORY->heap_number_map());
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx.
// Load operand in edx into xmm0, or branch to not_numbers.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), FACTORY->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), FACTORY->heap_number_map());
__ j(equal, &load_float_eax);
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &test_other, not_taken); // argument in edx is OK
__ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
+ __ cmp(scratch, FACTORY->heap_number_map());
__ j(not_equal, non_float); // argument in edx is not a number -> NaN
__ bind(&test_other);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done); // argument in eax is OK
__ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
+ __ cmp(scratch, FACTORY->heap_number_map());
__ j(not_equal, non_float); // argument in eax is not a number -> NaN
// Fall-through: Both operands are numbers.
}
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
+ __ cmp(edx, FACTORY->heap_number_map());
__ j(not_equal, &slow);
if (overwrite_ == UNARY_OVERWRITE) {
__ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
// Check if the operand is a heap number.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
+ __ cmp(edx, FACTORY->heap_number_map());
__ j(not_equal, &slow, not_taken);
// Convert the heap number in eax to an untagged integer in ecx.
IntegerConvert(masm,
eax,
TypeInfo::Unknown(),
- CpuFeatures::IsSupported(SSE3),
+ Isolate::Current()->cpu_features()->IsSupported(SSE3),
&slow);
// Do the bitwise operation and check if the result fits in a smi.
__ AllocateHeapNumber(ebx, edx, edi, &slow);
__ mov(eax, Operand(ebx));
}
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ecx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
// exponent is smi and base is a heapnumber.
__ bind(&base_nonsmi);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
// on doubles.
__ bind(&exponent_nonsmi);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
// Test if exponent is nan.
__ bind(&base_not_smi);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &call_runtime);
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ and_(ecx, HeapNumber::kExponentMask);
__ lea(edi, Operand(eax, GetArgumentsObjectSize()));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
// Check that the JSArray is in fast case.
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
__ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(eax, Factory::fixed_array_map());
+ __ cmp(eax, FACTORY->fixed_array_map());
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
__ j(not_zero, &runtime);
// String is a cons string.
__ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), Factory::empty_string());
+ __ cmp(Operand(edx), FACTORY->empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// edx: code
// edi: encoding of subject string (1 if ascii 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
+ __ IncrementCounter(COUNTERS->regexp_entry_native(), 1);
- static const int kRegExpExecuteArguments = 7;
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
__ EnterApiExitFrame(kRegExpExecuteArguments);
+ // Argument 8: Pass current isolate address.
+ __ mov(Operand(esp, 7 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
+
// Argument 7: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
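With the extra slot, the outgoing frame for the native regexp entry looks roughly like this; only slots 6 and 7 are confirmed by the hunk, the rest are elided:

    // esp[7 * kPointerSize] : Isolate* (the new eighth argument)
    // esp[6 * kPointerSize] : direct-call flag (1 = called from JavaScript)
    // esp[5 * kPointerSize] .. esp[0] : remaining regexp arguments, as before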
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address);
__ mov(edx,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
__ mov(eax, Operand::StaticVariable(pending_exception));
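Top's per-VM statics (pending exception, handler chain, C entry fp, JS entry sp) become per-isolate address slots; ExternalReference now resolves the k_*_address ids through the current isolate. Conceptually something like this sketch (the accessor name is an assumption):

    Address Isolate::get_address_from_id(Isolate::AddressId id) {
      return isolate_addresses_[id];  // one slot table per isolate
    }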
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ cmp(eax, Factory::termination_exception());
+ __ cmp(eax, FACTORY->termination_exception());
Label throw_termination_exception;
__ j(equal, &throw_termination_exception);
__ bind(&failure);
// For failure to match, return null.
- __ mov(Operand(eax), Factory::null_value());
+ __ mov(Operand(eax), FACTORY->null_value());
__ ret(4 * kPointerSize);
// Load RegExp data.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
__ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(ecx, Immediate(Factory::empty_fixed_array()));
+ __ mov(ecx, Immediate(FACTORY->empty_fixed_array()));
__ lea(ebx, Operand(eax, JSRegExpResult::kSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
// Set map.
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
// Set length.
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
__ SmiUntag(ecx);
- __ mov(edx, Immediate(Factory::the_hole_value()));
+ __ mov(edx, Immediate(FACTORY->the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
// eax: JSArray.
__ jmp(&smi_hash_calculated);
__ bind(&not_smi);
__ cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, not_found);
STATIC_ASSERT(8 == kDoubleSize);
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
FixedArray::kHeaderSize));
__ test(probe, Immediate(kSmiTagMask));
__ j(zero, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
index,
times_twice_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
+ __ IncrementCounter(COUNTERS->number_to_string_native(), 1);
}
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
NearLabel check_for_nan;
- __ cmp(edx, Factory::undefined_value());
+ __ cmp(edx, FACTORY->undefined_value());
__ j(not_equal, &check_for_nan);
__ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
__ ret(0);
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used.
if (never_nan_nan_ && (cc_ == equal)) {
} else {
NearLabel heap_number;
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
__ j(equal, &heap_number);
if (cc_ != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
// Check if the non-smi operand is a heap number.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal (ebx is not zero)
if (include_number_compare_) {
Label non_number_comparison;
Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
__ Set(eax, Immediate(argc_));
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code> adaptor(Isolate::Current()->builtins()->builtin(
+ Builtins::ArgumentsAdaptorTrampoline));
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
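Builtins also lose their static lookup; each isolate owns its own builtins table. The pattern used above, spelled out as a usage sketch:

    Builtins* builtins = Isolate::Current()->builtins();
    Handle<Code> adaptor(builtins->builtin(Builtins::ArgumentsAdaptorTrampoline));
    __ jmp(adaptor, RelocInfo::CODE_TARGET);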
// Call C function.
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
__ call(Operand(ebx));
// Result is in eax or edx:eax - do not destroy these registers!
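On the callee side, the C function invoked by CEntry now receives the isolate as an explicit trailing parameter. Roughly this shape; the signature is an assumption, not quoted from the patch:

    // runtime function, post-isolates (sketch)
    MaybeObject* Runtime_Example(Arguments args, Isolate* isolate);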
// call as this may lead to crashes in the IC code later.
if (FLAG_debug_code) {
NearLabel okay;
- __ cmp(eax, Factory::the_hole_value());
+ __ cmp(eax, FACTORY->the_hole_value());
__ j(not_equal, &okay);
__ int3();
__ bind(&okay);
__ test(ecx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned, not_taken);
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address);
// Check that there is no pending exception, otherwise we
// should have returned some failure value.
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ cmp(eax, Factory::termination_exception());
+ __ cmp(eax, FACTORY->termination_exception());
__ j(equal, throw_termination_exception);
// Handle normal exception.
__ push(ebx);
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address);
__ push(Operand::StaticVariable(c_entry_fp));
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address);
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ j(not_equal, &not_outermost_js);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
// Caught exception: Store result (exception) in the pending
// exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address);
__ mov(Operand::StaticVariable(pending_exception), eax);
__ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
__ jmp(&exit);
__ call(Operand(edx));
// Unlink this frame from the handler chain.
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ __ pop(Operand::StaticVariable(ExternalReference(
+ Isolate::k_handler_address)));
// Pop next_sp.
__ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
// Restore the top frame descriptor from the stack.
__ bind(&exit);
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+ __ pop(Operand::StaticVariable(ExternalReference(
+ Isolate::k_c_entry_fp_address)));
// Restore callee-saved registers (C calling conventions).
__ pop(ebx);
__ bind(&loop);
__ cmp(scratch, Operand(prototype));
__ j(equal, &is_instance);
- __ cmp(Operand(scratch), Immediate(Factory::null_value()));
+ __ cmp(Operand(scratch), Immediate(FACTORY->null_value()));
__ j(equal, &is_not_instance);
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
- __ mov(eax, Factory::true_value());
+ __ mov(eax, FACTORY->true_value());
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
scratch, times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
- __ mov(eax, Factory::false_value());
+ __ mov(eax, FACTORY->false_value());
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ j(not_equal, &slow, not_taken);
// Null is not an instance of anything.
- __ cmp(object, Factory::null_value());
+ __ cmp(object, FACTORY->null_value());
__ j(not_equal, &object_not_null);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
NearLabel true_value, done;
__ test(eax, Operand(eax));
__ j(zero, &true_value);
- __ mov(eax, Factory::false_value());
+ __ mov(eax, FACTORY->false_value());
__ jmp(&done);
__ bind(&true_value);
- __ mov(eax, Factory::true_value());
+ __ mov(eax, FACTORY->true_value());
__ bind(&done);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
}
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
// the case we would rather go to the runtime system now to flatten
// the string.
__ cmp(FieldOperand(object_, ConsString::kSecondOffset),
- Immediate(Factory::empty_string()));
+ Immediate(FACTORY->empty_string()));
__ j(not_equal, &call_runtime_);
// Get the first of the two strings and load its instance type.
__ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
__ j(not_zero, &slow_case_, not_taken);
- __ Set(result_, Immediate(Factory::single_character_string_cache()));
+ __ Set(result_, Immediate(FACTORY->single_character_string_cache()));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(result_, Factory::undefined_value());
+ __ cmp(result_, FACTORY->undefined_value());
__ j(equal, &slow_case_, not_taken);
__ bind(&exit_);
}
__ test(ecx, Operand(ecx));
__ j(not_zero, &second_not_zero_length);
// Second string is empty, result is first string which is already in eax.
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
__ j(not_zero, &both_not_zero_length);
// First string is empty, result is second string which is in edx.
__ mov(eax, edx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Both strings are non-empty.
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, ebx, ecx, eax, edx, edi,
&make_two_character_string_no_reload, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Allocate a two character string.
__ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
__ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
- __ IncrementCounter(&Counters::string_add_make_two_char, 1);
+ __ IncrementCounter(COUNTERS->string_add_make_two_char(), 1);
__ AllocateAsciiString(eax, // Result.
2, // Length.
edi, // Scratch 1.
__ or_(ebx, Operand(ecx));
// Set the characters in the new string.
__ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&longer_than_two);
__ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
__ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
__ mov(eax, ecx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
// edx: first char of second argument
// edi: length of second argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Handle creating a flat two byte result.
// edx: first char of second argument
// edi: length of second argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
SymbolTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
- __ cmp(candidate, Factory::undefined_value());
+ __ cmp(candidate, FACTORY->undefined_value());
__ j(equal, not_found);
- __ cmp(candidate, Factory::null_value());
+ __ cmp(candidate, FACTORY->null_value());
__ j(equal, &next_probe[i]);
// If length is not 2 the string is not a candidate.
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
__ mov(esi, edx); // Restore esi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&non_ascii_flat);
__ mov(esi, edx); // Restore esi.
__ bind(&return_eax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1);
__ ret(3 * kPointerSize);
// Just jump to runtime to create the sub string.
Label result_greater;
Label compare_lengths;
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ IncrementCounter(COUNTERS->string_compare_native(), 1);
// Find minimum length.
NearLabel left_shorter;
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ IncrementCounter(COUNTERS->string_compare_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&not_same);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or SSE2 or CMOV is unsupported.
- if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
+ if (cpu_features->IsSupported(SSE2) && cpu_features->IsSupported(CMOV)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(CMOV);
if (static_operands_type_.IsSmi()) {
mode_ = NO_OVERWRITE;
}
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
+ use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
+ use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
safe_int32_mode_enabled_(true),
function_return_is_shadowed_(false),
in_spilled_code_(false),
- jit_cookie_((FLAG_mask_constants_with_cookie) ? V8::RandomPrivate() : 0) {
+ jit_cookie_((FLAG_mask_constants_with_cookie) ?
+ V8::RandomPrivate(Isolate::Current()) : 0) {
}
ASSERT_EQ(0, loop_nesting_);
loop_nesting_ = info->is_in_loop() ? 1 : 0;
- JumpTarget::set_compiling_deferred_code(false);
+ Isolate::Current()->set_jump_target_compiling_deferred_code(false);
{
CodeGenState state(this);
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
+ frame_->Push(FACTORY->the_hole_value());
StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
}
if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
+ bool is_builtin = info->isolate()->bootstrapper()->IsActive();
bool should_trace =
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
ASSERT(!function_return_is_shadowed_);
CodeForReturnPosition(info->function());
frame_->PrepareForReturn();
- Result undefined(Factory::undefined_value());
+ Result undefined(FACTORY->undefined_value());
if (function_return_.is_bound()) {
function_return_.Jump(&undefined);
} else {
// Process any deferred code using the register allocator.
if (!HasStackOverflow()) {
- JumpTarget::set_compiling_deferred_code(true);
+ info->isolate()->set_jump_target_compiling_deferred_code(true);
ProcessDeferred();
- JumpTarget::set_compiling_deferred_code(false);
+ info->isolate()->set_jump_target_compiling_deferred_code(false);
}
// There is no need to delete the register allocator, it is a
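The compiling-deferred-code flag that used to be a JumpTarget static also lands on the isolate. Presumably a plain accessor pair, sketched here under that assumption:

    // On Isolate (assumed):
    bool jump_target_compiling_deferred_code_;
    void set_jump_target_compiling_deferred_code(bool flag) {
      jump_target_compiling_deferred_code_ = flag;
    }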
__ sar(val, 1);
// If there was an overflow, bits 30 and 31 of the original number disagree.
__ xor_(val, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvtsi2sd(xmm0, Operand(val));
} else {
no_reg, &allocation_failed);
VirtualFrame* clone = new VirtualFrame(frame_);
scratch.Unuse();
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
} else {
RegisterFile empty_regs;
SetFrame(clone, &empty_regs);
__ bind(&allocation_failed);
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
// Pop the value from the floating point stack.
__ fstp(0);
}
safe_int32_mode_enabled() &&
expr->side_effect_free() &&
expr->num_bit_ops() > 2 &&
- CpuFeatures::IsSupported(SSE2)) {
+ Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
BreakTarget unsafe_bailout;
JumpTarget done;
unsafe_bailout.set_expected_height(frame_->height());
if (dest.false_was_fall_through()) {
// The false target was just bound.
JumpTarget loaded;
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
// There may be dangling jumps to the true target.
if (true_target.is_linked()) {
loaded.Jump();
true_target.Bind();
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
loaded.Bind();
}
// There is true, and possibly false, control flow (with true as
// the fall through).
JumpTarget loaded;
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
if (false_target.is_linked()) {
loaded.Jump();
false_target.Bind();
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
loaded.Bind();
}
loaded.Jump(); // Don't lose the current TOS.
if (true_target.is_linked()) {
true_target.Bind();
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
if (false_target.is_linked()) {
loaded.Jump();
}
}
if (false_target.is_linked()) {
false_target.Bind();
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
}
loaded.Bind();
}
// When using lazy arguments allocation, we store the arguments marker value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
- frame_->Push(Factory::arguments_marker());
+ frame_->Push(FACTORY->arguments_marker());
} else {
ArgumentsAccessStub stub(is_strict_mode()
? ArgumentsAccessStub::NEW_STRICT
// been assigned a proper value.
skip_arguments = !probe.handle()->IsArgumentsMarker();
} else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
+ __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
probe.Unuse();
done.Branch(not_equal);
}
} else {
// Fast case checks.
// 'false' => false.
- __ cmp(value.reg(), Factory::false_value());
+ __ cmp(value.reg(), FACTORY->false_value());
dest->false_target()->Branch(equal);
// 'true' => true.
- __ cmp(value.reg(), Factory::true_value());
+ __ cmp(value.reg(), FACTORY->true_value());
dest->true_target()->Branch(equal);
// 'undefined' => false.
- __ cmp(value.reg(), Factory::undefined_value());
+ __ cmp(value.reg(), FACTORY->undefined_value());
dest->false_target()->Branch(equal);
// Smi => false iff zero.
Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
- if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
+ if (Token::IsBitOp(op_) &&
+ Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
return &non_smi_input_;
} else {
return entry_label();
void DeferredInlineBinaryOperation::Generate() {
// Registers are not saved implicitly for this stub, so we should not
// tread on the registers that were not passed to us.
- if (CpuFeatures::IsSupported(SSE2) &&
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2) &&
((op_ == Token::ADD) ||
(op_ == Token::SUB) ||
(op_ == Token::MUL) ||
__ j(zero, &left_smi);
if (!left_info_.IsNumber()) {
__ cmp(FieldOperand(left_, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &call_runtime);
}
__ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
__ j(zero, &right_smi);
if (!right_info_.IsNumber()) {
__ cmp(FieldOperand(right_, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &call_runtime);
}
__ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
// The left_ and right_ registers have not been initialized yet.
__ mov(right_, Immediate(smi_value_));
__ mov(left_, Operand(dst_));
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
__ jmp(entry_label());
return;
} else {
// This trashes right_.
__ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
__ bind(&allocation_ok);
- if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2) &&
+ op_ != Token::SHR) {
CpuFeatures::Scope use_sse2(SSE2);
ASSERT(Token::IsBitOp(op_));
// Signed conversion.
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
+ Object* answer_object = HEAP->undefined_value();
switch (op) {
case Token::ADD:
if (Smi::IsValid(left + right)) {
UNREACHABLE();
break;
}
- if (answer_object == Heap::undefined_value()) {
+ if (answer_object->IsUndefined()) {
return false;
}
frame_->Push(Handle<Object>(answer_object));
// Jump or fall through to here if we are comparing a non-smi to a
// constant smi. If the non-smi is a heap number and this is not
// a loop condition, inline the floating point code.
- if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) {
+ if (!is_loop_condition &&
+ Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
CpuFeatures::Scope use_sse2(SSE2);
JumpTarget not_number;
__ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
not_number.Branch(not_equal, left_side);
__ movdbl(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
__ test(operand->reg(), Immediate(kSmiTagMask));
__ j(zero, &done);
__ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
not_numbers->Branch(not_equal, left_side, right_side, not_taken);
__ bind(&done);
}
__ j(zero, &smi);
if (!operand->type_info().IsNumber()) {
__ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
not_numbers->Branch(not_equal, left_side, right_side, taken);
}
__ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
ASSERT(right_side->is_register());
JumpTarget not_numbers;
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
// Load left and right operand into registers xmm0 and xmm1 and compare.
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
frame()->Dup();
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
__ nop();
if (probe.is_constant()) {
try_lazy = probe.handle()->IsArgumentsMarker();
} else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
+ __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
probe.Unuse();
__ j(not_equal, &slow);
}
__ j(not_equal, &build_args);
__ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
__ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ Handle<Code> apply_code(Isolate::Current()->builtins()->builtin(
+ Builtins::FunctionApply));
__ cmp(Operand(ecx), Immediate(apply_code));
__ j(not_equal, &build_args);
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Immediate(Factory::the_hole_value()));
+ frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
// If we have a function or a constant, we need to initialize the variable.
Expression* val = NULL;
if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
+ val = new Literal(FACTORY->the_hole_value());
} else {
val = node->fun(); // NULL if we don't have a function
}
frame_->EmitPop(eax);
// eax: value to be iterated over
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, FACTORY->undefined_value());
exit.Branch(equal);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, FACTORY->null_value());
exit.Branch(equal);
// Stack layout in body:
loop.Bind();
// Check that there are no elements.
__ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
call_runtime.Branch(not_equal);
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in ebx for the subsequent
// prototype load.
__ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_descriptor_array()));
+ __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
call_runtime.Branch(equal);
// Check that there is an enum cache in the non-empty instance
// descriptors. This is the case if the next enumeration index
__ cmp(ecx, Operand(eax));
check_prototype.Branch(equal);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
call_runtime.Branch(not_equal);
check_prototype.Bind();
// Load the prototype from the map and loop if non-null.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
loop.Branch(not_equal);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
// Runtime::kGetPropertyNamesFast)
__ mov(edx, Operand(eax));
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, Factory::meta_map());
+ __ cmp(ecx, FACTORY->meta_map());
fixed_array.Branch(not_equal);
use_cache.Bind();
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// Make sure that there's nothing left on the stack above the
// handler structure.
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// If we can fall off the end of the try block, unlink from the try
// chain and set the state on the frame to FALLING.
// Fake a top of stack value (unneeded when FALLING) and set the
// state in ecx, then jump around the unlink blocks if any.
- frame_->EmitPush(Immediate(Factory::undefined_value()));
+ frame_->EmitPush(Immediate(FACTORY->undefined_value()));
__ Set(ecx, Immediate(Smi::FromInt(FALLING)));
if (nof_unlinks > 0) {
finally_block.Jump();
frame_->EmitPush(eax);
} else {
// Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Immediate(Factory::undefined_value()));
+ frame_->EmitPush(Immediate(FACTORY->undefined_value()));
}
__ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
if (--nof_unlinks > 0) {
frame()->EmitPush(esi);
frame()->EmitPush(Immediate(function_info));
frame()->EmitPush(Immediate(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? FACTORY->true_value()
+ : FACTORY->false_value()));
return frame()->CallRuntime(Runtime::kNewClosure, 3);
}
}
Comment cmnt(masm_, "[ Load const");
Label exit;
__ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, Factory::the_hole_value());
+ __ cmp(ecx, FACTORY->the_hole_value());
__ j(not_equal, &exit);
- __ mov(ecx, Factory::undefined_value());
+ __ mov(ecx, FACTORY->undefined_value());
__ bind(&exit);
frame()->EmitPush(ecx);
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
- __ cmp(Operand(result.reg()), Immediate(Factory::arguments_marker()));
+ __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
frame()->Push(&result);
exit.Branch(not_equal);
__ bind(&next);
// Terminate at global context.
__ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(Factory::global_context_map()));
+ Immediate(FACTORY->global_context_map()));
__ j(equal, &fast);
// Check that extension is NULL.
__ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
__ mov(result->reg(),
ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(result->reg(), Factory::the_hole_value());
+ __ cmp(result->reg(), FACTORY->the_hole_value());
done->Branch(not_equal, result);
- __ mov(result->reg(), Factory::undefined_value());
+ __ mov(result->reg(), FACTORY->undefined_value());
}
done->Jump(result);
} else if (rewrite != NULL) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Init const");
__ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, Factory::the_hole_value());
+ __ cmp(ecx, FACTORY->the_hole_value());
exit.Branch(not_equal);
}
Register target,
int registers_to_save = 0)
: size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
+ ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
ASSERT_EQ(0, registers_to_save & target.bit());
set_comment("[ DeferredAllocateInNewSpace");
}
// jump to the deferred code passing the literals array.
DeferredRegExpLiteral* deferred =
new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), Factory::undefined_value());
+ __ cmp(boilerplate.reg(), FACTORY->undefined_value());
deferred->Branch(equal);
deferred->BindExit();
frame_->Push(node->constant_elements());
int length = node->values()->length();
Result clone;
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
clone = frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(COUNTERS->cow_arrays_created_stub(), 1);
} else if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
Load(function);
// Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
// Load the arguments.
int arg_count = args->length();
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
frame_->PushParameterAt(-1);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
frame_->PushParameterAt(-1);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
__ bind(&need_conversion_);
// Move the undefined value into the result register, which will
// trigger conversion.
- __ Set(result_, Immediate(Factory::undefined_value()));
+ __ Set(result_, Immediate(FACTORY->undefined_value()));
__ jmp(exit_label());
__ bind(&index_out_of_range_);
// When the index is out of range, the spec requires us to return
// NaN.
- __ Set(result_, Immediate(Factory::nan_value()));
+ __ Set(result_, Immediate(FACTORY->nan_value()));
__ jmp(exit_label());
}
__ bind(&index_out_of_range_);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ Set(result_, Immediate(Factory::empty_string()));
+ __ Set(result_, Immediate(FACTORY->empty_string()));
__ jmp(exit_label());
}
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ sar(array_length, 1);
__ j(not_zero, &non_trivial_array);
- __ mov(result_operand, Factory::empty_string());
+ __ mov(result_operand, FACTORY->empty_string());
__ jmp(&done);
// Save the array length.
__ bind(&bailout);
- __ mov(result_operand, Factory::undefined_value());
+ __ mov(result_operand, FACTORY->undefined_value());
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
__ test(obj.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
- __ cmp(obj.reg(), Factory::null_value());
+ __ cmp(obj.reg(), FACTORY->null_value());
destination()->true_target()->Branch(equal);
Result map = allocator()->Allocate();
// Check for fast case object. Generate false result for slow case object.
__ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
__ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ cmp(scratch1_, Factory::hash_table_map());
+ __ cmp(scratch1_, FACTORY->hash_table_map());
__ j(equal, &false_result);
// Look for valueOf symbol in the descriptor array, and indicate false if
__ jmp(&entry);
__ bind(&loop);
__ mov(scratch2_, FieldOperand(map_result_, 0));
- __ cmp(scratch2_, Factory::value_of_symbol());
+ __ cmp(scratch2_, FACTORY->value_of_symbol());
__ j(equal, &false_result);
__ add(Operand(map_result_), Immediate(kPointerSize));
__ bind(&entry);
// Functions have class 'Function'.
function.Bind();
- frame_->Push(Factory::function_class_symbol());
+ frame_->Push(FACTORY->function_class_symbol());
leave.Jump();
// Objects with a non-function constructor have class 'Object'.
non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
+ frame_->Push(FACTORY->Object_symbol());
leave.Jump();
// Non-JS objects have class null.
null.Bind();
- frame_->Push(Factory::null_value());
+ frame_->Push(FACTORY->null_value());
// All done.
leave.Bind();
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
// This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, Operand(ebx));
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ Isolate::Current()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
return;
}
// Check the object's elements are in fast case and writable.
__ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
deferred->Branch(not_equal);
// Smi-tagging is equivalent to multiplying by 2.
__ bind(&done);
deferred->BindExit();
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
frame_->Push(&res);
} else {
// exponent is smi and base is a heapnumber.
__ bind(&base_nonsmi);
__ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
call_runtime.Branch(not_equal);
__ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
// on doubles.
__ bind(&exponent_nonsmi);
__ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
call_runtime.Branch(not_equal);
__ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
// Test if exponent is nan.
__ jmp(&handle_special_cases);
__ bind(&base_not_smi);
__ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
call_runtime.Branch(not_equal);
__ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
__ and_(answer.reg(), HeapNumber::kExponentMask);
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
frame()->Push(&result);
} else {
__ jmp(&load_done);
__ bind(&non_smi);
__ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &runtime);
__ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
ZoneList<Expression*>* args = node->arguments();
Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
+ const Runtime::Function* function = node->function();
if (function == NULL) {
// Push the builtins object found in the current global object.
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
}
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
+ frame_->SetElementAt(0, FACTORY->true_value());
}
} else if (op == Token::TYPEOF) {
expression->AsLiteral()->IsNull())) {
// Omit evaluating the value of the primitive literal.
// It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
} else {
Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
+ frame_->SetElementAt(0, FACTORY->undefined_value());
}
} else {
Result answer = frame_->Pop();
answer.ToRegister();
- if (check->Equals(Heap::number_symbol())) {
+ if (check->Equals(HEAP->number_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->true_target()->Branch(zero);
frame_->Spill(answer.reg());
__ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ cmp(answer.reg(), Factory::heap_number_map());
+ __ cmp(answer.reg(), FACTORY->heap_number_map());
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::string_symbol())) {
+ } else if (check->Equals(HEAP->string_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
answer.Unuse();
destination()->Split(below);
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ cmp(answer.reg(), Factory::true_value());
+ } else if (check->Equals(HEAP->boolean_symbol())) {
+ __ cmp(answer.reg(), FACTORY->true_value());
destination()->true_target()->Branch(equal);
- __ cmp(answer.reg(), Factory::false_value());
+ __ cmp(answer.reg(), FACTORY->false_value());
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ cmp(answer.reg(), Factory::undefined_value());
+ } else if (check->Equals(HEAP->undefined_symbol())) {
+ __ cmp(answer.reg(), FACTORY->undefined_value());
destination()->true_target()->Branch(equal);
__ test(answer.reg(), Immediate(kSmiTagMask));
answer.Unuse();
destination()->Split(not_zero);
- } else if (check->Equals(Heap::function_symbol())) {
+ } else if (check->Equals(HEAP->function_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
frame_->Spill(answer.reg());
__ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::object_symbol())) {
+ } else if (check->Equals(HEAP->object_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
- __ cmp(answer.reg(), Factory::null_value());
+ __ cmp(answer.reg(), FACTORY->null_value());
destination()->true_target()->Branch(equal);
Result map = allocator()->Allocate();
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
__ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
- __ cmp(scratch.reg(), Factory::heap_number_map());
+ __ cmp(scratch.reg(), FACTORY->heap_number_map());
JumpTarget not_a_number;
not_a_number.Branch(not_equal, &lhs);
__ mov(scratch.reg(),
Load(node->expression());
Result operand = frame_->Pop();
operand.ToRegister();
- __ cmp(operand.reg(), Factory::null_value());
+ __ cmp(operand.reg(), FACTORY->null_value());
if (node->is_strict()) {
operand.Unuse();
destination()->Split(equal);
// The 'null' value is only equal to 'undefined' if using non-strict
// comparisons.
destination()->true_target()->Branch(equal);
- __ cmp(operand.reg(), Factory::undefined_value());
+ __ cmp(operand.reg(), FACTORY->undefined_value());
destination()->true_target()->Branch(equal);
__ test(operand.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(equal);
__ mov(eax, receiver_);
}
__ Set(ecx, Immediate(name_));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
RelocInfo::Mode mode = is_contextual_
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
// instruction that gets patched and coverage code gets in the way.
if (is_contextual_) {
masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_inline_miss(), 1);
if (is_dont_delete_) {
- __ IncrementCounter(&Counters::dont_delete_hint_miss, 1);
+ __ IncrementCounter(COUNTERS->dont_delete_hint_miss(), 1);
}
} else {
masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->named_load_inline_miss(), 1);
}
if (!dst_.is(eax)) __ mov(dst_, eax);
// it in the IC initialization code and patch the cmp instruction.
// This means that we cannot allow test instructions after calls to
// KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instruction to the
// test instruction. We use masm_-> directly here instead of the __
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_inline_miss(), 1);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_inline_miss(), 1);
// Move value_ to eax, key_ to ecx, and receiver_ to edx.
Register old_value = value_;
}
// Call the IC stub.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
bool contextual_load_in_builtin =
is_contextual &&
- (Bootstrapper::IsActive() ||
+ (Isolate::Current()->bootstrapper()->IsActive() ||
(!info_->closure().is_null() && info_->closure()->IsBuiltin()));
Result result;
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(FACTORY->null_value()));
// This branch is always a forwards branch so it's always a fixed size
// which allows the assert below to succeed and patching to work.
deferred->Branch(not_equal);
if (is_contextual) {
// Load the (initially invalid) cell and get its value.
- masm()->mov(result.reg(), Factory::null_value());
+ masm()->mov(result.reg(), FACTORY->null_value());
if (FLAG_debug_code) {
__ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Factory::global_property_cell_map());
+ FACTORY->global_property_cell_map());
__ Assert(equal, "Uninitialized inlined contextual load");
}
__ mov(result.reg(),
FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
+ __ cmp(result.reg(), FACTORY->the_hole_value());
+ deferred->Branch(equal);
bool is_dont_delete = false;
if (!info_->closure().is_null()) {
// When doing lazy compilation we can check if the global cell
}
deferred->set_is_dont_delete(is_dont_delete);
if (!is_dont_delete) {
- __ cmp(result.reg(), Factory::the_hole_value());
+ __ cmp(result.reg(), FACTORY->the_hole_value());
deferred->Branch(equal);
} else if (FLAG_debug_code) {
- __ cmp(result.reg(), Factory::the_hole_value());
+ __ cmp(result.reg(), FACTORY->the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_inline(), 1);
if (is_dont_delete) {
- __ IncrementCounter(&Counters::dont_delete_hint_hit, 1);
+ __ IncrementCounter(COUNTERS->dont_delete_hint_hit(), 1);
}
} else {
// The initial (invalid) offset has to be large enough to force a 32-bit
// instruction encoding to allow patching with an arbitrary offset. Use
// kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt;
masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(&Counters::named_load_inline, 1);
+ __ IncrementCounter(COUNTERS->named_load_inline(), 1);
}
deferred->BindExit();
// Initially use an invalid map to force a failure.
__ bind(&patch_site);
masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(FACTORY->null_value()));
// This branch is always a forwards branch so it's always a fixed size
// which allows the assert below to succeed and patching to work.
slow.Branch(not_equal, &value, &receiver);
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(FACTORY->null_value()));
deferred->Branch(not_equal);
// Check that the key is a smi.
times_2,
FixedArray::kHeaderSize));
result = elements;
- __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_inline(), 1);
deferred->BindExit();
} else {
// Bind the deferred code patch site to be able to locate the fixed
// array map comparison. When debugging, we patch this comparison to
// always fail so that we will hit the IC call in the deferred code
// which will allow the debugger to break for fast case stores.
__ bind(deferred->patch_site());
__ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
deferred->Branch(not_equal);
// Check that the key is within bounds. Both the key and the length of
// Store the value.
__ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_inline(), 1);
deferred->BindExit();
} else {
__ int3();
__ bind(&ok);
}
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope enable(SSE2);
__ push(edi);
__ push(esi);
__ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
- __ IncrementCounter(&Counters::memcopy_aligned, 1);
+ __ IncrementCounter(COUNTERS->memcopy_aligned(), 1);
// Copy loop for aligned source and destination.
__ mov(edx, count);
Register loop_count = ecx;
// Copy loop for unaligned source and aligned destination.
// If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ IncrementCounter(&Counters::memcopy_unaligned, 1);
+ __ IncrementCounter(COUNTERS->memcopy_unaligned(), 1);
__ mov(edx, ecx);
Register loop_count = ecx;
Register count = edx;
}
} else {
- __ IncrementCounter(&Counters::memcopy_noxmm, 1);
+ __ IncrementCounter(COUNTERS->memcopy_noxmm(), 1);
// SSE2 not supported. Unlikely to happen in practice.
__ push(edi);
__ push(esi);
int jit_cookie_;
friend class VirtualFrame;
+ friend class Isolate;
friend class JumpTarget;
friend class Reference;
friend class Result;
friend class LCodeGen;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
+ friend class InlineRuntimeFunctionsTable;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
namespace internal {
void CPU::Setup() {
- CpuFeatures::Clear();
- CpuFeatures::Probe(true);
- if (!CpuFeatures::IsSupported(SSE2) || Serializer::enabled()) {
+ CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
+ cpu_features->Clear();
+ cpu_features->Probe(true);
+ if (!cpu_features->IsSupported(SSE2) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+ Isolate* isolate = Isolate::Current();
+ rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
+ Isolate* isolate = Isolate::Current();
rinfo()->PatchCodeWithCall(
- Debug::debug_break_slot()->entry(),
+ isolate->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
// Handle the junk part after the new relocation info. We will create
// a non-live object in the extra space at the end of the former reloc info.
Address junk_address = reloc_info->address() + reloc_info->Size();
ASSERT(junk_address <= reloc_end_address);
- Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
+ HEAP->CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- node->set_next(deoptimizing_code_list_);
- deoptimizing_code_list_ = node;
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
- Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ Code* continuation =
+ Isolate::Current()->builtins()->builtin(Builtins::NotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
// Set the continuation for the topmost frame.
if (is_topmost) {
+ Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
- ? Builtins::builtin(Builtins::NotifyDeoptimized)
- : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ ? builtins->builtin(Builtins::NotifyDeoptimized)
+ : builtins->builtin(Builtins::NotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
}
const char* NameConverter::NameOfAddress(byte* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
#ifndef V8_IA32_FRAMES_IA32_H_
#define V8_IA32_FRAMES_IA32_H_
+#include "memory.h"
+
namespace v8 {
namespace internal {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = scope()->num_stack_slots();
if (locals_count == 1) {
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
- __ mov(eax, Immediate(Factory::undefined_value()));
+ __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
for (int i = 0; i < locals_count; i++) {
__ push(eax);
}
// Always emit a 'return undefined' in case control fell off the end of
// the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
EmitReturnSequence();
}
}
Label* materialize_false) const {
NearLabel done;
__ bind(materialize_true);
- __ mov(result_register(), Factory::true_value());
+ __ mov(result_register(), isolate()->factory()->true_value());
__ jmp(&done);
__ bind(materialize_false);
- __ mov(result_register(), Factory::false_value());
+ __ mov(result_register(), isolate()->factory()->false_value());
__ bind(&done);
}
Label* materialize_false) const {
NearLabel done;
__ bind(materialize_true);
- __ push(Immediate(Factory::true_value()));
+ __ push(Immediate(isolate()->factory()->true_value()));
__ jmp(&done);
__ bind(materialize_false);
- __ push(Immediate(Factory::false_value()));
+ __ push(Immediate(isolate()->factory()->false_value()));
__ bind(&done);
}
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Handle<Object> value =
- flag ? Factory::true_value() : Factory::false_value();
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
__ mov(result_register(), value);
}
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Handle<Object> value =
- flag ? Factory::true_value() : Factory::false_value();
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
__ push(Immediate(value));
}
Label* if_false,
Label* fall_through) {
// Emit the inlined tests assumed by the stub.
- __ cmp(result_register(), Factory::undefined_value());
+ __ cmp(result_register(), isolate()->factory()->undefined_value());
__ j(equal, if_false);
- __ cmp(result_register(), Factory::true_value());
+ __ cmp(result_register(), isolate()->factory()->true_value());
__ j(equal, if_true);
- __ cmp(result_register(), Factory::false_value());
+ __ cmp(result_register(), isolate()->factory()->false_value());
__ j(equal, if_false);
STATIC_ASSERT(kSmiTag == 0);
__ test(result_register(), Operand(result_register()));
}
if (should_normalize) {
- __ cmp(eax, Factory::true_value());
+ __ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, NULL);
__ bind(&skip);
}
case Slot::LOCAL:
if (mode == Variable::CONST) {
__ mov(Operand(ebp, SlotOffset(slot)),
- Immediate(Factory::the_hole_value()));
+ Immediate(isolate()->factory()->the_hole_value()));
} else if (function != NULL) {
VisitForAccumulatorValue(function);
__ mov(Operand(ebp, SlotOffset(slot)), result_register());
}
if (mode == Variable::CONST) {
__ mov(ContextOperand(esi, slot->index()),
- Immediate(Factory::the_hole_value()));
+ Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.
} else if (function != NULL) {
VisitForAccumulatorValue(function);
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (mode == Variable::CONST) {
- __ push(Immediate(Factory::the_hole_value()));
+ __ push(Immediate(isolate()->factory()->the_hole_value()));
} else if (function != NULL) {
VisitForStackValue(function);
} else {
__ pop(edx);
} else {
__ mov(edx, eax);
- __ mov(eax, Factory::the_hole_value());
+ __ mov(eax, isolate()->factory()->the_hole_value());
}
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(
- is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(is_strict_mode()
+ ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
// Get the object to enumerate over. Both SpiderMonkey and JSC
// ignore null and undefined in contrast to the specification; see
// ECMA-262 section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, isolate()->factory()->null_value());
__ j(equal, &exit);
// Convert the object to a JS object.
// Check that there are no elements. Register ecx contains the
// current JS object we've reached through the prototype chain.
__ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
- Factory::empty_fixed_array());
+ isolate()->factory()->empty_fixed_array());
__ j(not_equal, &call_runtime);
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in ebx for the subsequent
// prototype load.
__ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(edx, Factory::empty_descriptor_array());
+ __ cmp(edx, isolate()->factory()->empty_descriptor_array());
__ j(equal, &call_runtime);
// Check that there is an enum cache in the non-empty instance
__ cmp(ecx, Operand(eax));
__ j(equal, &check_prototype);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(edx, Factory::empty_fixed_array());
+ __ cmp(edx, isolate()->factory()->empty_fixed_array());
__ j(not_equal, &call_runtime);
// Load the prototype from the map and loop if non-null.
__ bind(&check_prototype);
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(ecx, Factory::null_value());
+ __ cmp(ecx, isolate()->factory()->null_value());
__ j(not_equal, &next);
// The enum cache is valid. Load the map of the object being
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
NearLabel fixed_array;
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map());
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->meta_map());
__ j(not_equal, &fixed_array);
// We got a map in register eax. Get the enumeration cache from it.
__ push(esi);
__ push(Immediate(info));
__ push(Immediate(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value()));
__ CallRuntime(Runtime::kNewClosure, 3);
}
context()->Plug(eax);
__ bind(&next);
// Terminate at global context.
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(Factory::global_context_map()));
+ Immediate(isolate()->factory()->global_context_map()));
__ j(equal, &fast);
// Check that extension is NULL.
__ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
// All extension objects were empty and it is safe to use a global
// load IC call.
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, slot->var()->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
__ mov(eax,
ContextSlotOperandCheckExtensions(potential_slot, slow));
if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(eax, Factory::the_hole_value());
+ __ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
}
__ jmp(done);
} else if (rewrite != NULL) {
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ mov(eax, Immediate(key_literal->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(eax);
NearLabel done;
MemOperand slot_operand = EmitSlotSearch(slot, eax);
__ mov(eax, slot_operand);
- __ cmp(eax, Factory::the_hole_value());
+ __ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done);
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
__ bind(&done);
context()->Plug(eax);
} else {
__ mov(eax, Immediate(key_literal->handle()));
// Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Drop key and object left on the stack by IC.
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, Factory::undefined_value());
+ __ cmp(ebx, isolate()->factory()->undefined_value());
__ j(not_equal, &materialized);
// Create regexp literal using runtime function
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->handle()));
__ mov(edx, Operand(esp, 0));
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->constant_elements()));
- if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
ASSERT(expr->depth() == 1);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
__ CallStub(&stub);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ pop(edx);
}
__ pop(eax); // Restore value.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
// ecx, and the global object on the stack.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
break;
case Slot::LOCAL:
__ mov(edx, Operand(ebp, SlotOffset(slot)));
- __ cmp(edx, Factory::the_hole_value());
+ __ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &skip);
__ mov(Operand(ebp, SlotOffset(slot)), eax);
break;
case Slot::CONTEXT: {
__ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
__ mov(edx, ContextOperand(ecx, slot->index()));
- __ cmp(edx, Factory::the_hole_value());
+ __ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &skip);
__ mov(ContextOperand(ecx, slot->index()), eax);
int offset = Context::SlotOffset(slot->index());
} else {
__ pop(edx);
}
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Record source position of the IC call.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arg_count, in_loop);
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
// Restore context register.
// Record source position of the IC call.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
+ arg_count, in_loop);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
} else {
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(FACTORY->undefined_value()));
}
// Push the receiver of the enclosing function.
{ PreservePositionScope pos_scope(masm()->positions_recorder());
VisitForStackValue(fun);
// Reserved receiver slot.
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
// Record source code position for IC call.
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Push result (function).
__ push(eax);
// also use the full code generator.
FunctionLiteral* lit = fun->AsFunctionLiteral();
if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
+ lit->name()->Equals(isolate()->heap()->empty_string()) &&
loop_depth() == 0) {
lit->set_try_full_codegen(true);
}
__ Set(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> construct_builtin(isolate()->builtins()->builtin(
+ Builtins::JSConstructCall));
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
context()->Plug(eax);
}
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
// Functions have class 'Function'.
__ bind(&function);
- __ mov(eax, Factory::function_class_symbol());
+ __ mov(eax, isolate()->factory()->function_class_symbol());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ mov(eax, Factory::Object_symbol());
+ __ mov(eax, isolate()->factory()->Object_symbol());
__ jmp(&done);
// Non-JS objects have class null.
__ bind(&null);
- __ mov(eax, Factory::null_value());
+ __ mov(eax, isolate()->factory()->null_value());
// All done.
__ bind(&done);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
}
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
// This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (isolate()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, Operand(ebx));
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
- __ Set(result, Immediate(Factory::nan_value()));
+ __ Set(result, Immediate(isolate()->factory()->nan_value()));
__ jmp(&done);
__ bind(&need_conversion);
// Move the undefined value into the result register, which will
// trigger conversion.
- __ Set(result, Immediate(Factory::undefined_value()));
+ __ Set(result, Immediate(isolate()->factory()->undefined_value()));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ Set(result, Immediate(Factory::empty_string()));
+ __ Set(result, Immediate(isolate()->factory()->empty_string()));
__ jmp(&done);
__ bind(&need_conversion);
// Check the object's elements are in fast case and writable.
__ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
__ cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(isolate()->factory()->fixed_array_map()));
__ j(not_equal, &slow_case);
// Check that both indices are smis.
__ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ add(Operand(esp), Immediate(3 * kPointerSize));
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
__ bind(&slow_case);
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ isolate()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
return;
}
__ and_(Operand(tmp), right);
__ test(Operand(tmp), Immediate(kSmiTagMask));
__ j(zero, &fail);
- __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
+ __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
+ __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
__ j(not_equal, &fail);
__ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
__ j(not_equal, &fail);
__ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
__ j(equal, &ok);
__ bind(&fail);
- __ mov(eax, Immediate(Factory::false_value()));
+ __ mov(eax, Immediate(isolate()->factory()->false_value()));
__ jmp(&done);
__ bind(&ok);
- __ mov(eax, Immediate(Factory::true_value()));
+ __ mov(eax, Immediate(isolate()->factory()->true_value()));
__ bind(&done);
context()->Plug(eax);
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length);
__ j(not_zero, &non_trivial_array);
- __ mov(result_operand, Factory::empty_string());
+ __ mov(result_operand, FACTORY->empty_string());
__ jmp(&done);
// Save the array length.
__ bind(&bailout);
- __ mov(result_operand, Factory::undefined_value());
+ __ mov(result_operand, FACTORY->undefined_value());
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
// Call the JS runtime function via a call IC.
__ Set(ecx, Immediate(expr->name()));
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arg_count, in_loop);
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
VisitForEffect(expr->expression());
- context()->Plug(Factory::undefined_value());
+ context()->Plug(isolate()->factory()->undefined_value());
break;
}
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
Comment cmnt(masm_, "Global variable");
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- if (check->Equals(Heap::number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(eax, if_true);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ isolate()->factory()->heap_number_map());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, if_false);
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ cmp(eax, Factory::true_value());
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ __ cmp(eax, isolate()->factory()->true_value());
__ j(equal, if_true);
- __ cmp(eax, Factory::false_value());
+ __ cmp(eax, isolate()->factory()->false_value());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ cmp(eax, Factory::undefined_value());
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
// Check for undetectable objects => true.
__ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_FUNCTION_CLASS_TYPE, edx);
Split(above_equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edx);
__ j(below, if_false);
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- __ cmp(eax, Factory::true_value());
+ __ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, isolate()->factory()->null_value());
if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
} else {
__ j(equal, if_true);
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
mode == RelocInfo::CODE_TARGET_CONTEXT);
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(isolate()->counters()->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(isolate()->counters()->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
default:
break;
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(COUNTERS->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(COUNTERS->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_full(), 1);
default:
break;
}
__ j(not_zero, miss, not_taken);
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CheckMap(r0, Factory::hash_table_map(), miss, true);
+ __ CheckMap(r0, FACTORY->hash_table_map(), miss, true);
}
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
- __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true);
+ __ CheckMap(scratch, FACTORY->fixed_array_map(), not_fast_array, true);
} else {
__ AssertFastElements(scratch);
}
// Fast case: Do the load.
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(Operand(scratch), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
eax,
NULL,
&slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
// ebx: untagged index
// eax: key
// ecx: elements
- __ CheckMap(ecx, Factory::hash_table_map(), &slow, true);
+ __ CheckMap(ecx, FACTORY->hash_table_map(), &slow, true);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
// probing.
// Slow case: jump to runtime.
// edx: receiver
// eax: key
- __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
+ Immediate(FACTORY->hash_table_map()));
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
__ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
__ mov(eax, FieldOperand(eax, edi, times_pointer_size,
FixedArray::kHeaderSize));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
- __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_string);
// ecx: key (a smi)
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable.
- __ CheckMap(edi, Factory::fixed_array_map(), &slow, true);
+ __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast, taken);
// edx: receiver, a JSArray
// ecx: key, a smi.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ CheckMap(edi, Factory::fixed_array_map(), &slow, true);
+ __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
Code::kNoExtraICState,
NORMAL,
argc);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+ eax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
// the benefit of the stub cache.
// Check for boolean.
__ bind(&non_string);
- __ cmp(edx, Factory::true_value());
+ __ cmp(edx, FACTORY->true_value());
__ j(equal, &boolean, not_taken);
- __ cmp(edx, Factory::false_value());
+ __ cmp(edx, FACTORY->false_value());
__ j(not_equal, &miss, taken);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+ no_reg);
__ bind(&miss);
}
// -----------------------------------
if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(&Counters::call_miss, 1);
+ __ IncrementCounter(COUNTERS->call_miss(), 1);
} else {
- __ IncrementCounter(&Counters::keyed_call_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_miss(), 1);
}
// Get the receiver of the function from the stack; 1 ~ return address.
GenerateFastArrayLoad(
masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_smi_fast(), 1);
__ bind(&do_call);
// receiver in edx is not used after this point.
// eax: elements
// ecx: smi key
// Check whether the elements is a number dictionary.
- __ CheckMap(eax, Factory::hash_table_map(), &slow_load, true);
+ __ CheckMap(eax, FACTORY->hash_table_map(), &slow_load, true);
__ mov(ebx, ecx);
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
GenerateNumberDictionaryLoad(
masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
__ bind(&slow_reload_receiver);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
- __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_slow_load(), 1);
__ EnterInternalFrame();
__ push(ecx); // save the key
__ push(edx); // pass the receiver
masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ CheckMap(ebx, Factory::hash_table_map(), &lookup_monomorphic_cache, true);
+ __ CheckMap(ebx, FACTORY->hash_table_map(), &lookup_monomorphic_cache, true);
GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_lookup_dict(), 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_lookup_cache(), 1);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
// Fall through on miss.
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
- __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_slow(), 1);
GenerateMiss(masm, argc);
__ bind(&index_string);
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, eax, ecx, ebx,
+ edx);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
// -- esp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::load_miss, 1);
+ __ IncrementCounter(COUNTERS->load_miss(), 1);
__ pop(ebx);
__ push(eax); // receiver
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
*reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
+ (offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
// Patch the offset in the write-barrier code. The offset is the
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
*reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
+ (offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
// -- esp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::keyed_load_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_miss(), 1);
__ pop(ebx);
__ push(edx); // receiver
NOT_IN_LOOP,
MONOMORPHIC,
strict_mode);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+ no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
__ push(edx);
GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
__ Drop(1);
- __ IncrementCounter(&Counters::store_normal_hit, 1);
+ __ IncrementCounter(COUNTERS->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
__ pop(edx);
__ bind(&miss);
- __ IncrementCounter(&Counters::store_normal_miss, 1);
+ __ IncrementCounter(COUNTERS->store_normal_miss(), 1);
GenerateMiss(masm);
}
}
-void LCodeGen::CallRuntime(Runtime::Function* fun,
+void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
LInstruction* instr,
bool adjusted) {
if (length == 0) return;
ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
- Factory::NewDeoptimizationInputData(length, TENURED);
+ factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
- Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (isolate()->cpu_features()->IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
- __ cmp(reg, Factory::true_value());
+ __ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ cmp(reg, Factory::undefined_value());
+ __ cmp(reg, factory()->undefined_value());
__ j(equal, false_label);
- __ cmp(reg, Factory::true_value());
+ __ cmp(reg, factory()->true_value());
__ j(equal, true_label);
- __ cmp(reg, Factory::false_value());
+ __ cmp(reg, factory()->false_value());
__ j(equal, false_label);
__ test(reg, Operand(reg));
__ j(equal, false_label);
// Test for double values. Zero is false.
NearLabel call_stub;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
__ j(not_equal, &call_stub);
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
NearLabel done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), Factory::true_value());
+ __ mov(ToRegister(result), factory()->true_value());
__ j(cc, &done);
__ bind(&unordered);
- __ mov(ToRegister(result), Factory::false_value());
+ __ mov(ToRegister(result), factory()->false_value());
__ bind(&done);
}
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
NearLabel done;
__ j(equal, &done);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Materialize false.
- __ cmp(reg, Factory::null_value());
+ __ cmp(reg, factory()->null_value());
if (instr->is_strict()) {
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
NearLabel done;
__ j(equal, &done);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
} else {
NearLabel true_value, false_value, done;
__ j(equal, &true_value);
- __ cmp(reg, Factory::undefined_value());
+ __ cmp(reg, factory()->undefined_value());
__ j(equal, &true_value);
__ test(reg, Immediate(kSmiTagMask));
__ j(zero, &false_value);
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &true_value);
__ bind(&false_value);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ jmp(&done);
__ bind(&true_value);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmp(reg, Factory::null_value());
+ __ cmp(reg, factory()->null_value());
if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ cmp(reg, Factory::undefined_value());
+ __ cmp(reg, factory()->undefined_value());
__ j(equal, true_label);
__ test(reg, Immediate(kSmiTagMask));
__ j(zero, false_label);
__ test(input, Immediate(kSmiTagMask));
__ j(equal, is_not_object);
- __ cmp(input, Factory::null_value());
+ __ cmp(input, isolate()->factory()->null_value());
__ j(equal, is_object);
__ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
__ j(true_cond, &is_true);
__ bind(&is_false);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ jmp(&done);
__ bind(&is_true);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ test(input, Immediate(kSmiTagMask));
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
NearLabel done;
__ j(zero, &done);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
__ j(zero, &is_false);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
__ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ jmp(&done);
__ bind(&is_false);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
NearLabel done;
__ j(zero, &done);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
__ j(not_equal, &is_false);
__ bind(&is_true);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ jmp(&done);
__ bind(&is_false);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
NearLabel true_value, done;
__ test(eax, Operand(eax));
__ j(zero, &true_value);
- __ mov(ToRegister(instr->result()), Factory::false_value());
+ __ mov(ToRegister(instr->result()), factory()->false_value());
__ jmp(&done);
__ bind(&true_value);
- __ mov(ToRegister(instr->result()), Factory::true_value());
+ __ mov(ToRegister(instr->result()), factory()->true_value());
__ bind(&done);
}
Register map = ToRegister(instr->TempAt(0));
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- __ cmp(map, Factory::the_hole_value()); // Patched to cached map.
+ __ cmp(map, factory()->the_hole_value()); // Patched to cached map.
__ j(not_equal, &cache_miss, not_taken);
- __ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
+ __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
__ jmp(&done);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss);
// Null is not an instance of anything.
- __ cmp(object, Factory::null_value());
+ __ cmp(object, factory()->null_value());
__ j(equal, &false_result);
// String values are not instances of anything.
__ jmp(deferred->entry());
__ bind(&false_result);
- __ mov(ToRegister(instr->result()), Factory::false_value());
+ __ mov(ToRegister(instr->result()), factory()->false_value());
// Here result has either true or false. Deferred code also produces true or
// false object.
NearLabel true_value, done;
__ test(eax, Operand(eax));
__ j(condition, &true_value);
- __ mov(ToRegister(instr->result()), Factory::false_value());
+ __ mov(ToRegister(instr->result()), factory()->false_value());
__ jmp(&done);
__ bind(&true_value);
- __ mov(ToRegister(instr->result()), Factory::true_value());
+ __ mov(ToRegister(instr->result()), factory()->true_value());
__ bind(&done);
}
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->check_hole_value()) {
- __ cmp(result, Factory::the_hole_value());
+ __ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
}
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->check_hole_value()) {
- __ cmp(cell_operand, Factory::the_hole_value());
+ __ cmp(cell_operand, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(Builtins::LoadIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- __ cmp(Operand(result), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
DeoptimizeIf(equal, instr->environment());
// If the function does not have an initial map, we're done.
if (FLAG_debug_code) {
NearLabel done;
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(factory()->fixed_array_map()));
__ j(equal, &done);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::external_pixel_array_map()));
+ Immediate(factory()->external_pixel_array_map()));
__ j(equal, &done);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::fixed_cow_array_map()));
+ Immediate(factory()->fixed_cow_array_map()));
__ Check(equal, "Check for fast elements or pixel array failed.");
__ bind(&done);
}
FixedArray::kHeaderSize));
// Check for the hole value.
- __ cmp(result, Factory::the_hole_value());
+ __ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(eax));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
// If the receiver is null or undefined, we have to pass the global object
// as a receiver.
NearLabel global_object, receiver_ok;
- __ cmp(receiver, Factory::null_value());
+ __ cmp(receiver, factory()->null_value());
__ j(equal, &global_object);
- __ cmp(receiver, Factory::undefined_value());
+ __ cmp(receiver, factory()->undefined_value());
__ j(equal, &global_object);
// The receiver should be a JS object.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
Label done;
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->
+ ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->
+ ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->
+ ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> builtin(isolate()->builtins()->builtin(
+ Builtins::JSConstructCall));
__ Set(eax, Immediate(instr->arity()));
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(Factory::empty_string()));
+ Immediate(factory()->empty_string()));
__ j(not_equal, deferred->entry());
// Get the first of the two strings and load its instance type.
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
__ cmp(char_code, String::kMaxAsciiCharCode);
__ j(above, deferred->entry());
- __ Set(result, Immediate(Factory::single_character_string_cache()));
+ __ Set(result, Immediate(factory()->single_character_string_cache()));
__ mov(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(result, Factory::undefined_value());
+ __ cmp(result, factory()->undefined_value());
__ j(equal, deferred->entry());
__ bind(deferred->exit());
}
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
__ j(equal, &heap_number);
- __ cmp(input_reg, Factory::undefined_value());
+ __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN.
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
if (instr->truncating()) {
__ j(equal, &heap_number);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
- __ cmp(input_reg, Factory::undefined_value());
+ __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
__ jmp(&done);
__ bind(&heap_number);
- if (CpuFeatures::IsSupported(SSE3)) {
+ if (isolate()->cpu_features()->IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
NearLabel convert;
// Use more powerful conversion when sse3 is available.
// the JS bitwise operations.
__ cvttsd2si(result_reg, Operand(input_reg));
__ cmp(result_reg, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE3)) {
+ if (isolate()->cpu_features()->IsSupported(SSE3)) {
// This will deoptimize if the exponent of the input is out of range.
CpuFeatures::Scope scope(SSE3);
NearLabel convert, done;
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (Heap::InNewSpace(*object)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(object);
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
__ mov(result, Operand::Cell(cell));
} else {
__ mov(result, object);
int literal_offset = FixedArray::kHeaderSize +
instr->hydrogen()->literal_index() * kPointerSize;
__ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, Factory::undefined_value());
+ __ cmp(ebx, factory()->undefined_value());
__ j(not_equal, &materialized);
// Create regexp literal using runtime function
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? factory()->true_value()
+ : factory()->false_value()));
CallRuntime(Runtime::kNewClosure, 3, instr, false);
}
}
instr->type_literal());
__ j(final_branch_condition, &true_label);
__ bind(&false_label);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ jmp(&done);
__ bind(&true_label);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(Heap::number_symbol())) {
+ if (type_name->Equals(heap()->number_symbol())) {
__ JumpIfSmi(input, true_label);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::string_symbol())) {
+ } else if (type_name->Equals(heap()->string_symbol())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label);
1 << Map::kIsUndetectable);
final_branch_condition = zero;
- } else if (type_name->Equals(Heap::boolean_symbol())) {
- __ cmp(input, Factory::true_value());
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
+ __ cmp(input, factory()->true_value());
__ j(equal, true_label);
- __ cmp(input, Factory::false_value());
+ __ cmp(input, factory()->false_value());
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::undefined_symbol())) {
- __ cmp(input, Factory::undefined_value());
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
+ __ cmp(input, factory()->undefined_value());
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
1 << Map::kIsUndetectable);
final_branch_condition = not_zero;
- } else if (type_name->Equals(Heap::function_symbol())) {
+ } else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
__ j(equal, true_label);
__ CmpInstanceType(input, JS_REGEXP_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::object_symbol())) {
+ } else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
- __ cmp(input, Factory::null_value());
+ __ cmp(input, factory()->null_value());
__ j(equal, true_label);
// Regular expressions => 'function', not 'object'.
__ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
EmitIsConstructCall(result);
__ j(equal, &true_label);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ jmp(&done);
__ bind(&true_label);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
bool adjusted = true);
- void CallRuntime(Runtime::Function* fun, int argc, LInstruction* instr,
+ void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
bool adjusted = true);
void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
bool adjusted = true) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, argc, instr, adjusted);
}
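
All of the hunks above follow one recipe: a call through a process-wide
static (Factory::, Heap::, Builtins::, StubCache::, CpuFeatures::) becomes a
call through the owning isolate, reached via the new LCodeGen accessors.
For orientation, a rough sketch of the Isolate surface these call sites
assume (member names taken from the call sites; everything else omitted):

// Sketch only; not the full class.
class Isolate {
 public:
  static Isolate* Current();    // thread-local-storage lookup
  Factory* factory();           // replaces the static Factory::* API
  Heap* heap();                 // replaces the static Heap::* API
  Builtins* builtins();         // replaces Builtins::builtin(...)
  StubCache* stub_cache();      // replaces StubCache::Compute*(...)
  CpuFeatures* cpu_features();  // replaces CpuFeatures::IsSupported(...)
  StackGuard* stack_guard();    // replaces StackGuard::* statics
};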
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
+ CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+    (instr->CanTruncateToInt32() && cpu_features->IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
} else {
ASSERT(to.IsInteger32());
bool needs_temp = instr->CanTruncateToInt32() &&
- !CpuFeatures::IsSupported(SSE3);
+ !Isolate::Current()->cpu_features()->IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- Runtime::Function* function() const { return hydrogen()->function(); }
+ const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
: Assembler(buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ code_object_(HEAP->undefined_value()) {
}
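
The upper-case HEAP, FACTORY, COUNTERS and ISOLATE forms appearing from here
on are shorthands for the same thread-local lookup; their assumed shape:

// Assumed definitions; each expands to a walk through the current isolate.
#define ISOLATE  (v8::internal::Isolate::Current())
#define HEAP     (v8::internal::Isolate::Current()->heap())
#define FACTORY  (v8::internal::Isolate::Current()->factory())
#define COUNTERS (v8::internal::Isolate::Current()->counters())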
void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CMOV)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
fucomip();
ffree(0);
fincstp();
test(object, Immediate(kSmiTagMask));
j(zero, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
Assert(equal, "Operand not a number");
bind(&ok);
}
push(Immediate(Smi::FromInt(type)));
push(Immediate(CodeObject()));
if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(Factory::undefined_value()));
+ cmp(Operand(esp, 0), Immediate(FACTORY->undefined_value()));
Check(not_equal, "code object not properly patched");
}
}
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
- ExternalReference context_address(Top::k_context_address);
+ ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address);
+ ExternalReference context_address(Isolate::k_context_address);
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
}
// Get the required frame alignment for the OS.
- static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
and_(esp, -kFrameAlignment);
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
- EnterExitFrameEpilogue(2, save_doubles);
+ // Reserve space for argc, argv and isolate.
+ EnterExitFrameEpilogue(3, save_doubles);
}
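
The extra word reserved here carries the isolate pointer out to the C++
runtime, which now receives it as an explicit argument instead of consulting
a global. A minimal sketch of the resulting convention (the function name is
illustrative, not part of this patch):

// Illustrative runtime entry: the isolate arrives as a parameter, so the
// body never touches process-global state.
static MaybeObject* Runtime_Example(Arguments args, Isolate* isolate) {
  return isolate->heap()->undefined_value();
}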
void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Top::k_context_address);
+ ExternalReference context_address(Isolate::k_context_address);
mov(esi, Operand::StaticVariable(context_address));
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
// Clear the top frame.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+ ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address);
mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}
push(Immediate(0)); // NULL frame pointer.
}
// Save the current handler as the next handler.
- push(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ push(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address)));
// Link this handler as the new current one.
- mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
+ mov(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address)),
+ esp);
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ pop(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address)));
add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
}
}
// Drop the sp to the top of the handler.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
mov(esp, Operand::StaticVariable(handler_address));
// Restore next handler and frame pointer, discard handler state.
}
// Drop sp to the top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address);
mov(eax, false);
mov(Operand::StaticVariable(external_caught), eax);
// Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address);
mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
mov(Operand::StaticVariable(pending_exception), eax);
}
push(scratch);
// Read the first word and compare to global_context_map.
mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, Factory::global_context_map());
+ cmp(scratch, FACTORY->global_context_map());
Check(equal, "JSGlobalObject::global_context should be a global context.");
pop(scratch);
}
// Check the context is a global context.
if (emit_debug_code()) {
- cmp(holder_reg, Factory::null_value());
+ cmp(holder_reg, FACTORY->null_value());
Check(not_equal, "JSGlobalProxy::context() should not be null.");
push(holder_reg);
// Read the first word and compare to global_context_map(),
mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, Factory::global_context_map());
+ cmp(holder_reg, FACTORY->global_context_map());
Check(equal, "JSGlobalObject::global_context should be a global context.");
pop(holder_reg);
}
// Set the map.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
}
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::string_map()));
+ Immediate(FACTORY->string_map()));
mov(scratch1, length);
SmiTag(scratch1);
mov(FieldOperand(result, String::kLengthOffset), scratch1);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::ascii_string_map()));
+ Immediate(FACTORY->ascii_string_map()));
mov(scratch1, length);
SmiTag(scratch1);
mov(FieldOperand(result, String::kLengthOffset), scratch1);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::ascii_string_map()));
+ Immediate(FACTORY->ascii_string_map()));
mov(FieldOperand(result, String::kLengthOffset),
Immediate(Smi::FromInt(length)));
mov(FieldOperand(result, String::kHashFieldOffset),
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::cons_string_map()));
+ Immediate(FACTORY->cons_string_map()));
}
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::cons_ascii_string_map()));
+ Immediate(FACTORY->cons_ascii_string_map()));
}
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
- cmp(Operand(result), Immediate(Factory::the_hole_value()));
+ cmp(Operand(result), Immediate(FACTORY->the_hole_value()));
j(equal, miss, not_taken);
// If the function does not have an initial map, we're done.
if (num_arguments > 0) {
add(Operand(esp), Immediate(num_arguments * kPointerSize));
}
- mov(eax, Immediate(Factory::undefined_value()));
+ mov(eax, Immediate(FACTORY->undefined_value()));
}
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
mov(ebx, Immediate(ExternalReference(function)));
CEntryStub ces(1);
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments matches the
// expectation.
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
+MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
int num_arguments) {
if (f->nargs >= 0 && f->nargs != num_arguments) {
IllegalOperation(num_arguments);
// Since we did not call the stub, there was no allocation failure.
// Return some non-failure object.
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// TODO(1236192): Most runtime routines don't need the number of
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address();
cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(Factory::the_hole_value()));
+ Immediate(FACTORY->the_hole_value()));
j(not_equal, &promote_scheduled_exception, not_taken);
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
}
bind(&empty_handle);
// It was zero; the result is undefined.
- mov(eax, Factory::undefined_value());
+ mov(eax, FACTORY->undefined_value());
jmp(&prologue);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
mov(Operand::StaticVariable(limit_address), edi);
mov(edi, eax);
+ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
mov(eax, Immediate(ExternalReference::delete_handle_scope_extensions()));
call(Operand(eax));
mov(eax, edi);
if (!definitely_matches) {
Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::ArgumentsAdaptorTrampoline));
if (!code_constant.is_null()) {
mov(edx, Immediate(code_constant));
add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
- CheckMap(map, Factory::meta_map(), &fail, false);
+ CheckMap(map, FACTORY->meta_map(), &fail, false);
jmp(&ok);
bind(&fail);
Abort("Global functions must have initial map");
if (emit_debug_code()) {
Label ok;
cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
j(equal, &ok);
cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::fixed_cow_array_map()));
+ Immediate(FACTORY->fixed_cow_array_map()));
j(equal, &ok);
Abort("JSObject with fast elements map has slow elements");
bind(&ok);
if (emit_debug_code()) AbortIfSmi(reg);
if (!info.IsNumber()) {
cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
j(not_equal, on_not_number);
}
}
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+  // Reserve space for the Isolate address, which is always passed as the
+  // last parameter.
+ num_arguments += 1;
+
int frameAlignment = OS::ActivationFrameAlignment();
if (frameAlignment != 0) {
// Make stack end at alignment and make room for num_arguments words
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
+ // Pass current isolate address as additional parameter.
+ mov(Operand(esp, num_arguments * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
+ num_arguments += 1;
+
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
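
Together, PrepareCallCFunction and CallCFunction thus make the isolate an
implicit trailing argument of every C helper invoked from generated code: a
helper prepared with PrepareCallCFunction(2, scratch) is declared with three
parameters, roughly (hypothetical name):

// Hypothetical helper signature: two explicit arguments plus the isolate
// word that CallCFunction stores in the last parameter slot.
extern "C" int hypothetical_helper(int arg0, int arg1, Isolate* isolate);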
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::Function* f,
+ MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
int32_t new_space_start = reinterpret_cast<int32_t>(
ExternalReference::new_space_start().address());
lea(scratch, Operand(object, -new_space_start));
- and_(scratch, Heap::NewSpaceMask());
+ and_(scratch, HEAP->NewSpaceMask());
j(cc, branch);
}
}
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
+ * - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = Factory::NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(RegExpCodeCreateEvent(*code, *source));
+ Isolate* isolate = ISOLATE;
+ Handle<Code> code =
+ isolate->factory()->NewCode(code_desc,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}
int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
- if (StackGuard::IsStackOverflow()) {
- Top::StackOverflow();
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
return EXCEPTION;
}
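
CheckStackGuardState recovers the isolate from the kIsolate slot of the
generated frame (the offset is declared in the layout below). The
frame_entry helper it uses amounts to a typed read off that frame, along
these lines (sketch):

// Sketch of the typed frame accessor: reinterpret the word at
// re_frame + frame_offset as a T.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
  return *reinterpret_cast<T*>(re_frame + frame_offset);
}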
static const int kRegisterOutput = kInputEnd + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
void Result::ToRegister() {
ASSERT(is_valid());
if (is_constant()) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
+ CodeGenerator* code_generator =
+ CodeGeneratorScope::Current(Isolate::Current());
+ Result fresh = code_generator->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (is_untagged_int32()) {
fresh.set_untagged_int32(true);
if (handle()->IsSmi()) {
- CodeGeneratorScope::Current()->masm()->Set(
+ code_generator->masm()->Set(
fresh.reg(),
Immediate(Smi::cast(*handle())->value()));
} else if (handle()->IsHeapNumber()) {
if (double_value == 0 && signbit(double_value)) {
// Negative zero must not be converted to an int32 unless
// the context allows it.
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+ code_generator->unsafe_bailout_->Branch(equal);
+ code_generator->unsafe_bailout_->Branch(not_equal);
} else if (double_value == value) {
- CodeGeneratorScope::Current()->masm()->Set(
- fresh.reg(), Immediate(value));
+ code_generator->masm()->Set(fresh.reg(), Immediate(value));
} else {
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+ code_generator->unsafe_bailout_->Branch(equal);
+ code_generator->unsafe_bailout_->Branch(not_equal);
}
} else {
// Constant is not a number. This was not predicted by AST analysis.
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+ code_generator->unsafe_bailout_->Branch(equal);
+ code_generator->unsafe_bailout_->Branch(not_equal);
}
- } else if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
+ } else if (code_generator->IsUnsafeSmi(handle())) {
+ code_generator->MoveUnsafeSmi(fresh.reg(), handle());
} else {
- CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
- Immediate(handle()));
+ code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
}
// This result becomes a copy of the fresh one.
fresh.set_type_info(type_info());
void Result::ToRegister(Register target) {
+ CodeGenerator* code_generator =
+ CodeGeneratorScope::Current(Isolate::Current());
ASSERT(is_valid());
if (!is_register() || !reg().is(target)) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
+ Result fresh = code_generator->allocator()->Allocate(target);
ASSERT(fresh.is_valid());
if (is_register()) {
- CodeGeneratorScope::Current()->masm()->mov(fresh.reg(), reg());
+ code_generator->masm()->mov(fresh.reg(), reg());
} else {
ASSERT(is_constant());
if (is_untagged_int32()) {
if (handle()->IsSmi()) {
- CodeGeneratorScope::Current()->masm()->Set(
+ code_generator->masm()->Set(
fresh.reg(),
Immediate(Smi::cast(*handle())->value()));
} else {
if (double_value == 0 && signbit(double_value)) {
// Negative zero must not be converted to an int32 unless
// the context allows it.
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+ code_generator->unsafe_bailout_->Branch(equal);
+ code_generator->unsafe_bailout_->Branch(not_equal);
} else if (double_value == value) {
- CodeGeneratorScope::Current()->masm()->Set(
- fresh.reg(), Immediate(value));
+ code_generator->masm()->Set(fresh.reg(), Immediate(value));
} else {
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+ code_generator->unsafe_bailout_->Branch(equal);
+ code_generator->unsafe_bailout_->Branch(not_equal);
}
}
} else {
- if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
+ if (code_generator->IsUnsafeSmi(handle())) {
+ code_generator->MoveUnsafeSmi(fresh.reg(), handle());
} else {
- CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
- Immediate(handle()));
+ code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
}
}
}
fresh.set_untagged_int32(is_untagged_int32());
*this = fresh;
} else if (is_register() && reg().is(target)) {
- ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
- CodeGeneratorScope::Current()->frame()->Spill(target);
- ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
+ ASSERT(code_generator->has_valid_frame());
+ code_generator->frame()->Spill(target);
+ ASSERT(code_generator->allocator()->count(target) == 1);
}
ASSERT(is_register());
ASSERT(reg().is(target));
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int);
+ const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
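
A call through the widened macro now looks roughly as follows; the argument
names are placeholders matching the typedef above, with the isolate appended
as the new trailing parameter:

// Placeholder invocation: p0..p6 as before, the Isolate* as p7.
int result = CALL_GENERATED_REGEXP_CODE(entry,
                                        subject,           // String*
                                        start_offset,      // int
                                        input_start,       // const byte*
                                        input_end,         // const byte*
                                        output_registers,  // int*
                                        stack_base,        // Address
                                        direct_call,       // int
                                        Isolate::Current());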
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
#define __ ACCESS_MASM(masm)
-static void ProbeTable(MacroAssembler* masm,
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
Register offset,
Register extra) {
- ExternalReference key_offset(SCTableReference::keyReference(table));
- ExternalReference value_offset(SCTableReference::valueReference(table));
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
Label miss;
Register r0,
Register r1) {
ASSERT(name->IsSymbol());
- __ IncrementCounter(&Counters::negative_lookups, 1);
- __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+ __ IncrementCounter(COUNTERS->negative_lookups(), 1);
+ __ IncrementCounter(COUNTERS->negative_lookups_miss(), 1);
Label done;
__ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
// Check that the properties array is a dictionary.
__ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
+ Immediate(FACTORY->hash_table_map()));
__ j(not_equal, miss_label);
// Compute the capacity mask.
ASSERT_EQ(kSmiTagSize, 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, Factory::undefined_value());
+ __ cmp(entity_name, FACTORY->undefined_value());
if (i != kProbes - 1) {
__ j(equal, &done, taken);
}
__ bind(&done);
- __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+ __ DecrementCounter(COUNTERS->negative_lookups_miss(), 1);
}
Register scratch,
Register extra,
Register extra2) {
+ Isolate* isolate = Isolate::Current();
Label miss;
USE(extra2); // The register extra2 is not used on the ia32 platform.
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch, extra);
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch, extra);
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Check we're still in the same context.
__ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
- Top::global());
+ Isolate::Current()->global());
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ JSFunction* function = JSFunction::cast(
+ Isolate::Current()->global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
+ ASSERT(!HEAP->InNewSpace(interceptor));
Register scratch = name;
__ mov(scratch, Immediate(Handle<Object>(interceptor)));
__ push(scratch);
__ mov(Operand(esp, 2 * kPointerSize), edi);
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (Heap::InNewSpace(call_data)) {
+ if (HEAP->InNewSpace(call_data)) {
__ mov(ecx, api_call_info_handle);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
__ mov(Operand(esp, 3 * kPointerSize), ebx);
name,
holder,
miss);
- return Heap::undefined_value(); // Success.
+ return HEAP->undefined_value(); // Success.
}
}
(depth2 != kInvalidProtoDepth);
}
- __ IncrementCounter(&Counters::call_const_interceptor, 1);
+ __ IncrementCounter(COUNTERS->call_const_interceptor(), 1);
if (can_do_fast_api_call) {
- __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+ __ IncrementCounter(COUNTERS->call_const_interceptor_fast_api(), 1);
ReserveSpaceForFastApiCall(masm, scratch1);
}
FreeSpaceForFastApiCall(masm, scratch1);
}
- return Heap::undefined_value(); // Success.
+ return HEAP->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
__ pop(receiver); // Restore the holder.
__ LeaveInternalFrame();
- __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ cmp(eax, FACTORY->no_interceptor_result_sentinel());
__ j(not_equal, interceptor_succeeded);
}
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
+ code = Isolate::Current()->builtins()->builtin(Builtins::LoadIC_Miss);
} else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ code = Isolate::Current()->builtins()->builtin(Builtins::KeyedLoadIC_Miss);
}
Handle<Code> ic(code);
if (Serializer::enabled()) {
__ mov(scratch, Immediate(Handle<Object>(cell)));
__ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(Factory::the_hole_value()));
+ Immediate(FACTORY->the_hole_value()));
} else {
__ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
- Immediate(Factory::the_hole_value()));
+ Immediate(FACTORY->the_hole_value()));
}
__ j(not_equal, miss, not_taken);
return cell;
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name);
+ MaybeObject* maybe_lookup_result = HEAP->LookupSymbol(name);
Object* lookup_result = NULL; // Initialization to please compiler.
if (!maybe_lookup_result->ToObject(&lookup_result)) {
set_failure(Failure::cast(maybe_lookup_result));
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (Heap::InNewSpace(prototype)) {
+ } else if (HEAP->InNewSpace(prototype)) {
// Get the map of the current object.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
ASSERT(current == holder);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
ASSERT(!scratch2.is(reg));
__ push(reg); // holder
// Push data from AccessorInfo.
- if (Heap::InNewSpace(callback_handle->data())) {
+ if (HEAP->InNewSpace(callback_handle->data())) {
__ mov(scratch1, Immediate(callback_handle));
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
} else {
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ cmp(eax, FACTORY->no_interceptor_result_sentinel());
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
}
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
+ if (HEAP->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
- kind_);
+ MaybeObject* maybe_obj =
+ Isolate::Current()->stub_cache()->ComputeCallMiss(
+ arguments().immediate(), kind_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
Label miss;
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
- Immediate(Factory::the_hole_value()));
+ Immediate(FACTORY->the_hole_value()));
}
// Restore receiver to edx as finish sequence assumes it's here.
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
Label miss, return_undefined, call_builtin;
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
__ j(not_equal, &call_builtin);
// Get the array's length into ecx and calculate new length.
__ mov(eax, FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(eax), Immediate(FACTORY->the_hole_value()));
__ j(equal, &call_builtin);
// Set the array's length.
__ mov(FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize),
- Immediate(Factory::the_hole_value()));
+ Immediate(FACTORY->the_hole_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
- __ mov(eax, Immediate(Factory::undefined_value()));
+ __ mov(eax, Immediate(FACTORY->undefined_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
const int argc = arguments().immediate();
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
- __ Set(index, Immediate(Factory::undefined_value()));
+ __ Set(index, Immediate(FACTORY->undefined_value()));
}
StringCharCodeAtGenerator char_code_at_generator(receiver,
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ Set(eax, Immediate(Factory::nan_value()));
+ __ Set(eax, Immediate(FACTORY->nan_value()));
__ ret((argc + 1) * kPointerSize);
}
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
const int argc = arguments().immediate();
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
- __ Set(index, Immediate(Factory::undefined_value()));
+ __ Set(index, Immediate(FACTORY->undefined_value()));
}
StringCharAtGenerator char_at_generator(receiver,
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ Set(eax, Immediate(Factory::empty_string()));
+ __ Set(eax, Immediate(FACTORY->empty_string()));
__ ret((argc + 1) * kPointerSize);
}
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(SSE2)) return Heap::undefined_value();
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2))
+ return HEAP->undefined_value();
CpuFeatures::Scope use_sse2(SSE2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
- __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ CheckMap(eax, FACTORY->heap_number_map(), &slow, true);
__ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
// Check if the argument is a heap number and load its exponent and
// sign into ebx.
__ bind(&not_smi);
- __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ CheckMap(eax, FACTORY->heap_number_map(), &slow, true);
__ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return Heap::undefined_value();
- if (cell != NULL) return Heap::undefined_value();
+ if (object->IsGlobalObject()) return HEAP->undefined_value();
+ if (cell != NULL) return HEAP->undefined_value();
int depth = optimization.GetPrototypeDepthOfExpectedType(
JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Heap::undefined_value();
+ if (depth == kInvalidProtoDepth) return HEAP->undefined_value();
Label miss, miss_before_stack_reserved;
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss_before_stack_reserved, not_taken);
- __ IncrementCounter(&Counters::call_const, 1);
- __ IncrementCounter(&Counters::call_const_fast_api, 1);
+ __ IncrementCounter(COUNTERS->call_const(), 1);
+ __ IncrementCounter(COUNTERS->call_const_fast_api(), 1);
// Allocate space for v8::Arguments implicit values. Must be initialized
// before calling any runtime function.
SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(&Counters::call_const, 1);
+ __ IncrementCounter(COUNTERS->call_const(), 1);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
} else {
Label fast;
// Check that the object is a boolean.
- __ cmp(edx, Factory::true_value());
+ __ cmp(edx, FACTORY->true_value());
__ j(equal, &fast, taken);
- __ cmp(edx, Factory::false_value());
+ __ cmp(edx, FACTORY->false_value());
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1);
+ __ IncrementCounter(COUNTERS->call_global_inline(), 1);
ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
if (V8::UseCrankshaft()) {
// Handle call cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->call_global_inline_miss(), 1);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
// Handle store cache miss.
__ bind(&miss);
__ mov(ecx, Immediate(Handle<String>(name))); // restore name
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, Factory::the_hole_value());
+ __ cmp(cell_operand, FACTORY->the_hole_value());
__ j(equal, &miss);
// Store the value in the cell.
__ mov(cell_operand, eax);
// Return the value (register eax).
- __ IncrementCounter(&Counters::named_store_global_inline, 1);
+ __ IncrementCounter(COUNTERS->named_store_global_inline(), 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ IncrementCounter(COUNTERS->named_store_global_inline_miss(), 1);
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_field(), 1);
// Check that the name has not changed.
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ DecrementCounter(COUNTERS->keyed_store_field(), 1);
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
__ j(not_equal, &miss, not_taken);
// Check that the key is within bounds.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Handle<Code> ic(
+ Isolate::Current()->builtins()->builtin(Builtins::KeyedStoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, FACTORY->undefined_value());
__ ret(0);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, Heap::empty_string());
+ return GetCode(NONEXISTENT, HEAP->empty_string());
}
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ cmp(ebx, Factory::the_hole_value());
+ __ cmp(ebx, FACTORY->the_hole_value());
__ j(equal, &miss, not_taken);
} else if (FLAG_debug_code) {
- __ cmp(ebx, Factory::the_hole_value());
+ __ cmp(ebx, FACTORY->the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_stub, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_stub(), 1);
__ mov(eax, ebx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_stub_miss(), 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_field, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_field(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_field, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_field(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_callback, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_callback(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_callback, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_constant_function(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
value, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_constant_function(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_interceptor(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
name,
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_array_length(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
GenerateLoadArrayLength(masm(), edx, ecx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_array_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_string_length(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_string_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_function_prototype(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// Load the result and make sure it's not the hole.
__ mov(ebx, Operand(ecx, eax, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, Factory::the_hole_value());
+ __ cmp(ebx, FACTORY->the_hole_value());
__ j(equal, &miss, not_taken);
__ mov(eax, ebx);
__ ret(0);
// code for the function thereby hitting the break points.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, Factory::undefined_value());
+ __ cmp(ebx, FACTORY->undefined_value());
__ j(not_equal, &generic_stub_call, not_taken);
#endif
// ebx: initial map
// edx: JSObject (untagged)
__ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, Factory::empty_fixed_array());
+ __ mov(ebx, FACTORY->empty_fixed_array());
__ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
__ mov(Operand(edx, JSObject::kElementsOffset), ebx);
__ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
// Use edi for holding undefined which is used in several places below.
- __ mov(edi, Factory::undefined_value());
+ __ mov(edi, FACTORY->undefined_value());
// eax: argc
// ecx: first argument
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
__ mov(ebx, edi);
__ cmp(eax, arg_number);
- if (CpuFeatures::IsSupported(CMOV)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
CpuFeatures::Scope use_cmov(CMOV);
__ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
} else {
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
__ push(ecx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
- __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ __ IncrementCounter(COUNTERS->constructed_objects(), 1);
+ __ IncrementCounter(COUNTERS->constructed_objects_stub(), 1);
__ ret(0);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
// Slow case: Jump to runtime.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// edi: elements array
// ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
// processors that don't support SSE2. The code in IntegerConvert
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
if (array_type != kExternalIntArray &&
array_type != kExternalUnsignedIntArray) {
- ASSERT(CpuFeatures::IsSupported(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
// ecx: untagged integer value
break;
}
} else {
- if (CpuFeatures::IsSupported(SSE3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
// fisttp stores values as signed integers. To represent the
// entire range of int and unsigned int arrays, store as a
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
} else {
- ASSERT(CpuFeatures::IsSupported(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
// We can easily implement the correct rounding behavior for the
// range [0, 2^31-1]. For the time being, to keep this code simple,
// them later. First sync everything above the stack pointer so we can
// use pushes to allocate and initialize the locals.
SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = Factory::undefined_value();
+ Handle<Object> undefined = FACTORY->undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
if (count == 1) {
__ bind(¬_smi);
if (!original.type_info().IsNumber()) {
__ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
cgen()->unsafe_bailout_->Branch(not_equal);
}
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
UNREACHABLE();
} else {
CpuFeatures::Scope use_sse2(SSE2);
}
-Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
PrepareForCall(0, 0); // No stack arguments.
MoveResultsToRegisters(&name, &receiver, ecx, eax);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
return RawCallCodeObject(ic, mode);
}
PrepareForCall(0, 0);
MoveResultsToRegisters(&key, &receiver, eax, edx);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
return RawCallCodeObject(ic, mode);
}
StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in ecx, value in eax, and receiver in edx.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
receiver.Unuse();
}
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
// The IC expects the name in ecx and the rest on the stack and
// drops them all.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> ic = Isolate::Current()->stub_cache()->ComputeCallInitialize(
+ arg_count, in_loop);
// Spill args, receiver, and function. The call will drop args and
// receiver.
Result name = Pop();
// The IC expects the name in ecx and the rest on the stack and
// drops them all.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ Isolate::Current()->stub_cache()->ComputeKeyedCallInitialize(arg_count,
+ in_loop);
// Spill args, receiver, and function. The call will drop args and
// receiver.
Result name = Pop();
// Arguments, receiver, and function are on top of the frame. The
// IC expects arg count in eax, function in edi, and the arguments
// and receiver on the stack.
- Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructCall));
// Duplicate the function before preparing the frame.
PushElementAt(arg_count);
Result function = Pop();
private:
bool previous_state_;
- CodeGenerator* cgen() {return CodeGeneratorScope::Current();}
+ CodeGenerator* cgen() {
+ return CodeGeneratorScope::Current(Isolate::Current());
+ }
};
// An illegal index into the virtual frame.
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ CodeGenerator* cgen() {
+ return CodeGeneratorScope::Current(Isolate::Current());
+ }
MacroAssembler* masm() { return cgen()->masm(); }
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
- Result CallRuntime(Runtime::Function* f, int arg_count);
+ Result CallRuntime(const Runtime::Function* f, int arg_count);
Result CallRuntime(Runtime::FunctionId id, int arg_count);
#ifdef ENABLE_DEBUGGER_SUPPORT
Address result = pc() - Assembler::kCallTargetAddressOffset;
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = Isolate::Current()->debug();
// First check if any break points are active if not just return the address
// of the call.
- if (!Debug::has_break_points()) return result;
+ if (!debug->has_break_points()) return result;
// At least one break point is active perform additional test to ensure that
// break point locations are updated correctly.
- if (Debug::IsDebugBreak(Assembler::target_address_at(result))) {
+ if (debug->IsDebugBreak(Assembler::target_address_at(result))) {
// If the call site is a call to debug break then return the address in
// the original code instead of the address in the running code. This will
// cause the original code to be updated and keeps the breakpoint active in
const char* extra_info) {
if (FLAG_trace_ic) {
State new_state = StateFrom(new_target,
- Heap::undefined_value(),
- Heap::undefined_value());
+ HEAP->undefined_value(),
+ HEAP->undefined_value());
PrintF("[%s (%c->%c)%s", type,
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state),
#endif
-IC::IC(FrameDepth depth) {
+IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
+ ASSERT(isolate == Isolate::Current());
// To improve the performance of the (much used) IC code, we unfold
// a few levels of the stack frame iteration code. This yields a
// ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
- const Address entry = Top::c_entry_fp(Top::GetCurrentThread());
+ const Address entry =
+ Isolate::c_entry_fp(isolate->thread_local_top());
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
#endif
-static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
+static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
+ LookupResult* lookup,
Object* receiver) {
- Object* end = lookup->IsProperty() ? lookup->holder() : Heap::null_value();
+ Object* end = lookup->IsProperty()
+ ? lookup->holder() : isolate->heap()->null_value();
for (Object* current = receiver;
current != end;
current = current->GetPrototype()) {
RelocInfo::Mode IC::ComputeMode() {
Address addr = address();
- Code* code = Code::cast(Heap::FindCodeObject(addr));
+ Code* code = Code::cast(isolate()->heap()->FindCodeObject(addr));
for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
!it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Failure* IC::TypeError(const char* type,
Handle<Object> object,
Handle<Object> key) {
- HandleScope scope;
+ HandleScope scope(isolate());
Handle<Object> args[2] = { key, object };
- Handle<Object> error = Factory::NewTypeError(type, HandleVector(args, 2));
- return Top::Throw(*error);
+ Handle<Object> error = isolate()->factory()->NewTypeError(
+ type, HandleVector(args, 2));
+ return isolate()->Throw(*error);
}
Failure* IC::ReferenceError(const char* type, Handle<String> name) {
- HandleScope scope;
- Handle<Object> error =
- Factory::NewReferenceError(type, HandleVector(&name, 1));
- return Top::Throw(*error);
+ HandleScope scope(isolate());
+ Handle<Object> error = isolate()->factory()->NewReferenceError(
+ type, HandleVector(&name, 1));
+ return isolate()->Throw(*error);
}
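Both helpers now allocate their handles and raise the exception on the IC's
own isolate instead of the process-global Factory and Top. The same shape as
a free function, to show the pattern in isolation (ThrowNotDefined is a
hypothetical name; the calls mirror the hunk above):

  Failure* ThrowNotDefined(Isolate* isolate, Handle<String> name) {
    HandleScope scope(isolate);  // handles live in this isolate's handle area
    Handle<Object> error = isolate->factory()->NewReferenceError(
        "not_defined", HandleVector(&name, 1));
    return isolate->Throw(*error);  // the exception is raised on this isolate
  }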
State state = target->ic_state();
if (state == UNINITIALIZED) return;
Code* code =
- StubCache::FindCallInitialize(target->arguments_count(),
- target->ic_in_loop(),
- target->kind());
+ Isolate::Current()->stub_cache()->FindCallInitialize(
+ target->arguments_count(),
+ target->ic_in_loop(),
+ target->kind());
SetTargetAtAddress(address, code);
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Insert null as the map to check for to make sure the map check fails
// sending control flow to the IC instead of the inlined version.
- PatchInlinedLoad(address, Heap::null_value());
+ PatchInlinedLoad(address, HEAP->null_value());
}
// Reset the map check of the inlined inobject property load (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
- PatchInlinedLoad(address, Heap::null_value(), 0);
+ Heap* heap = HEAP;
+ PatchInlinedLoad(address, heap->null_value(), 0);
PatchInlinedContextualLoad(address,
- Heap::null_value(),
- Heap::null_value(),
+ heap->null_value(),
+ heap->null_value(),
true);
}
// Reset the map check of the inlined inobject property store (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
- PatchInlinedStore(address, Heap::null_value(), 0);
+ PatchInlinedStore(address, HEAP->null_value(), 0);
}
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
- PatchInlinedStore(address, Heap::null_value());
+ PatchInlinedStore(address, HEAP->null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
- PatchInlinedStore(address, Heap::fixed_array_map());
+ PatchInlinedStore(address, HEAP->fixed_array_map());
}
Object* CallICBase::TryCallAsFunction(Object* object) {
- HandleScope scope;
- Handle<Object> target(object);
+ HandleScope scope(isolate());
+ Handle<Object> target(object, isolate());
Handle<Object> delegate = Execution::GetFunctionDelegate(target);
if (delegate->IsJSFunction()) {
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *Factory::ToObject(object));
+ frame->SetExpression(index, *isolate()->factory()->ToObject(object));
}
}
ASSERT(!result->IsTheHole());
- HandleScope scope;
+ HandleScope scope(isolate());
// Wrap result in a handle because ReceiverToObjectIfRequired may allocate
// new object and cause GC.
Handle<Object> result_handle(result);
if (result_handle->IsJSFunction()) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step into is active.
- if (Debug::StepInActive()) {
+ Debug* debug = isolate()->debug();
+ if (debug->StepInActive()) {
// Protect the result in a handle as the debugger can allocate and might
// cause GC.
- Handle<JSFunction> function(JSFunction::cast(*result_handle));
- Debug::HandleStepIn(function, object, fp(), false);
+ Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
+ debug->HandleStepIn(function, object, fp(), false);
return *function;
}
#endif
// Fetch the arguments passed to the called function.
const int argc = target()->arguments_count();
- Address entry = Top::c_entry_fp(Top::GetCurrentThread());
+ Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
Arguments args(argc + 1,
&Memory::Object_at(fp +
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
- maybe_code = StubCache::ComputeCallField(argc,
- in_loop,
- kind_,
- *name,
- *object,
- lookup->holder(),
- index);
+ maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
+ in_loop,
+ kind_,
+ *name,
+ *object,
+ lookup->holder(),
+ index);
break;
}
case CONSTANT_FUNCTION: {
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
JSFunction* function = lookup->GetConstantFunction();
- maybe_code = StubCache::ComputeCallConstant(argc,
- in_loop,
- kind_,
- extra_ic_state,
- *name,
- *object,
- lookup->holder(),
- function);
+ maybe_code =
+ isolate()->stub_cache()->ComputeCallConstant(argc,
+ in_loop,
+ kind_,
+ extra_ic_state,
+ *name,
+ *object,
+ lookup->holder(),
+ function);
break;
}
case NORMAL: {
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
if (!cell->value()->IsJSFunction()) return NULL;
JSFunction* function = JSFunction::cast(cell->value());
- maybe_code = StubCache::ComputeCallGlobal(argc,
- in_loop,
- kind_,
- *name,
- *receiver,
- global,
- cell,
- function);
+ maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
+ in_loop,
+ kind_,
+ *name,
+ *receiver,
+ global,
+ cell,
+ function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return NULL;
- maybe_code = StubCache::ComputeCallNormal(argc,
- in_loop,
- kind_,
- *name,
- *receiver);
+ maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
+ in_loop,
+ kind_,
+ *name,
+ *receiver);
}
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = StubCache::ComputeCallInterceptor(argc,
- kind_,
- *name,
- *object,
- lookup->holder());
+ maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
+ argc,
+ kind_,
+ *name,
+ *object,
+ lookup->holder());
break;
}
default:
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
if (lookup->holder() != *object &&
- HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
+ HasNormalObjectsInPrototypeChain(
+ isolate(), lookup, object->GetPrototype())) {
// Suppress optimization for prototype chains with slow properties objects
// in the middle.
return;
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- maybe_code = StubCache::ComputeCallPreMonomorphic(argc, in_loop, kind_);
+ maybe_code = isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
+ in_loop,
+ kind_);
} else if (state == MONOMORPHIC) {
if (kind_ == Code::CALL_IC &&
TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
object,
name);
} else {
- maybe_code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
+ maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(argc,
+ in_loop,
+ kind_);
}
} else {
maybe_code = ComputeMonomorphicStub(lookup,
object->GetPrototype())->map();
// Update the stub cache.
- StubCache::Set(*name, map, Code::cast(code));
+ isolate()->stub_cache()->Set(*name, map, Code::cast(code));
}
USE(had_proto_failure);
if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
- MaybeObject* maybe_code = StubCache::ComputeCallMegamorphic(
+ MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
argc, in_loop, Code::KEYED_CALL_IC);
Object* code;
if (maybe_code->ToObject(&code)) {
}
}
- HandleScope scope;
+ HandleScope scope(isolate());
Handle<Object> result = GetProperty(object, key);
- RETURN_IF_EMPTY_HANDLE(result);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
// Make receiver an object if the callee requires it. Strict mode or builtin
// functions do not wrap the receiver, non-strict functions and objects
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
if ((object->IsString() || object->IsStringWrapper()) &&
- name->Equals(Heap::length_symbol())) {
- HandleScope scope;
+ name->Equals(isolate()->heap()->length_symbol())) {
+ HandleScope scope(isolate());
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
#endif
Map* map = HeapObject::cast(*object)->map();
const int offset = String::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
- set_target(Builtins::builtin(Builtins::LoadIC_StringLength));
+ set_target(isolate()->builtins()->builtin(
+ Builtins::LoadIC_StringLength));
} else {
- set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+ set_target(isolate()->builtins()->builtin(
+ Builtins::LoadIC_StringWrapperLength));
}
} else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+ set_target(isolate()->builtins()->builtin(
+ Builtins::LoadIC_StringWrapperLength));
} else {
set_target(non_monomorphic_stub);
}
// Get the string if we have a string wrapper object.
if (object->IsJSValue()) {
- object = Handle<Object>(Handle<JSValue>::cast(object)->value());
+ object = Handle<Object>(Handle<JSValue>::cast(object)->value(),
+ isolate());
}
return Smi::FromInt(String::cast(*object)->length());
}
// Use specialized code for getting the length of arrays.
- if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+ if (object->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_symbol())) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
Map* map = HeapObject::cast(*object)->map();
const int offset = JSArray::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
- set_target(Builtins::builtin(Builtins::LoadIC_ArrayLength));
+ set_target(isolate()->builtins()->builtin(
+ Builtins::LoadIC_ArrayLength));
} else {
set_target(non_monomorphic_stub);
}
}
// Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+ if (object->IsJSFunction() &&
+ name->Equals(isolate()->heap()->prototype_symbol()) &&
JSFunction::cast(*object)->should_have_prototype()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
if (state == PREMONOMORPHIC) {
- set_target(Builtins::builtin(Builtins::LoadIC_FunctionPrototype));
+ set_target(isolate()->builtins()->builtin(
+ Builtins::LoadIC_FunctionPrototype));
} else {
set_target(non_monomorphic_stub);
}
if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name);
}
- LOG(SuspectReadEvent(*name, *object));
+ LOG(isolate(), SuspectReadEvent(*name, *object));
}
bool can_be_inlined_precheck =
lookup.IsDontDelete())) {
set_target(megamorphic_stub());
TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
- ASSERT(cell->value() != Heap::the_hole_value());
+ ASSERT(cell->value() != isolate()->heap()->the_hole_value());
return cell->value();
}
} else {
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+ if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
// Compute the code stub for this load.
MaybeObject* maybe_code = NULL;
maybe_code = pre_monomorphic_stub();
} else if (!lookup->IsProperty()) {
// Nonexistent property. The result is undefined.
- maybe_code = StubCache::ComputeLoadNonexistent(*name, *receiver);
+ maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
+ *receiver);
} else {
// Compute monomorphic stub.
switch (lookup->type()) {
case FIELD: {
- maybe_code = StubCache::ComputeLoadField(*name, *receiver,
- lookup->holder(),
- lookup->GetFieldIndex());
+ maybe_code = isolate()->stub_cache()->ComputeLoadField(
+ *name,
+ *receiver,
+ lookup->holder(),
+ lookup->GetFieldIndex());
break;
}
case CONSTANT_FUNCTION: {
Object* constant = lookup->GetConstantFunction();
- maybe_code = StubCache::ComputeLoadConstant(*name, *receiver,
- lookup->holder(), constant);
+ maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
+ *name, *receiver, lookup->holder(), constant);
break;
}
case NORMAL: {
GlobalObject* global = GlobalObject::cast(lookup->holder());
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = StubCache::ComputeLoadGlobal(*name,
+ maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
*receiver,
global,
cell,
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
- maybe_code = StubCache::ComputeLoadNormal();
+ maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
}
break;
}
AccessorInfo* callback =
AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = StubCache::ComputeLoadCallback(*name, *receiver,
- lookup->holder(), callback);
+ maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
+ *name, *receiver, lookup->holder(), callback);
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = StubCache::ComputeLoadInterceptor(*name, *receiver,
- lookup->holder());
+ maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
+ *name, *receiver, lookup->holder());
break;
}
default:
Map* map = JSObject::cast(object->IsJSObject() ? *object :
object->GetPrototype())->map();
- StubCache::Set(*name, map, Code::cast(code));
+ isolate()->stub_cache()->Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
// TODO(1073): don't ignore the current stub state.
// Use specialized code for getting the length of strings.
- if (object->IsString() && name->Equals(Heap::length_symbol())) {
+ if (object->IsString() &&
+ name->Equals(isolate()->heap()->length_symbol())) {
Handle<String> string = Handle<String>::cast(object);
Object* code = NULL;
{ MaybeObject* maybe_code =
- StubCache::ComputeKeyedLoadStringLength(*name, *string);
+ isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
+ *string);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
set_target(Code::cast(code));
}
// Use specialized code for getting the length of arrays.
- if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+ if (object->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_symbol())) {
Handle<JSArray> array = Handle<JSArray>::cast(object);
Object* code;
{ MaybeObject* maybe_code =
- StubCache::ComputeKeyedLoadArrayLength(*name, *array);
+ isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
+ *array);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
set_target(Code::cast(code));
}
// Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+ if (object->IsJSFunction() &&
+ name->Equals(isolate()->heap()->prototype_symbol()) &&
JSFunction::cast(*object)->should_have_prototype()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(object);
Object* code;
{ MaybeObject* maybe_code =
- StubCache::ComputeKeyedLoadFunctionPrototype(*name, *function);
+ isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
+ *name, *function);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
set_target(Code::cast(code));
// the element or char if so.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- HandleScope scope;
+ HandleScope scope(isolate());
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) set_target(generic_stub());
- return Runtime::GetElementOrCharAt(object, index);
+ return Runtime::GetElementOrCharAt(isolate(), object, index);
}
// Named lookup.
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
- StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
- false,
- kNonStrictMode);
+ isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
+ *receiver, false, kNonStrictMode);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
} else if (receiver->HasIndexedInterceptor()) {
} else if (key->IsSmi() &&
receiver->map()->has_fast_elements()) {
MaybeObject* probe =
- StubCache::ComputeKeyedLoadSpecialized(*receiver);
+ isolate()->stub_cache()->ComputeKeyedLoadSpecialized(*receiver);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
}
}
// Get the property.
- return Runtime::GetObjectProperty(object, key);
+ return Runtime::GetObjectProperty(isolate(), object, key);
}
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+ if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
// Compute the code stub for this load.
MaybeObject* maybe_code = NULL;
// Compute a monomorphic stub.
switch (lookup->type()) {
case FIELD: {
- maybe_code = StubCache::ComputeKeyedLoadField(*name, *receiver,
- lookup->holder(),
- lookup->GetFieldIndex());
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
+ *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
break;
}
case CONSTANT_FUNCTION: {
Object* constant = lookup->GetConstantFunction();
- maybe_code = StubCache::ComputeKeyedLoadConstant(*name,
- *receiver,
- lookup->holder(),
- constant);
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
+ *name, *receiver, lookup->holder(), constant);
break;
}
case CALLBACKS: {
AccessorInfo* callback =
AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = StubCache::ComputeKeyedLoadCallback(*name,
- *receiver,
- lookup->holder(),
- callback);
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ *name, *receiver, lookup->holder(), callback);
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = StubCache::ComputeKeyedLoadInterceptor(*name, *receiver,
- lookup->holder());
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
+ *name, *receiver, lookup->holder());
break;
}
default: {
if (!object->IsJSObject()) {
// The length property of string values is read-only. Throw in strict mode.
if (strict_mode == kStrictMode && object->IsString() &&
- name->Equals(Heap::length_symbol())) {
+ name->Equals(isolate()->heap()->length_symbol())) {
return TypeError("strict_read_only_property", object, name);
}
// Ignore stores where the receiver is not a JSObject.
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- HandleScope scope;
+ HandleScope scope(isolate());
Handle<Object> result = SetElement(receiver, index, value, strict_mode);
if (result.is_null()) return Failure::Exception();
return *value;
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
- && name->Equals(Heap::length_symbol())
+ && name->Equals(isolate()->heap()->length_symbol())
&& receiver->AllowsSetElementsLength()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
Builtins::Name target = (strict_mode == kStrictMode)
? Builtins::StoreIC_ArrayLength_Strict
: Builtins::StoreIC_ArrayLength;
- set_target(Builtins::builtin(target));
+ set_target(isolate()->builtins()->builtin(target));
return receiver->SetProperty(*name, *value, NONE, strict_mode);
}
Object* code = NULL;
switch (type) {
case FIELD: {
- maybe_code = StubCache::ComputeStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
break;
}
case MAP_TRANSITION: {
if (lookup->GetAttributes() != NONE) return;
- HandleScope scope;
+ HandleScope scope(isolate());
ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
- maybe_code = StubCache::ComputeStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeStoreField(
*name, *receiver, index, *transition, strict_mode);
break;
}
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = StubCache::ComputeStoreGlobal(
+ maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
*name, *global, cell, strict_mode);
} else {
if (lookup->holder() != *receiver) return;
- maybe_code = StubCache::ComputeStoreNormal(strict_mode);
+ maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
}
break;
}
if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->setter()) == 0) return;
- maybe_code = StubCache::ComputeStoreCallback(
+ maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
*name, *receiver, callback, strict_mode);
break;
}
case INTERCEPTOR: {
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- maybe_code = StubCache::ComputeStoreInterceptor(
+ maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
*name, *receiver, strict_mode);
break;
}
}
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
- StubCache::Set(*name, receiver->map(), Code::cast(code));
+ isolate()->stub_cache()->Set(*name,
+ receiver->map(),
+ Code::cast(code));
}
#ifdef DEBUG
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- HandleScope scope;
+ HandleScope scope(isolate());
Handle<Object> result = SetElement(receiver, index, value, strict_mode);
if (result.is_null()) return Failure::Exception();
return *value;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
- StubCache::ComputeKeyedLoadOrStoreExternalArray(
+ isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
*receiver, true, strict_mode);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
} else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
MaybeObject* probe =
- StubCache::ComputeKeyedStoreSpecialized(*receiver, strict_mode);
+ isolate()->stub_cache()->ComputeKeyedStoreSpecialized(
+ *receiver, strict_mode);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
}
}
// Set the property.
- return Runtime::SetObjectProperty(object, key, value, NONE, strict_mode);
+ return Runtime::SetObjectProperty(
+ isolate(), object, key, value, NONE, strict_mode);
}
switch (type) {
case FIELD: {
- maybe_code = StubCache::ComputeKeyedStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
break;
}
case MAP_TRANSITION: {
if (lookup->GetAttributes() == NONE) {
- HandleScope scope;
+ HandleScope scope(isolate());
ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
- maybe_code = StubCache::ComputeKeyedStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
*name, *receiver, index, *transition, strict_mode);
break;
}
// Static IC stub generators.
//
-static JSFunction* CompileFunction(JSFunction* function,
+static JSFunction* CompileFunction(Isolate* isolate,
+ JSFunction* function,
InLoopFlag in_loop) {
// Compile now with optimization.
- HandleScope scope;
- Handle<JSFunction> function_handle(function);
+ HandleScope scope(isolate);
+ Handle<JSFunction> function_handle(function, isolate);
if (in_loop == IN_LOOP) {
CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
} else {
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* CallIC_Miss(Arguments args) {
+MUST_USE_RESULT MaybeObject* CallIC_Miss(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 2);
- CallIC ic;
+ CallIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
MaybeObject* maybe_result = ic.LoadFunction(state,
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
+ return CompileFunction(isolate,
+ JSFunction::cast(result),
+ ic.target()->ic_in_loop());
}
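RUNTIME_CALLING_CONVENTION and RUNTIME_GET_ISOLATE are introduced elsewhere
in the patch. Given that each converted handler keeps its `args` parameter
and gains an `isolate` local that must match the current isolate, a
plausible expansion is (an assumption; the definitions are not shown in
these hunks):

  #define RUNTIME_CALLING_CONVENTION Arguments args, Isolate* isolate
  #define RUNTIME_GET_ISOLATE ASSERT(isolate == Isolate::Current())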
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedCallIC_Miss(Arguments args) {
+MUST_USE_RESULT MaybeObject* KeyedCallIC_Miss(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 2);
- KeyedCallIC ic;
+ KeyedCallIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Object* result;
{ MaybeObject* maybe_result =
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
+ return CompileFunction(isolate,
+ JSFunction::cast(result),
+ ic.target()->ic_in_loop());
}
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* LoadIC_Miss(Arguments args) {
+MUST_USE_RESULT MaybeObject* LoadIC_Miss(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 2);
- LoadIC ic;
+ LoadIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<String>(1));
}
// Used from ic-<arch>.cc
-MUST_USE_RESULT MaybeObject* KeyedLoadIC_Miss(Arguments args) {
+MUST_USE_RESULT MaybeObject* KeyedLoadIC_Miss(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 2);
- KeyedLoadIC ic;
+ KeyedLoadIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
}
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* StoreIC_Miss(Arguments args) {
+MUST_USE_RESULT MaybeObject* StoreIC_Miss(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 3);
- StoreIC ic;
+ StoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
}
-MUST_USE_RESULT MaybeObject* StoreIC_ArrayLength(Arguments args) {
+MUST_USE_RESULT MaybeObject* StoreIC_ArrayLength(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation nha;
ASSERT(args.length() == 2);
// Extend storage is called in a store inline cache when
// it is necessary to extend the properties array of a
// JSObject.
-MUST_USE_RESULT MaybeObject* SharedStoreIC_ExtendStorage(Arguments args) {
+MUST_USE_RESULT MaybeObject* SharedStoreIC_ExtendStorage(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 3);
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(Arguments args) {
+MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 3);
- KeyedStoreIC ic;
+ KeyedStoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
-MUST_USE_RESULT MaybeObject* BinaryOp_Patch(Arguments args) {
+MUST_USE_RESULT MaybeObject* BinaryOp_Patch(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 5);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
int key = Smi::cast(args[2])->value();
BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(*left, *right);
Handle<Code> code = GetBinaryOpStub(key, type);
if (!code.is_null()) {
- BinaryOpIC ic;
+ BinaryOpIC ic(isolate);
ic.patch(*code);
if (FLAG_trace_ic) {
PrintF("[BinaryOpIC (%s->%s)#%s]\n",
}
}
- Handle<JSBuiltinsObject> builtins = Top::builtins();
+ Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
+ isolate->thread_local_top()->context_->builtins(), isolate);
Object* builtin = NULL; // Initialization calms down the compiler.
switch (op) {
case Token::ADD:
UNREACHABLE();
}
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin));
+ Handle<JSFunction> builtin_function(JSFunction::cast(builtin),
+ isolate);
bool caught_exception;
Object** builtin_args[] = { right.location() };
TRBinaryOpIC::TypeInfo result_type);
-MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
+MaybeObject* TypeRecordingBinaryOp_Patch(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 5);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
int key = Smi::cast(args[2])->value();
TRBinaryOpIC::GetName(result_type),
Token::Name(op));
}
- TRBinaryOpIC ic;
+ TRBinaryOpIC ic(isolate);
ic.patch(*code);
// Activate inlined smi code.
}
}
- Handle<JSBuiltinsObject> builtins = Top::builtins();
+ Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
+ isolate->thread_local_top()->context_->builtins(), isolate);
Object* builtin = NULL; // Initialization calms down the compiler.
switch (op) {
case Token::ADD:
UNREACHABLE();
}
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin));
+ Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
bool caught_exception;
Object** builtin_args[] = { right.location() };
// Used from ic_<arch>.cc.
-Code* CompareIC_Miss(Arguments args) {
+Code* CompareIC_Miss(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 3);
- CompareIC ic(static_cast<Token::Value>(Smi::cast(args[2])->value()));
+ CompareIC ic(isolate,
+              static_cast<Token::Value>(Smi::cast(args[2])->value()));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
return ic.target();
}
-static Address IC_utilities[] = {
+static const Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
IC_UTIL_LIST(ADDR)
NULL
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- explicit IC(FrameDepth depth);
+ IC(FrameDepth depth, Isolate* isolate);
// Get the call-site target; used for determining the state.
Code* target() { return GetTargetAtAddress(address()); }
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
+ Isolate* isolate() const { return isolate_; }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Computes the address in the original code when the code running is
const char* extra_info = "");
#endif
- static Failure* TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key);
- static Failure* ReferenceError(const char* type, Handle<String> name);
+ Failure* TypeError(const char* type,
+ Handle<Object> object,
+ Handle<Object> key);
+ Failure* ReferenceError(const char* type, Handle<String> name);
// Access the target code for the given IC address.
static inline Code* GetTargetAtAddress(Address address);
// invoke the garbage collector.
Address* pc_address_;
+ Isolate* isolate_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
class CallICBase: public IC {
protected:
- explicit CallICBase(Code::Kind kind) : IC(EXTRA_CALL_FRAME), kind_(kind) {}
+ CallICBase(Code::Kind kind, Isolate* isolate)
+ : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
public:
MUST_USE_RESULT MaybeObject* LoadFunction(State state,
class CallIC: public CallICBase {
public:
- CallIC() : CallICBase(Code::CALL_IC) { ASSERT(target()->is_call_stub()); }
+ explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) {
+ ASSERT(target()->is_call_stub());
+ }
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm, int argc) {
class KeyedCallIC: public CallICBase {
public:
- KeyedCallIC() : CallICBase(Code::KEYED_CALL_IC) {
+ explicit KeyedCallIC(Isolate* isolate)
+ : CallICBase(Code::KEYED_CALL_IC, isolate) {
ASSERT(target()->is_keyed_call_stub());
}
class LoadIC: public IC {
public:
- LoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_load_stub()); }
+ explicit LoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+ ASSERT(target()->is_load_stub());
+ }
MUST_USE_RESULT MaybeObject* Load(State state,
Handle<Object> object,
Handle<String> name);
// Stub accessors.
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::LoadIC_Megamorphic);
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::LoadIC_Megamorphic);
}
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::LoadIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize);
}
- static Code* pre_monomorphic_stub() {
- return Builtins::builtin(Builtins::LoadIC_PreMonomorphic);
+ Code* pre_monomorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::LoadIC_PreMonomorphic);
}
static void Clear(Address address, Code* target);
class KeyedLoadIC: public IC {
public:
- KeyedLoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_keyed_load_stub()); }
+ explicit KeyedLoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+ ASSERT(target()->is_keyed_load_stub());
+ }
MUST_USE_RESULT MaybeObject* Load(State state,
Handle<Object> object,
// Stub accessors.
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize);
}
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Generic);
}
- static Code* generic_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+ Code* generic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Generic);
}
- static Code* pre_monomorphic_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
+ Code* pre_monomorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_PreMonomorphic);
}
- static Code* string_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_String);
+ Code* string_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_String);
}
- static Code* indexed_interceptor_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
+ Code* indexed_interceptor_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_IndexedInterceptor);
}
static void Clear(Address address, Code* target);
class StoreIC: public IC {
public:
- StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }
+ explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+ ASSERT(target()->is_store_stub());
+ }
MUST_USE_RESULT MaybeObject* Store(State state,
StrictModeFlag strict_mode,
}
// Stub accessors.
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::StoreIC_Megamorphic);
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::StoreIC_Megamorphic);
}
- static Code* megamorphic_stub_strict() {
- return Builtins::builtin(Builtins::StoreIC_Megamorphic_Strict);
+ Code* megamorphic_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::StoreIC_Megamorphic_Strict);
}
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::StoreIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Initialize);
}
static Code* initialize_stub_strict() {
- return Builtins::builtin(Builtins::StoreIC_Initialize_Strict);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Initialize_Strict);
}
- static Code* global_proxy_stub() {
- return Builtins::builtin(Builtins::StoreIC_GlobalProxy);
+ Code* global_proxy_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::StoreIC_GlobalProxy);
}
- static Code* global_proxy_stub_strict() {
- return Builtins::builtin(Builtins::StoreIC_GlobalProxy_Strict);
+ Code* global_proxy_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::StoreIC_GlobalProxy_Strict);
}
static void Clear(Address address, Code* target);
class KeyedStoreIC: public IC {
public:
- KeyedStoreIC() : IC(NO_EXTRA_FRAME) { }
+ explicit KeyedStoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
MUST_USE_RESULT MaybeObject* Store(State state,
StrictModeFlag strict_mode,
// Stub accessors.
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Initialize);
}
- static Code* initialize_stub_strict() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Initialize_Strict);
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Generic);
}
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+ static Code* initialize_stub_strict() {
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Initialize_Strict);
}
- static Code* megamorphic_stub_strict() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
+ Code* megamorphic_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Generic_Strict);
}
- static Code* generic_stub() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+ Code* generic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Generic);
}
- static Code* generic_stub_strict() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
+ Code* generic_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Generic_Strict);
}
static void Clear(Address address, Code* target);
GENERIC // Non-specialized case (processes any type combination).
};
- BinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+ explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
void patch(Code* code);
GENERIC
};
- TRBinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+ explicit TRBinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
void patch(Code* code);
GENERIC
};
- explicit CompareIC(Token::Value op) : IC(EXTRA_CALL_FRAME), op_(op) { }
+ CompareIC(Isolate* isolate, Token::Value op)
+ : IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
// Update the inline cache for the given operands.
void UpdateCaches(Handle<Object> x, Handle<Object> y);
namespace internal {
-static unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize;
+typedef unibrow::Mapping<unibrow::Ecma262Canonicalize> Canonicalize;
-
-static bool BackRefMatchesNoCase(int from,
+static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
+ int from,
int current,
int len,
Vector<const uc16> subject) {
if (old_char == new_char) continue;
unibrow::uchar old_string[1] = { old_char };
unibrow::uchar new_string[1] = { new_char };
- interp_canonicalize.get(old_char, '\0', old_string);
- interp_canonicalize.get(new_char, '\0', new_string);
+ interp_canonicalize->get(old_char, '\0', old_string);
+ interp_canonicalize->get(new_char, '\0', new_string);
if (old_string[0] != new_string[0]) {
return false;
}
}
-static bool BackRefMatchesNoCase(int from,
+static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
+ int from,
int current,
int len,
Vector<const char> subject) {
// matching terminates.
class BacktrackStack {
public:
- explicit BacktrackStack() {
- if (cache_ != NULL) {
+ explicit BacktrackStack(Isolate* isolate) : isolate_(isolate) {
+ if (isolate->irregexp_interpreter_backtrack_stack_cache() != NULL) {
// If the cache is not empty reuse the previously allocated stack.
- data_ = cache_;
- cache_ = NULL;
+ data_ = isolate->irregexp_interpreter_backtrack_stack_cache();
+ isolate->set_irregexp_interpreter_backtrack_stack_cache(NULL);
} else {
// Cache was empty. Allocate a new backtrack stack.
data_ = NewArray<int>(kBacktrackStackSize);
}
~BacktrackStack() {
- if (cache_ == NULL) {
+ if (isolate_->irregexp_interpreter_backtrack_stack_cache() == NULL) {
// The cache is empty. Keep this backtrack stack around.
- cache_ = data_;
+ isolate_->set_irregexp_interpreter_backtrack_stack_cache(data_);
} else {
// A backtrack stack was already cached, just release this one.
DeleteArray(data_);
static const int kBacktrackStackSize = 10000;
int* data_;
- static int* cache_;
+ Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
-int* BacktrackStack::cache_ = NULL;
-
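The constructor/destructor pair turns the former process-wide cache_ into a
single-slot cache on the isolate: at most one backtrack stack stays alive
per isolate between matches. A usage sketch under that assumption:

  {
    BacktrackStack stack(isolate);  // reuses the isolate's cached block, if any
    int* sp = stack.data();
    // ... push and pop backtrack entries through sp while interpreting ...
  }  // destructor re-caches the block on the isolate, or frees it if the
     // slot is already occupied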
template <typename Char>
-static bool RawMatch(const byte* code_base,
+static bool RawMatch(Isolate* isolate,
+ const byte* code_base,
Vector<const Char> subject,
int* registers,
int current,
// BacktrackStack ensures that the memory allocated for the backtracking stack
// is returned to the system or cached if there is no stack being cached at
// the moment.
- BacktrackStack backtrack_stack;
+ BacktrackStack backtrack_stack(isolate);
int* backtrack_stack_base = backtrack_stack.data();
int* backtrack_sp = backtrack_stack_base;
int backtrack_stack_space = backtrack_stack.max_size();
pc = code_base + Load32Aligned(pc + 4);
break;
} else {
- if (BackRefMatchesNoCase(from, current, len, subject)) {
+ if (BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
+ from, current, len, subject)) {
current += len;
pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
} else {
}
-bool IrregexpInterpreter::Match(Handle<ByteArray> code_array,
+bool IrregexpInterpreter::Match(Isolate* isolate,
+ Handle<ByteArray> code_array,
Handle<String> subject,
int* registers,
int start_position) {
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(code_base,
+ return RawMatch(isolate,
+ code_base,
subject_vector,
registers,
start_position,
} else {
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(code_base,
+ return RawMatch(isolate,
+ code_base,
subject_vector,
registers,
start_position,
class IrregexpInterpreter {
public:
- static bool Match(Handle<ByteArray> code,
+ static bool Match(Isolate* isolate,
+ Handle<ByteArray> code,
Handle<String> subject,
int* captures,
int start_position);
--- /dev/null
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "ast.h"
+#include "bootstrapper.h"
+#include "codegen.h"
+#include "compilation-cache.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "heap-profiler.h"
+#include "hydrogen.h"
+#include "isolate.h"
+#include "lithium-allocator.h"
+#include "log.h"
+#include "regexp-stack.h"
+#include "runtime-profiler.h"
+#include "scanner.h"
+#include "scopeinfo.h"
+#include "serialize.h"
+#include "simulator.h"
+#include "spaces.h"
+#include "stub-cache.h"
+#include "version.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Create a dummy thread that will wait forever on a semaphore. The only
+// purpose for this thread is to have some stack area to save essential data
+// into for use by a stacks only core dump (aka minidump).
+class PreallocatedMemoryThread: public Thread {
+ public:
+ char* data() {
+ if (data_ready_semaphore_ != NULL) {
+ // Initial access is guarded until the data has been published.
+ data_ready_semaphore_->Wait();
+ delete data_ready_semaphore_;
+ data_ready_semaphore_ = NULL;
+ }
+ return data_;
+ }
+
+ unsigned length() {
+ if (data_ready_semaphore_ != NULL) {
+ // Initial access is guarded until the data has been published.
+ data_ready_semaphore_->Wait();
+ delete data_ready_semaphore_;
+ data_ready_semaphore_ = NULL;
+ }
+ return length_;
+ }
+
+ // Stop the PreallocatedMemoryThread and release its resources.
+ void StopThread() {
+ keep_running_ = false;
+ wait_for_ever_semaphore_->Signal();
+
+ // Wait for the thread to terminate.
+ Join();
+
+ if (data_ready_semaphore_ != NULL) {
+ delete data_ready_semaphore_;
+ data_ready_semaphore_ = NULL;
+ }
+
+ delete wait_for_ever_semaphore_;
+ wait_for_ever_semaphore_ = NULL;
+ }
+
+ protected:
+ // When the thread starts running it will allocate a fixed number of bytes
+ // on the stack and publish the location of this memory for others to use.
+ void Run() {
+ EmbeddedVector<char, 15 * 1024> local_buffer;
+
+ // Initialize the buffer with a known good value.
+ OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
+ local_buffer.length());
+
+ // Publish the local buffer and signal its availability.
+ data_ = local_buffer.start();
+ length_ = local_buffer.length();
+ data_ready_semaphore_->Signal();
+
+ while (keep_running_) {
+ // This thread will wait here until the end of time.
+ wait_for_ever_semaphore_->Wait();
+ }
+
+ // Make sure we access the buffer after the wait to remove all possibility
+ // of it being optimized away.
+ OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
+ local_buffer.length());
+ }
+
+
+ private:
+ explicit PreallocatedMemoryThread(Isolate* isolate)
+ : Thread(isolate, "v8:PreallocMem"),
+ keep_running_(true),
+ wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
+ data_ready_semaphore_(OS::CreateSemaphore(0)),
+ data_(NULL),
+ length_(0) {
+ }
+
+ // Used to make sure that the thread keeps looping even for spurious wakeups.
+ bool keep_running_;
+
+ // This semaphore is used by the PreallocatedMemoryThread to wait forever.
+ Semaphore* wait_for_ever_semaphore_;
+ // Semaphore to signal that the data has been initialized.
+ Semaphore* data_ready_semaphore_;
+
+ // Location and size of the preallocated memory block.
+ char* data_;
+ unsigned length_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
+};
+
+
+void Isolate::PreallocatedMemoryThreadStart() {
+ if (preallocated_memory_thread_ != NULL) return;
+ preallocated_memory_thread_ = new PreallocatedMemoryThread(this);
+ preallocated_memory_thread_->Start();
+}
+
+
+void Isolate::PreallocatedMemoryThreadStop() {
+ if (preallocated_memory_thread_ == NULL) return;
+ preallocated_memory_thread_->StopThread();
+ // Done with the thread entirely.
+ delete preallocated_memory_thread_;
+ preallocated_memory_thread_ = NULL;
+}
+
+
+Isolate* Isolate::default_isolate_ = NULL;
+Thread::LocalStorageKey Isolate::isolate_key_;
+Thread::LocalStorageKey Isolate::thread_id_key_;
+Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
+Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
+Isolate::ThreadId Isolate::highest_thread_id_ = 0;
+
+
+class IsolateInitializer {
+ public:
+ IsolateInitializer() {
+ Isolate::EnsureDefaultIsolate();
+ }
+};
+
+static IsolateInitializer* EnsureDefaultIsolateAllocated() {
+ // TODO(isolates): Use the system threading API to do this once?
+ static IsolateInitializer static_initializer;
+ return &static_initializer;
+}
+
+// This variable is only needed to trigger static initialization.
+static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
+
+
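The file-scope static_initializer pointer exists only so that its
initializer, and therefore Isolate::EnsureDefaultIsolate(), runs during C++
static initialization, before any embedder code can enter V8. The idiom
reduced to its essentials (names hypothetical):

  static int RunOnce() { /* one-time setup */ return 0; }
  static int force_init = RunOnce();  // evaluated at static-initialization time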
+Isolate::ThreadId Isolate::AllocateThreadId() {
+ ThreadId new_id;
+ {
+ ScopedLock lock(process_wide_mutex_);
+ new_id = ++highest_thread_id_;
+ }
+ return new_id;
+}
+
+
+Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
+ ThreadId thread_id) {
+ ASSERT(thread_id != 0);
+ ASSERT(Thread::GetThreadLocalInt(thread_id_key_) == thread_id);
+ PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
+ {
+ ScopedLock lock(process_wide_mutex_);
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
+ thread_data_table_->Insert(per_thread);
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
+ }
+ return per_thread;
+}
+
+
+Isolate::PerIsolateThreadData*
+ Isolate::FindOrAllocatePerThreadDataForThisThread() {
+ ThreadId thread_id = Thread::GetThreadLocalInt(thread_id_key_);
+ if (thread_id == 0) {
+ thread_id = AllocateThreadId();
+ Thread::SetThreadLocalInt(thread_id_key_, thread_id);
+ }
+ PerIsolateThreadData* per_thread = NULL;
+ {
+ ScopedLock lock(process_wide_mutex_);
+ per_thread = thread_data_table_->Lookup(this, thread_id);
+ if (per_thread == NULL) {
+ per_thread = AllocatePerIsolateThreadData(thread_id);
+ }
+ }
+ return per_thread;
+}
+
+
+void Isolate::EnsureDefaultIsolate() {
+ ScopedLock lock(process_wide_mutex_);
+ if (default_isolate_ == NULL) {
+ isolate_key_ = Thread::CreateThreadLocalKey();
+ thread_id_key_ = Thread::CreateThreadLocalKey();
+ per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+ thread_data_table_ = new Isolate::ThreadDataTable();
+ default_isolate_ = new Isolate();
+ }
+ // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
+ // because non-null thread data may already be set.
+ Thread::SetThreadLocal(isolate_key_, default_isolate_);
+ CHECK(default_isolate_->PreInit());
+}
+
+
+Debugger* Isolate::GetDefaultIsolateDebugger() {
+ EnsureDefaultIsolate();
+ return default_isolate_->debugger();
+}
+
+
+StackGuard* Isolate::GetDefaultIsolateStackGuard() {
+ EnsureDefaultIsolate();
+ return default_isolate_->stack_guard();
+}
+
+
+void Isolate::EnterDefaultIsolate() {
+ EnsureDefaultIsolate();
+ ASSERT(default_isolate_ != NULL);
+
+ PerIsolateThreadData* data = CurrentPerIsolateThreadData();
+ // If not yet in default isolate - enter it.
+ if (data == NULL || data->isolate() != default_isolate_) {
+ default_isolate_->Enter();
+ }
+}
+
+
+Isolate* Isolate::GetDefaultIsolateForLocking() {
+ EnsureDefaultIsolate();
+ return default_isolate_;
+}
+
+
+Isolate::ThreadDataTable::ThreadDataTable()
+ : list_(NULL) {
+}
+
+
+Isolate::PerIsolateThreadData*
+ Isolate::ThreadDataTable::Lookup(Isolate* isolate, ThreadId thread_id) {
+ for (PerIsolateThreadData* data = list_; data != NULL; data = data->next_) {
+ if (data->Matches(isolate, thread_id)) return data;
+ }
+ return NULL;
+}
+
+
+void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
+ if (list_ != NULL) list_->prev_ = data;
+ data->next_ = list_;
+ list_ = data;
+}
+
+
+void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
+ if (list_ == data) list_ = data->next_;
+ if (data->next_ != NULL) data->next_->prev_ = data->prev_;
+ if (data->prev_ != NULL) data->prev_->next_ = data->next_;
+}
+
+
+void Isolate::ThreadDataTable::Remove(Isolate* isolate, ThreadId thread_id) {
+ PerIsolateThreadData* data = Lookup(isolate, thread_id);
+ if (data != NULL) {
+ Remove(data);
+ }
+}
+
+
+#ifdef DEBUG
+#define TRACE_ISOLATE(tag) \
+ do { \
+ if (FLAG_trace_isolates) { \
+ PrintF("Isolate %p " #tag "\n", reinterpret_cast<void*>(this)); \
+ } \
+ } while (false)
+#else
+#define TRACE_ISOLATE(tag)
+#endif
+
+
+Isolate::Isolate()
+ : state_(UNINITIALIZED),
+ entry_stack_(NULL),
+ stack_trace_nesting_level_(0),
+ incomplete_message_(NULL),
+ preallocated_memory_thread_(NULL),
+ preallocated_message_space_(NULL),
+ bootstrapper_(NULL),
+ runtime_profiler_(NULL),
+ compilation_cache_(NULL),
+ counters_(new Counters()),
+ cpu_features_(NULL),
+ code_range_(NULL),
+ break_access_(OS::CreateMutex()),
+ logger_(new Logger()),
+ stats_table_(new StatsTable()),
+ stub_cache_(NULL),
+ deoptimizer_data_(NULL),
+ capture_stack_trace_for_uncaught_exceptions_(false),
+ stack_trace_for_uncaught_exceptions_frame_limit_(0),
+ stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
+ transcendental_cache_(NULL),
+ memory_allocator_(NULL),
+ keyed_lookup_cache_(NULL),
+ context_slot_cache_(NULL),
+ descriptor_lookup_cache_(NULL),
+ handle_scope_implementer_(NULL),
+ scanner_constants_(NULL),
+ in_use_list_(0),
+ free_list_(0),
+ preallocated_storage_preallocated_(false),
+ pc_to_code_cache_(NULL),
+ write_input_buffer_(NULL),
+ global_handles_(NULL),
+ context_switcher_(NULL),
+ thread_manager_(NULL),
+ ast_sentinels_(NULL),
+ string_tracker_(NULL),
+ regexp_stack_(NULL),
+ frame_element_constant_list_(0),
+ result_constant_list_(0) {
+ TRACE_ISOLATE(constructor);
+
+ memset(isolate_addresses_, 0,
+ sizeof(isolate_addresses_[0]) * (k_isolate_address_count + 1));
+
+ heap_.isolate_ = this;
+ zone_.isolate_ = this;
+ stack_guard_.isolate_ = this;
+
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
+ simulator_initialized_ = false;
+ simulator_i_cache_ = NULL;
+ simulator_redirection_ = NULL;
+#endif
+
+#ifdef DEBUG
+ // heap_histograms_ initializes itself.
+ memset(&js_spill_information_, 0, sizeof(js_spill_information_));
+ memset(code_kind_statistics_, 0,
+ sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
+#endif
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debug_ = NULL;
+ debugger_ = NULL;
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ producer_heap_profile_ = NULL;
+#endif
+
+ handle_scope_data_.Initialize();
+
+#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
+ name##_ = (initial_value);
+ ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
+#undef ISOLATE_INIT_EXECUTE
+
+#define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length) \
+ memset(name##_, 0, sizeof(type) * length);
+ ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
+#undef ISOLATE_INIT_ARRAY_EXECUTE
+}
+
+
+void Isolate::TearDown() {
+ TRACE_ISOLATE(tear_down);
+
+ // Temporarily set this isolate as current so that various parts of
+ // the isolate can access it in their destructors without having a
+ // direct pointer. We don't use Enter/Exit here to avoid
+ // initializing the thread data.
+ PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
+ Isolate* saved_isolate = UncheckedCurrent();
+ SetIsolateThreadLocals(this, NULL);
+
+ Deinit();
+
+ if (!IsDefaultIsolate()) {
+ delete this;
+ }
+
+ // Restore the previous current isolate.
+ SetIsolateThreadLocals(saved_isolate, saved_data);
+}
+
+
+void Isolate::Deinit() {
+ if (state_ == INITIALIZED) {
+ TRACE_ISOLATE(deinit);
+
+ if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
+
+ // We must stop the logger before we tear down other components.
+ logger_->EnsureTickerStopped();
+
+ delete deoptimizer_data_;
+ deoptimizer_data_ = NULL;
+ if (FLAG_preemption) {
+ v8::Locker locker;
+ v8::Locker::StopPreemption();
+ }
+ builtins_.TearDown();
+ bootstrapper_->TearDown();
+
+ // Remove the external reference to the preallocated stack memory.
+ delete preallocated_message_space_;
+ preallocated_message_space_ = NULL;
+ PreallocatedMemoryThreadStop();
+
+ HeapProfiler::TearDown();
+ CpuProfiler::TearDown();
+ if (runtime_profiler_ != NULL) {
+ runtime_profiler_->TearDown();
+ delete runtime_profiler_;
+ runtime_profiler_ = NULL;
+ }
+ heap_.TearDown();
+ logger_->TearDown();
+
+ // The default isolate is re-initializable due to legacy API.
+ state_ = PREINITIALIZED;
+ }
+}
+
+
+void Isolate::SetIsolateThreadLocals(Isolate* isolate,
+ PerIsolateThreadData* data) {
+ Thread::SetThreadLocal(isolate_key_, isolate);
+ Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
+}
+
+
+Isolate::~Isolate() {
+ TRACE_ISOLATE(destructor);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ delete producer_heap_profile_;
+ producer_heap_profile_ = NULL;
+#endif
+
+ delete scanner_constants_;
+ scanner_constants_ = NULL;
+
+ delete regexp_stack_;
+ regexp_stack_ = NULL;
+
+ delete ast_sentinels_;
+ ast_sentinels_ = NULL;
+
+ delete descriptor_lookup_cache_;
+ descriptor_lookup_cache_ = NULL;
+ delete context_slot_cache_;
+ context_slot_cache_ = NULL;
+ delete keyed_lookup_cache_;
+ keyed_lookup_cache_ = NULL;
+
+ delete transcendental_cache_;
+ transcendental_cache_ = NULL;
+ delete stub_cache_;
+ stub_cache_ = NULL;
+ delete stats_table_;
+ stats_table_ = NULL;
+
+ delete logger_;
+ logger_ = NULL;
+
+ delete counters_;
+ counters_ = NULL;
+ delete cpu_features_;
+ cpu_features_ = NULL;
+
+ delete handle_scope_implementer_;
+ handle_scope_implementer_ = NULL;
+ delete break_access_;
+ break_access_ = NULL;
+
+ delete compilation_cache_;
+ compilation_cache_ = NULL;
+ delete bootstrapper_;
+ bootstrapper_ = NULL;
+ delete pc_to_code_cache_;
+ pc_to_code_cache_ = NULL;
+ delete write_input_buffer_;
+ write_input_buffer_ = NULL;
+
+ delete context_switcher_;
+ context_switcher_ = NULL;
+ delete thread_manager_;
+ thread_manager_ = NULL;
+
+ delete string_tracker_;
+ string_tracker_ = NULL;
+
+ delete memory_allocator_;
+ memory_allocator_ = NULL;
+ delete code_range_;
+ code_range_ = NULL;
+ delete global_handles_;
+ global_handles_ = NULL;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ delete debugger_;
+ debugger_ = NULL;
+ delete debug_;
+ debug_ = NULL;
+#endif
+}
+
+
+bool Isolate::PreInit() {
+ if (state_ != UNINITIALIZED) return true;
+
+ TRACE_ISOLATE(preinit);
+
+ ASSERT(Isolate::Current() == this);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debug_ = new Debug(this);
+ debugger_ = new Debugger();
+ debugger_->isolate_ = this;
+#endif
+
+ memory_allocator_ = new MemoryAllocator();
+ memory_allocator_->isolate_ = this;
+ code_range_ = new CodeRange();
+ code_range_->isolate_ = this;
+
+ // Safe after setting Heap::isolate_, initializing StackGuard and
+ // ensuring that Isolate::Current() == this.
+ heap_.SetStackLimits();
+
+#ifdef DEBUG
+ DisallowAllocationFailure disallow_allocation_failure;
+#endif
+
+#define C(name) isolate_addresses_[Isolate::k_##name] = \
+ reinterpret_cast<Address>(name());
+ ISOLATE_ADDRESS_LIST(C)
+ ISOLATE_ADDRESS_LIST_PROF(C)
+#undef C
+
+ string_tracker_ = new StringTracker();
+ string_tracker_->isolate_ = this;
+ thread_manager_ = new ThreadManager();
+ thread_manager_->isolate_ = this;
+ compilation_cache_ = new CompilationCache();
+ transcendental_cache_ = new TranscendentalCache();
+ keyed_lookup_cache_ = new KeyedLookupCache();
+ context_slot_cache_ = new ContextSlotCache();
+ descriptor_lookup_cache_ = new DescriptorLookupCache();
+ scanner_constants_ = new ScannerConstants();
+ pc_to_code_cache_ = new PcToCodeCache(this);
+ write_input_buffer_ = new StringInputBuffer();
+ global_handles_ = new GlobalHandles(this);
+ bootstrapper_ = new Bootstrapper();
+ cpu_features_ = new CpuFeatures();
+ handle_scope_implementer_ = new HandleScopeImplementer();
+ stub_cache_ = new StubCache(this);
+ ast_sentinels_ = new AstSentinels();
+ regexp_stack_ = new RegExpStack();
+ regexp_stack_->isolate_ = this;
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ producer_heap_profile_ = new ProducerHeapProfile();
+ producer_heap_profile_->isolate_ = this;
+#endif
+
+ state_ = PREINITIALIZED;
+ return true;
+}
+
+
+void Isolate::InitializeThreadLocal() {
+ thread_local_top_.Initialize();
+ clear_pending_exception();
+ clear_pending_message();
+ clear_scheduled_exception();
+}
+
+
+bool Isolate::Init(Deserializer* des) {
+ ASSERT(state_ != INITIALIZED);
+
+ TRACE_ISOLATE(init);
+
+ bool create_heap_objects = des == NULL;
+
+#ifdef DEBUG
+ // The initialization process does not handle memory exhaustion.
+ DisallowAllocationFailure disallow_allocation_failure;
+#endif
+
+ if (state_ == UNINITIALIZED && !PreInit()) return false;
+
+ // Enable logging before setting up the heap
+ logger_->Setup();
+
+ CpuProfiler::Setup();
+ HeapProfiler::Setup();
+
+ // Setup the platform OS support.
+ OS::Setup();
+
+ // Initialize other runtime facilities
+#if defined(USE_SIMULATOR)
+#if defined(V8_TARGET_ARCH_ARM)
+ Simulator::Initialize();
+#elif defined(V8_TARGET_ARCH_MIPS)
+ ::assembler::mips::Simulator::Initialize();
+#endif
+#endif
+
+ { // NOLINT
+ // Ensure that the thread has a valid stack guard. The v8::Locker object
+ // will ensure this too, but we don't have to use lockers if we are only
+ // using one thread.
+ ExecutionAccess lock(this);
+ stack_guard_.InitThread(lock);
+ }
+
+ // Setup the object heap
+ ASSERT(!heap_.HasBeenSetup());
+ if (!heap_.Setup(create_heap_objects)) {
+ V8::SetFatalError();
+ return false;
+ }
+
+ bootstrapper_->Initialize(create_heap_objects);
+ builtins_.Setup(create_heap_objects);
+
+ InitializeThreadLocal();
+
+ // Only preallocate on the first initialization.
+ if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
+ // Start the thread which will set aside some memory.
+ PreallocatedMemoryThreadStart();
+ preallocated_message_space_ =
+ new NoAllocationStringAllocator(
+ preallocated_memory_thread_->data(),
+ preallocated_memory_thread_->length());
+ PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
+ }
+
+ if (FLAG_preemption) {
+ v8::Locker locker;
+ v8::Locker::StartPreemption(100);
+ }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debug_->Setup(create_heap_objects);
+#endif
+ stub_cache_->Initialize(create_heap_objects);
+
+ // If we are deserializing, read the state into the now-empty heap.
+ if (des != NULL) {
+ des->Deserialize();
+ stub_cache_->Clear();
+ }
+
+ // Deserializing may put strange things in the root array's copy of the
+ // stack guard.
+ heap_.SetStackLimits();
+
+ // Setup the CPU support. Must be done after heap setup and after
+ // any deserialization because we have to have the initial heap
+ // objects in place for creating the code object used for probing.
+ CPU::Setup();
+
+ deoptimizer_data_ = new DeoptimizerData;
+ runtime_profiler_ = new RuntimeProfiler(this);
+ runtime_profiler_->Setup();
+
+ // If we are deserializing, log non-function code objects and compiled
+ // functions found in the snapshot.
+ if (des != NULL && FLAG_log_code) {
+ HandleScope scope;
+ LOG(this, LogCodeObjects());
+ LOG(this, LogCompiledFunctions());
+ }
+
+ state_ = INITIALIZED;
+ return true;
+}
+
+
+void Isolate::Enter() {
+ Isolate* current_isolate = NULL;
+ PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
+ if (current_data != NULL) {
+ current_isolate = current_data->isolate_;
+ ASSERT(current_isolate != NULL);
+ if (current_isolate == this) {
+ ASSERT(Current() == this);
+ ASSERT(entry_stack_ != NULL);
+ ASSERT(entry_stack_->previous_thread_data == NULL ||
+ entry_stack_->previous_thread_data->thread_id() ==
+ Thread::GetThreadLocalInt(thread_id_key_));
+ // Same thread re-enters the isolate, no need to re-init anything.
+ entry_stack_->entry_count++;
+ return;
+ }
+ }
+
+  // Threads can have the default isolate set as Current in TLS without yet
+  // having a PerIsolateThreadData for it, as creating that data requires a
+  // more advanced phase of initialization. For example, a thread might be
+  // the one that the system used for static initializers - in this case the
+  // default isolate is set in TLS but the thread has not yet Enter'ed the
+  // isolate. If PerIsolateThreadData is not there, use the isolate set in
+  // TLS.
+ if (current_isolate == NULL) {
+ current_isolate = Isolate::UncheckedCurrent();
+ }
+
+ PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(data != NULL);
+ ASSERT(data->isolate_ == this);
+
+ EntryStackItem* item = new EntryStackItem(current_data,
+ current_isolate,
+ entry_stack_);
+ entry_stack_ = item;
+
+ SetIsolateThreadLocals(this, data);
+
+ CHECK(PreInit());
+
+ // In case it's the first time some thread enters the isolate.
+ set_thread_id(data->thread_id());
+}
+
+
+void Isolate::Exit() {
+ ASSERT(entry_stack_ != NULL);
+ ASSERT(entry_stack_->previous_thread_data == NULL ||
+ entry_stack_->previous_thread_data->thread_id() ==
+ Thread::GetThreadLocalInt(thread_id_key_));
+
+ if (--entry_stack_->entry_count > 0) return;
+
+ ASSERT(CurrentPerIsolateThreadData() != NULL);
+ ASSERT(CurrentPerIsolateThreadData()->isolate_ == this);
+
+ // Pop the stack.
+ EntryStackItem* item = entry_stack_;
+ entry_stack_ = item->previous_item;
+
+ PerIsolateThreadData* previous_thread_data = item->previous_thread_data;
+ Isolate* previous_isolate = item->previous_isolate;
+
+ delete item;
+
+ // Reinit the current thread for the isolate it was running before this one.
+ SetIsolateThreadLocals(previous_isolate, previous_thread_data);
+}
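+
+
+// A usage sketch (illustrative): Enter/Exit nest per thread, so
+// re-entering the current isolate only bumps the counter:
+//
+//   isolate->Enter();  // pushes an EntryStackItem, entry_count == 1
+//   isolate->Enter();  // same thread and isolate: entry_count becomes 2
+//   isolate->Exit();   // entry_count back to 1, thread state unchanged
+//   isolate->Exit();   // pops the item and restores the previous isolate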
+
+
+void Isolate::ResetEagerOptimizingData() {
+ compilation_cache_->ResetEagerOptimizingData();
+}
+
+
+#ifdef DEBUG
+#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
+const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
+ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
+ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
+#undef ISOLATE_FIELD_OFFSET
+#endif
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ISOLATE_H_
+#define V8_ISOLATE_H_
+
+#include "../include/v8-debug.h"
+#include "allocation.h"
+#include "apiutils.h"
+#include "atomicops.h"
+#include "builtins.h"
+#include "contexts.h"
+#include "execution.h"
+#include "frames.h"
+#include "global-handles.h"
+#include "handles.h"
+#include "heap.h"
+#include "regexp-stack.h"
+#include "runtime-profiler.h"
+#include "runtime.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+class AstSentinels;
+class Bootstrapper;
+class CodeGenerator;
+class CodeRange;
+class CompilationCache;
+class ContextSlotCache;
+class ContextSwitcher;
+class Counters;
+class CpuFeatures;
+class CpuProfiler;
+class DeoptimizerData;
+class Deserializer;
+class EmptyStatement;
+class ExternalReferenceTable;
+class Factory;
+class FunctionInfoListener;
+class HandleScopeImplementer;
+class HeapProfiler;
+class InlineRuntimeFunctionsTable;
+class NoAllocationStringAllocator;
+class PcToCodeCache;
+class PreallocatedMemoryThread;
+class ProducerHeapProfile;
+class RegExpStack;
+class SaveContext;
+class ScannerConstants;
+class StringInputBuffer;
+class StringTracker;
+class StubCache;
+class ThreadManager;
+class ThreadState;
+class ThreadVisitor; // Defined in v8threads.h
+class VMState;
+
+// 'void function pointer', used to roundtrip the
+// ExternalReference::ExternalReferenceRedirector since we cannot include
+// assembler.h, where it is defined, here.
+typedef void* ExternalReferenceRedirectorPointer();
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+class Debug;
+class Debugger;
+class DebuggerAgent;
+#endif
+
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+class Redirection;
+class Simulator;
+#endif
+
+// Static indirection table for handles to constants. If a frame
+// element or a Result represents a constant, the data contains an
+// index into this table of handles to the actual constants.
+typedef ZoneList<Handle<Object> > ZoneObjectList;
+
+#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
+ if (isolate->has_scheduled_exception()) \
+ return isolate->PromoteScheduledException()
+
+#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
+ if (call.is_null()) { \
+ ASSERT(isolate->has_pending_exception()); \
+ return value; \
+ }
+
+#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
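+
+// A usage sketch (illustrative; SomeOperationThatMayThrow is hypothetical):
+//
+//   Handle<Object> result = SomeOperationThatMayThrow(isolate, args);
+//   RETURN_IF_EMPTY_HANDLE(isolate, result);
+//   // Here result is known to be non-empty.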
+
+#define ISOLATE_ADDRESS_LIST(C) \
+ C(handler_address) \
+ C(c_entry_fp_address) \
+ C(context_address) \
+ C(pending_exception_address) \
+ C(external_caught_exception_address)
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define ISOLATE_ADDRESS_LIST_PROF(C) \
+ C(js_entry_sp_address)
+#else
+#define ISOLATE_ADDRESS_LIST_PROF(C)
+#endif
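+
+// Each entry in the address lists above is expanded through a
+// caller-supplied C(name) macro; e.g. in the AddressId enum further below,
+// C(handler_address) expands to k_handler_address.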
+
+
+class ThreadLocalTop BASE_EMBEDDED {
+ public:
+ // Initialize the thread data.
+ void Initialize();
+
+ // Get the top C++ try catch handler or NULL if none are registered.
+ //
+  // This method is not guaranteed to return an address that can be
+ // used for comparison with addresses into the JS stack. If such an
+ // address is needed, use try_catch_handler_address.
+ v8::TryCatch* TryCatchHandler();
+
+ // Get the address of the top C++ try catch handler or NULL if
+ // none are registered.
+ //
+ // This method always returns an address that can be compared to
+ // pointers into the JavaScript stack. When running on actual
+ // hardware, try_catch_handler_address and TryCatchHandler return
+ // the same pointer. When running on a simulator with a separate JS
+ // stack, try_catch_handler_address returns a JS stack address that
+ // corresponds to the place on the JS stack where the C++ handler
+ // would have been if the stack were not separate.
+ inline Address try_catch_handler_address() {
+ return try_catch_handler_address_;
+ }
+
+ // Set the address of the top C++ try catch handler.
+ inline void set_try_catch_handler_address(Address address) {
+ try_catch_handler_address_ = address;
+ }
+
+ void Free() {
+ ASSERT(!has_pending_message_);
+ ASSERT(!external_caught_exception_);
+ ASSERT(try_catch_handler_address_ == NULL);
+ }
+
+ // The context where the current execution method is created and for variable
+ // lookups.
+ Context* context_;
+ int thread_id_;
+ MaybeObject* pending_exception_;
+ bool has_pending_message_;
+ const char* pending_message_;
+ Object* pending_message_obj_;
+ Script* pending_message_script_;
+ int pending_message_start_pos_;
+ int pending_message_end_pos_;
+ // Use a separate value for scheduled exceptions to preserve the
+ // invariants that hold about pending_exception. We may want to
+ // unify them later.
+ MaybeObject* scheduled_exception_;
+ bool external_caught_exception_;
+ SaveContext* save_context_;
+ v8::TryCatch* catcher_;
+
+ // Stack.
+ Address c_entry_fp_; // the frame pointer of the top c entry frame
+ Address handler_; // try-blocks are chained through the stack
+
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+ Simulator* simulator_;
+#elif V8_TARGET_ARCH_MIPS
+ assembler::mips::Simulator* simulator_;
+#endif
+#endif // USE_SIMULATOR
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Address js_entry_sp_; // the stack pointer of the bottom js entry frame
+ Address external_callback_; // the external callback we're currently in
+#endif
+
+#ifdef ENABLE_VMSTATE_TRACKING
+ StateTag current_vm_state_;
+#endif
+
+ // Generated code scratch locations.
+ int32_t formal_count_;
+
+ // Call back function to report unsafe JS accesses.
+ v8::FailedAccessCheckCallback failed_access_check_callback_;
+
+ private:
+ Address try_catch_handler_address_;
+};
+
+#if defined(V8_TARGET_ARCH_ARM)
+
+#define ISOLATE_PLATFORM_INIT_LIST(V) \
+ /* VirtualFrame::SpilledScope state */ \
+ V(bool, is_virtual_frame_in_spilled_scope, false) \
+ /* CodeGenerator::EmitNamedStore state */ \
+ V(int, inlined_write_barrier_size, -1)
+
+#if !defined(__arm__)
+class HashMap;
+#endif
+
+#else
+
+#define ISOLATE_PLATFORM_INIT_LIST(V)
+
+#endif
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+#define ISOLATE_DEBUGGER_INIT_LIST(V) \
+ V(v8::Debug::EventCallback, debug_event_callback, NULL) \
+ V(DebuggerAgent*, debugger_agent_instance, NULL)
+#else
+
+#define ISOLATE_DEBUGGER_INIT_LIST(V)
+
+#endif
+
+#ifdef DEBUG
+
+#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
+ V(CommentStatistic, paged_space_comments_statistics, \
+ CommentStatistic::kMaxComments + 1)
+#else
+
+#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
+
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#define ISOLATE_LOGGING_INIT_LIST(V) \
+ V(CpuProfiler*, cpu_profiler, NULL) \
+ V(HeapProfiler*, heap_profiler, NULL)
+
+#else
+
+#define ISOLATE_LOGGING_INIT_LIST(V)
+
+#endif
+
+#define ISOLATE_INIT_ARRAY_LIST(V) \
+ /* SerializerDeserializer state. */ \
+ V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
+ V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
+ V(int, bad_char_shift_table, kUC16AlphabetSize) \
+ V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
+ V(int, suffix_table, (kBMMaxShift + 1)) \
+ ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
+
+typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
+
+#define ISOLATE_INIT_LIST(V) \
+ /* AssertNoZoneAllocation state. */ \
+ V(bool, zone_allow_allocation, true) \
+ /* SerializerDeserializer state. */ \
+ V(int, serialize_partial_snapshot_cache_length, 0) \
+ /* Assembler state. */ \
+ /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
+ V(byte*, assembler_spare_buffer, NULL) \
+ /*This static counter ensures that NativeAllocationCheckers can be nested.*/ \
+ V(int, allocation_disallowed, 0) \
+ V(FatalErrorCallback, exception_behavior, NULL) \
+ V(v8::Debug::MessageHandler, message_handler, NULL) \
+ /* To distinguish the function templates, so that we can find them in the */ \
+ /* function cache of the global context. */ \
+ V(int, next_serial_number, 0) \
+ V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
+ V(bool, always_allow_natives_syntax, false) \
+ /* Part of the state of liveedit. */ \
+ V(FunctionInfoListener*, active_function_info_listener, NULL) \
+ /* State for Relocatable. */ \
+ V(Relocatable*, relocatable_top, NULL) \
+ /* State for CodeEntry in profile-generator. */ \
+ V(CodeGenerator*, current_code_generator, NULL) \
+ V(bool, jump_target_compiling_deferred_code, false) \
+ V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
+ V(Object*, string_stream_current_security_token, NULL) \
+ /* TODO(isolates): Release this on destruction? */ \
+ V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
+ /* Serializer state. */ \
+ V(ExternalReferenceTable*, external_reference_table, NULL) \
+ /* AstNode state. */ \
+ V(unsigned, ast_node_id, 0) \
+ V(unsigned, ast_node_count, 0) \
+ ISOLATE_PLATFORM_INIT_LIST(V) \
+ ISOLATE_LOGGING_INIT_LIST(V) \
+ ISOLATE_DEBUGGER_INIT_LIST(V)
+
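+// An illustrative expansion: each V(type, name, initial_value) entry above
+// becomes a name##_ backing field plus name()/set_name() accessors (see
+// GLOBAL_ACCESSOR and GLOBAL_BACKING_STORE in class Isolate below). For
+// example, V(bool, zone_allow_allocation, true) yields roughly:
+//
+//   bool zone_allow_allocation() const { return zone_allow_allocation_; }
+//   void set_zone_allow_allocation(bool value);
+//   bool zone_allow_allocation_;  // set to true in Isolate's constructor
+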
+class Isolate {
+ // These forward declarations are required to make the friend declarations in
+ // PerIsolateThreadData work on some older versions of gcc.
+ class ThreadDataTable;
+ class EntryStackItem;
+ public:
+ ~Isolate();
+
+ typedef int ThreadId;
+
+ // A thread has a PerIsolateThreadData instance for each isolate that it has
+ // entered. That instance is allocated when the isolate is initially entered
+ // and reused on subsequent entries.
+ class PerIsolateThreadData {
+ public:
+ PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
+ : isolate_(isolate),
+ thread_id_(thread_id),
+ stack_limit_(0),
+ thread_state_(NULL),
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+ simulator_(NULL),
+#endif
+ next_(NULL),
+ prev_(NULL) { }
+ Isolate* isolate() const { return isolate_; }
+ ThreadId thread_id() const { return thread_id_; }
+ void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
+ uintptr_t stack_limit() const { return stack_limit_; }
+ ThreadState* thread_state() const { return thread_state_; }
+ void set_thread_state(ThreadState* value) { thread_state_ = value; }
+
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+ Simulator* simulator() const { return simulator_; }
+ void set_simulator(Simulator* simulator) {
+ simulator_ = simulator;
+ }
+#endif
+
+ bool Matches(Isolate* isolate, ThreadId thread_id) const {
+ return isolate_ == isolate && thread_id_ == thread_id;
+ }
+
+ private:
+ Isolate* isolate_;
+ ThreadId thread_id_;
+ uintptr_t stack_limit_;
+ ThreadState* thread_state_;
+
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+ Simulator* simulator_;
+#endif
+
+ PerIsolateThreadData* next_;
+ PerIsolateThreadData* prev_;
+
+ friend class Isolate;
+ friend class ThreadDataTable;
+ friend class EntryStackItem;
+
+ DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
+ };
+
+
+ enum AddressId {
+#define C(name) k_##name,
+ ISOLATE_ADDRESS_LIST(C)
+ ISOLATE_ADDRESS_LIST_PROF(C)
+#undef C
+ k_isolate_address_count
+ };
+
+ // Returns the PerIsolateThreadData for the current thread (or NULL if one is
+ // not currently set).
+ static PerIsolateThreadData* CurrentPerIsolateThreadData() {
+ return reinterpret_cast<PerIsolateThreadData*>(
+ Thread::GetThreadLocal(per_isolate_thread_data_key_));
+ }
+
+ // Returns the isolate inside which the current thread is running.
+ INLINE(static Isolate* Current()) {
+ Isolate* isolate = UncheckedCurrent();
+ ASSERT(isolate != NULL);
+ return isolate;
+ }
+
+ INLINE(static Isolate* UncheckedCurrent()) {
+ return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
+ }
+
+ bool Init(Deserializer* des);
+
+ bool IsInitialized() { return state_ == INITIALIZED; }
+
+ // True if at least one thread Enter'ed this isolate.
+ bool IsInUse() { return entry_stack_ != NULL; }
+
+ // Destroys the non-default isolates.
+  // Sets the default isolate into a "has_been_disposed" state rather than
+  // destroying it, for legacy API reasons.
+ void TearDown();
+
+ bool IsDefaultIsolate() const { return this == default_isolate_; }
+
+ // Ensures that process-wide resources and the default isolate have been
+  // allocated. It is only necessary to call this method in rare cases, for
+ // example if you are using V8 from within the body of a static initializer.
+ // Safe to call multiple times.
+ static void EnsureDefaultIsolate();
+
+ // Get the debugger from the default isolate. Preinitializes the
+ // default isolate if needed.
+ static Debugger* GetDefaultIsolateDebugger();
+
+ // Get the stack guard from the default isolate. Preinitializes the
+ // default isolate if needed.
+ static StackGuard* GetDefaultIsolateStackGuard();
+
+ // Returns the key used to store the pointer to the current isolate.
+ // Used internally for V8 threads that do not execute JavaScript but still
+ // are part of the domain of an isolate (like the context switcher).
+ static Thread::LocalStorageKey isolate_key() {
+ return isolate_key_;
+ }
+
+ // Returns the key used to store process-wide thread IDs.
+ static Thread::LocalStorageKey thread_id_key() {
+ return thread_id_key_;
+ }
+
+ // Atomically allocates a new thread ID.
+ static ThreadId AllocateThreadId();
+
+ // If a client attempts to create a Locker without specifying an isolate,
+ // we assume that the client is using legacy behavior. Set up the current
+ // thread to be inside the implicit isolate (or fail a check if we have
+ // switched to non-legacy behavior).
+ static void EnterDefaultIsolate();
+
+ // Debug.
+ // Mutex for serializing access to break control structures.
+ Mutex* break_access() { return break_access_; }
+
+ Address get_address_from_id(AddressId id);
+
+ // Access to top context (where the current function object was created).
+ Context* context() { return thread_local_top_.context_; }
+ void set_context(Context* context) {
+ thread_local_top_.context_ = context;
+ }
+ Context** context_address() { return &thread_local_top_.context_; }
+
+  SaveContext* save_context() { return thread_local_top_.save_context_; }
+ void set_save_context(SaveContext* save) {
+ thread_local_top_.save_context_ = save;
+ }
+
+ // Access to current thread id.
+ int thread_id() { return thread_local_top_.thread_id_; }
+ void set_thread_id(int id) { thread_local_top_.thread_id_ = id; }
+
+ // Interface to pending exception.
+ MaybeObject* pending_exception() {
+ ASSERT(has_pending_exception());
+ return thread_local_top_.pending_exception_;
+ }
+ bool external_caught_exception() {
+ return thread_local_top_.external_caught_exception_;
+ }
+ void set_pending_exception(MaybeObject* exception) {
+ thread_local_top_.pending_exception_ = exception;
+ }
+ void clear_pending_exception() {
+ thread_local_top_.pending_exception_ = heap_.the_hole_value();
+ }
+ MaybeObject** pending_exception_address() {
+ return &thread_local_top_.pending_exception_;
+ }
+ bool has_pending_exception() {
+ return !thread_local_top_.pending_exception_->IsTheHole();
+ }
+ void clear_pending_message() {
+ thread_local_top_.has_pending_message_ = false;
+ thread_local_top_.pending_message_ = NULL;
+ thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
+ thread_local_top_.pending_message_script_ = NULL;
+ }
+ v8::TryCatch* try_catch_handler() {
+ return thread_local_top_.TryCatchHandler();
+ }
+ Address try_catch_handler_address() {
+ return thread_local_top_.try_catch_handler_address();
+ }
+ bool* external_caught_exception_address() {
+ return &thread_local_top_.external_caught_exception_;
+ }
+
+ MaybeObject** scheduled_exception_address() {
+ return &thread_local_top_.scheduled_exception_;
+ }
+ MaybeObject* scheduled_exception() {
+ ASSERT(has_scheduled_exception());
+ return thread_local_top_.scheduled_exception_;
+ }
+ bool has_scheduled_exception() {
+ return !thread_local_top_.scheduled_exception_->IsTheHole();
+ }
+ void clear_scheduled_exception() {
+ thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
+ }
+
+ bool IsExternallyCaught();
+
+ bool is_catchable_by_javascript(MaybeObject* exception) {
+ return (exception != Failure::OutOfMemoryException()) &&
+ (exception != heap()->termination_exception());
+ }
+
+ // JS execution stack (see frames.h).
+ static Address c_entry_fp(ThreadLocalTop* thread) {
+ return thread->c_entry_fp_;
+ }
+ static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
+
+ inline Address* c_entry_fp_address() {
+ return &thread_local_top_.c_entry_fp_;
+ }
+ inline Address* handler_address() { return &thread_local_top_.handler_; }
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Bottom JS entry (see StackTracer::Trace in log.cc).
+ static Address js_entry_sp(ThreadLocalTop* thread) {
+ return thread->js_entry_sp_;
+ }
+ inline Address* js_entry_sp_address() {
+ return &thread_local_top_.js_entry_sp_;
+ }
+#endif
+
+ // Generated code scratch locations.
+ void* formal_count_address() { return &thread_local_top_.formal_count_; }
+
+ // Returns the global object of the current context. It could be
+ // a builtin object, or a js global object.
+ Handle<GlobalObject> global() {
+ return Handle<GlobalObject>(context()->global());
+ }
+
+ // Returns the global proxy object of the current context.
+ Object* global_proxy() {
+ return context()->global_proxy();
+ }
+
+ Handle<JSBuiltinsObject> js_builtins_object() {
+ return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
+ }
+
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
+ void FreeThreadResources() { thread_local_top_.Free(); }
+
+ // This method is called by the api after operations that may throw
+ // exceptions. If an exception was thrown and not handled by an external
+ // handler the exception is scheduled to be rethrown when we return to running
+ // JavaScript code. If an exception is scheduled true is returned.
+ bool OptionalRescheduleException(bool is_bottom_call);
+
+ void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options);
+
+ // Tells whether the current context has experienced an out of memory
+ // exception.
+ bool is_out_of_memory();
+
+ void PrintCurrentStackTrace(FILE* out);
+ void PrintStackTrace(FILE* out, char* thread_data);
+ void PrintStack(StringStream* accumulator);
+ void PrintStack();
+ Handle<String> StackTraceString();
+ Handle<JSArray> CaptureCurrentStackTrace(
+ int frame_limit,
+ StackTrace::StackTraceOptions options);
+
+  // Returns whether the top context may access the given global object. If
+ // the result is false, the pending exception is guaranteed to be
+ // set.
+ bool MayNamedAccess(JSObject* receiver,
+ Object* key,
+ v8::AccessType type);
+ bool MayIndexedAccess(JSObject* receiver,
+ uint32_t index,
+ v8::AccessType type);
+
+ void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
+ void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
+
+ // Exception throwing support. The caller should use the result
+ // of Throw() as its return value.
+ Failure* Throw(Object* exception, MessageLocation* location = NULL);
+ // Re-throw an exception. This involves no error reporting since
+ // error reporting was handled when the exception was thrown
+ // originally.
+ Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
+ void ScheduleThrow(Object* exception);
+ void ReportPendingMessages();
+ Failure* ThrowIllegalOperation();
+
+ // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
+ Failure* PromoteScheduledException();
+ void DoThrow(MaybeObject* exception,
+ MessageLocation* location,
+ const char* message);
+ // Checks if exception should be reported and finds out if it's
+ // caught externally.
+ bool ShouldReportException(bool* can_be_caught_externally,
+ bool catchable_by_javascript);
+
+ // Attempts to compute the current source location, storing the
+ // result in the target out parameter.
+ void ComputeLocation(MessageLocation* target);
+
+ // Override command line flag.
+ void TraceException(bool flag);
+
+ // Out of resource exception helpers.
+ Failure* StackOverflow();
+ Failure* TerminateExecution();
+
+ // Administration
+ void Iterate(ObjectVisitor* v);
+ void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
+ char* Iterate(ObjectVisitor* v, char* t);
+ void IterateThread(ThreadVisitor* v);
+ void IterateThread(ThreadVisitor* v, char* t);
+
+ // Returns the current global context.
+ Handle<Context> global_context();
+
+ // Returns the global context of the calling JavaScript code. That
+ // is, the global context of the top-most JavaScript frame.
+ Handle<Context> GetCallingGlobalContext();
+
+ void RegisterTryCatchHandler(v8::TryCatch* that);
+ void UnregisterTryCatchHandler(v8::TryCatch* that);
+
+ char* ArchiveThread(char* to);
+ char* RestoreThread(char* from);
+
+ static const char* const kStackOverflowMessage;
+
+ static const int kUC16AlphabetSize = 256; // See StringSearchBase.
+ static const int kBMMaxShift = 250; // See StringSearchBase.
+
+ // Accessors.
+#define GLOBAL_ACCESSOR(type, name, initialvalue) \
+ inline type name() const { \
+ ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ return name##_; \
+ } \
+ inline void set_##name(type value) { \
+ ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ name##_ = value; \
+ }
+ ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
+#undef GLOBAL_ACCESSOR
+
+#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
+ inline type* name() { \
+ ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ return &(name##_)[0]; \
+ }
+ ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
+#undef GLOBAL_ARRAY_ACCESSOR
+
+#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> name() { \
+ return Handle<type>(context()->global_context()->name()); \
+ }
+ GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
+#undef GLOBAL_CONTEXT_FIELD_ACCESSOR
+
+ Bootstrapper* bootstrapper() { return bootstrapper_; }
+ Counters* counters() { return counters_; }
+ // TODO(isolates): Having CPU features per isolate is probably too
+ // flexible. We only really need to have the set of currently
+ // enabled features for asserts in DEBUG builds.
+ CpuFeatures* cpu_features() { return cpu_features_; }
+ CodeRange* code_range() { return code_range_; }
+ RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
+ CompilationCache* compilation_cache() { return compilation_cache_; }
+ Logger* logger() { return logger_; }
+ StackGuard* stack_guard() { return &stack_guard_; }
+ Heap* heap() { return &heap_; }
+ StatsTable* stats_table() { return stats_table_; }
+ StubCache* stub_cache() { return stub_cache_; }
+ DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
+ ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
+
+ TranscendentalCache* transcendental_cache() const {
+ return transcendental_cache_;
+ }
+
+ MemoryAllocator* memory_allocator() {
+ return memory_allocator_;
+ }
+
+ KeyedLookupCache* keyed_lookup_cache() {
+ return keyed_lookup_cache_;
+ }
+
+ ContextSlotCache* context_slot_cache() {
+ return context_slot_cache_;
+ }
+
+ DescriptorLookupCache* descriptor_lookup_cache() {
+ return descriptor_lookup_cache_;
+ }
+
+ v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
+ return &handle_scope_data_;
+ }
+ HandleScopeImplementer* handle_scope_implementer() {
+ ASSERT(handle_scope_implementer_);
+ return handle_scope_implementer_;
+ }
+ Zone* zone() { return &zone_; }
+
+ ScannerConstants* scanner_constants() {
+ return scanner_constants_;
+ }
+
+ PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
+
+ StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
+
+ GlobalHandles* global_handles() { return global_handles_; }
+
+ ThreadManager* thread_manager() { return thread_manager_; }
+
+ ContextSwitcher* context_switcher() { return context_switcher_; }
+
+ void set_context_switcher(ContextSwitcher* switcher) {
+ context_switcher_ = switcher;
+ }
+
+ StringTracker* string_tracker() { return string_tracker_; }
+
+ unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
+ return &jsregexp_uncanonicalize_;
+ }
+
+ unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
+ return &jsregexp_canonrange_;
+ }
+
+ StringInputBuffer* objects_string_compare_buffer_a() {
+ return &objects_string_compare_buffer_a_;
+ }
+
+ StringInputBuffer* objects_string_compare_buffer_b() {
+ return &objects_string_compare_buffer_b_;
+ }
+
+ StaticResource<StringInputBuffer>* objects_string_input_buffer() {
+ return &objects_string_input_buffer_;
+ }
+
+ AstSentinels* ast_sentinels() { return ast_sentinels_; }
+
+ RuntimeState* runtime_state() { return &runtime_state_; }
+
+ StringInputBuffer* liveedit_compare_substrings_buf1() {
+ return &liveedit_compare_substrings_buf1_;
+ }
+
+ StringInputBuffer* liveedit_compare_substrings_buf2() {
+ return &liveedit_compare_substrings_buf2_;
+ }
+
+ StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
+ return &compiler_safe_string_input_buffer_;
+ }
+
+ Builtins* builtins() { return &builtins_; }
+
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>*
+ regexp_macro_assembler_canonicalize() {
+    return &regexp_macro_assembler_canonicalize_;
+ }
+
+ RegExpStack* regexp_stack() { return regexp_stack_; }
+
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>*
+ interp_canonicalize_mapping() {
+ return &interp_canonicalize_mapping_;
+ }
+
+ ZoneObjectList* frame_element_constant_list() {
+ return &frame_element_constant_list_;
+ }
+
+ ZoneObjectList* result_constant_list() {
+ return &result_constant_list_;
+ }
+
+ void* PreallocatedStorageNew(size_t size);
+ void PreallocatedStorageDelete(void* p);
+ void PreallocatedStorageInit(size_t size);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debugger* debugger() { return debugger_; }
+ Debug* debug() { return debug_; }
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ ProducerHeapProfile* producer_heap_profile() {
+ return producer_heap_profile_;
+ }
+#endif
+
+#ifdef DEBUG
+ HistogramInfo* heap_histograms() { return heap_histograms_; }
+
+ JSObject::SpillInformation* js_spill_information() {
+ return &js_spill_information_;
+ }
+
+ int* code_kind_statistics() { return code_kind_statistics_; }
+#endif
+
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
+ bool simulator_initialized() { return simulator_initialized_; }
+ void set_simulator_initialized(bool initialized) {
+ simulator_initialized_ = initialized;
+ }
+
+ HashMap* simulator_i_cache() { return simulator_i_cache_; }
+ void set_simulator_i_cache(HashMap* hash_map) {
+ simulator_i_cache_ = hash_map;
+ }
+
+ Redirection* simulator_redirection() {
+ return simulator_redirection_;
+ }
+ void set_simulator_redirection(Redirection* redirection) {
+ simulator_redirection_ = redirection;
+ }
+#endif
+
+ Factory* factory() { return reinterpret_cast<Factory*>(this); }
+
+ // SerializerDeserializer state.
+ static const int kPartialSnapshotCacheCapacity = 1400;
+
+ static const int kJSRegexpStaticOffsetsVectorSize = 50;
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Address external_callback() {
+ return thread_local_top_.external_callback_;
+ }
+ void set_external_callback(Address callback) {
+ thread_local_top_.external_callback_ = callback;
+ }
+#endif
+
+#ifdef ENABLE_VMSTATE_TRACKING
+ StateTag current_vm_state() {
+ return thread_local_top_.current_vm_state_;
+ }
+
+ void SetCurrentVMState(StateTag state) {
+ if (RuntimeProfiler::IsEnabled()) {
+ if (state == JS) {
+ // JS or non-JS -> JS transition.
+ RuntimeProfiler::IsolateEnteredJS(this);
+ } else if (thread_local_top_.current_vm_state_ == JS) {
+ // JS -> non-JS transition.
+ ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
+ RuntimeProfiler::IsolateExitedJS(this);
+ }
+ }
+ thread_local_top_.current_vm_state_ = state;
+ }
+#endif
+
+ void ResetEagerOptimizingData();
+
+ private:
+ Isolate();
+
+ // The per-process lock should be acquired before the ThreadDataTable is
+ // modified.
+ class ThreadDataTable {
+ public:
+ ThreadDataTable();
+ ~ThreadDataTable();
+
+ PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
+ void Insert(PerIsolateThreadData* data);
+ void Remove(Isolate* isolate, ThreadId thread_id);
+ void Remove(PerIsolateThreadData* data);
+
+ private:
+ PerIsolateThreadData* list_;
+ };
+
+ // These items form a stack synchronously with threads Enter'ing and Exit'ing
+ // the Isolate. The top of the stack points to a thread which is currently
+ // running the Isolate. When the stack is empty, the Isolate is considered
+ // not entered by any thread and can be Disposed.
+  // If the same thread enters the Isolate more than once, the entry_count_
+  // is incremented rather than a new item being pushed to the stack.
+ class EntryStackItem {
+ public:
+ EntryStackItem(PerIsolateThreadData* previous_thread_data,
+ Isolate* previous_isolate,
+ EntryStackItem* previous_item)
+ : entry_count(1),
+ previous_thread_data(previous_thread_data),
+ previous_isolate(previous_isolate),
+ previous_item(previous_item) { }
+
+ int entry_count;
+ PerIsolateThreadData* previous_thread_data;
+ Isolate* previous_isolate;
+ EntryStackItem* previous_item;
+
+ DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
+ };
+
+ // This mutex protects highest_thread_id_, thread_data_table_ and
+ // default_isolate_.
+ static Mutex* process_wide_mutex_;
+
+ static Thread::LocalStorageKey per_isolate_thread_data_key_;
+ static Thread::LocalStorageKey isolate_key_;
+ static Thread::LocalStorageKey thread_id_key_;
+ static Isolate* default_isolate_;
+ static ThreadDataTable* thread_data_table_;
+ static ThreadId highest_thread_id_;
+
+ bool PreInit();
+
+ void Deinit();
+
+ static void SetIsolateThreadLocals(Isolate* isolate,
+ PerIsolateThreadData* data);
+
+ enum State {
+ UNINITIALIZED, // Some components may not have been allocated.
+ PREINITIALIZED, // Components have been allocated but not initialized.
+ INITIALIZED // All components are fully initialized.
+ };
+
+ State state_;
+ EntryStackItem* entry_stack_;
+
+ // Allocate and insert PerIsolateThreadData into the ThreadDataTable
+ // (regardless of whether such data already exists).
+ PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
+
+ // Find the PerThread for this particular (isolate, thread) combination.
+ // If one does not yet exist, allocate a new one.
+ PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
+
+ // PreInits and returns a default isolate. Needed when a new thread tries
+ // to create a Locker for the first time (the lock itself is in the isolate).
+ static Isolate* GetDefaultIsolateForLocking();
+
+ // Initializes the current thread to run this Isolate.
+ // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
+  // at the same time; this should be prevented using external locking.
+ void Enter();
+
+  // Exits the current thread. The previously entered Isolate is restored
+ // for the thread.
+ // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
+  // at the same time; this should be prevented using external locking.
+ void Exit();
+
+ void PreallocatedMemoryThreadStart();
+ void PreallocatedMemoryThreadStop();
+ void InitializeThreadLocal();
+
+ void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
+ void MarkCompactPrologue(bool is_compacting,
+ ThreadLocalTop* archived_thread_data);
+ void MarkCompactEpilogue(bool is_compacting,
+ ThreadLocalTop* archived_thread_data);
+
+ void FillCache();
+
+ int stack_trace_nesting_level_;
+ StringStream* incomplete_message_;
+ // The preallocated memory thread singleton.
+ PreallocatedMemoryThread* preallocated_memory_thread_;
+ Address isolate_addresses_[k_isolate_address_count + 1]; // NOLINT
+ NoAllocationStringAllocator* preallocated_message_space_;
+
+ Bootstrapper* bootstrapper_;
+ RuntimeProfiler* runtime_profiler_;
+ CompilationCache* compilation_cache_;
+ Counters* counters_;
+ CpuFeatures* cpu_features_;
+ CodeRange* code_range_;
+ Mutex* break_access_;
+ Heap heap_;
+ Logger* logger_;
+ StackGuard stack_guard_;
+ StatsTable* stats_table_;
+ StubCache* stub_cache_;
+ DeoptimizerData* deoptimizer_data_;
+ ThreadLocalTop thread_local_top_;
+ bool capture_stack_trace_for_uncaught_exceptions_;
+ int stack_trace_for_uncaught_exceptions_frame_limit_;
+ StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
+ TranscendentalCache* transcendental_cache_;
+ MemoryAllocator* memory_allocator_;
+ KeyedLookupCache* keyed_lookup_cache_;
+ ContextSlotCache* context_slot_cache_;
+ DescriptorLookupCache* descriptor_lookup_cache_;
+ v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+ HandleScopeImplementer* handle_scope_implementer_;
+ ScannerConstants* scanner_constants_;
+ Zone zone_;
+ PreallocatedStorage in_use_list_;
+ PreallocatedStorage free_list_;
+ bool preallocated_storage_preallocated_;
+ PcToCodeCache* pc_to_code_cache_;
+ StringInputBuffer* write_input_buffer_;
+ GlobalHandles* global_handles_;
+ ContextSwitcher* context_switcher_;
+ ThreadManager* thread_manager_;
+ AstSentinels* ast_sentinels_;
+ RuntimeState runtime_state_;
+ StringInputBuffer liveedit_compare_substrings_buf1_;
+ StringInputBuffer liveedit_compare_substrings_buf2_;
+ StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
+ Builtins builtins_;
+ StringTracker* string_tracker_;
+ unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
+ unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
+ StringInputBuffer objects_string_compare_buffer_a_;
+ StringInputBuffer objects_string_compare_buffer_b_;
+ StaticResource<StringInputBuffer> objects_string_input_buffer_;
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>
+ regexp_macro_assembler_canonicalize_;
+ RegExpStack* regexp_stack_;
+ unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+ ZoneObjectList frame_element_constant_list_;
+ ZoneObjectList result_constant_list_;
+
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
+ bool simulator_initialized_;
+ HashMap* simulator_i_cache_;
+ Redirection* simulator_redirection_;
+#endif
+
+#ifdef DEBUG
+ // A static array of histogram info for each type.
+ HistogramInfo heap_histograms_[LAST_TYPE + 1];
+ JSObject::SpillInformation js_spill_information_;
+ int code_kind_statistics_[Code::NUMBER_OF_KINDS];
+#endif
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debugger* debugger_;
+ Debug* debug_;
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ ProducerHeapProfile* producer_heap_profile_;
+#endif
+
+#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
+ type name##_;
+ ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
+#undef GLOBAL_BACKING_STORE
+
+#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
+ type name##_[length];
+ ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
+#undef GLOBAL_ARRAY_BACKING_STORE
+
+#ifdef DEBUG
+ // This class is huge and has a number of fields controlled by
+ // preprocessor defines. Make sure the offsets of these fields agree
+ // between compilation units.
+#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
+ static const intptr_t name##_debug_offset_;
+ ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
+ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
+#undef ISOLATE_FIELD_OFFSET
+#endif
+
+ friend class ExecutionAccess;
+ friend class IsolateInitializer;
+ friend class v8::Isolate;
+ friend class v8::Locker;
+
+ DISALLOW_COPY_AND_ASSIGN(Isolate);
+};
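+
+
+// A usage sketch (illustrative): V8-internal code now reaches per-isolate
+// state through the current isolate instead of the former static classes:
+//
+//   Isolate* isolate = Isolate::Current();
+//   Heap* heap = isolate->heap();
+//   Factory* factory = isolate->factory();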
+
+
+// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
+// class as a workaround for a bug in the generated code found with these
+// versions of GCC. See V8 issue 122 for details.
+class SaveContext BASE_EMBEDDED {
+ public:
+ explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+ if (isolate->context() != NULL) {
+ context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+ dummy_ = Handle<Context>(isolate->context());
+#endif
+ }
+ isolate->set_save_context(this);
+
+ // If there is no JS frame under the current C frame, use the value 0.
+ JavaScriptFrameIterator it;
+ js_sp_ = it.done() ? 0 : it.frame()->sp();
+ }
+
+ ~SaveContext() {
+ if (context_.is_null()) {
+ Isolate* isolate = Isolate::Current();
+ isolate->set_context(NULL);
+ isolate->set_save_context(prev_);
+ } else {
+ Isolate* isolate = context_->GetIsolate();
+ isolate->set_context(*context_);
+ isolate->set_save_context(prev_);
+ }
+ }
+
+ Handle<Context> context() { return context_; }
+ SaveContext* prev() { return prev_; }
+
+ // Returns true if this save context is below a given JavaScript frame.
+ bool below(JavaScriptFrame* frame) {
+ return (js_sp_ == 0) || (frame->sp() < js_sp_);
+ }
+
+ private:
+ Handle<Context> context_;
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+ Handle<Context> dummy_;
+#endif
+ SaveContext* prev_;
+ Address js_sp_; // The top JS frame's sp when saving context.
+};
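+
+
+// A usage sketch (illustrative; other_context is hypothetical): SaveContext
+// is a scope guard - the context captured at construction is restored on
+// destruction:
+//
+//   { SaveContext save(isolate);
+//     isolate->set_context(*other_context);
+//     ...  // run with other_context installed
+//   }  // ~SaveContext restores the previous context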
+
+
+class AssertNoContextChange BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ AssertNoContextChange() :
+ scope_(Isolate::Current()),
+ context_(Isolate::Current()->context(), Isolate::Current()) {
+ }
+
+ ~AssertNoContextChange() {
+ ASSERT(Isolate::Current()->context() == *context_);
+ }
+
+ private:
+ HandleScope scope_;
+ Handle<Context> context_;
+#else
+ public:
+ AssertNoContextChange() { }
+#endif
+};
+
+
+class ExecutionAccess BASE_EMBEDDED {
+ public:
+ explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
+ Lock(isolate);
+ }
+ ~ExecutionAccess() { Unlock(isolate_); }
+
+ static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+
+ static bool TryLock(Isolate* isolate) {
+ return isolate->break_access_->TryLock();
+ }
+
+ private:
+ Isolate* isolate_;
+};
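+
+
+// A usage sketch (illustrative): ExecutionAccess scopes break_access_, as
+// already done in Isolate::Init:
+//
+//   ExecutionAccess lock(isolate);
+//   stack_guard->InitThread(lock);  // unlocked when the scope exits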
+
+
+// Support for checking for stack-overflows in C++ code.
+class StackLimitCheck BASE_EMBEDDED {
+ public:
+ explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
+
+ bool HasOverflowed() const {
+ StackGuard* stack_guard = isolate_->stack_guard();
+    // The stack has overflowed in C++ code only if the stack pointer exceeds
+    // the C++ stack guard and the limits are not set to interrupt values.
+    // TODO(214): Stack overflows are ignored if an interrupt is pending. This
+ // code should probably always use the initial C++ limit.
+ return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
+ stack_guard->IsStackOverflow();
+ }
+ private:
+ Isolate* isolate_;
+};
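+
+
+// A usage sketch (illustrative) for guarding C++ recursion:
+//
+//   StackLimitCheck check(isolate);
+//   if (check.HasOverflowed()) return isolate->StackOverflow();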
+
+
+// Support for temporarily postponing interrupts. When the outermost
+// postpone scope is left the interrupts will be re-enabled and any
+// interrupts that occurred while in the scope will be taken into
+// account.
+class PostponeInterruptsScope BASE_EMBEDDED {
+ public:
+ explicit PostponeInterruptsScope(Isolate* isolate)
+ : stack_guard_(isolate->stack_guard()) {
+ stack_guard_->thread_local_.postpone_interrupts_nesting_++;
+ stack_guard_->DisableInterrupts();
+ }
+
+ ~PostponeInterruptsScope() {
+ if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
+ stack_guard_->EnableInterrupts();
+ }
+ }
+ private:
+ StackGuard* stack_guard_;
+};
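+
+
+// A usage sketch (illustrative): these scopes nest, so interrupts are
+// re-enabled only when the outermost scope exits:
+//
+//   { PostponeInterruptsScope postpone(isolate);
+//     ...  // interrupts are recorded here but not taken
+//   }  // pending interrupts become deliverable again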
+
+
+// Temporary macros for accessing current isolate and its subobjects.
+// They provide better readability, especially when used a lot in the code.
+#define HEAP (v8::internal::Isolate::Current()->heap())
+#define FACTORY (v8::internal::Isolate::Current()->factory())
+#define ISOLATE (v8::internal::Isolate::Current())
+#define ZONE (v8::internal::Isolate::Current()->zone())
+#define LOGGER (v8::internal::Isolate::Current()->logger())
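+// For example, FACTORY->null_value() or HEAP->true_value() (as used in
+// Context::mark_out_of_memory below).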
+
+
+// Tells whether the global context is marked with out of memory.
+inline bool Context::has_out_of_memory() {
+ return global_context()->out_of_memory()->IsTrue();
+}
+
+
+// Mark the global context with out of memory.
+inline void Context::mark_out_of_memory() {
+ global_context()->set_out_of_memory(HEAP->true_value());
+}
+
+
+// Temporary macro to be used to flag definitions that are indeed static
+// and not per-isolate. (It would be great to be able to grep for [static]!)
+#define RLYSTC static
+
+
+// Temporary macro to be used to flag classes that should be static.
+#define STATIC_CLASS class
+
+
+// Temporary macro to be used to flag classes that are completely converted
+// to be isolate-friendly. Their mix of static/nonstatic methods/fields is
+// correct.
+#define ISOLATED_CLASS class
+
+} } // namespace v8::internal
+
+// TODO(isolates): Get rid of these -inl.h includes and place them only where
+// they're needed.
+#include "allocation-inl.h"
+#include "zone-inl.h"
+#include "frames-inl.h"
+
+#endif // V8_ISOLATE_H_
#include "platform.h"
#include "string-search.h"
#include "runtime.h"
-#include "top.h"
#include "compilation-cache.h"
#include "string-stream.h"
#include "parser.h"
namespace v8 {
namespace internal {
-
Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
Handle<String> pattern,
Handle<String> flags,
Handle<String> pattern,
Handle<String> error_text,
const char* message) {
- Handle<FixedArray> elements = Factory::NewFixedArray(2);
+ Isolate* isolate = re->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(2);
elements->set(0, *pattern);
elements->set(1, *error_text);
- Handle<JSArray> array = Factory::NewJSArrayWithElements(elements);
- Handle<Object> regexp_err = Factory::NewSyntaxError(message, array);
- Top::Throw(*regexp_err);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> regexp_err = factory->NewSyntaxError(message, array);
+ isolate->Throw(*regexp_err);
}
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
Handle<String> flag_str) {
+ Isolate* isolate = re->GetIsolate();
JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
- Handle<FixedArray> cached = CompilationCache::LookupRegExp(pattern, flags);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
bool in_cache = !cached.is_null();
- LOG(RegExpCompileEvent(re, in_cache));
+ LOG(isolate, RegExpCompileEvent(re, in_cache));
Handle<Object> result;
if (in_cache) {
}
pattern = FlattenGetString(pattern);
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
- FlatStringReader reader(pattern);
+ FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
&parse_result)) {
// Throw an exception if we fail to parse the pattern.
parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
- Handle<String> atom_string = Factory::NewStringFromTwoByte(atom_pattern);
+ Handle<String> atom_string =
+ isolate->factory()->NewStringFromTwoByte(atom_pattern);
AtomCompile(re, pattern, flags, atom_string);
} else {
IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
// Compilation succeeded so the data is set on the regexp
// and we can store it in the cache.
Handle<FixedArray> data(FixedArray::cast(re->data()));
- CompilationCache::PutRegExp(pattern, flags, data);
+ compilation_cache->PutRegExp(pattern, flags, data);
return re;
}
case JSRegExp::IRREGEXP: {
Handle<Object> result =
IrregexpExec(regexp, subject, index, last_match_info);
- ASSERT(!result.is_null() || Top::has_pending_exception());
+ ASSERT(!result.is_null() || Isolate::Current()->has_pending_exception());
return result;
}
default:
Handle<String> pattern,
JSRegExp::Flags flags,
Handle<String> match_pattern) {
- Factory::SetRegExpAtomData(re,
- JSRegExp::ATOM,
- pattern,
- flags,
- match_pattern);
+ re->GetIsolate()->factory()->SetRegExpAtomData(re,
+ JSRegExp::ATOM,
+ pattern,
+ flags,
+ match_pattern);
}
Handle<String> subject,
int index,
Handle<JSArray> last_match_info) {
+ Isolate* isolate = re->GetIsolate();
+
ASSERT(0 <= index);
ASSERT(index <= subject->length());
int needle_len = needle->length();
if (needle_len != 0) {
- if (index + needle_len > subject->length()) return Factory::null_value();
+ if (index + needle_len > subject->length())
+ return isolate->factory()->null_value();
+
// dispatch on type of strings
index = (needle->IsAsciiRepresentation()
? (seq_sub->IsAsciiRepresentation()
- ? SearchString(seq_sub->ToAsciiVector(),
+ ? SearchString(isolate,
+ seq_sub->ToAsciiVector(),
needle->ToAsciiVector(),
index)
- : SearchString(seq_sub->ToUC16Vector(),
+ : SearchString(isolate,
+ seq_sub->ToUC16Vector(),
needle->ToAsciiVector(),
index))
: (seq_sub->IsAsciiRepresentation()
- ? SearchString(seq_sub->ToAsciiVector(),
+ ? SearchString(isolate,
+ seq_sub->ToAsciiVector(),
needle->ToUC16Vector(),
index)
- : SearchString(seq_sub->ToUC16Vector(),
+ : SearchString(isolate,
+ seq_sub->ToUC16Vector(),
needle->ToUC16Vector(),
index)));
- if (index == -1) return Factory::null_value();
+    if (index == -1) return isolate->factory()->null_value();
}
ASSERT(last_match_info->HasFastElements());
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
// Compile the RegExp.
+ Isolate* isolate = re->GetIsolate();
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate);
Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
if (entry->IsJSObject()) {
// If it's a JSObject, a previous compilation failed and threw this object.
// Re-throw the object without trying again.
- Top::Throw(entry);
+ isolate->Throw(entry);
return false;
}
ASSERT(entry->IsTheHole());
}
RegExpCompileData compile_data;
- FlatStringReader reader(pattern);
+ FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
&compile_data)) {
// Throw an exception if we fail to parse the pattern.
is_ascii);
if (result.error_message != NULL) {
// Unable to compile regexp.
- Handle<FixedArray> elements = Factory::NewFixedArray(2);
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(2);
elements->set(0, *pattern);
Handle<String> error_message =
- Factory::NewStringFromUtf8(CStrVector(result.error_message));
+ factory->NewStringFromUtf8(CStrVector(result.error_message));
elements->set(1, *error_message);
- Handle<JSArray> array = Factory::NewJSArrayWithElements(elements);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
Handle<Object> regexp_err =
- Factory::NewSyntaxError("malformed_regexp", array);
- Top::Throw(*regexp_err);
+ factory->NewSyntaxError("malformed_regexp", array);
+ isolate->Throw(*regexp_err);
re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err);
return false;
}
JSRegExp::Flags flags,
int capture_count) {
// Initialize compiled code entries to null.
- Factory::SetRegExpIrregexpData(re,
- JSRegExp::IRREGEXP,
- pattern,
- flags,
- capture_count);
+ re->GetIsolate()->factory()->SetRegExpIrregexpData(re,
+ JSRegExp::IRREGEXP,
+ pattern,
+ flags,
+ capture_count);
}
Handle<String> subject,
int index,
Vector<int> output) {
- Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
+ Isolate* isolate = regexp->GetIsolate();
+
+ Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
ASSERT(index >= 0);
ASSERT(index <= subject->length());
// A flat ASCII string might have a two-byte first part.
if (subject->IsConsString()) {
- subject = Handle<String>(ConsString::cast(*subject)->first());
+ subject = Handle<String>(ConsString::cast(*subject)->first(), isolate);
}
#ifndef V8_INTERPRETED_REGEXP
- ASSERT(output.length() >=
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+ ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
bool is_ascii = subject->IsAsciiRepresentation();
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii));
+ Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
NativeRegExpMacroAssembler::Result res =
NativeRegExpMacroAssembler::Match(code,
subject,
output.start(),
output.length(),
- index);
+ index,
+ isolate);
if (res != NativeRegExpMacroAssembler::RETRY) {
ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION ||
- Top::has_pending_exception());
+ isolate->has_pending_exception());
STATIC_ASSERT(
static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
STATIC_ASSERT(
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
register_vector[i] = -1;
}
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii));
+ Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
if (IrregexpInterpreter::Match(byte_codes,
subject,
int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
- ASSERT(Top::has_pending_exception());
+ ASSERT(Isolate::Current()->has_pending_exception());
return Handle<Object>::null();
}
return last_match_info;
}
if (res == RE_EXCEPTION) {
- ASSERT(Top::has_pending_exception());
+ ASSERT(Isolate::Current()->has_pending_exception());
return Handle<Object>::null();
}
ASSERT(res == RE_FAILURE);
- return Factory::null_value();
+ return Isolate::Current()->factory()->null_value();
}
}
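
(Caller-side sketch of the result protocol above; the variables are
hypothetical, the three-way outcome is as implemented here.)

Handle<Object> result =
    RegExpImpl::Exec(regexp, subject, 0, last_match_info);
if (result.is_null()) {
  // RE_EXCEPTION: an exception is already pending on the isolate.
} else if (result->IsNull()) {
  // RE_FAILURE: the regexp simply did not match.
} else {
  // RE_SUCCESS: result is the updated last_match_info array.
}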
-static unibrow::Mapping<unibrow::Ecma262UnCanonicalize> uncanonicalize;
-static unibrow::Mapping<unibrow::CanonicalizationRange> canonrange;
-
-
// Returns the number of characters in the equivalence class, omitting those
// that cannot occur in the source string because it is ASCII.
-static int GetCaseIndependentLetters(uc16 character,
+static int GetCaseIndependentLetters(Isolate* isolate,
+ uc16 character,
bool ascii_subject,
unibrow::uchar* letters) {
- int length = uncanonicalize.get(character, '\0', letters);
+ int length =
+ isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
// Unibrow returns 0 or 1 for characters where case independence is
// trivial.
if (length == 0) {
}
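
(A hedged illustration of the contract above, assuming an Isolate* isolate in
scope; the expected count follows from Unicode case folding, not from verified
output.)

unibrow::uchar letters[unibrow::Ecma262UnCanonicalize::kMaxWidth];
// For 'a' with an ASCII subject the equivalence class is {'a', 'A'},
// so a length of 2 would be expected here.
int length = GetCaseIndependentLetters(isolate, 'a', true, letters);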
-static inline bool EmitSimpleCharacter(RegExpCompiler* compiler,
+static inline bool EmitSimpleCharacter(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
// Only emits non-letters (things that don't have case). Only used for case
// independent matches.
-static inline bool EmitAtomNonLetter(RegExpCompiler* compiler,
+static inline bool EmitAtomNonLetter(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
bool ascii = compiler->ascii();
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(c, ascii, chars);
+ int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
if (length < 1) {
// This can't match. Must be an ASCII subject and a non-ASCII character.
// We do not need to do anything since the ASCII pass already handled this.
}
-typedef bool EmitCharacterFunction(RegExpCompiler* compiler,
+typedef bool EmitCharacterFunction(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
// Only emits letters (things that have case). Only used for case independent
// matches.
-static inline bool EmitAtomLetter(RegExpCompiler* compiler,
+static inline bool EmitAtomLetter(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
bool ascii = compiler->ascii();
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(c, ascii, chars);
+ int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
if (length <= 1) return false;
// We may not need to check against the end of the input string
// if this character lies before a character that matched.
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
+ Isolate* isolate = Isolate::Current();
ASSERT(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
}
if (compiler->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(c, compiler->ascii(), chars);
+ int length = GetCaseIndependentLetters(isolate, c, compiler->ascii(),
+ chars);
ASSERT(length != 0); // Can only happen if c > char_mask (see above).
if (length == 1) {
// This letter has no case equivalents, so it's nice and simple
Trace* trace,
bool first_element_checked,
int* checked_up_to) {
+ Isolate* isolate = Isolate::Current();
RegExpMacroAssembler* assembler = compiler->macro_assembler();
bool ascii = compiler->ascii();
Label* backtrack = trace->backtrack();
break;
}
if (emit_function != NULL) {
- bool bound_checked = emit_function(compiler,
+ bool bound_checked = emit_function(isolate,
+ compiler,
quarks[j],
backtrack,
cp_offset + j,
}
-static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
+static void AddUncanonicals(Isolate* isolate,
+ ZoneList<CharacterRange>* ranges,
int bottom,
int top);
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
bool is_ascii) {
+ Isolate* isolate = Isolate::Current();
uc16 bottom = from();
uc16 top = to();
if (is_ascii) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (top == bottom) {
// If this is a singleton we just expand the one character.
- int length = uncanonicalize.get(bottom, '\0', chars);
+ int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
for (int i = 0; i < length; i++) {
uc32 chr = chars[i];
if (chr != bottom) {
unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int pos = bottom;
while (pos < top) {
- int length = canonrange.get(pos, '\0', range);
+ int length = isolate->jsregexp_canonrange()->get(pos, '\0', range);
uc16 block_end;
if (length == 0) {
block_end = pos;
block_end = range[0];
}
int end = (block_end > top) ? top : block_end;
- length = uncanonicalize.get(block_end, '\0', range);
+ length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0', range);
for (int i = 0; i < length; i++) {
uc32 c = range[i];
uc16 range_from = c - (block_end - pos);
}
-static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
+static void AddUncanonicals(Isolate* isolate,
+ ZoneList<CharacterRange>* ranges,
int bottom,
int top) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
// case mappings.
for (int i = 0; i < boundary_count; i++) {
if (bottom < boundaries[i] && top >= boundaries[i]) {
- AddUncanonicals(ranges, bottom, boundaries[i] - 1);
- AddUncanonicals(ranges, boundaries[i], top);
+ AddUncanonicals(isolate, ranges, bottom, boundaries[i] - 1);
+ AddUncanonicals(isolate, ranges, boundaries[i], top);
return;
}
}
#ifdef DEBUG
for (int j = bottom; j <= top; j++) {
unsigned current_char = j;
- int length = uncanonicalize.get(current_char, '\0', chars);
+ int length = isolate->jsregexp_uncanonicalize()->get(current_char,
+ '\0', chars);
for (int k = 0; k < length; k++) {
ASSERT(chars[k] == current_char);
}
// Step through the range finding equivalent characters.
ZoneList<unibrow::uchar> *characters = new ZoneList<unibrow::uchar>(100);
for (int i = bottom; i <= top; i++) {
- int length = uncanonicalize.get(i, '\0', chars);
+ int length = isolate->jsregexp_uncanonicalize()->get(i, '\0', chars);
for (int j = 0; j < length; j++) {
uc32 chr = chars[j];
if (chr != i && (chr < bottom || chr > top)) {
void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check;
+ StackLimitCheck check(Isolate::Current());
if (check.HasOverflowed()) {
fail("Stack overflow");
return;
}
-int OffsetsVector::static_offsets_vector_[
- OffsetsVector::kStaticOffsetsVectorSize];
-
} } // namespace v8::internal
struct CompilationResult {
explicit CompilationResult(const char* error_message)
: error_message(error_message),
- code(Heap::the_hole_value()),
+ code(HEAP->the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
: error_message(NULL),
public:
inline OffsetsVector(int num_registers)
: offsets_vector_length_(num_registers) {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
vector_ = NewArray<int>(offsets_vector_length_);
} else {
- vector_ = static_offsets_vector_;
+ vector_ = Isolate::Current()->jsregexp_static_offsets_vector();
}
}
inline ~OffsetsVector() {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
DeleteArray(vector_);
vector_ = NULL;
}
static const int kStaticOffsetsVectorSize = 50;
private:
- static Address static_offsets_vector_address() {
- return reinterpret_cast<Address>(&static_offsets_vector_);
+ static Address static_offsets_vector_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector());
}
int* vector_;
int offsets_vector_length_;
- static int static_offsets_vector_[kStaticOffsetsVectorSize];
friend class ExternalReference;
};
namespace internal {
-bool JumpTarget::compiling_deferred_code_ = false;
-
-
void JumpTarget::Jump(Result* arg) {
ASSERT(cgen()->has_valid_frame());
// the directionality of the block. Compute: an entry frame for the
// block.
- Counters::compute_entry_frame.Increment();
+ COUNTERS->compute_entry_frame()->Increment();
#ifdef DEBUG
- if (compiling_deferred_code_) {
+ if (Isolate::Current()->jump_target_compiling_deferred_code()) {
ASSERT(reaching_frames_.length() > 1);
VirtualFrame* frame = reaching_frames_[0];
bool all_identical = true;
DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current()->masm()),
+ : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
statement_position_(masm_->positions_recorder()->
current_statement_position()),
position_(masm_->positions_recorder()->current_position()),
- frame_state_(CodeGeneratorScope::Current()->frame()) {
+ frame_state_(CodeGeneratorScope::Current(Isolate::Current())->frame()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
- CodeGeneratorScope::Current()->AddDeferred(this);
+ CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
#endif
// after the call is the same as the frame before the call.
void Call();
- static void set_compiling_deferred_code(bool flag) {
- compiling_deferred_code_ = flag;
- }
-
protected:
// Directionality flag set at initialization time.
Directionality direction_;
void DoBind();
private:
- static bool compiling_deferred_code_;
-
// Add a virtual frame reaching this labeled block via a forward jump,
// and a corresponding merge code label.
void AddReachingFrame(VirtualFrame* frame);
namespace internal {
CodeGenerator* JumpTarget::cgen() {
- return CodeGeneratorScope::Current();
+ return CodeGeneratorScope::Current(Isolate::Current());
}
} } // namespace v8::internal
DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current()->masm()),
+ : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
statement_position_(masm_->positions_recorder()->
current_statement_position()),
position_(masm_->positions_recorder()->current_position()),
- frame_state_(*CodeGeneratorScope::Current()->frame()) {
+ frame_state_(*CodeGeneratorScope::Current(Isolate::Current())->frame()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
- CodeGeneratorScope::Current()->AddDeferred(this);
+ CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
#include "lithium-allocator-inl.h"
#include "hydrogen.h"
namespace internal {
-#define DEFINE_OPERAND_CACHE(name, type) \
- name name::cache[name::kNumCachedOperands]; \
- void name::SetupCache() { \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- }
+#define DEFINE_OPERAND_CACHE(name, type) \
+ name name::cache[name::kNumCachedOperands]; \
+ void name::SetupCache() { \
+ for (int i = 0; i < kNumCachedOperands; i++) { \
+ cache[i].ConvertTo(type, i); \
+ } \
+ } \
+ static bool name##_initialize() { \
+ name::SetupCache(); \
+ return true; \
+ } \
+ static bool name##_cache_initialized = name##_initialize();
DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
}
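
(The name##_cache_initialized flag exists only for its side effect: a
file-scope static initialized from a function call runs during static
initialization, before main(), which is what lets the hunk below delete the
explicit LAllocator::Setup(). A stripped-down sketch of the idiom, with
hypothetical names:)

static bool InitializeCachesOnce() {
  // ... populate the caches exactly once ...
  return true;
}
// Evaluated during static initialization, before main().
static bool caches_initialized = InitializeCachesOnce();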
-void LAllocator::Setup() {
- LConstantOperand::SetupCache();
- LStackSlot::SetupCache();
- LDoubleStackSlot::SetupCache();
- LRegister::SetupCache();
- LDoubleRegister::SetupCache();
-}
-
-
const char* LAllocator::RegisterName(int allocation_index) {
ASSERT(mode_ != NONE);
if (mode_ == GENERAL_REGISTERS) {
public:
LAllocator(int first_virtual_register, HGraph* graph);
- static void Setup();
static void TraceAlloc(const char* msg, ...);
// Lithium translation support.
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
#include "lithium.h"
namespace v8 {
}
-static bool CompareSubstrings(Handle<String> s1, int pos1,
+static bool CompareSubstrings(Isolate* isolate, Handle<String> s1, int pos1,
Handle<String> s2, int pos2, int len) {
- static StringInputBuffer buf1;
- static StringInputBuffer buf2;
+ StringInputBuffer& buf1 = *isolate->liveedit_compare_substrings_buf1();
+ StringInputBuffer& buf2 = *isolate->liveedit_compare_substrings_buf2();
buf1.Reset(*s1);
buf1.Seek(pos1);
buf2.Reset(*s2);
class CompareOutputArrayWriter {
public:
CompareOutputArrayWriter()
- : array_(Factory::NewJSArray(10)), current_size_(0) {}
+ : array_(FACTORY->NewJSArray(10)), current_size_(0) {}
Handle<JSArray> GetResult() {
return array_;
// Represents 2 strings as 2 arrays of lines.
class LineArrayCompareInput : public Comparator::Input {
public:
- LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
+ LineArrayCompareInput(Isolate* isolate, Handle<String> s1, Handle<String> s2,
LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
- : s1_(s1), s2_(s2), line_ends1_(line_ends1), line_ends2_(line_ends2) {
+ : isolate_(isolate), s1_(s1), s2_(s2), line_ends1_(line_ends1),
+ line_ends2_(line_ends2) {
}
int getLength1() {
return line_ends1_.length();
if (len1 != len2) {
return false;
}
- return CompareSubstrings(s1_, line_start1, s2_, line_start2, len1);
+ return CompareSubstrings(isolate_, s1_, line_start1, s2_, line_start2,
+ len1);
}
private:
+ Isolate* isolate_;
Handle<String> s1_;
Handle<String> s2_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends1(s1);
LineEndsWrapper line_ends2(s2);
- LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
+  LineArrayCompareInput input(
+      Isolate::Current(), s1, s2, line_ends1, line_ends2);
TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
Comparator::CalculateDifference(&input, &output);
}
-static void CompileScriptForTracker(Handle<Script> script) {
+static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
// TODO(635): support extensions.
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate);
// Build AST.
CompilationInfo info(script);
info.MarkAsGlobal();
if (ParserApi::Parse(&info)) {
// Compile the code.
- LiveEditFunctionTracker tracker(info.function());
+ LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (Compiler::MakeCodeForLiveEdit(&info)) {
ASSERT(!info.code().is_null());
tracker.RecordRootFunctionInfo(info.code());
} else {
- Top::StackOverflow();
+ info.isolate()->StackOverflow();
}
}
}
// Wraps any object into a OpaqueReference, that will hide the object
// from JavaScript.
static Handle<JSValue> WrapInJSValue(Object* object) {
- Handle<JSFunction> constructor = Top::opaque_reference_function();
+ Handle<JSFunction> constructor =
+ Isolate::Current()->opaque_reference_function();
Handle<JSValue> result =
- Handle<JSValue>::cast(Factory::NewJSObject(constructor));
+ Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
result->set_value(object);
return result;
}
class JSArrayBasedStruct {
public:
static S Create() {
- Handle<JSArray> array = Factory::NewJSArray(S::kSize_);
+ Handle<JSArray> array = FACTORY->NewJSArray(S::kSize_);
return S(array);
}
static S cast(Object* object) {
FunctionInfoListener() {
current_parent_index_ = -1;
len_ = 0;
- result_ = Factory::NewJSArray(10);
+ result_ = FACTORY->NewJSArray(10);
}
void FunctionStarted(FunctionLiteral* fun) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
+ info.SetFunctionCode(function_code, Handle<Object>(HEAP->null_value()));
}
// Saves full information about a function: its code, its scope info
Object* SerializeFunctionScope(Scope* scope) {
HandleScope handle_scope;
- Handle<JSArray> scope_info_list = Factory::NewJSArray(10);
+ Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10);
int scope_info_length = 0;
// Saves some description of scope. It stores name and indexes of
// scopes of this chain.
Scope* outer_scope = scope->outer_scope();
if (outer_scope == NULL) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
do {
ZoneList<Variable*> list(10);
}
SetElementNonStrict(scope_info_list,
scope_info_length,
- Handle<Object>(Heap::null_value()));
+ Handle<Object>(HEAP->null_value()));
scope_info_length++;
outer_scope = outer_scope->outer_scope();
};
-static FunctionInfoListener* active_function_info_listener = NULL;
-
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
+ Isolate* isolate = Isolate::Current();
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
FunctionInfoListener listener;
Handle<Object> original_source = Handle<Object>(script->source());
script->set_source(*source);
- active_function_info_listener = &listener;
- CompileScriptForTracker(script);
- active_function_info_listener = NULL;
+ isolate->set_active_function_info_listener(&listener);
+ CompileScriptForTracker(isolate, script);
+ isolate->set_active_function_info_listener(NULL);
script->set_source(*original_source);
return *(listener.GetResult());
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Code* original, Code* substitution) {
- ASSERT(!Heap::InNewSpace(substitution));
+ ASSERT(!HEAP->InNewSpace(substitution));
AssertNoAllocation no_allocations_please;
// so temporary replace the pointers with offset numbers
// in prologue/epilogue.
{
- Heap::IterateStrongRoots(&visitor, VISIT_ALL);
+ HEAP->IterateStrongRoots(&visitor, VISIT_ALL);
}
// Now iterate over all pointers of all objects, including code_target
DeoptimizationInputData* data =
DeoptimizationInputData::cast(function->code()->deoptimization_data());
- if (data == Heap::empty_fixed_array()) return false;
+ if (data == HEAP->empty_fixed_array()) return false;
FixedArray* literals = data->LiteralArray();
HandleScope scope;
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Top::ThrowIllegalOperation();
+ return Isolate::Current()->ThrowIllegalOperation();
}
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
if (shared_info->debug_info()->IsDebugInfo()) {
Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
Handle<Code> new_original_code =
- Factory::CopyCode(compile_info_wrapper.GetFunctionCode());
+ FACTORY->CopyCode(compile_info_wrapper.GetFunctionCode());
debug_info->set_original_code(*new_original_code);
}
shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
shared_info->set_construct_stub(
- Builtins::builtin(Builtins::JSConstructStubGeneric));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructStubGeneric));
DeoptimizeDependentFunctions(*shared_info);
- CompilationCache::Remove(shared_info);
+ Isolate::Current()->compilation_cache()->Remove(shared_info);
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
HandleScope scope;
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Top::ThrowIllegalOperation();
+ return Isolate::Current()->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
DeoptimizeDependentFunctions(*shared_info);
- CompilationCache::Remove(shared_info);
+ Isolate::Current()->compilation_cache()->Remove(shared_info);
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
shared_info->set_script(*script_handle);
- CompilationCache::Remove(shared_info);
+ Isolate::Current()->compilation_cache()->Remove(shared_info);
}
// Relocation info section now has different size. We cannot simply
// rewrite it inside code object. Instead we have to create a new
// code object.
- Handle<Code> result(Factory::CopyCode(code, buffer));
+ Handle<Code> result(FACTORY->CopyCode(code, buffer));
return result;
}
}
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Top::ThrowIllegalOperation();
+ return Isolate::Current()->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
}
}
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
static Handle<Script> CreateScriptCopy(Handle<Script> original) {
Handle<String> original_source(String::cast(original->source()));
- Handle<Script> copy = Factory::NewScript(original_source);
+ Handle<Script> copy = FACTORY->NewScript(original_source);
copy->set_name(original->name());
copy->set_line_offset(original->line_offset());
Handle<Script> old_script = CreateScriptCopy(original_script);
old_script->set_name(String::cast(*old_script_name));
old_script_object = old_script;
- Debugger::OnAfterCompile(old_script, Debugger::SEND_WHEN_DEBUGGING);
+ Isolate::Current()->debugger()->OnAfterCompile(
+ old_script, Debugger::SEND_WHEN_DEBUGGING);
} else {
- old_script_object = Handle<Object>(Heap::null_value());
+ old_script_object = Handle<Object>(HEAP->null_value());
}
original_script->set_source(*new_source);
// Drop line ends so that they will be recalculated.
- original_script->set_line_ends(Heap::undefined_value());
+ original_script->set_line_ends(HEAP->undefined_value());
return *old_script_object;
}
static bool FixTryCatchHandler(StackFrame* top_frame,
StackFrame* bottom_frame) {
Address* pointer_address =
- &Memory::Address_at(Top::get_address_from_id(Top::k_handler_address));
+ &Memory::Address_at(Isolate::Current()->get_address_from_id(
+ Isolate::k_handler_address));
while (*pointer_address < top_frame->sp()) {
pointer_address = &Memory::Address_at(*pointer_address);
ASSERT(bottom_js_frame->is_java_script());
// Check the nature of the top frame.
- if (pre_top_frame->code()->is_inline_cache_stub() &&
- pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
+ Code* pre_top_frame_code = pre_top_frame->LookupCode(Isolate::Current());
+ if (pre_top_frame_code->is_inline_cache_stub() &&
+ pre_top_frame_code->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
- } else if (pre_top_frame->code() == Debug::debug_break_slot()) {
+ } else if (pre_top_frame_code ==
+ Isolate::Current()->debug()->debug_break_slot()) {
// OK, we can drop debug break slot.
*mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
- } else if (pre_top_frame->code() ==
- Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
+ } else if (pre_top_frame_code ==
+ Isolate::Current()->builtins()->builtin(
+ Builtins::FrameDropper_LiveEdit)) {
// OK, we can drop our own code.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
- } else if (pre_top_frame->code()->kind() == Code::STUB &&
- pre_top_frame->code()->major_key()) {
+ } else if (pre_top_frame_code->kind() == Code::STUB &&
+ pre_top_frame_code->major_key()) {
// Entry from our unit tests, it's fine, we support this case.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else {
// Make sure FixTryCatchHandler is idempotent.
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
- Handle<Code> code(Builtins::builtin(Builtins::FrameDropper_LiveEdit));
+ Handle<Code> code(Isolate::Current()->builtins()->builtin(
+ Builtins::FrameDropper_LiveEdit));
top_frame->set_pc(code->entry());
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
// removing all listed function if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
-
+ Debug* debug = Isolate::Current()->debug();
ZoneScope scope(DELETE_ON_EXIT);
Vector<StackFrame*> frames = CreateStackMap();
int frame_index = 0;
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
- if (frame->id() == Debug::break_frame_id()) {
+ if (frame->id() == debug->break_frame_id()) {
top_frame_index = frame_index;
break;
}
break;
}
}
- Debug::FramesHaveBeenDropped(new_id, drop_mode,
+ debug->FramesHaveBeenDropped(new_id, drop_mode,
restarter_frame_function_pointer);
// Replace "blocked on active" with "replaced on active" status.
Handle<JSArray> shared_info_array, bool do_drop) {
int len = Smi::cast(shared_info_array->length())->value();
- Handle<JSArray> result = Factory::NewJSArray(len);
+ Handle<JSArray> result = FACTORY->NewJSArray(len);
// Fill the default values.
for (int i = 0; i < len; i++) {
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
- ThreadManager::IterateArchivedThreads(&inactive_threads_checker);
+ Isolate::Current()->thread_manager()->IterateArchivedThreads(
+ &inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
}
if (error_message != NULL) {
// Add error message as an array extra element.
Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = Factory::NewStringFromAscii(vector_message);
+ Handle<String> str = FACTORY->NewStringFromAscii(vector_message);
SetElementNonStrict(result, len, str);
}
return result;
}
-LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
- if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionStarted(fun);
+LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
+ FunctionLiteral* fun)
+ : isolate_(isolate) {
+ if (isolate_->active_function_info_listener() != NULL) {
+ isolate_->active_function_info_listener()->FunctionStarted(fun);
}
}
LiveEditFunctionTracker::~LiveEditFunctionTracker() {
- if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionDone();
+ if (isolate_->active_function_info_listener() != NULL) {
+ isolate_->active_function_info_listener()->FunctionDone();
}
}
void LiveEditFunctionTracker::RecordFunctionInfo(
Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
- if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionInfo(info, lit->scope());
+ if (isolate_->active_function_info_listener() != NULL) {
+ isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope());
}
}
void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
- active_function_info_listener->FunctionCode(code);
+ isolate_->active_function_info_listener()->FunctionCode(code);
}
-bool LiveEditFunctionTracker::IsActive() {
- return active_function_info_listener != NULL;
+bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
+ return isolate->active_function_info_listener() != NULL;
}
// This ifdef-else-endif section provides working or stub implementation of
// LiveEditFunctionTracker.
-LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
+LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
+ FunctionLiteral* fun) {
}
// also collects compiled function codes.
class LiveEditFunctionTracker {
public:
- explicit LiveEditFunctionTracker(FunctionLiteral* fun);
+ explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
~LiveEditFunctionTracker();
void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
FunctionLiteral* lit);
void RecordRootFunctionInfo(Handle<Code> code);
- static bool IsActive();
+ static bool IsActive(Isolate* isolate);
+
+ private:
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Isolate* isolate_;
+#endif
};
#ifdef ENABLE_DEBUGGER_SUPPORT
inline static void ProcessNonLive(HeapObject* obj) {}
inline static void UpdateReferencesForScavengeGC() {}
- inline static MaybeObject* Capture() { return Heap::undefined_value(); }
+ inline static MaybeObject* Capture() { return HEAP->undefined_value(); }
inline static bool Delete(int id) { return false; }
inline static MaybeObject* Dump(int id1,
int id2,
int start_idx,
int dump_limit,
Handle<JSObject> filter_obj) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static MaybeObject* Info(int start_idx, int dump_limit) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static MaybeObject* Summarize(int id1,
int id2,
Handle<JSObject> filter_obj) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static void Reset() {}
- inline static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
+ inline static Object* GetObj(int obj_id) { return HEAP->undefined_value(); }
inline static Object* GetObjId(Handle<String> address) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static MaybeObject* GetObjRetainers(int obj_id,
Handle<JSObject> instance_filter,
int start,
int count,
Handle<JSObject> filter_obj) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static Object* GetPath(int obj_id1,
int obj_id2,
Handle<JSObject> instance_filter) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
- inline static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
+ inline static Object* PrintObj(int obj_id) { return HEAP->undefined_value(); }
};
#include "v8.h"
#include "log-utils.h"
+#include "string-stream.h"
namespace v8 {
namespace internal {
return data_size;
}
-
-bool Log::is_stopped_ = false;
-Log::WritePtr Log::Write = NULL;
-FILE* Log::output_handle_ = NULL;
-FILE* Log::output_code_handle_ = NULL;
-LogDynamicBuffer* Log::output_buffer_ = NULL;
// Must be the same message as in Logger::PauseProfiler.
-const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
-Mutex* Log::mutex_ = NULL;
-char* Log::message_buffer_ = NULL;
+const char* const Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
+
+Log::Log(Logger* logger)
+ : write_to_file_(false),
+ is_stopped_(false),
+ output_handle_(NULL),
+ output_code_handle_(NULL),
+ output_buffer_(NULL),
+ mutex_(NULL),
+ message_buffer_(NULL),
+ logger_(logger) {
+}
+
+
+static void AddIsolateIdIfNeeded(StringStream* stream) {
+ Isolate* isolate = Isolate::Current();
+ if (isolate->IsDefaultIsolate()) return;
+ stream->Add("isolate-%p-", isolate);
+}
-void Log::Init() {
+void Log::Initialize() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
+
+ // --log-all enables all the log flags.
+ if (FLAG_log_all) {
+ FLAG_log_runtime = true;
+ FLAG_log_api = true;
+ FLAG_log_code = true;
+ FLAG_log_gc = true;
+ FLAG_log_suspect = true;
+ FLAG_log_handles = true;
+ FLAG_log_regexp = true;
+ }
+
+ // --prof implies --log-code.
+ if (FLAG_prof) FLAG_log_code = true;
+
+ // --prof_lazy controls --log-code, implies --noprof_auto.
+ if (FLAG_prof_lazy) {
+ FLAG_log_code = false;
+ FLAG_prof_auto = false;
+ }
+
+ bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
+ || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
+ || FLAG_log_regexp || FLAG_log_state_changes;
+
+ bool open_log_file = start_logging || FLAG_prof_lazy;
+
+ // If we're logging anything, we need to open the log file.
+ if (open_log_file) {
+ if (strcmp(FLAG_logfile, "-") == 0) {
+ OpenStdout();
+ } else if (strcmp(FLAG_logfile, "*") == 0) {
+ OpenMemoryBuffer();
+ } else {
+ if (strchr(FLAG_logfile, '%') != NULL ||
+ !Isolate::Current()->IsDefaultIsolate()) {
+        // If there's a '%' in the log file name, or this is not the
+        // default isolate, expand the placeholders and prefix the file
+        // name with the isolate id.
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ AddIsolateIdIfNeeded(&stream);
+ for (const char* p = FLAG_logfile; *p; p++) {
+ if (*p == '%') {
+ p++;
+ switch (*p) {
+ case '\0':
+ // If there's a % at the end of the string we back up
+ // one character so we can escape the loop properly.
+ p--;
+ break;
+ case 't': {
+ // %t expands to the current time in milliseconds.
+ double time = OS::TimeCurrentMillis();
+ stream.Add("%.0f", FmtElm(time));
+ break;
+ }
+ case '%':
+ // %% expands (contracts really) to %.
+ stream.Put('%');
+ break;
+ default:
+ // All other %'s expand to themselves.
+ stream.Put('%');
+ stream.Put(*p);
+ break;
+ }
+ } else {
+ stream.Put(*p);
+ }
+ }
+ SmartPointer<const char> expanded = stream.ToCString();
+ OpenFile(*expanded);
+ } else {
+ OpenFile(FLAG_logfile);
+ }
+ }
+ }
+#endif
}
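
(Illustrative values, not captured output: with --logfile=v8-%t.log the %t
placeholder expands to the current time in milliseconds, giving something
like v8-1302010181387.log; %% contracts to a literal %, and a non-default
isolate additionally gains an isolate-<address>- prefix via
AddIsolateIdIfNeeded above.)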
void Log::OpenStdout() {
ASSERT(!IsEnabled());
output_handle_ = stdout;
- Write = WriteToFile;
- Init();
+ write_to_file_ = true;
}
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+ write_to_file_ = true;
if (FLAG_ll_prof) {
// Open a file for logging the contents of code objects so that
// they can be disassembled later.
memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
}
- Write = WriteToFile;
- Init();
}
output_buffer_ = new LogDynamicBuffer(
kDynamicBufferBlockSize, kMaxDynamicBufferSize,
kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
- Write = WriteToMemory;
- Init();
+ write_to_file_ = false;
}
void Log::Close() {
- if (Write == WriteToFile) {
+ if (write_to_file_) {
if (output_handle_ != NULL) fclose(output_handle_);
output_handle_ = NULL;
if (output_code_handle_ != NULL) fclose(output_code_handle_);
output_code_handle_ = NULL;
- } else if (Write == WriteToMemory) {
+ } else {
delete output_buffer_;
output_buffer_ = NULL;
- } else {
- ASSERT(Write == NULL);
}
- Write = NULL;
DeleteArray(message_buffer_);
message_buffer_ = NULL;
int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- if (Write != WriteToMemory) return 0;
+ if (write_to_file_) return 0;
ASSERT(output_buffer_ != NULL);
ASSERT(from_pos >= 0);
ASSERT(max_size >= 0);
}
-LogMessageBuilder::WriteFailureHandler
- LogMessageBuilder::write_failure_handler = NULL;
-
-
-LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
- ASSERT(Log::message_buffer_ != NULL);
+LogMessageBuilder::LogMessageBuilder(Logger* logger)
+ : log_(logger->log_),
+ sl(log_->mutex_),
+ pos_(0) {
+ ASSERT(log_->message_buffer_ != NULL);
}
void LogMessageBuilder::Append(const char* format, ...) {
- Vector<char> buf(Log::message_buffer_ + pos_,
+ Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
va_list args;
va_start(args, format);
void LogMessageBuilder::AppendVA(const char* format, va_list args) {
- Vector<char> buf(Log::message_buffer_ + pos_,
+ Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
int result = v8::internal::OS::VSNPrintF(buf, format, args);
void LogMessageBuilder::Append(const char c) {
if (pos_ < Log::kMessageBufferSize) {
- Log::message_buffer_[pos_++] = c;
+ log_->message_buffer_[pos_++] = c;
}
ASSERT(pos_ <= Log::kMessageBufferSize);
}
ASSERT(len >= 0);
if (len == 0) return;
}
- Vector<char> buf(Log::message_buffer_ + pos_,
+ Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
OS::StrNCpy(buf, str, len);
pos_ += len;
void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
- const int written = Log::Write(Log::message_buffer_, pos_);
- if (written != pos_ && write_failure_handler != NULL) {
- write_failure_handler();
+ const int written = log_->write_to_file_ ?
+ log_->WriteToFile(log_->message_buffer_, pos_) :
+ log_->WriteToMemory(log_->message_buffer_, pos_);
+ if (written != pos_) {
+ log_->stop();
+ log_->logger_->LogFailure();
}
}
+
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
#ifdef ENABLE_LOGGING_AND_PROFILING
+class Logger;
+
// A memory buffer that increments its size as you write in it. Size
// is incremented with 'block_size' steps, never exceeding 'max_size'.
// During growth, memory contents are never copied. At the end of the
// Functions and data for performing output of log messages.
-class Log : public AllStatic {
+class Log {
public:
- // Opens stdout for logging.
- static void OpenStdout();
- // Opens file for logging.
- static void OpenFile(const char* name);
-
- // Opens memory buffer for logging.
- static void OpenMemoryBuffer();
+ // Performs process-wide initialization.
+ void Initialize();
// Disables logging, but preserves acquired resources.
- static void stop() { is_stopped_ = true; }
+ void stop() { is_stopped_ = true; }
- // Frees all resources acquired in Open... functions.
- static void Close();
+ // Frees all resources acquired in Initialize and Open... functions.
+ void Close();
// See description in include/v8.h.
- static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ int GetLogLines(int from_pos, char* dest_buf, int max_size);
// Returns whether logging is enabled.
- static bool IsEnabled() {
+ bool IsEnabled() {
return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
}
static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
private:
- typedef int (*WritePtr)(const char* msg, int length);
+ explicit Log(Logger* logger);
+
+ // Opens stdout for logging.
+ void OpenStdout();
- // Initialization function called from Open... functions.
- static void Init();
+ // Opens file for logging.
+ void OpenFile(const char* name);
- // Write functions assume that mutex_ is acquired by the caller.
- static WritePtr Write;
+ // Opens memory buffer for logging.
+ void OpenMemoryBuffer();
// Implementation of writing to a log file.
- static int WriteToFile(const char* msg, int length) {
+ int WriteToFile(const char* msg, int length) {
ASSERT(output_handle_ != NULL);
size_t rv = fwrite(msg, 1, length, output_handle_);
ASSERT(static_cast<size_t>(length) == rv);
}
// Implementation of writing to a memory buffer.
- static int WriteToMemory(const char* msg, int length) {
+ int WriteToMemory(const char* msg, int length) {
ASSERT(output_buffer_ != NULL);
return output_buffer_->Write(msg, length);
}
+ bool write_to_file_;
+
// Whether logging is stopped (e.g. due to insufficient resources).
- static bool is_stopped_;
+ bool is_stopped_;
// When logging is active, either output_handle_ or output_buffer_ is used
// to store a pointer to log destination. If logging was opened via OpenStdout
// or OpenFile, then output_handle_ is used. If logging was opened
// via OpenMemoryBuffer, then output_buffer_ is used.
// mutex_ should be acquired before using output_handle_ or output_buffer_.
- static FILE* output_handle_;
+ FILE* output_handle_;
// Used when low-level profiling is active to save code object contents.
- static FILE* output_code_handle_;
+ FILE* output_code_handle_;
- static LogDynamicBuffer* output_buffer_;
+ LogDynamicBuffer* output_buffer_;
// Size of dynamic buffer block (and dynamic buffer initial size).
static const int kDynamicBufferBlockSize = 65536;
static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
// Message to "seal" dynamic buffer with.
- static const char* kDynamicBufferSeal;
+ static const char* const kDynamicBufferSeal;
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- static Mutex* mutex_;
+ Mutex* mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
- static char* message_buffer_;
+ char* message_buffer_;
+
+ Logger* logger_;
friend class Logger;
friend class LogMessageBuilder;
public:
// Create a message builder starting from position 0. This acquires the mutex
// in the log as well.
- explicit LogMessageBuilder();
+ explicit LogMessageBuilder(Logger* logger);
~LogMessageBuilder() { }
// Append string data to the log message.
// Write the log message to the log file currently opened.
void WriteToLogFile();
- // A handler that is called when Log::Write fails.
- typedef void (*WriteFailureHandler)();
-
- static void set_write_failure_handler(WriteFailureHandler handler) {
- write_failure_handler = handler;
- }
-
private:
- static WriteFailureHandler write_failure_handler;
+ Log* log_;
ScopedLock sl;
int pos_;
};
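
(The builder holds the log mutex for its lifetime through the ScopedLock
member, making each Append/WriteToLogFile sequence atomic. Every event method
in this patch follows the same shape; ExampleEvent is hypothetical.)

void Logger::ExampleEvent(const char* value) {
  if (!log_->IsEnabled() || !FLAG_log) return;
  LogMessageBuilder msg(this);
  msg.Append("example,\"%s\"\n", value);
  msg.WriteToLogFile();
}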
void IncrementStateCounter(StateTag state) {
- Counters::state_counters[state].Increment();
+ COUNTERS->state_counters(state)->Increment();
}
void DecrementStateCounter(StateTag state) {
- Counters::state_counters[state].Decrement();
+ COUNTERS->state_counters(state)->Decrement();
}
};
//
class Profiler: public Thread {
public:
- Profiler();
+ explicit Profiler(Isolate* isolate);
void Engage();
void Disengage();
void Run();
// Pause and Resume TickSample data collection.
- static bool paused() { return paused_; }
- static void pause() { paused_ = true; }
- static void resume() { paused_ = false; }
+ bool paused() const { return paused_; }
+ void pause() { paused_ = true; }
+ void resume() { paused_ = false; }
private:
// Returns the next index in the cyclic buffer.
bool running_;
// Tells whether we are currently recording tick samples.
- static bool paused_;
+ bool paused_;
};
-bool Profiler::paused_ = false;
-
//
// StackTracer implementation
//
-void StackTracer::Trace(TickSample* sample) {
+void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
+ ASSERT(isolate->IsInitialized());
+
sample->tos = NULL;
sample->frames_count = 0;
// Avoid collecting traces while doing GC.
if (sample->state == GC) return;
- const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
+ const Address js_entry_sp =
+ Isolate::js_entry_sp(isolate->thread_local_top());
if (js_entry_sp == 0) {
// Not executing JS now.
return;
sample->tos = Memory::Address_at(sample->sp);
int i = 0;
- const Address callback = Top::external_callback();
+ const Address callback = isolate->external_callback();
// Surprisingly, PC can point _exactly_ to callback start, with good
// probability, and this will result in reporting fake nested
// callback call.
sample->stack[i++] = callback;
}
- SafeStackTraceFrameIterator it(sample->fp, sample->sp,
+ SafeStackTraceFrameIterator it(isolate,
+ sample->fp, sample->sp,
sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
//
class Ticker: public Sampler {
public:
- explicit Ticker(int interval) :
- Sampler(interval),
+ explicit Ticker(Isolate* isolate, int interval):
+ Sampler(isolate, interval),
window_(NULL),
profiler_(NULL) {}
protected:
virtual void DoSampleStack(TickSample* sample) {
- StackTracer::Trace(sample);
+ StackTracer::Trace(isolate(), sample);
}
private:
for (int i = 0; i < kBufferSize; i++) {
buffer_[i] = static_cast<byte>(OTHER);
}
- Logger::ticker_->SetWindow(this);
+ LOGGER->ticker_->SetWindow(this);
}
SlidingStateWindow::~SlidingStateWindow() {
- Logger::ticker_->ClearWindow();
+ LOGGER->ticker_->ClearWindow();
}
//
// Profiler implementation.
//
-Profiler::Profiler()
- : Thread("v8:Profiler"),
+Profiler::Profiler(Isolate* isolate)
+ : Thread(isolate, "v8:Profiler"),
head_(0),
tail_(0),
overflow_(false),
buffer_semaphore_(OS::CreateSemaphore(0)),
engaged_(false),
- running_(false) {
+ running_(false),
+ paused_(false) {
}
Start();
// Register to get ticks.
- Logger::ticker_->SetProfiler(this);
+ LOGGER->ticker_->SetProfiler(this);
- Logger::ProfilerBeginEvent();
+ LOGGER->ProfilerBeginEvent();
}
if (!engaged_) return;
// Stop receiving ticks.
- Logger::ticker_->ClearProfiler();
+ LOGGER->ticker_->ClearProfiler();
// Terminate the worker thread by setting running_ to false,
// inserting a fake element in the queue and then wait for
Insert(&sample);
Join();
- LOG(UncheckedStringEvent("profiler", "end"));
+ LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
}
void Profiler::Run() {
TickSample sample;
bool overflow = Remove(&sample);
+  Isolate* isolate = ISOLATE;
while (running_) {
- LOG(TickEvent(&sample, overflow));
+ LOG(isolate, TickEvent(&sample, overflow));
overflow = Remove(&sample);
}
}
//
// Logger class implementation.
//
-Ticker* Logger::ticker_ = NULL;
-Profiler* Logger::profiler_ = NULL;
-SlidingStateWindow* Logger::sliding_state_window_ = NULL;
-int Logger::logging_nesting_ = 0;
-int Logger::cpu_profiler_nesting_ = 0;
-int Logger::heap_profiler_nesting_ = 0;
+
+Logger::Logger()
+ : ticker_(NULL),
+ profiler_(NULL),
+ sliding_state_window_(NULL),
+ log_events_(NULL),
+ logging_nesting_(0),
+ cpu_profiler_nesting_(0),
+ heap_profiler_nesting_(0),
+ log_(new Log(this)),
+ is_initialized_(false),
+ last_address_(NULL),
+ prev_sp_(NULL),
+ prev_function_(NULL),
+ prev_to_(NULL),
+ prev_code_(NULL) {
+}
+
+Logger::~Logger() {
+ delete log_;
+}
#define DECLARE_EVENT(ignore1, name) name,
-const char* kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
};
#undef DECLARE_EVENT
void Logger::ProfilerBeginEvent() {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
msg.WriteToLogFile();
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedStringEvent(const char* name, const char* value) {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,\"%s\"\n", name, value);
msg.WriteToLogFile();
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntEvent(const char* name, int value) {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
msg.WriteToLogFile();
}
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_handles) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_handles) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
msg.WriteToLogFile();
#endif
// caller's responsibility to ensure that log is enabled and that
// FLAG_log_api is true.
void Logger::ApiEvent(const char* format, ...) {
- ASSERT(Log::IsEnabled() && FLAG_log_api);
- LogMessageBuilder msg;
+ ASSERT(log_->IsEnabled() && FLAG_log_api);
+ LogMessageBuilder msg(this);
va_list ap;
va_start(ap, format);
msg.AppendVA(format, ap);
void Logger::ApiNamedSecurityCheck(Object* key) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
if (key->IsString()) {
SmartPointer<char> str =
String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
uintptr_t start,
uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_prof) return;
+ LogMessageBuilder msg(this);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
uintptr_t start,
uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_prof) return;
+ LogMessageBuilder msg(this);
msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
- LogMessageBuilder msg;
+ LogMessageBuilder msg(this);
Handle<Object> source = GetProperty(regexp, "source");
if (!source->IsString()) {
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_regexp) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_regexp) return;
+ LogMessageBuilder msg(this);
msg.Append("regexp-compile,");
LogRegExpSource(regexp);
msg.Append(in_cache ? ",hit\n" : ",miss\n");
void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_runtime) return;
+ if (!log_->IsEnabled() || !FLAG_log_runtime) return;
HandleScope scope;
- LogMessageBuilder msg;
+ LogMessageBuilder msg(this);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
if (c == '%' && i <= format.length() - 2) {
void Logger::ApiIndexedSecurityCheck(uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,check-security,%u\n", index);
#endif
}
Object* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(name->IsString());
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- Logger::ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+ LOGGER->ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
#endif
}
JSObject* holder,
uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- Logger::ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
+ LOGGER->ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
#endif
}
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- Logger::ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
+ LOGGER->ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
#endif
}
void Logger::ApiEntryCall(const char* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
- Logger::ApiEvent("api,%s\n", name);
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
+ LOGGER->ApiEvent("api,%s\n", name);
#endif
}
void Logger::NewEvent(const char* name, void* object, size_t size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
static_cast<unsigned int>(size));
msg.WriteToLogFile();
void Logger::DeleteEvent(const char* name, void* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
msg.WriteToLogFile();
#endif
}
+void Logger::NewEventStatic(const char* name, void* object, size_t size) {
+ LOGGER->NewEvent(name, object, size);
+}
+
+
+void Logger::DeleteEventStatic(const char* name, void* object) {
+ LOGGER->DeleteEvent(name, object);
+}
+
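+// A sketch of the assumed consumer (TRACK_MEMORY lives in globals.h and is
+// not part of this hunk): it is expected to route a class's allocations
+// through the static wrappers above, roughly along these lines:
+//
+//   #define TRACK_MEMORY(name) \
+//     void* operator new(size_t size) { \
+//       void* result = ::operator new(size); \
+//       Logger::NewEventStatic(name, result, size); \
+//       return result; \
+//     } \
+//     void operator delete(void* object) { \
+//       Logger::DeleteEventStatic(name, object); \
+//       ::operator delete(object); \
+//     }
+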
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
void Logger::CallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("", *str, entry_point);
void Logger::GetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("get ", *str, entry_point);
void Logger::SetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("set ", *str, entry_point);
Code* code,
const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
SharedFunctionInfo* shared,
String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- if (code == Builtins::builtin(Builtins::LazyCompile)) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ if (code == Isolate::Current()->builtins()->builtin(
+ Builtins::LazyCompile))
+ return;
+
+ LogMessageBuilder msg(this);
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,",
SharedFunctionInfo* shared,
String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
SmartPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
void Logger::CodeMovingGCEvent() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+ LogMessageBuilder msg(this);
msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
msg.WriteToLogFile();
OS::SignalCodeMovingGC();
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[REG_EXP_TAG]);
void Logger::SnapshotPositionEvent(Address addr, int pos) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_snapshot_positions) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
msg.AppendAddress(addr);
msg.Append(",%d", pos);
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append(',');
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append('\n');
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,", name, tag);
uint32_t sec, usec;
void Logger::SuspectReadEvent(String* name, Object* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_suspect) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_suspect) return;
+ LogMessageBuilder msg(this);
String* class_name = obj->IsJSObject()
? JSObject::cast(obj)->class_name()
- : Heap::empty_string();
+ : HEAP->empty_string();
msg.Append("suspect-read,");
msg.Append(class_name);
msg.Append(',');
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
// Using non-relative system time in order to be able to synchronize with
// external memory profiling events (e.g. DOM memory size).
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
void Logger::HeapSampleStats(const char* space, const char* kind,
intptr_t capacity, intptr_t used) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
msg.Append("heap-sample-stats,\"%s\",\"%s\","
"%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
space, kind, capacity, used);
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
#endif
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
msg.WriteToLogFile();
#endif
void Logger::HeapSampleJSConstructorEvent(const char* constructor,
int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
msg.WriteToLogFile();
#endif
}
+// The event text starts with a comma, so the format string omits one.
+static const char kEventText[] = "heap-js-ret-item,%s";
+// We take placeholder strings into account, but it's OK to be conservative.
+static const int kEventTextLen = sizeof(kEventText)/sizeof(kEventText[0]);
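+// Note: sizeof() also counts the terminating NUL and the "%s" placeholder
+// (whose expansion is accounted for separately via cons_len below), so
+// kEventTextLen slightly overestimates the prefix length; the split position
+// computed from it is therefore conservative, which is safe.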
void Logger::HeapSampleJSRetainersEvent(
const char* constructor, const char* event) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- // Event starts with comma, so we don't have it in the format string.
- static const char* event_text = "heap-js-ret-item,%s";
- // We take placeholder strings into account, but it's OK to be conservative.
- static const int event_text_len = StrLength(event_text);
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
const int cons_len = StrLength(constructor);
const int event_len = StrLength(event);
int pos = 0;
// Retainer lists can be long. We may need to split them into multiple events.
do {
- LogMessageBuilder msg;
- msg.Append(event_text, constructor);
+ LogMessageBuilder msg(this);
+ msg.Append(kEventText, constructor);
int to_write = event_len - pos;
- if (to_write > Log::kMessageBufferSize - (cons_len + event_text_len)) {
- int cut_pos = pos + Log::kMessageBufferSize - (cons_len + event_text_len);
+ if (to_write > Log::kMessageBufferSize - (cons_len + kEventTextLen)) {
+ int cut_pos = pos + Log::kMessageBufferSize - (cons_len + kEventTextLen);
ASSERT(cut_pos < event_len);
while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
if (event[cut_pos] != ',') {
void Logger::HeapSampleJSProducerEvent(const char* constructor,
Address* stack) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
msg.Append("heap-js-prod-item,%s", constructor);
while (*stack != NULL) {
msg.Append(",0x%" V8PRIxPTR, *stack++);
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("debug-tag,%s\n", call_site_tag);
msg.WriteToLogFile();
#endif
void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
+ if (!log_->IsEnabled() || !FLAG_log) return;
StringBuilder s(parameter.length() + 1);
for (int i = 0; i < parameter.length(); ++i) {
s.AddCharacter(static_cast<char>(parameter[i]));
}
char* parameter_string = s.Finalize();
- LogMessageBuilder msg;
+ LogMessageBuilder msg(this);
msg.Append("debug-queue-event,%s,%15.3f,%s\n",
event_type,
OS::TimeCurrentMillis(),
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
- if (!Log::IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_prof) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
msg.Append(',');
void Logger::PauseProfiler(int flags, int tag) {
- if (!Log::IsEnabled()) return;
+ if (!log_->IsEnabled()) return;
if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
// It is OK to have negative nesting.
if (--cpu_profiler_nesting_ == 0) {
}
FLAG_log_code = false;
// Must be the same message as Log::kDynamicBufferSeal.
- LOG(UncheckedStringEvent("profiler", "pause"));
+ LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
}
--logging_nesting_;
}
void Logger::ResumeProfiler(int flags, int tag) {
- if (!Log::IsEnabled()) return;
+ if (!log_->IsEnabled()) return;
if (tag != 0) {
UncheckedIntEvent("open-tag", tag);
}
++logging_nesting_;
if (FLAG_prof_lazy) {
profiler_->Engage();
- LOG(UncheckedStringEvent("profiler", "resume"));
+ LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
LogAccessorCallbacks();
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
-void Logger::StopLoggingAndProfiling() {
- Log::stop();
+void Logger::LogFailure() {
PauseProfiler(PROFILER_MODULE_CPU, 0);
}
int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- return Log::GetLogLines(from_pos, dest_buf, max_size);
+ return log_->GetLogLines(from_pos, dest_buf, max_size);
}
tag = Logger::KEYED_CALL_IC_TAG;
break;
}
- PROFILE(CodeCreateEvent(tag, code_object, description));
+ PROFILE(ISOLATE, CodeCreateEvent(tag, code_object, description));
}
}
void Logger::LogCodeInfo() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+ if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
#if V8_TARGET_ARCH_IA32
const char arch[] = "ia32";
#elif V8_TARGET_ARCH_X64
#else
const char arch[] = "unknown";
#endif
- LogMessageBuilder msg;
+ LogMessageBuilder msg(this);
msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
msg.WriteToLogFile();
#endif // ENABLE_LOGGING_AND_PROFILING
void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
- if (!FLAG_ll_prof || Log::output_code_handle_ == NULL) return;
- int pos = static_cast<int>(ftell(Log::output_code_handle_));
+ if (!FLAG_ll_prof || log_->output_code_handle_ == NULL) return;
+ int pos = static_cast<int>(ftell(log_->output_code_handle_));
size_t rv = fwrite(code->instruction_start(), 1, code->instruction_size(),
- Log::output_code_handle_);
+ log_->output_code_handle_);
ASSERT(static_cast<size_t>(code->instruction_size()) == rv);
USE(rv);
msg->Append(",%d", pos);
// During iteration, there can be heap allocation due to the
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == Builtins::builtin(Builtins::LazyCompile)) continue;
+ if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
+ Builtins::LazyCompile))
+ continue;
Handle<SharedFunctionInfo> shared = sfis[i];
Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<String> script_name(String::cast(script->name()));
int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
- PROFILE(CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *shared,
- *script_name, line_num + 1));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+ *code_objects[i], *shared,
+ *script_name, line_num + 1));
} else {
// Can't distinguish eval and script here, so always use Script.
- PROFILE(CodeCreateEvent(
- Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code_objects[i], *shared, *script_name));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *code_objects[i], *shared, *script_name));
}
} else {
- PROFILE(CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *shared, *func_name));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+ *code_objects[i], *shared, *func_name));
}
} else if (shared->IsApiFunction()) {
// API function.
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
- PROFILE(CallbackEvent(*func_name, entry_point));
+ PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
}
} else {
- PROFILE(CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code_objects[i], *shared, *func_name));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::LAZY_COMPILE_TAG, *code_objects[i],
+ *shared, *func_name));
}
}
}
void Logger::LogAccessorCallbacks() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
+ i::Isolate* isolate = ISOLATE;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
String* name = String::cast(ai->name());
Address getter_entry = v8::ToCData<Address>(ai->getter());
if (getter_entry != 0) {
- PROFILE(GetterCallbackEvent(name, getter_entry));
+ PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
- PROFILE(SetterCallbackEvent(name, setter_entry));
+ PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
}
}
}
bool Logger::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- // --log-all enables all the log flags.
- if (FLAG_log_all) {
- FLAG_log_runtime = true;
- FLAG_log_api = true;
- FLAG_log_code = true;
- FLAG_log_gc = true;
- FLAG_log_suspect = true;
- FLAG_log_handles = true;
- FLAG_log_regexp = true;
- }
-
- // --prof implies --log-code.
- if (FLAG_prof) FLAG_log_code = true;
+ // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
+ if (is_initialized_) return true;
+ is_initialized_ = true;
// --ll-prof implies --log-code and --log-snapshot-positions.
if (FLAG_ll_prof) {
FLAG_prof_auto = false;
}
- bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes;
-
- bool open_log_file = start_logging || FLAG_prof_lazy;
-
- // If we're logging anything, we need to open the log file.
- if (open_log_file) {
- if (strcmp(FLAG_logfile, "-") == 0) {
- Log::OpenStdout();
- } else if (strcmp(FLAG_logfile, "*") == 0) {
- Log::OpenMemoryBuffer();
- } else if (strchr(FLAG_logfile, '%') != NULL) {
- // If there's a '%' in the log file name we have to expand
- // placeholders.
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- for (const char* p = FLAG_logfile; *p; p++) {
- if (*p == '%') {
- p++;
- switch (*p) {
- case '\0':
- // If there's a % at the end of the string we back up
- // one character so we can escape the loop properly.
- p--;
- break;
- case 't': {
- // %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
- stream.Add("%.0f", FmtElm(time));
- break;
- }
- case '%':
- // %% expands (contracts really) to %.
- stream.Put('%');
- break;
- default:
- // All other %'s expand to themselves.
- stream.Put('%');
- stream.Put(*p);
- break;
- }
- } else {
- stream.Put(*p);
- }
- }
- SmartPointer<const char> expanded = stream.ToCString();
- Log::OpenFile(*expanded);
- } else {
- Log::OpenFile(FLAG_logfile);
- }
- }
+ // TODO(isolates): this assert introduces a cyclic dependency (logger
+ // -> thread local top -> heap -> logger).
+ // ASSERT(VMState::is_outermost_external());
+
+ log_->Initialize();
if (FLAG_ll_prof) LogCodeInfo();
- ticker_ = new Ticker(kSamplingIntervalMs);
+ ticker_ = new Ticker(Isolate::Current(), kSamplingIntervalMs);
if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
sliding_state_window_ = new SlidingStateWindow();
}
+ bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
+ || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
+ || FLAG_log_regexp || FLAG_log_state_changes;
+
if (start_logging) {
logging_nesting_ = 1;
}
if (FLAG_prof) {
- profiler_ = new Profiler();
+ profiler_ = new Profiler(Isolate::Current());
if (!FLAG_prof_auto) {
profiler_->pause();
} else {
}
}
- LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
return true;
#else
}
+Sampler* Logger::sampler() {
+ return ticker_;
+}
+
+
void Logger::EnsureTickerStarted() {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(ticker_ != NULL);
void Logger::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- LogMessageBuilder::set_write_failure_handler(NULL);
+ if (!is_initialized_) return;
+ is_initialized_ = false;
// Stop the profiler before closing the file.
if (profiler_ != NULL) {
delete ticker_;
ticker_ = NULL;
- Log::Close();
+ log_->Close();
#endif
}
#endif
}
+
+Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
+List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
+
+
+bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
+ ScopedLock lock(mutex_);
+ for (int i = 0;
+ ActiveSamplersExist() && i < active_samplers_->length();
+ ++i) {
+ func(active_samplers_->at(i), param);
+ }
+ return ActiveSamplersExist();
+}
+
+
+static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
+ bool* flag = reinterpret_cast<bool*>(flag_ptr);
+ *flag |= sampler->IsProfiling();
+}
+
+
+SamplerRegistry::State SamplerRegistry::GetState() {
+ bool flag = false;
+ if (!IterateActiveSamplers(&ComputeCpuProfiling, &flag)) {
+ return HAS_NO_SAMPLERS;
+ }
+ return flag ? HAS_CPU_PROFILING_SAMPLERS : HAS_SAMPLERS;
+}
+
+
+void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
+ ASSERT(sampler->IsActive());
+ ScopedLock lock(mutex_);
+ if (active_samplers_ == NULL) {
+ active_samplers_ = new List<Sampler*>;
+ } else {
+ ASSERT(!active_samplers_->Contains(sampler));
+ }
+ active_samplers_->Add(sampler);
+}
+
+
+void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
+ ASSERT(sampler->IsActive());
+ ScopedLock lock(mutex_);
+ ASSERT(active_samplers_ != NULL);
+ bool removed = active_samplers_->RemoveElement(sampler);
+ ASSERT(removed);
+ USE(removed);
+}
+
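+// A usage sketch (hypothetical caller, for illustration only): an active
+// sampler registers itself, the processing thread polls the aggregate
+// state, and the sampler deregisters on shutdown:
+//
+//   SamplerRegistry::AddActiveSampler(sampler);
+//   if (SamplerRegistry::GetState() ==
+//       SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+//     // ... record a tick for each profiling sampler ...
+//   }
+//   SamplerRegistry::RemoveActiveSampler(sampler);
+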
} } // namespace v8::internal
#undef LOG
#ifdef ENABLE_LOGGING_AND_PROFILING
-#define LOG(Call) \
- do { \
- if (v8::internal::Logger::is_logging()) \
- v8::internal::Logger::Call; \
+#define LOG(isolate, Call) \
+ do { \
+ v8::internal::Logger* logger = \
+ (isolate)->logger(); \
+ if (logger->is_logging()) \
+ logger->Call; \
} while (false)
#else
-#define LOG(Call) ((void) 0)
+#define LOG(isolate, Call) ((void) 0)
#endif
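+// Call-site sketch under the new contract (illustrative, not part of this
+// hunk): every LOG use now names the isolate whose logger receives the
+// event, e.g.
+//
+//   LOG(isolate, ApiEntryCall("v8::Script::Run()"));
+//
+// which expands to a guarded call on that isolate's Logger instance.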
#define LOG_EVENTS_AND_TAGS_LIST(V) \
// original tags when writing to the log.
+class Sampler;
+
+
class Logger {
public:
#define DECLARE_ENUM(enum_item, ignore) enum_item,
#undef DECLARE_ENUM
// Acquires resources for logging if the right flags are set.
- static bool Setup();
+ bool Setup();
- static void EnsureTickerStarted();
- static void EnsureTickerStopped();
+ void EnsureTickerStarted();
+ void EnsureTickerStopped();
+
+ Sampler* sampler();
// Frees resources acquired in Setup.
- static void TearDown();
+ void TearDown();
// Enable the computation of a sliding window of states.
- static void EnableSlidingStateWindow();
+ void EnableSlidingStateWindow();
// Emits an event with a string value -> (name, value).
- static void StringEvent(const char* name, const char* value);
+ void StringEvent(const char* name, const char* value);
// Emits an event with an int value -> (name, value).
- static void IntEvent(const char* name, int value);
- static void IntPtrTEvent(const char* name, intptr_t value);
+ void IntEvent(const char* name, int value);
+ void IntPtrTEvent(const char* name, intptr_t value);
// Emits an event with a handle value -> (name, location).
- static void HandleEvent(const char* name, Object** location);
+ void HandleEvent(const char* name, Object** location);
// Emits memory management events for C allocated structures.
- static void NewEvent(const char* name, void* object, size_t size);
- static void DeleteEvent(const char* name, void* object);
+ void NewEvent(const char* name, void* object, size_t size);
+ void DeleteEvent(const char* name, void* object);
+
+ // Static versions of the above; they operate on the current isolate's
+ // logger. Used in TRACK_MEMORY(TypeName), defined in globals.h.
+ static void NewEventStatic(const char* name, void* object, size_t size);
+ static void DeleteEventStatic(const char* name, void* object);
// Emits an event with a tag, and some resource usage information.
// -> (name, tag, <rusage information>).
// Currently, the resource usage information is a process time stamp
// and a real time timestamp.
- static void ResourceEvent(const char* name, const char* tag);
+ void ResourceEvent(const char* name, const char* tag);
// Emits an event that an undefined property was read from an
// object.
- static void SuspectReadEvent(String* name, Object* obj);
+ void SuspectReadEvent(String* name, Object* obj);
// Emits an event when a message is put on or read from a debugging queue.
// DebugTag lets us put a call-site specific label on the event.
- static void DebugTag(const char* call_site_tag);
- static void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
+ void DebugTag(const char* call_site_tag);
+ void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
// ==== Events logged by --log-api. ====
- static void ApiNamedSecurityCheck(Object* key);
- static void ApiIndexedSecurityCheck(uint32_t index);
- static void ApiNamedPropertyAccess(const char* tag,
- JSObject* holder,
- Object* name);
- static void ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
- uint32_t index);
- static void ApiObjectAccess(const char* tag, JSObject* obj);
- static void ApiEntryCall(const char* name);
+ void ApiNamedSecurityCheck(Object* key);
+ void ApiIndexedSecurityCheck(uint32_t index);
+ void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
+ void ApiIndexedPropertyAccess(const char* tag,
+ JSObject* holder,
+ uint32_t index);
+ void ApiObjectAccess(const char* tag, JSObject* obj);
+ void ApiEntryCall(const char* name);
// ==== Events logged by --log-code. ====
// Emits a code event for a callback function.
- static void CallbackEvent(String* name, Address entry_point);
- static void GetterCallbackEvent(String* name, Address entry_point);
- static void SetterCallbackEvent(String* name, Address entry_point);
+ void CallbackEvent(String* name, Address entry_point);
+ void GetterCallbackEvent(String* name, Address entry_point);
+ void SetterCallbackEvent(String* name, Address entry_point);
// Emits a code create event.
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, const char* source);
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, String* name);
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name);
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line);
- static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
- static void CodeMovingGCEvent();
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code, const char* source);
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code, String* name);
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* name);
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* source, int line);
+ void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+ void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
- static void RegExpCodeCreateEvent(Code* code, String* source);
+ void RegExpCodeCreateEvent(Code* code, String* source);
// Emits a code move event.
- static void CodeMoveEvent(Address from, Address to);
+ void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
- static void CodeDeleteEvent(Address from);
+ void CodeDeleteEvent(Address from);
- static void SharedFunctionInfoMoveEvent(Address from, Address to);
+ void SharedFunctionInfoMoveEvent(Address from, Address to);
- static void SnapshotPositionEvent(Address addr, int pos);
+ void SnapshotPositionEvent(Address addr, int pos);
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
- static void HeapSampleBeginEvent(const char* space, const char* kind);
- static void HeapSampleEndEvent(const char* space, const char* kind);
- static void HeapSampleItemEvent(const char* type, int number, int bytes);
- static void HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes);
- static void HeapSampleJSRetainersEvent(const char* constructor,
+ void HeapSampleBeginEvent(const char* space, const char* kind);
+ void HeapSampleEndEvent(const char* space, const char* kind);
+ void HeapSampleItemEvent(const char* type, int number, int bytes);
+ void HeapSampleJSConstructorEvent(const char* constructor,
+ int number, int bytes);
+ void HeapSampleJSRetainersEvent(const char* constructor,
const char* event);
- static void HeapSampleJSProducerEvent(const char* constructor,
- Address* stack);
- static void HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used);
-
- static void SharedLibraryEvent(const char* library_path,
- uintptr_t start,
- uintptr_t end);
- static void SharedLibraryEvent(const wchar_t* library_path,
- uintptr_t start,
- uintptr_t end);
+ void HeapSampleJSProducerEvent(const char* constructor,
+ Address* stack);
+ void HeapSampleStats(const char* space, const char* kind,
+ intptr_t capacity, intptr_t used);
+
+ void SharedLibraryEvent(const char* library_path,
+ uintptr_t start,
+ uintptr_t end);
+ void SharedLibraryEvent(const wchar_t* library_path,
+ uintptr_t start,
+ uintptr_t end);
// ==== Events logged by --log-regexp. ====
// Regexp compilation and execution events.
- static void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
+ void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
// Log an event reported from generated code
- static void LogRuntime(Vector<const char> format, JSArray* args);
+ void LogRuntime(Vector<const char> format, JSArray* args);
#ifdef ENABLE_LOGGING_AND_PROFILING
- static bool is_logging() {
+ bool is_logging() {
return logging_nesting_ > 0;
}
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
- static void PauseProfiler(int flags, int tag);
- static void ResumeProfiler(int flags, int tag);
- static int GetActiveProfilerModules();
+ void PauseProfiler(int flags, int tag);
+ void ResumeProfiler(int flags, int tag);
+ int GetActiveProfilerModules();
// If logging is performed into a memory buffer, this allows
// retrieving previously written messages. See v8.h.
- static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ int GetLogLines(int from_pos, char* dest_buf, int max_size);
// Logs all compiled functions found in the heap.
- static void LogCompiledFunctions();
+ void LogCompiledFunctions();
// Logs all accessor callbacks found in the heap.
- static void LogAccessorCallbacks();
+ void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
- static void LogCodeObjects();
+ void LogCodeObjects();
// Converts tag to a corresponding NATIVE_... if the script is native.
INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
// Profiler's sampling interval (in milliseconds).
static const int kSamplingIntervalMs = 1;
+ // Callback from Log; stops profiling in case of insufficient resources.
+ void LogFailure();
+
private:
+ Logger();
+ ~Logger();
// Emits the profiler's first message.
- static void ProfilerBeginEvent();
+ void ProfilerBeginEvent();
// Emits callback event messages.
- static void CallbackEventInternal(const char* prefix,
- const char* name,
- Address entry_point);
+ void CallbackEventInternal(const char* prefix,
+ const char* name,
+ Address entry_point);
// Internal configurable move event.
- static void MoveEventInternal(LogEventsAndTags event,
- Address from,
- Address to);
+ void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
// Internal configurable delete event.
- static void DeleteEventInternal(LogEventsAndTags event,
- Address from);
+ void DeleteEventInternal(LogEventsAndTags event, Address from);
// Emits the source code of a regexp. Used by regexp events.
- static void LogRegExpSource(Handle<JSRegExp> regexp);
+ void LogRegExpSource(Handle<JSRegExp> regexp);
// Used for logging stubs found in the snapshot.
- static void LogCodeObject(Object* code_object);
+ void LogCodeObject(Object* code_object);
// Emits general information about generated code.
- static void LogCodeInfo();
+ void LogCodeInfo();
// Handles code creation when low-level profiling is active.
- static void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
+ void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
// Emits a profiler tick event. Used by the profiler thread.
- static void TickEvent(TickSample* sample, bool overflow);
+ void TickEvent(TickSample* sample, bool overflow);
- static void ApiEvent(const char* name, ...);
+ void ApiEvent(const char* name, ...);
// Logs a StringEvent regardless of whether FLAG_log is true.
- static void UncheckedStringEvent(const char* name, const char* value);
+ void UncheckedStringEvent(const char* name, const char* value);
// Logs an IntEvent regardless of whether FLAG_log is true.
- static void UncheckedIntEvent(const char* name, int value);
- static void UncheckedIntPtrTEvent(const char* name, intptr_t value);
-
- // Stops logging and profiling in case of insufficient resources.
- static void StopLoggingAndProfiling();
+ void UncheckedIntEvent(const char* name, int value);
+ void UncheckedIntPtrTEvent(const char* name, intptr_t value);
// Returns whether profiler's sampler is active.
- static bool IsProfilerSamplerActive();
+ bool IsProfilerSamplerActive();
// The sampler used by the profiler and the sliding state window.
- static Ticker* ticker_;
+ Ticker* ticker_;
// When the statistical profile is active, profiler_
// points to a Profiler that handles collection
// of samples.
- static Profiler* profiler_;
+ Profiler* profiler_;
// SlidingStateWindow instance keeping a sliding window of the most
// recent VM states.
- static SlidingStateWindow* sliding_state_window_;
+ SlidingStateWindow* sliding_state_window_;
+
+ // An array of log events names.
+ const char* const* log_events_;
// Internal implementation classes with access to
// private members.
friend class EventLog;
+ friend class Isolate;
+ friend class LogMessageBuilder;
friend class TimeLog;
friend class Profiler;
friend class SlidingStateWindow;
friend class LoggerTestHelper;
- static int logging_nesting_;
- static int cpu_profiler_nesting_;
- static int heap_profiler_nesting_;
+
+ int logging_nesting_;
+ int cpu_profiler_nesting_;
+ int heap_profiler_nesting_;
+
+ Log* log_;
+
+ // Guards against multiple calls to TearDown() that can happen in some tests.
+ // 'true' between Setup() and TearDown().
+ bool is_initialized_;
+
+ // Support for 'incremental addresses' in compressed logs:
+ // LogMessageBuilder::AppendAddress(Address addr)
+ Address last_address_;
+ // Logger::TickEvent(...)
+ Address prev_sp_;
+ Address prev_function_;
+ // Logger::MoveEventInternal(...)
+ Address prev_to_;
+ // Logger::FunctionCreateEvent(...)
+ Address prev_code_;
friend class CpuProfiler;
#else
- static bool is_logging() { return false; }
+ bool is_logging() { return false; }
#endif
};
+// Process-wide registry of samplers.
+class SamplerRegistry : public AllStatic {
+ public:
+ enum State {
+ HAS_NO_SAMPLERS,
+ HAS_SAMPLERS,
+ HAS_CPU_PROFILING_SAMPLERS
+ };
+
+ typedef void (*VisitSampler)(Sampler*, void*);
+
+ static State GetState();
+
+ // Iterates over all active samplers while holding the internal lock.
+ // Returns whether there are any active samplers.
+ static bool IterateActiveSamplers(VisitSampler func, void* param);
+
+ // Adds/Removes an active sampler.
+ static void AddActiveSampler(Sampler* sampler);
+ static void RemoveActiveSampler(Sampler* sampler);
+
+ private:
+ static bool ActiveSamplersExist() {
+ return active_samplers_ != NULL && !active_samplers_->is_empty();
+ }
+
+ static Mutex* mutex_; // Protects the state below.
+ static List<Sampler*>* active_samplers_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
+};
+
+
// Class that extracts a stack trace; used for profiling.
class StackTracer : public AllStatic {
public:
- static void Trace(TickSample* sample);
+ static void Trace(Isolate* isolate, TickSample* sample);
};
} } // namespace v8::internal
// -------------------------------------------------------------------------
// MarkCompactCollector
-bool MarkCompactCollector::force_compaction_ = false;
-bool MarkCompactCollector::compacting_collection_ = false;
-bool MarkCompactCollector::compact_on_next_gc_ = false;
-
-int MarkCompactCollector::previous_marked_count_ = 0;
-GCTracer* MarkCompactCollector::tracer_ = NULL;
-
-
+MarkCompactCollector::MarkCompactCollector() : // NOLINT
+#ifdef DEBUG
+ state_(IDLE),
+#endif
+ force_compaction_(false),
+ compacting_collection_(false),
+ compact_on_next_gc_(false),
+ previous_marked_count_(0),
+ tracer_(NULL),
#ifdef DEBUG
-MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
-
-// Counters used for debugging the marking phase of mark-compact or mark-sweep
-// collection.
-int MarkCompactCollector::live_bytes_ = 0;
-int MarkCompactCollector::live_young_objects_size_ = 0;
-int MarkCompactCollector::live_old_data_objects_size_ = 0;
-int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
-int MarkCompactCollector::live_code_objects_size_ = 0;
-int MarkCompactCollector::live_map_objects_size_ = 0;
-int MarkCompactCollector::live_cell_objects_size_ = 0;
-int MarkCompactCollector::live_lo_objects_size_ = 0;
+ live_young_objects_size_(0),
+ live_old_pointer_objects_size_(0),
+ live_old_data_objects_size_(0),
+ live_code_objects_size_(0),
+ live_map_objects_size_(0),
+ live_cell_objects_size_(0),
+ live_lo_objects_size_(0),
+ live_bytes_(0),
#endif
+ heap_(NULL),
+ code_flusher_(NULL) { }
void MarkCompactCollector::CollectGarbage() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
- Heap::MarkMapPointersAsEncoded(true);
+ heap_->MarkMapPointersAsEncoded(true);
UpdatePointers();
- Heap::MarkMapPointersAsEncoded(false);
- PcToCodeCache::FlushPcToCodeCache();
+ heap_->MarkMapPointersAsEncoded(false);
+ heap_->isolate()->pc_to_code_cache()->Flush();
RelocateObjects();
} else {
SweepSpaces();
- PcToCodeCache::FlushPcToCodeCache();
+ heap_->isolate()->pc_to_code_cache()->Flush();
}
Finish();
compact_on_next_gc_ = false;
if (FLAG_never_compact) compacting_collection_ = false;
- if (!Heap::map_space()->MapPointersEncodable())
+ if (!HEAP->map_space()->MapPointersEncodable())
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
- StubCache::Clear();
+ Isolate::Current()->stub_cache()->Clear();
- ExternalStringTable::CleanUp();
+ heap_->external_string_table_.CleanUp();
// If we've just compacted old space there's no reason to check the
// fragmentation limit. Just return.
// and continue with marking. This process repeats until all reachable
// objects have been marked.
-static MarkingStack marking_stack;
-
-class FlushCode : public AllStatic {
+class CodeFlusher {
public:
- static void AddCandidate(SharedFunctionInfo* shared_info) {
+ explicit CodeFlusher(Isolate* isolate)
+ : isolate_(isolate),
+ jsfunction_candidates_head_(NULL),
+ shared_function_info_candidates_head_(NULL) {}
+
+ void AddCandidate(SharedFunctionInfo* shared_info) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
}
-
- static void AddCandidate(JSFunction* function) {
+ void AddCandidate(JSFunction* function) {
ASSERT(function->unchecked_code() ==
function->unchecked_shared()->unchecked_code());
jsfunction_candidates_head_ = function;
}
-
- static void ProcessCandidates() {
+ void ProcessCandidates() {
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
private:
- static void ProcessJSFunctionCandidates() {
- Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+ void ProcessJSFunctionCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::LazyCompile);
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
}
- static void ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+ void ProcessSharedFunctionInfoCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::LazyCompile);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
shared_function_info_candidates_head_ = NULL;
}
-
static JSFunction** GetNextCandidateField(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
candidate->address() + JSFunction::kCodeEntryOffset);
}
-
static JSFunction* GetNextCandidate(JSFunction* candidate) {
return *GetNextCandidateField(candidate);
}
-
static void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
*GetNextCandidateField(candidate) = next_candidate;
}
-
STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
-
static SharedFunctionInfo** GetNextCandidateField(
SharedFunctionInfo* candidate) {
Code* code = candidate->unchecked_code();
code->address() + Code::kHeaderPaddingStart);
}
-
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
return *GetNextCandidateField(candidate);
}
-
static void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
*GetNextCandidateField(candidate) = next_candidate;
}
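+ // The GetNextCandidateField() overloads above thread a singly linked list
+ // of candidates through slots that can be restored when the list is
+ // processed: a JSFunction's code entry slot, and the unused padding at the
+ // start of a SharedFunctionInfo's Code header (the STATIC_ASSERT above
+ // guarantees the padding can hold a pointer). This way no allocation is
+ // needed while gathering candidates during GC.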
- static JSFunction* jsfunction_candidates_head_;
+ Isolate* isolate_;
+ JSFunction* jsfunction_candidates_head_;
+ SharedFunctionInfo* shared_function_info_candidates_head_;
- static SharedFunctionInfo* shared_function_info_candidates_head_;
+ DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
-JSFunction* FlushCode::jsfunction_candidates_head_ = NULL;
-SharedFunctionInfo* FlushCode::shared_function_info_candidates_head_ = NULL;
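+// The collector owns its CodeFlusher. Presumably it is allocated when code
+// flushing is enabled (EnableCodeFlushing() is outside this hunk) and is
+// released in the destructor below, together with the collector itself.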
+MarkCompactCollector::~MarkCompactCollector() {
+ if (code_flusher_ != NULL) {
+ delete code_flusher_;
+ code_flusher_ = NULL;
+ }
+}
+
static inline HeapObject* ShortCircuitConsString(Object** p) {
// Optimization: If the heap object pointed to by p is a non-symbol
- // cons string whose right substring is Heap::empty_string, update
+ // cons string whose right substring is HEAP->empty_string, update
// it in place to its left substring. Return the updated value.
//
// Here we assume that if we change *p, we replace it with a heap object
//
// The check performed is:
// object->IsConsString() && !object->IsSymbol() &&
- // (ConsString::cast(object)->second() == Heap::empty_string())
+ // (ConsString::cast(object)->second() == HEAP->empty_string())
// except the maps for the object and its possible substrings might be
// marked.
HeapObject* object = HeapObject::cast(*p);
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
- if (second != Heap::raw_unchecked_empty_string()) {
+ Heap* heap = map_word.ToMap()->heap();
+ if (second != heap->raw_unchecked_empty_string()) {
return object;
}
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
- if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
+ if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
*p = first;
return HeapObject::cast(first);
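+// Illustration: given a cons string c with c->first() == "abc" and
+// c->second() == the empty string, the slot *p is rewritten to point at
+// "abc" directly (subject to the new-space dirty-marks restriction above),
+// and marking continues on the left substring instead of the cons cell.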
table_.GetVisitor(map)(map, obj);
}
- static void EnableCodeFlushing(bool enabled) {
- if (enabled) {
- table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoAndFlushCode);
-
- } else {
- table_.Register(kVisitJSFunction, &VisitJSFunction);
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoGeneric);
- }
- }
-
static void Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticMarkingVisitor,
kVisitStructGeneric>();
}
- INLINE(static void VisitPointer(Object** p)) {
- MarkObjectByPointer(p);
+ INLINE(static void VisitPointer(Heap* heap, Object** p)) {
+ MarkObjectByPointer(heap, p);
}
- INLINE(static void VisitPointers(Object** start, Object** end)) {
+ INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(start, end)) return;
+ if (VisitUnmarkedObjects(heap, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
}
static inline void VisitCodeTarget(RelocInfo* rinfo) {
if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
// Please note targets for cleared inline caches do not have to be
- // marked since they are contained in Heap::non_monomorphic_cache().
+ // marked since they are contained in HEAP->non_monomorphic_cache().
} else {
- MarkCompactCollector::MarkObject(code);
+ HEAP->mark_compact_collector()->MarkObject(code);
}
}
ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
Object* cell = rinfo->target_cell();
Object* old_cell = cell;
- VisitPointer(&cell);
+ VisitPointer(HEAP, &cell);
if (cell != old_cell) {
rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
}
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence()));
HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
- MarkCompactCollector::MarkObject(code);
+ HEAP->mark_compact_collector()->MarkObject(code);
}
// Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(Object** p)) {
+ INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p);
- MarkCompactCollector::MarkObject(object);
+ heap->mark_compact_collector()->MarkObject(object);
}
+
// Visit an unmarked object.
static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
- ASSERT(Heap::Contains(obj));
+ ASSERT(HEAP->Contains(obj));
ASSERT(!obj->IsMarked());
#endif
Map* map = obj->map();
- MarkCompactCollector::SetMark(obj);
+ MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+ collector->SetMark(obj);
// Mark the map pointer and the body.
- MarkCompactCollector::MarkObject(map);
+ collector->MarkObject(map);
IterateBody(map, obj);
}
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
+ static inline bool VisitUnmarkedObjects(Heap* heap,
+ Object** start,
+ Object** end) {
// Return false if we are close to the stack limit.
- StackLimitCheck check;
+ StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
// Visit the unmarked objects.
void> StructObjectVisitor;
static void VisitCode(Map* map, HeapObject* object) {
- reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
+ reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
+ map->heap());
}
// Code flushing support.
static const int kCodeAgeThreshold = 5;
inline static bool HasSourceCode(SharedFunctionInfo* info) {
- Object* undefined = Heap::raw_unchecked_undefined_value();
+ Object* undefined = HEAP->raw_unchecked_undefined_value();
return (info->script() != undefined) &&
(reinterpret_cast<Script*>(info->script())->source() != undefined);
}
inline static bool IsCompiled(JSFunction* function) {
- return
- function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ return function->unchecked_code() !=
+ Isolate::Current()->builtins()->builtin(Builtins::LazyCompile);
}
-
inline static bool IsCompiled(SharedFunctionInfo* function) {
- return
- function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ return function->unchecked_code() !=
+ Isolate::Current()->builtins()->builtin(Builtins::LazyCompile);
}
inline static bool IsFlushable(JSFunction* function) {
}
- static bool FlushCodeForFunction(JSFunction* function) {
+ static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
if (!IsFlushable(function)) return false;
// This function's code looks flushable. But we have to postpone the
// SharedFunctionInfo because some of them might be optimized.
// That would make the nonoptimized version of the code nonflushable,
// because it is required for bailing out from optimized code.
- FlushCode::AddCandidate(function);
+ heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
return true;
}
if (!ctx->IsHeapObject()) return false;
Map* map = SafeMap(ctx);
- if (!(map == Heap::raw_unchecked_context_map() ||
- map == Heap::raw_unchecked_catch_context_map() ||
- map == Heap::raw_unchecked_global_context_map())) {
+ if (!(map == HEAP->raw_unchecked_context_map() ||
+ map == HEAP->raw_unchecked_catch_context_map() ||
+ map == HEAP->raw_unchecked_global_context_map())) {
return false;
}
static void VisitSharedFunctionInfoAndFlushCode(Map* map,
HeapObject* object) {
+ MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+ if (!collector->is_code_flushing_enabled()) {
+ VisitSharedFunctionInfoGeneric(map, object);
+ return;
+ }
VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
}
static void VisitSharedFunctionInfoAndFlushCodeGeneric(
Map* map, HeapObject* object, bool known_flush_code_candidate) {
+ Heap* heap = map->heap();
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
if (!known_flush_code_candidate) {
known_flush_code_candidate = IsFlushable(shared);
- if (known_flush_code_candidate) FlushCode::AddCandidate(shared);
+ if (known_flush_code_candidate) {
+ heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
+ }
}
- VisitSharedFunctionInfoFields(object, known_flush_code_candidate);
+ VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
}
- static void VisitCodeEntry(Address entry_address) {
+ static void VisitCodeEntry(Heap* heap, Address entry_address) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
- VisitPointer(&code);
+ VisitPointer(heap, &code);
if (code != old_code) {
Memory::Address_at(entry_address) =
reinterpret_cast<Code*>(code)->entry();
static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
+ Heap* heap = map->heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (!collector->is_code_flushing_enabled()) {
+ VisitJSFunction(map, object);
+ return;
+ }
+
JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
// The function must have a valid context and not be a builtin.
bool flush_code_candidate = false;
if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
- flush_code_candidate = FlushCodeForFunction(jsfunction);
+ flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
}
if (!flush_code_candidate) {
- MarkCompactCollector::MarkObject(
- jsfunction->unchecked_shared()->unchecked_code());
+ collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
// For optimized functions we should retain both non-optimized version
i < count;
i++) {
JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
- MarkCompactCollector::MarkObject(
- inlined->unchecked_shared()->unchecked_code());
+ collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
}
}
}
static inline void VisitJSFunctionFields(Map* map,
JSFunction* object,
bool flush_code_candidate) {
- VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+ Heap* heap = map->heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+
+ VisitPointers(heap,
+ SLOT_ADDR(object, JSFunction::kPropertiesOffset),
SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
if (!flush_code_candidate) {
- VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+ VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
} else {
// Don't visit code object.
SharedFunctionInfo* shared_info = object->unchecked_shared();
if (!shared_info->IsMarked()) {
Map* shared_info_map = shared_info->map();
- MarkCompactCollector::SetMark(shared_info);
- MarkCompactCollector::MarkObject(shared_info_map);
+ collector->SetMark(shared_info);
+ collector->MarkObject(shared_info_map);
VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
shared_info,
true);
}
}
- VisitPointers(SLOT_ADDR(object,
+ VisitPointers(heap,
+ SLOT_ADDR(object,
JSFunction::kCodeEntryOffset + kPointerSize),
SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
}
- static void VisitSharedFunctionInfoFields(HeapObject* object,
+ static void VisitSharedFunctionInfoFields(Heap* heap,
+ HeapObject* object,
bool flush_code_candidate) {
- VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
+ VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
if (!flush_code_candidate) {
- VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
+ VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
}
- VisitPointers(SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
+ VisitPointers(heap,
+ SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
SLOT_ADDR(object, SharedFunctionInfo::kSize));
}
class MarkingVisitor : public ObjectVisitor {
public:
+ explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
+
void VisitPointer(Object** p) {
- StaticMarkingVisitor::VisitPointer(p);
+ StaticMarkingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
- StaticMarkingVisitor::VisitPointers(start, end);
+ StaticMarkingVisitor::VisitPointers(heap_, start, end);
}
void VisitCodeTarget(RelocInfo* rinfo) {
void VisitDebugTarget(RelocInfo* rinfo) {
StaticMarkingVisitor::VisitDebugTarget(rinfo);
}
+
+ private:
+ Heap* heap_;
};
class CodeMarkingVisitor : public ThreadVisitor {
public:
+ explicit CodeMarkingVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
void VisitThread(ThreadLocalTop* top) {
for (StackFrameIterator it(top); !it.done(); it.Advance()) {
- MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
+ collector_->MarkObject(it.frame()->unchecked_code());
}
}
+
+ private:
+ MarkCompactCollector* collector_;
};
class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
public:
+ explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) VisitPointer(p);
}
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- MarkCompactCollector::MarkObject(shared->unchecked_code());
- MarkCompactCollector::MarkObject(shared);
+ collector_->MarkObject(shared->unchecked_code());
+ collector_->MarkObject(shared);
}
}
+
+ private:
+ MarkCompactCollector* collector_;
};
void MarkCompactCollector::PrepareForCodeFlushing() {
+ ASSERT(heap_ == Isolate::Current()->heap());
+
if (!FLAG_flush_code) {
- StaticMarkingVisitor::EnableCodeFlushing(false);
+ EnableCodeFlushing(false);
return;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::IsLoaded() || Debug::has_break_points()) {
- StaticMarkingVisitor::EnableCodeFlushing(false);
+ if (heap_->isolate()->debug()->IsLoaded() ||
+ heap_->isolate()->debug()->has_break_points()) {
+ EnableCodeFlushing(false);
return;
}
#endif
- StaticMarkingVisitor::EnableCodeFlushing(true);
+ EnableCodeFlushing(true);
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
- MarkObject(Heap::raw_unchecked_empty_descriptor_array());
+ MarkObject(heap_->raw_unchecked_empty_descriptor_array());
// Make sure we are not referencing the code from the stack.
+ ASSERT(this == heap_->mark_compact_collector());
for (StackFrameIterator it; !it.done(); it.Advance()) {
MarkObject(it.frame()->unchecked_code());
}
// Iterate the archived stacks in all threads to check if
// the code is referenced.
- CodeMarkingVisitor code_marking_visitor;
- ThreadManager::IterateArchivedThreads(&code_marking_visitor);
+ CodeMarkingVisitor code_marking_visitor(this);
+ heap_->isolate()->thread_manager()->IterateArchivedThreads(
+ &code_marking_visitor);
- SharedFunctionInfoMarkingVisitor visitor;
- CompilationCache::IterateFunctions(&visitor);
- HandleScopeImplementer::Iterate(&visitor);
+ SharedFunctionInfoMarkingVisitor visitor(this);
+ heap_->isolate()->compilation_cache()->IterateFunctions(&visitor);
+ heap_->isolate()->handle_scope_implementer()->Iterate(&visitor);
ProcessMarkingStack();
}
// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
public:
+ explicit RootMarkingVisitor(Heap* heap)
+ : collector_(heap->mark_compact_collector()) { }
+
void VisitPointer(Object** p) {
MarkObjectByPointer(p);
}
Map* map = object->map();
// Mark the object.
- MarkCompactCollector::SetMark(object);
+ collector_->SetMark(object);
// Mark the map pointer and body, and push them on the marking stack.
- MarkCompactCollector::MarkObject(map);
+ collector_->MarkObject(map);
StaticMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- MarkCompactCollector::EmptyMarkingStack();
+ collector_->EmptyMarkingStack();
}
+
+ MarkCompactCollector* collector_;
};
// Since no objects have yet been moved we can safely access the map of
// the object.
if ((*p)->IsExternalString()) {
- Heap::FinalizeExternalString(String::cast(*p));
+ HEAP->FinalizeExternalString(String::cast(*p));
}
// Set the entry to null_value (as deleted).
- *p = Heap::raw_unchecked_null_value();
+ *p = HEAP->raw_unchecked_null_value();
pointers_removed_++;
}
}
void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
ASSERT(!object->IsMarked());
- ASSERT(Heap::Contains(object));
+ ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
if (FLAG_cleanup_caches_in_maps_at_gc) {
- map->ClearCodeCache();
+ map->ClearCodeCache(heap_);
}
SetMark(map);
if (FLAG_collect_maps &&
map->instance_type() <= JS_FUNCTION_TYPE) {
MarkMapContents(map);
} else {
- marking_stack.Push(map);
+ marking_stack_.Push(map);
}
} else {
SetMark(object);
- marking_stack.Push(object);
+ marking_stack_.Push(object);
}
}
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
- StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
+ StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
}
DescriptorArray* descriptors) {
if (descriptors->IsMarked()) return;
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
+ ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
SetMark(descriptors);
FixedArray* contents = reinterpret_cast<FixedArray*>(
HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
if (object->IsHeapObject() && !object->IsMarked()) {
SetMark(object);
- marking_stack.Push(object);
+ marking_stack_.Push(object);
}
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
- marking_stack.Push(descriptors);
+ marking_stack_.Push(descriptors);
}
void MarkCompactCollector::CreateBackPointers() {
- HeapObjectIterator iterator(Heap::map_space());
+ HeapObjectIterator iterator(HEAP->map_space());
for (HeapObject* next_object = iterator.next();
next_object != NULL; next_object = iterator.next()) {
if (next_object->IsMap()) { // Could also be ByteArray on free list.
map->instance_type() <= JS_FUNCTION_TYPE) {
map->CreateBackPointers();
} else {
- ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
+ ASSERT(map->instance_descriptors() == HEAP->empty_descriptor_array());
}
}
}
}
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template<class T>
-static void ScanOverflowedObjects(T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- ASSERT(!marking_stack.is_full());
-
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
- if (object->IsOverflowed()) {
- object->ClearOverflow();
- ASSERT(object->IsMarked());
- ASSERT(Heap::Contains(object));
- marking_stack.Push(object);
- if (marking_stack.is_full()) return;
+class OverflowedObjectsScanner : public AllStatic {
+ public:
+ // Fill the marking stack with overflowed objects returned by the given
+ // iterator. Stop when the marking stack is filled or the end of the space
+ // is reached, whichever comes first.
+ template<class T>
+ static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
+ T* it) {
+ // The caller should ensure that the marking stack is initially not full,
+ // so that we don't waste effort pointlessly scanning for objects.
+ ASSERT(!collector->marking_stack_.is_full());
+
+ for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
+ if (object->IsOverflowed()) {
+ object->ClearOverflow();
+ ASSERT(object->IsMarked());
+ ASSERT(HEAP->Contains(object));
+ collector->marking_stack_.Push(object);
+ if (collector->marking_stack_.is_full()) return;
+ }
}
}
-}
+};
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
+ SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
// Explicitly mark the prefix.
- MarkingVisitor marker;
+ MarkingVisitor marker(heap_);
symbol_table->IteratePrefix(&marker);
ProcessMarkingStack();
}
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
- Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
+ HEAP->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
// Handle the symbol table specially.
MarkSymbolTable();
// There may be overflowed objects in the heap. Visit them now.
- while (marking_stack.overflowed()) {
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
void MarkCompactCollector::MarkObjectGroups() {
- List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
+ List<ObjectGroup*>* object_groups =
+ heap_->isolate()->global_handles()->object_groups();
for (int i = 0; i < object_groups->length(); i++) {
ObjectGroup* entry = object_groups->at(i);
void MarkCompactCollector::MarkImplicitRefGroups() {
- List<ImplicitRefGroup*>* ref_groups = GlobalHandles::ImplicitRefGroups();
+ List<ImplicitRefGroup*>* ref_groups =
+ heap_->isolate()->global_handles()->implicit_ref_groups();
for (int i = 0; i < ref_groups->length(); i++) {
ImplicitRefGroup* entry = ref_groups->at(i);
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
- while (!marking_stack.is_empty()) {
- HeapObject* object = marking_stack.Pop();
+ while (!marking_stack_.is_empty()) {
+ HeapObject* object = marking_stack_.Pop();
ASSERT(object->IsHeapObject());
- ASSERT(Heap::Contains(object));
+ ASSERT(heap_->Contains(object));
ASSERT(object->IsMarked());
ASSERT(!object->IsOverflowed());
// overflowed objects in the heap, so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
- ASSERT(marking_stack.overflowed());
+ ASSERT(marking_stack_.overflowed());
- SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&new_it);
- if (marking_stack.is_full()) return;
+ SemiSpaceIterator new_it(HEAP->new_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+ HeapObjectIterator old_pointer_it(HEAP->old_pointer_space(),
&OverflowObjectSize);
- ScanOverflowedObjects(&old_pointer_it);
- if (marking_stack.is_full()) return;
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&old_data_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator old_data_it(HEAP->old_data_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&code_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator code_it(HEAP->code_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&map_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator map_it(HEAP->map_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&cell_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator cell_it(HEAP->cell_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
+ if (marking_stack_.is_full()) return;
- LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&lo_it);
- if (marking_stack.is_full()) return;
+ LargeObjectIterator lo_it(HEAP->lo_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
+ if (marking_stack_.is_full()) return;
- marking_stack.clear_overflowed();
+ marking_stack_.clear_overflowed();
}
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack() {
EmptyMarkingStack();
- while (marking_stack.overflowed()) {
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
void MarkCompactCollector::ProcessExternalMarking() {
bool work_to_do = true;
- ASSERT(marking_stack.is_empty());
+ ASSERT(marking_stack_.is_empty());
while (work_to_do) {
MarkObjectGroups();
MarkImplicitRefGroups();
- work_to_do = !marking_stack.is_empty();
+ work_to_do = !marking_stack_.is_empty();
ProcessMarkingStack();
}
}
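ProcessExternalMarking runs to a fixed point: marking the members of one object group can make another group's holder reachable, so the loop alternates between consulting the external groups and draining the marking stack until a full round marks nothing new. A standalone sketch of that fixed-point shape (hypothetical types; the real groups come from GlobalHandles):

    #include <cstddef>
    #include <set>
    #include <utility>
    #include <vector>

    typedef int Obj;
    typedef std::pair<Obj, Obj> Group;  // (holder, member): member lives if holder does

    void ProcessExternal(std::set<Obj>* marked, const std::vector<Group>& groups) {
      bool work_to_do = true;
      while (work_to_do) {
        work_to_do = false;
        for (std::size_t i = 0; i < groups.size(); ++i) {
          if (marked->count(groups[i].first) != 0 &&
              marked->insert(groups[i].second).second) {
            // A new object was marked; in the collector its transitive
            // closure is traced at this point (ProcessMarkingStack), which
            // may make further holders reachable, so run another round.
            work_to_do = true;
          }
        }
      }
    }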
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(heap_->isolate());
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
#endif
// The to space contains live objects, the from space is used as a marking
// stack.
- marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
- Heap::new_space()->FromSpaceHigh());
+ marking_stack_.Initialize(heap_->new_space()->FromSpaceLow(),
+ heap_->new_space()->FromSpaceHigh());
- ASSERT(!marking_stack.overflowed());
+ ASSERT(!marking_stack_.overflowed());
PrepareForCodeFlushing();
- RootMarkingVisitor root_visitor;
+ RootMarkingVisitor root_visitor(heap_);
MarkRoots(&root_visitor);
// The objects reachable from the roots are marked, yet unreachable
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
- GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
+ heap_->isolate()->global_handles()->IdentifyWeakHandles(
+ &IsUnmarkedHeapObject);
// Then we mark the objects and process the transitive closure.
- GlobalHandles::IterateWeakRoots(&root_visitor);
- while (marking_stack.overflowed()) {
+ heap_->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
+ SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
SymbolTableCleaner v;
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
- ExternalStringTable::Iterate(&v);
- ExternalStringTable::CleanUp();
+ heap_->external_string_table_.Iterate(&v);
+ heap_->external_string_table_.CleanUp();
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- Heap::ProcessWeakReferences(&mark_compact_object_retainer);
+ heap_->ProcessWeakReferences(&mark_compact_object_retainer);
// Remove object groups after marking phase.
- GlobalHandles::RemoveObjectGroups();
- GlobalHandles::RemoveImplicitRefGroups();
+ heap_->isolate()->global_handles()->RemoveObjectGroups();
+ heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
// Flush code from collected candidates.
- FlushCode::ProcessCandidates();
+ if (is_code_flushing_enabled()) {
+ code_flusher_->ProcessCandidates();
+ }
// Clean up dead objects from the runtime profiler.
- RuntimeProfiler::RemoveDeadSamples();
+ heap_->isolate()->runtime_profiler()->RemoveDeadSamples();
}
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
- if (Heap::new_space()->Contains(obj)) {
+ if (HEAP->new_space()->Contains(obj)) {
live_young_objects_size_ += obj->Size();
- } else if (Heap::map_space()->Contains(obj)) {
+ } else if (HEAP->map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_size_ += obj->Size();
- } else if (Heap::cell_space()->Contains(obj)) {
+ } else if (HEAP->cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
live_cell_objects_size_ += obj->Size();
- } else if (Heap::old_pointer_space()->Contains(obj)) {
+ } else if (HEAP->old_pointer_space()->Contains(obj)) {
live_old_pointer_objects_size_ += obj->Size();
- } else if (Heap::old_data_space()->Contains(obj)) {
+ } else if (HEAP->old_data_space()->Contains(obj)) {
live_old_data_objects_size_ += obj->Size();
- } else if (Heap::code_space()->Contains(obj)) {
+ } else if (HEAP->code_space()->Contains(obj)) {
live_code_objects_size_ += obj->Size();
- } else if (Heap::lo_space()->Contains(obj)) {
+ } else if (HEAP->lo_space()->Contains(obj)) {
live_lo_objects_size_ += obj->Size();
} else {
UNREACHABLE();
compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
// Deallocate unmarked objects and clear marked bits for marked objects.
- Heap::lo_space()->FreeUnmarkedObjects();
+ HEAP->lo_space()->FreeUnmarkedObjects();
}
void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(Heap::map_space(), &SizeOfMarkedObject);
+ HeapObjectIterator map_iterator(HEAP->map_space(), &SizeOfMarkedObject);
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
// This test will always be false on the first iteration.
if (on_dead_path && current->IsMarked()) {
on_dead_path = false;
- current->ClearNonLiveTransitions(real_prototype);
+ current->ClearNonLiveTransitions(heap_, real_prototype);
}
*HeapObject::RawField(current, Map::kPrototypeOffset) =
real_prototype;
// Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the old data space, large objects to large object
// space, and all others to the old pointer space.
-inline MaybeObject* MCAllocateFromNewSpace(HeapObject* object,
+inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
+ HeapObject* object,
int object_size) {
MaybeObject* forwarded;
- if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
forwarded = Failure::Exception();
} else {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
+ OldSpace* target_space = heap->TargetSpace(object);
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
forwarded = target_space->MCAllocateRaw(object_size);
}
Object* result;
if (!forwarded->ToObject(&result)) {
- result = Heap::new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
+ result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
}
return result;
}
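MCAllocateFromNewSpace shows the fallback idiom the compactor uses for promotion: try the preferred target space, and treat an allocation failure as the cue to take the infallible path, which here is new space, guaranteed to fit because the semispaces are of equal size. A minimal standalone sketch of that shape (hypothetical MaybeObject-like type):

    #include <cstddef>

    // Stands in for MaybeObject*: a NULL value plays the role of Failure.
    struct Maybe {
      void* value;
      bool ToObject(void** out) const {
        if (value == NULL) return false;
        *out = value;
        return true;
      }
    };

    void* AllocateWithFallback(Maybe preferred, void* infallible) {
      void* result;
      if (!preferred.ToObject(&result)) {
        result = infallible;  // cannot fail during compaction
      }
      return result;
    }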
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
+    Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::old_pointer_space()->MCAllocateRaw(object_size);
+ return heap->old_pointer_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::old_data_space()->MCAllocateRaw(object_size);
+ return heap->old_data_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::code_space()->MCAllocateRaw(object_size);
+ return heap->code_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::map_space()->MCAllocateRaw(object_size);
+ return heap->map_space()->MCAllocateRaw(object_size);
}
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(HeapObject* ignore,
- int object_size) {
- return Heap::cell_space()->MCAllocateRaw(object_size);
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
+ Heap* heap, HeapObject* ignore, int object_size) {
+ return heap->cell_space()->MCAllocateRaw(object_size);
}
// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
+inline void EncodeForwardingAddressInNewSpace(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* ignored) {
int offset =
- Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
- Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
+ heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
+ Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
HeapObject::cast(new_object)->address();
}
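New space needs no side table for forwarding addresses: the two semispaces have identical sizes, so the forwarding address of the live object at to-space offset K is simply stored at from-space offset K. A runnable standalone sketch of the mirrored-offset trick (sizes hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    int main() {
      const std::size_t kSemiSpaceSize = 1024;
      char to_space[kSemiSpaceSize];
      char from_space[kSemiSpaceSize];    // doubles as the forwarding table

      char* old_object = to_space + 128;  // a live object in to space
      char* new_object = to_space + 64;   // its address after compaction

      // Encode: store the new address at the mirrored from-space offset.
      std::size_t offset = old_object - to_space;
      std::memcpy(from_space + offset, &new_object, sizeof(new_object));

      // Decode: any later pass recovers the forwarding address the same way.
      char* forwarded;
      std::memcpy(&forwarded, from_space + offset, sizeof(forwarded));
      assert(forwarded == new_object);
      return 0;
    }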
// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
-inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
+inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* offset) {
template<MarkCompactCollector::AllocationFunction Alloc,
MarkCompactCollector::EncodingFunction Encode,
MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(Address start,
+inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
+ Address start,
Address end,
int* offset) {
// The start address of the current free region while sweeping the space.
HeapObject* object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ collector->tracer()->decrement_marked_count();
object_size = object->Size();
- // Allocation cannot fail, because we are compacting the space.
- Object* forwarded = Alloc(object, object_size)->ToObjectUnchecked();
- Encode(object, object_size, forwarded, offset);
+ Object* forwarded =
+ Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
+ Encode(collector->heap(), object, object_size, forwarded, offset);
#ifdef DEBUG
if (FLAG_gc_verbose) {
EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
EncodeForwardingAddressInNewSpace,
IgnoreNonLiveObject>(
- Heap::new_space()->bottom(),
- Heap::new_space()->top(),
+ this,
+ heap_->new_space()->bottom(),
+ heap_->new_space()->top(),
&ignored);
}
EncodeForwardingAddressesInRange<Alloc,
EncodeForwardingAddressInPagedSpace,
ProcessNonLive>(
+ this,
p->ObjectAreaStart(),
p->AllocationTop(),
&offset);
// to encounter pointers to dead objects during traversal of dirty regions we
// should clear them to avoid encountering them during the next dirty
// regions iteration.
-static void MigrateObject(Address dst,
+static void MigrateObject(Heap* heap,
+ Address dst,
Address src,
int size,
bool to_old_space) {
if (to_old_space) {
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
} else {
- Heap::CopyBlock(dst, src, size);
+ heap->CopyBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
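That final store is the entire forwarding scheme for this pass: once the payload is copied out, the first word of the dead source object holds its destination, so resolving a stale pointer later is a single load. A standalone sketch (hypothetical layout):

    #include <cassert>
    #include <cstring>

    int main() {
      char space[256];
      char* src = space;       // the evacuated object
      char* dst = space + 64;  // its new location

      std::memcpy(dst, src, 32);            // move the payload first
      std::memcpy(src, &dst, sizeof(dst));  // Memory::Address_at(src) = dst

      // A later pointer-update pass follows the forwarding word:
      char* forwarded;
      std::memcpy(&forwarded, src, sizeof(forwarded));
      assert(forwarded == dst);
      return 0;
    }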
class StaticPointersToNewGenUpdatingVisitor : public
StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
public:
- static inline void VisitPointer(Object** p) {
+ static inline void VisitPointer(Heap* heap, Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
- if (Heap::new_space()->Contains(obj)) {
- ASSERT(Heap::InFromSpace(*p));
+ if (heap->new_space()->Contains(obj)) {
+ ASSERT(heap->InFromSpace(*p));
*p = HeapObject::FromAddress(Memory::Address_at(old_addr));
}
}
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
public:
+ explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
+
void VisitPointer(Object** p) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
}
}
VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
+ private:
+ Heap* heap_;
};
if (!(*p)->IsHeapObject()) return;
Address old_addr = (*p)->address();
- ASSERT(Heap::InFromSpace(*p));
+ ASSERT(HEAP->InFromSpace(*p));
Address new_addr = Memory::Address_at(old_addr);
}
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
+static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
Address old_addr = HeapObject::cast(*p)->address();
Address new_addr = Memory::Address_at(old_addr);
return String::cast(HeapObject::FromAddress(new_addr));
}
-static bool TryPromoteObject(HeapObject* object, int object_size) {
+static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
Object* result;
- if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
MaybeObject* maybe_result =
- Heap::lo_space()->AllocateRawFixedArray(object_size);
+ heap->lo_space()->AllocateRawFixedArray(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size, true);
- MarkCompactCollector::tracer()->
+ MigrateObject(heap, target->address(), object->address(), object_size,
+ true);
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
} else {
- OldSpace* target_space = Heap::TargetSpace(object);
+ OldSpace* target_space = heap->TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
+ MigrateObject(heap,
+ target->address(),
object->address(),
object_size,
- target_space == Heap::old_pointer_space());
- MarkCompactCollector::tracer()->
+ target_space == heap->old_pointer_space());
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
}
-static void SweepNewSpace(NewSpace* space) {
- Heap::CheckNewSpaceExpansionCriteria();
+static void SweepNewSpace(Heap* heap, NewSpace* space) {
+ heap->CheckNewSpaceExpansionCriteria();
Address from_bottom = space->bottom();
Address from_top = space->top();
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
size = object->Size();
survivors_size += size;
// Aggressively promote young survivors to the old space.
- if (TryPromoteObject(object, size)) {
+ if (TryPromoteObject(heap, object, size)) {
continue;
}
// Allocation cannot fail at this point: semispaces are of equal size.
Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target)->address(),
+ MigrateObject(heap,
+ HeapObject::cast(target)->address(),
current,
size,
false);
}
// Second pass: find pointers to new space and update them.
- PointersToNewGenUpdatingVisitor updating_visitor;
+ PointersToNewGenUpdatingVisitor updating_visitor(heap);
// Update pointers in to space.
Address current = space->bottom();
}
// Update roots.
- Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+ heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
LiveObjectList::IterateElements(&updating_visitor);
// Update pointers in old spaces.
- Heap::IterateDirtyRegions(Heap::old_pointer_space(),
+ heap->IterateDirtyRegions(heap->old_pointer_space(),
&Heap::IteratePointersInDirtyRegion,
&UpdatePointerToNewGen,
- Heap::WATERMARK_SHOULD_BE_VALID);
+ heap->WATERMARK_SHOULD_BE_VALID);
- Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+ heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
- HeapObjectIterator cell_iterator(Heap::cell_space());
+ HeapObjectIterator cell_iterator(heap->cell_space());
for (HeapObject* cell = cell_iterator.next();
cell != NULL;
cell = cell_iterator.next()) {
}
// Update pointer from the global contexts list.
- updating_visitor.VisitPointer(Heap::global_contexts_list_address());
+ updating_visitor.VisitPointer(heap->global_contexts_list_address());
// Update pointers from external string table.
- Heap::UpdateNewSpaceReferencesInExternalStringTable(
+ heap->UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
// All pointers were updated. Update auxiliary allocation info.
- Heap::IncrementYoungSurvivorsCounter(survivors_size);
+ heap->IncrementYoungSurvivorsCounter(survivors_size);
space->set_age_mark(space->top());
// Update JSFunction pointers from the runtime profiler.
- RuntimeProfiler::UpdateSamplesAfterScavenge();
+ heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
}
-static void SweepSpace(PagedSpace* space) {
+static void SweepSpace(Heap* heap, PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find the longest sequences
object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
space->DeallocateBlock(free_start,
is_previous_alive = true;
}
} else {
- MarkCompactCollector::ReportDeleteIfNeeded(object);
+ heap->mark_compact_collector()->ReportDeleteIfNeeded(object);
if (is_previous_alive) { // Transition from live to free.
free_start = current;
is_previous_alive = false;
// Objects in the active semispace of the young generation may be
// relocated to the inactive semispace (if not promoted). Set the
// relocation info to the beginning of the inactive semispace.
- Heap::new_space()->MCResetRelocationInfo();
+ heap_->new_space()->MCResetRelocationInfo();
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
ReportDeleteIfNeeded>(
- Heap::old_pointer_space());
+ heap_->old_pointer_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
IgnoreNonLiveObject>(
- Heap::old_data_space());
+ heap_->old_data_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
ReportDeleteIfNeeded>(
- Heap::code_space());
+ heap_->code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
IgnoreNonLiveObject>(
- Heap::cell_space());
+ heap_->cell_space());
// Compute new space next to last after the old and code spaces have been
// non-live map pointers to get the sizes of non-live objects.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
IgnoreNonLiveObject>(
- Heap::map_space());
+ heap_->map_space());
// Write relocation info to the top page, so we can use it later. This is
// done after promoting objects from the new space so we get the correct
// allocation top.
- Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
- Heap::old_data_space()->MCWriteRelocationInfoToPage();
- Heap::code_space()->MCWriteRelocationInfoToPage();
- Heap::map_space()->MCWriteRelocationInfoToPage();
- Heap::cell_space()->MCWriteRelocationInfoToPage();
+ heap_->old_pointer_space()->MCWriteRelocationInfoToPage();
+ heap_->old_data_space()->MCWriteRelocationInfoToPage();
+ heap_->code_space()->MCWriteRelocationInfoToPage();
+ heap_->map_space()->MCWriteRelocationInfoToPage();
+ heap_->cell_space()->MCWriteRelocationInfoToPage();
}
class MapIterator : public HeapObjectIterator {
public:
- MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
+ MapIterator() : HeapObjectIterator(HEAP->map_space(), &SizeCallback) { }
explicit MapIterator(Address start)
- : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
+ : HeapObjectIterator(HEAP->map_space(), start, &SizeCallback) { }
private:
static int SizeCallback(HeapObject* unused) {
class MapCompact {
public:
- explicit MapCompact(int live_maps)
- : live_maps_(live_maps),
- to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
+ explicit MapCompact(Heap* heap, int live_maps)
+ : heap_(heap),
+ live_maps_(live_maps),
+ to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
map_to_evacuate_it_(to_evacuate_start_),
first_map_to_evacuate_(
reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
}
void UpdateMapPointersInRoots() {
- Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
- GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
- LiveObjectList::IterateElements(&map_updating_visitor_);
+ MapUpdatingVisitor map_updating_visitor;
+ heap_->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
+ heap_->isolate()->global_handles()->IterateWeakRoots(&map_updating_visitor);
+ LiveObjectList::IterateElements(&map_updating_visitor);
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
- ASSERT(space != Heap::map_space());
+ ASSERT(space != heap_->map_space());
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
- UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
+ UpdateMapPointersInRange(heap_, p->ObjectAreaStart(), p->AllocationTop());
}
}
void UpdateMapPointersInNewSpace() {
- NewSpace* space = Heap::new_space();
- UpdateMapPointersInRange(space->bottom(), space->top());
+ NewSpace* space = heap_->new_space();
+ UpdateMapPointersInRange(heap_, space->bottom(), space->top());
}
void UpdateMapPointersInLargeObjectSpace() {
- LargeObjectIterator it(Heap::lo_space());
+ LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- UpdateMapPointersInObject(obj);
+ UpdateMapPointersInObject(heap_, obj);
}
void Finish() {
- Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+ heap_->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
}
private:
+ Heap* heap_;
int live_maps_;
Address to_evacuate_start_;
MapIterator vacant_map_it_;
// Helper class for updating map pointers in HeapObjects.
class MapUpdatingVisitor: public ObjectVisitor {
public:
+ MapUpdatingVisitor() {}
+
void VisitPointer(Object** p) {
UpdateMapPointer(p);
}
}
};
- static MapUpdatingVisitor map_updating_visitor_;
-
static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
while (true) {
HeapObject* next = it->next();
ASSERT(Map::kSize % 4 == 0);
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
- map_to_evacuate->address(),
- Map::kSize);
+ map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
+ vacant_map->address(), map_to_evacuate->address(), Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
return new_map;
}
- static int UpdateMapPointersInObject(HeapObject* obj) {
+ static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
ASSERT(!obj->IsMarked());
Map* map = obj->map();
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap->map_space()->Contains(map));
MapWord map_word = map->map_word();
ASSERT(!map_word.IsMarked());
if (map_word.IsOverflowed()) {
Map* new_map = GetForwardedMap(map_word);
- ASSERT(Heap::map_space()->Contains(new_map));
+ ASSERT(heap->map_space()->Contains(new_map));
obj->set_map(new_map);
#ifdef DEBUG
}
int size = obj->SizeFromMap(map);
- obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
+ MapUpdatingVisitor map_updating_visitor;
+ obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
return size;
}
- static void UpdateMapPointersInRange(Address start, Address end) {
+ static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
HeapObject* object;
int size;
for (Address current = start; current < end; current += size) {
object = HeapObject::FromAddress(current);
- size = UpdateMapPointersInObject(object);
+ size = UpdateMapPointersInObject(heap, object);
ASSERT(size > 0);
}
}
#endif
};
-MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
-
void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_pointer_space());
- SweepSpace(Heap::old_data_space());
- SweepSpace(Heap::code_space());
- SweepSpace(Heap::cell_space());
+ SweepSpace(heap_, heap_->old_pointer_space());
+ SweepSpace(heap_, heap_->old_data_space());
+ SweepSpace(heap_, heap_->code_space());
+ SweepSpace(heap_, heap_->cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
- SweepNewSpace(Heap::new_space());
+ SweepNewSpace(heap_, heap_->new_space());
}
- SweepSpace(Heap::map_space());
+ SweepSpace(heap_, heap_->map_space());
- Heap::IterateDirtyRegions(Heap::map_space(),
- &Heap::IteratePointersInDirtyMapsRegion,
- &UpdatePointerToNewGen,
- Heap::WATERMARK_SHOULD_BE_VALID);
+ heap_->IterateDirtyRegions(heap_->map_space(),
+                             &Heap::IteratePointersInDirtyMapsRegion,
+ &UpdatePointerToNewGen,
+ heap_->WATERMARK_SHOULD_BE_VALID);
- intptr_t live_maps_size = Heap::map_space()->Size();
+ intptr_t live_maps_size = heap_->map_space()->Size();
int live_maps = static_cast<int>(live_maps_size / Map::kSize);
ASSERT(live_map_objects_size_ == live_maps_size);
- if (Heap::map_space()->NeedsCompaction(live_maps)) {
- MapCompact map_compact(live_maps);
+ if (heap_->map_space()->NeedsCompaction(live_maps)) {
+ MapCompact map_compact(heap_, live_maps);
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
- if (space == Heap::map_space()) continue;
+ if (space == heap_->map_space()) continue;
map_compact.UpdateMapPointersInPagedSpace(space);
}
map_compact.UpdateMapPointersInNewSpace();
int MarkCompactCollector::IterateLiveObjectsInRange(
Address start,
Address end,
- HeapObjectCallback size_func) {
+ LiveObjectCallback size_func) {
int live_objects_size = 0;
Address current = start;
while (current < end) {
} else if (encoded_map == kMultiFreeEncoding) {
current += Memory::int_at(current + kIntSize);
} else {
- int size = size_func(HeapObject::FromAddress(current));
+ int size = (this->*size_func)(HeapObject::FromAddress(current));
current += size;
live_objects_size += size;
}
}
-int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
- HeapObjectCallback size_f) {
+int MarkCompactCollector::IterateLiveObjects(
+ NewSpace* space, LiveObjectCallback size_f) {
ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}
-int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
- HeapObjectCallback size_f) {
+int MarkCompactCollector::IterateLiveObjects(
+ PagedSpace* space, LiveObjectCallback size_f) {
ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
int total = 0;
PageIterator it(space, PageIterator::PAGES_IN_USE);
// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
public:
+ explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
+
void VisitPointer(Object** p) {
UpdatePointer(p);
}
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
Address new_addr;
- ASSERT(!Heap::InFromSpace(obj));
+ ASSERT(!heap_->InFromSpace(obj));
- if (Heap::new_space()->Contains(obj)) {
+ if (heap_->new_space()->Contains(obj)) {
Address forwarding_pointer_addr =
- Heap::new_space()->FromSpaceLow() +
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+ heap_->new_space()->FromSpaceLow() +
+ heap_->new_space()->ToSpaceOffsetForAddress(old_addr);
new_addr = Memory::Address_at(forwarding_pointer_addr);
#ifdef DEBUG
- ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
- Heap::old_data_space()->Contains(new_addr) ||
- Heap::new_space()->FromSpaceContains(new_addr) ||
- Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
-
- if (Heap::new_space()->FromSpaceContains(new_addr)) {
- ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+ ASSERT(heap_->old_pointer_space()->Contains(new_addr) ||
+ heap_->old_data_space()->Contains(new_addr) ||
+ heap_->new_space()->FromSpaceContains(new_addr) ||
+ heap_->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+
+ if (heap_->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
}
#endif
- } else if (Heap::lo_space()->Contains(obj)) {
+ } else if (heap_->lo_space()->Contains(obj)) {
// Don't move objects in the large object space.
return;
}
#endif
}
+
+ Heap* heap_;
};
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
state_ = UPDATE_POINTERS;
#endif
- UpdatingVisitor updating_visitor;
- RuntimeProfiler::UpdateSamplesAfterCompact(&updating_visitor);
- Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
- GlobalHandles::IterateWeakRoots(&updating_visitor);
+ UpdatingVisitor updating_visitor(heap_);
+ heap_->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+ &updating_visitor);
+ heap_->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
+ heap_->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
// Update the pointer to the head of the weak list of global contexts.
- updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+ updating_visitor.VisitPointer(&heap_->global_contexts_list_);
LiveObjectList::IterateElements(&updating_visitor);
- int live_maps_size = IterateLiveObjects(Heap::map_space(),
- &UpdatePointersInOldObject);
- int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
- &UpdatePointersInOldObject);
- int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
- &UpdatePointersInOldObject);
- int live_codes_size = IterateLiveObjects(Heap::code_space(),
- &UpdatePointersInOldObject);
- int live_cells_size = IterateLiveObjects(Heap::cell_space(),
- &UpdatePointersInOldObject);
- int live_news_size = IterateLiveObjects(Heap::new_space(),
- &UpdatePointersInNewObject);
+ int live_maps_size = IterateLiveObjects(
+ heap_->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap_->old_pointer_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap_->old_data_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_codes_size = IterateLiveObjects(
+ heap_->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_cells_size = IterateLiveObjects(
+ heap_->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_news_size = IterateLiveObjects(
+ heap_->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
// Large objects do not move; the map word can be updated directly.
- LargeObjectIterator it(Heap::lo_space());
+ LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
UpdatePointersInNewObject(obj);
}
Address forwarded = GetForwardingAddressInOldSpace(old_map);
- ASSERT(Heap::map_space()->Contains(old_map));
- ASSERT(Heap::map_space()->Contains(forwarded));
+ ASSERT(heap_->map_space()->Contains(old_map));
+ ASSERT(heap_->map_space()->Contains(forwarded));
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
int obj_size = obj->SizeFromMap(old_map);
// Update pointers in the object body.
- UpdatingVisitor updating_visitor;
+ UpdatingVisitor updating_visitor(heap_);
obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
return obj_size;
}
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
// Decode the map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
+ ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// At this point, the first word of map_addr is also encoded; we cannot
// cast it to Map* using Map::cast.
#endif
// Update pointers in the object body.
- UpdatingVisitor updating_visitor;
+ UpdatingVisitor updating_visitor(heap_);
obj->IterateBody(type, obj_size, &updating_visitor);
return obj_size;
}
#endif
// Relocates objects, always relocate map objects first. Relocating
// objects in other space relies on map objects to get object size.
- int live_maps_size = IterateLiveObjects(Heap::map_space(),
- &RelocateMapObject);
- int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
- &RelocateOldPointerObject);
- int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
- &RelocateOldDataObject);
- int live_codes_size = IterateLiveObjects(Heap::code_space(),
- &RelocateCodeObject);
- int live_cells_size = IterateLiveObjects(Heap::cell_space(),
- &RelocateCellObject);
- int live_news_size = IterateLiveObjects(Heap::new_space(),
- &RelocateNewObject);
+ int live_maps_size = IterateLiveObjects(
+ heap_->map_space(), &MarkCompactCollector::RelocateMapObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap_->old_pointer_space(),
+ &MarkCompactCollector::RelocateOldPointerObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap_->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
+ int live_codes_size = IterateLiveObjects(
+ heap_->code_space(), &MarkCompactCollector::RelocateCodeObject);
+ int live_cells_size = IterateLiveObjects(
+ heap_->cell_space(), &MarkCompactCollector::RelocateCellObject);
+ int live_news_size = IterateLiveObjects(
+ heap_->new_space(), &MarkCompactCollector::RelocateNewObject);
USE(live_maps_size);
USE(live_pointer_olds_size);
ASSERT(live_news_size == live_young_objects_size_);
// Flip from and to spaces
- Heap::new_space()->Flip();
+ heap_->new_space()->Flip();
- Heap::new_space()->MCCommitRelocationInfo();
+ heap_->new_space()->MCCommitRelocationInfo();
// Set age_mark to bottom in to space
- Address mark = Heap::new_space()->bottom();
- Heap::new_space()->set_age_mark(mark);
+ Address mark = heap_->new_space()->bottom();
+ heap_->new_space()->set_age_mark(mark);
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
- Heap::CheckNewSpaceExpansionCriteria();
- Heap::IncrementYoungSurvivorsCounter(live_news_size);
+ heap_->CheckNewSpaceExpansionCriteria();
+ heap_->IncrementYoungSurvivorsCounter(live_news_size);
}
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
+ ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- Map::kSize);
+ heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ Map::kSize);
}
#ifdef DEBUG
PagedSpace* space) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(map_addr));
+ Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
+ ASSERT(heap_->map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer.
Address new_addr = GetForwardingAddressInOldSpace(obj);
if (new_addr != old_addr) {
// Move contents.
- if (space == Heap::old_data_space()) {
- Heap::MoveBlock(new_addr, old_addr, obj_size);
+ if (space == heap_->old_data_space()) {
+ heap_->MoveBlock(new_addr, old_addr, obj_size);
} else {
- Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
+ heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(SharedFunctionInfoMoveEvent(old_addr, new_addr));
+ PROFILE(heap_->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
+ return RelocateOldNonCodeObject(obj, heap_->old_pointer_space());
}
int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::old_data_space());
+ return RelocateOldNonCodeObject(obj, heap_->old_data_space());
}
int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::cell_space());
+ return RelocateOldNonCodeObject(obj, heap_->cell_space());
}
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
+ ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
// Reset the map pointer.
- int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
+ int obj_size = RestoreMap(obj, heap_->code_space(), new_addr, map_addr);
Address old_addr = obj->address();
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(new_addr, old_addr, obj_size);
+ heap_->MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
// May also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compiled code has moved.
- PROFILE(CodeMoveEvent(old_addr, new_addr));
+ PROFILE(heap_->isolate(), CodeMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
// Get forwarding address
Address old_addr = obj->address();
- int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+ int offset = heap_->new_space()->ToSpaceOffsetForAddress(old_addr);
Address new_addr =
- Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
+ Memory::Address_at(heap_->new_space()->FromSpaceLow() + offset);
#ifdef DEBUG
- if (Heap::new_space()->FromSpaceContains(new_addr)) {
- ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+ if (heap_->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
- ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
- Heap::TargetSpace(obj) == Heap::old_data_space());
+ ASSERT(heap_->TargetSpace(obj) == heap_->old_pointer_space() ||
+ heap_->TargetSpace(obj) == heap_->old_data_space());
}
#endif
// New and old addresses cannot overlap.
- if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
- Heap::CopyBlock(new_addr, old_addr, obj_size);
+ if (heap_->InNewSpace(HeapObject::FromAddress(new_addr))) {
+ heap_->CopyBlock(new_addr, old_addr, obj_size);
} else {
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
+ heap_->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
#ifdef DEBUG
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(SharedFunctionInfoMoveEvent(old_addr, new_addr));
+ PROFILE(heap_->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
+void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+ if (enable) {
+ if (code_flusher_ != NULL) return;
+ code_flusher_ = new CodeFlusher(heap_->isolate());
+ } else {
+ if (code_flusher_ == NULL) return;
+ delete code_flusher_;
+ code_flusher_ = NULL;
+ }
+}
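EnableCodeFlushing is idempotent in both directions: the first enable lazily allocates the CodeFlusher, repeated enables are no-ops, and disabling deletes it, which is what lets is_code_flushing_enabled() below be a plain null test. A hedged usage sketch, assuming a heap_ pointer as elsewhere in this file:

    MarkCompactCollector* collector = heap_->mark_compact_collector();
    collector->EnableCodeFlushing(true);   // first call allocates a CodeFlusher
    collector->EnableCodeFlushing(true);   // no-op: already enabled
    ASSERT(collector->is_code_flushing_enabled());
    collector->EnableCodeFlushing(false);  // deletes it; candidates are dropped
    ASSERT(!collector->is_code_flushing_enabled());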
+
+
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_GDB_JIT_INTERFACE
if (obj->IsCode()) {
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
- PROFILE(CodeDeleteEvent(obj->address()));
+ PROFILE(ISOLATE, CodeDeleteEvent(obj->address()));
}
#endif
}
#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_
+#include "spaces.h"
+
namespace v8 {
namespace internal {
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Forward declarations.
-class RootMarkingVisitor;
+class CodeFlusher;
+class GCTracer;
class MarkingVisitor;
+class RootMarkingVisitor;
+
+
+// ----------------------------------------------------------------------------
+// Marking stack for tracing live objects.
+
+class MarkingStack {
+ public:
+ MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
+
+ void Initialize(Address low, Address high) {
+ top_ = low_ = reinterpret_cast<HeapObject**>(low);
+ high_ = reinterpret_cast<HeapObject**>(high);
+ overflowed_ = false;
+ }
+
+ bool is_full() const { return top_ >= high_; }
+
+ bool is_empty() const { return top_ <= low_; }
+
+ bool overflowed() const { return overflowed_; }
+
+ void clear_overflowed() { overflowed_ = false; }
+
+ // Push the (marked) object on the marking stack if there is room,
+ // otherwise mark the object as overflowed and wait for a rescan of the
+ // heap.
+ void Push(HeapObject* object) {
+ CHECK(object->IsHeapObject());
+ if (is_full()) {
+ object->SetOverflow();
+ overflowed_ = true;
+ } else {
+ *(top_++) = object;
+ }
+ }
+
+ HeapObject* Pop() {
+ ASSERT(!is_empty());
+ HeapObject* object = *(--top_);
+ CHECK(object->IsHeapObject());
+ return object;
+ }
+
+ private:
+ HeapObject** low_;
+ HeapObject** top_;
+ HeapObject** high_;
+ bool overflowed_;
+
+ DISALLOW_COPY_AND_ASSIGN(MarkingStack);
+};
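A hedged usage sketch of the class above, assuming low and high bound a scratch region (the collector reuses new space's from space) and obj is a marked HeapObject; the interesting case is the overflow path, which parks an overflow bit on the object itself so a later heap rescan can recover it:

    MarkingStack stack;
    stack.Initialize(low, high);

    stack.Push(obj);  // when full: sets obj->SetOverflow() and the overflow flag

    while (!stack.is_empty()) {
      HeapObject* object = stack.Pop();  // LIFO drain
      // ... trace object's fields, pushing newly marked children ...
    }

    if (stack.overflowed()) {
      // Some marked objects carry the overflow bit; rescan the spaces for
      // them (OverflowedObjectsScanner), re-push, and clear_overflowed().
    }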
// -------------------------------------------------------------------------
-//
-// All methods are static.
-class MarkCompactCollector: public AllStatic {
+class OverflowedObjectsScanner;
+
+class MarkCompactCollector {
public:
// Type of functions to compute forwarding addresses of objects in
// compacted spaces. Given an object and its size, return a (non-failure)
// Object* that will be the object after forwarding. There is a separate
// allocation function for each (compactable) space based on the location
// of the object before compaction.
- typedef MaybeObject* (*AllocationFunction)(HeapObject* object,
+ typedef MaybeObject* (*AllocationFunction)(Heap* heap,
+ HeapObject* object,
int object_size);
// Type of functions to encode the forwarding address for an object.
// page as input, and is updated to contain the offset to be used for the
// next live object in the same page. For spaces using a different
// encoding (ie, contiguous spaces), the offset parameter is ignored.
- typedef void (*EncodingFunction)(HeapObject* old_object,
+ typedef void (*EncodingFunction)(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* offset);
// Type of functions to process non-live objects.
typedef void (*ProcessNonLiveFunction)(HeapObject* object);
+ // Pointer to member function, used in IterateLiveObjects.
+ typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
+
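LiveObjectCallback replaces the free-function HeapObjectCallback because the size and relocation functions are now instance methods of the collector; call sites such as IterateLiveObjectsInRange above use the pointer-to-member call operator, (this->*size_func)(obj). A standalone sketch of that syntax (hypothetical types):

    #include <cstdio>

    class Collector {
     public:
      // Mirrors LiveObjectCallback: a pointer-to-member-function type.
      typedef int (Collector::*Callback)(int obj);

      int Iterate(Callback size_func, int obj) {
        return (this->*size_func)(obj);  // the ->* call, as used above
      }

      int SizeOf(int obj) { return obj * 2; }
    };

    int main() {
      Collector collector;
      // Callbacks are passed as &Class::Method:
      std::printf("%d\n", collector.Iterate(&Collector::SizeOf, 21));  // 42
      return 0;
    }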
-  // Set the global force_compaction flag, it must be called before Prepare
-  // to take effect.
+  // Set the force_compaction flag; it must be called before Prepare
+  // to take effect.
- static void SetForceCompaction(bool value) {
+ void SetForceCompaction(bool value) {
force_compaction_ = value;
}
// Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact.
- static void Prepare(GCTracer* tracer);
+ void Prepare(GCTracer* tracer);
// Performs a global garbage collection.
- static void CollectGarbage();
+ void CollectGarbage();
// True if the last full GC performed heap compaction.
- static bool HasCompacted() { return compacting_collection_; }
+ bool HasCompacted() { return compacting_collection_; }
// True after the Prepare phase if the compaction is taking place.
- static bool IsCompacting() {
+ bool IsCompacting() {
#ifdef DEBUG
// For the purposes of asserts we don't want this to keep returning true
// after the collection is completed.
// The count of the number of objects left marked at the end of the last
// completed full GC (expected to be zero).
- static int previous_marked_count() { return previous_marked_count_; }
+ int previous_marked_count() { return previous_marked_count_; }
// During a full GC, there is a stack-allocated GCTracer that is used for
// bookkeeping information. Return a pointer to that tracer.
- static GCTracer* tracer() { return tracer_; }
+ GCTracer* tracer() { return tracer_; }
#ifdef DEBUG
// Checks whether performing mark-compact collection.
- static bool in_use() { return state_ > PREPARE_GC; }
- static bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
+ bool in_use() { return state_ > PREPARE_GC; }
+ bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
// Determine type of object and emit deletion log event.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
+ inline Heap* heap() const { return heap_; }
+
+ CodeFlusher* code_flusher() { return code_flusher_; }
+ inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
+ void EnableCodeFlushing(bool enable);
+
private:
+ MarkCompactCollector();
+ ~MarkCompactCollector();
+
#ifdef DEBUG
enum CollectorState {
IDLE,
};
// The current stage of the collector.
- static CollectorState state_;
+ CollectorState state_;
#endif
// Global flag that forces a compaction.
- static bool force_compaction_;
+ bool force_compaction_;
// Global flag indicating whether spaces were compacted on the last GC.
- static bool compacting_collection_;
+ bool compacting_collection_;
// Global flag indicating whether spaces will be compacted on the next GC.
- static bool compact_on_next_gc_;
+ bool compact_on_next_gc_;
// The number of objects left marked at the end of the last completed full
// GC (expected to be zero).
- static int previous_marked_count_;
+ int previous_marked_count_;
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
- static GCTracer* tracer_;
+ GCTracer* tracer_;
// Finishes GC, performs heap verification if enabled.
- static void Finish();
+ void Finish();
// -----------------------------------------------------------------------
// Phase 1: Marking live objects.
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
- static void PrepareForCodeFlushing();
+ void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.
- static void MarkLiveObjects();
+ void MarkLiveObjects();
- static void MarkUnmarkedObject(HeapObject* obj);
+ void MarkUnmarkedObject(HeapObject* obj);
- static inline void MarkObject(HeapObject* obj) {
+ inline void MarkObject(HeapObject* obj) {
if (!obj->IsMarked()) MarkUnmarkedObject(obj);
}
- static inline void SetMark(HeapObject* obj) {
- tracer_->increment_marked_count();
-#ifdef DEBUG
- UpdateLiveObjectCount(obj);
-#endif
- obj->SetMark();
- }
+ inline void SetMark(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
// the prototype field. The original prototype pointers are restored
// in ClearNonLiveTransitions(). All JSObject maps
// connected by map transitions have the same prototype object, which
// is why we can use this field temporarily for back pointers.
- static void CreateBackPointers();
+ void CreateBackPointers();
// Mark a Map and its DescriptorArray together, skipping transitions.
- static void MarkMapContents(Map* map);
- static void MarkDescriptorArray(DescriptorArray* descriptors);
+ void MarkMapContents(Map* map);
+ void MarkDescriptorArray(DescriptorArray* descriptors);
// Mark the heap roots and all objects reachable from them.
- static void MarkRoots(RootMarkingVisitor* visitor);
+ void MarkRoots(RootMarkingVisitor* visitor);
// Mark the symbol table specially. References to symbols from the
// symbol table are weak.
- static void MarkSymbolTable();
+ void MarkSymbolTable();
// Mark objects in object groups that have at least one object in the
// group marked.
- static void MarkObjectGroups();
+ void MarkObjectGroups();
  // Mark objects in implicit reference groups if their parent object
  // is marked.
- static void MarkImplicitRefGroups();
+ void MarkImplicitRefGroups();
  // Mark all objects that are reachable due to host application
  // logic, such as object groups or implicit reference groups.
- static void ProcessExternalMarking();
+ void ProcessExternalMarking();
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
- static void ProcessMarkingStack();
+ void ProcessMarkingStack();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
- static void EmptyMarkingStack();
+ void EmptyMarkingStack();
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
- static void RefillMarkingStack();
+ void RefillMarkingStack();
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
#ifdef DEBUG
- static void UpdateLiveObjectCount(HeapObject* obj);
+ void UpdateLiveObjectCount(HeapObject* obj);
#endif
// We sweep the large object space in the same way whether we are
// compacting or not, because the large object space is never compacted.
- static void SweepLargeObjectSpace();
+ void SweepLargeObjectSpace();
// Test whether a (possibly marked) object is a Map.
static inline bool SafeIsMap(HeapObject* object);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
- static void ClearNonLiveTransitions();
+ void ClearNonLiveTransitions();
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
// Encodes forwarding addresses of objects in compactable parts of the
// heap.
- static void EncodeForwardingAddresses();
+ void EncodeForwardingAddresses();
// Encodes the forwarding addresses of objects in new space.
- static void EncodeForwardingAddressesInNewSpace();
+ void EncodeForwardingAddressesInNewSpace();
// Function template to encode the forwarding addresses of objects in
// paged spaces, parameterized by allocation and non-live processing
// functions.
template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
- static void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
+ void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
  // Iterates live objects in a space, passing each live object to a
  // callback function that returns the heap size of the object.
  // Returns the number of live objects iterated.
- static int IterateLiveObjects(NewSpace* space, HeapObjectCallback size_f);
- static int IterateLiveObjects(PagedSpace* space, HeapObjectCallback size_f);
+ int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
+ int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
// Iterates the live objects between a range of addresses, returning the
// number of live objects.
- static int IterateLiveObjectsInRange(Address start, Address end,
- HeapObjectCallback size_func);
+ int IterateLiveObjectsInRange(Address start, Address end,
+ LiveObjectCallback size_func);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
- static void SweepSpaces();
+ void SweepSpaces();
// -----------------------------------------------------------------------
// Phase 3: Updating pointers in live objects.
friend class UpdatingVisitor; // helper for updating visited objects
// Updates pointers in all spaces.
- static void UpdatePointers();
+ void UpdatePointers();
// Updates pointers in an object in new space.
// Returns the heap size of the object.
- static int UpdatePointersInNewObject(HeapObject* obj);
+ int UpdatePointersInNewObject(HeapObject* obj);
// Updates pointers in an object in old spaces.
// Returns the heap size of the object.
- static int UpdatePointersInOldObject(HeapObject* obj);
+ int UpdatePointersInOldObject(HeapObject* obj);
// Calculates the forwarding address of an object in an old space.
static Address GetForwardingAddressInOldSpace(HeapObject* obj);
// After: Objects have been moved to their new addresses.
// Relocates objects in all spaces.
- static void RelocateObjects();
+ void RelocateObjects();
  // Converts a code object's inline targets to addresses; the conversion
  // from address to target happens in the marking phase.
- static int ConvertCodeICTargetToAddress(HeapObject* obj);
+ int ConvertCodeICTargetToAddress(HeapObject* obj);
// Relocate a map object.
- static int RelocateMapObject(HeapObject* obj);
+ int RelocateMapObject(HeapObject* obj);
// Relocates an old object.
- static int RelocateOldPointerObject(HeapObject* obj);
- static int RelocateOldDataObject(HeapObject* obj);
+ int RelocateOldPointerObject(HeapObject* obj);
+ int RelocateOldDataObject(HeapObject* obj);
// Relocate a property cell object.
- static int RelocateCellObject(HeapObject* obj);
+ int RelocateCellObject(HeapObject* obj);
// Helper function.
- static inline int RelocateOldNonCodeObject(HeapObject* obj,
- PagedSpace* space);
+ inline int RelocateOldNonCodeObject(HeapObject* obj,
+ PagedSpace* space);
// Relocates an object in the code space.
- static int RelocateCodeObject(HeapObject* obj);
+ int RelocateCodeObject(HeapObject* obj);
// Copy a new object.
- static int RelocateNewObject(HeapObject* obj);
+ int RelocateNewObject(HeapObject* obj);
#ifdef DEBUG
// -----------------------------------------------------------------------
// mark-sweep collection.
// Size of live objects in Heap::to_space_.
- static int live_young_objects_size_;
+ int live_young_objects_size_;
// Size of live objects in Heap::old_pointer_space_.
- static int live_old_pointer_objects_size_;
+ int live_old_pointer_objects_size_;
// Size of live objects in Heap::old_data_space_.
- static int live_old_data_objects_size_;
+ int live_old_data_objects_size_;
// Size of live objects in Heap::code_space_.
- static int live_code_objects_size_;
+ int live_code_objects_size_;
// Size of live objects in Heap::map_space_.
- static int live_map_objects_size_;
+ int live_map_objects_size_;
// Size of live objects in Heap::cell_space_.
- static int live_cell_objects_size_;
+ int live_cell_objects_size_;
// Size of live objects in Heap::lo_space_.
- static int live_lo_objects_size_;
+ int live_lo_objects_size_;
// Number of live bytes in this collection.
- static int live_bytes_;
+ int live_bytes_;
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
friend class UnmarkObjectVisitor;
static void UnmarkObject(HeapObject* obj);
#endif
+
+ Heap* heap_;
+ MarkingStack marking_stack_;
+ CodeFlusher* code_flusher_;
+
+ friend class Heap;
+ friend class OverflowedObjectsScanner;
};
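// For illustration: with MarkCompactCollector no longer AllStatic, each
// Heap owns a collector instance (see the heap_ field and the Heap
// friendship above). A minimal sketch of how a call site migrates,
// assuming a Heap::mark_compact_collector() accessor (the accessor name
// is an assumption of this sketch, not shown in this patch):

static void ForceFullCompaction(Heap* heap) {
  // Was: MarkCompactCollector::SetForceCompaction(true) on the static class.
  heap->mark_compact_collector()->SetForceCompaction(true);
  // A full GC now runs this heap's collector, not a process-wide one.
  heap->CollectAllGarbage(true);
}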
#include "execution.h"
#include "messages.h"
#include "spaces-inl.h"
-#include "top.h"
namespace v8 {
namespace internal {
Vector< Handle<Object> > args,
Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
- Handle<String> type_handle = Factory::LookupAsciiSymbol(type);
+ Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type);
Handle<FixedArray> arguments_elements =
- Factory::NewFixedArray(args.length());
+ FACTORY->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
arguments_elements->set(i, *args[i]);
}
Handle<JSArray> arguments_handle =
- Factory::NewJSArrayWithElements(arguments_elements);
+ FACTORY->NewJSArrayWithElements(arguments_elements);
int start = 0;
int end = 0;
- Handle<Object> script_handle = Factory::undefined_value();
+ Handle<Object> script_handle = FACTORY->undefined_value();
if (loc) {
start = loc->start_pos();
end = loc->end_pos();
}
Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Factory::undefined_value()
+ ? FACTORY->undefined_value()
: Handle<Object>::cast(stack_trace);
Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? Factory::undefined_value()
+ ? FACTORY->undefined_value()
: Handle<Object>::cast(stack_frames);
Handle<JSMessageObject> message =
- Factory::NewJSMessageObject(type_handle,
+ FACTORY->NewJSMessageObject(type_handle,
arguments_handle,
start,
end,
Handle<Object> message) {
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- v8::NeanderArray global_listeners(Factory::message_listeners());
+ v8::NeanderArray global_listeners(FACTORY->message_listeners());
int global_length = global_listeners.length();
if (global_length == 0) {
DefaultMessageReport(loc, message);
Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
- Handle<String> fmt_str = Factory::LookupAsciiSymbol("FormatMessage");
+ Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage");
Handle<JSFunction> fun =
- Handle<JSFunction>(JSFunction::cast(
- Top::builtins()->GetPropertyNoExceptionThrown(*fmt_str)));
+ Handle<JSFunction>(
+ JSFunction::cast(
+ Isolate::Current()->js_builtins_object()->
+ GetPropertyNoExceptionThrown(*fmt_str)));
Object** argv[1] = { data.location() };
bool caught_exception;
Handle<Object> result =
- Execution::TryCall(fun, Top::builtins(), 1, argv, &caught_exception);
+ Execution::TryCall(fun,
+ Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
if (caught_exception || !result->IsString()) {
- return Factory::LookupAsciiSymbol("<error>");
+ return FACTORY->LookupAsciiSymbol("<error>");
}
Handle<String> result_string = Handle<String>::cast(result);
// A string that has been obtained from JS code in this way is
InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
// Boilerplate for stack-overflow exceptions. Used from
-// Top::StackOverflow().
+// Isolate::StackOverflow().
const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
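// For illustration: the rewritten call sites above rely on shorthand
// macros that route the old static Factory::*/Heap::* accesses through
// the current isolate. A sketch of their presumed definitions (the exact
// definitions are an assumption of this sketch; they would live alongside
// the Isolate declaration):

#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())

// With these, FACTORY->LookupAsciiSymbol(type) is the per-isolate
// equivalent of the old global Factory::LookupAsciiSymbol(type).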
if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
+ bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
bool should_trace =
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
__ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
__ lw(a3, MemOperand(t0));
__ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
+ ExternalReference(Isolate::k_pending_exception_address));
__ lw(v0, MemOperand(t0));
__ sw(a3, MemOperand(t0));
  // Special handling of termination exceptions, which are uncatchable
  // by JavaScript code.
__ Branch(eq, throw_termination_exception,
- v0, Operand(Factory::termination_exception()));
+ v0, Operand(FACTORY->termination_exception()));
// Handle normal exception.
__ b(throw_normal_exception);
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
__ li(t2, Operand(Smi::FromInt(marker)));
__ li(t1, Operand(Smi::FromInt(marker)));
- __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ __ LoadExternalReference(t0,
+ ExternalReference(Isolate::k_c_entry_fp_address));
__ lw(t0, MemOperand(t0));
__ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
// Coming in here the fp will be invalid because the PushTryHandler below
// sets it to 0 to signal the existence of the JSEntry frame.
__ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
+ ExternalReference(Isolate::k_pending_exception_address));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
__ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
__ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
__ lw(t1, MemOperand(t0));
__ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
+ ExternalReference(Isolate::k_pending_exception_address));
__ sw(t1, MemOperand(t0));
  // Invoke the function by calling through the JS entry trampoline
  // builtin.
// displacement since the current stack pointer (sp) points directly
// to the stack handler.
__ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address));
+ __ LoadExternalReference(t0, ExternalReference(Isolate::k_handler_address));
__ sw(t1, MemOperand(t0));
// This restores sp to its position before PushTryHandler.
__ bind(&exit); // v0 holds result
// Restore the top frame descriptors from the stack.
__ Pop(t1);
- __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ __ LoadExternalReference(t0,
+ ExternalReference(Isolate::k_c_entry_fp_address));
__ sw(t1, MemOperand(t0));
// Reset the stack to the callee saved registers.
const char* NameConverter::NameOfAddress(byte_* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
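// For illustration: the function-local static buffer removed above was
// shared across threads (and isolates); it becomes a per-converter
// member. A sketch of the presumed declaration (names follow the hunk;
// the mutable qualifier is an assumption of this sketch, needed because
// NameOfAddress is const):

class NameConverter {
 public:
  const char* NameOfAddress(byte_* addr) const;
 private:
  // One buffer per converter instance instead of one per process.
  mutable v8::internal::EmbeddedVector<char, 32> tmp_buffer_;
};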
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-Register FastCodeGenerator::accumulator0() { return no_reg; }
-Register FastCodeGenerator::accumulator1() { return no_reg; }
-Register FastCodeGenerator::scratch0() { return no_reg; }
-Register FastCodeGenerator::scratch1() { return no_reg; }
-Register FastCodeGenerator::receiver_reg() { return no_reg; }
-Register FastCodeGenerator::context_reg() { return no_reg; }
-
-
-void FastCodeGenerator::Generate(CompilationInfo* info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- UNIMPLEMENTED_MIPS();
-}
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize
&& StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
li(t0, Operand(StackHandler::ENTRY));
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
}
// Save the frame pointer and the context in top.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address));
sw(fp, MemOperand(t0));
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+ LoadExternalReference(t0, ExternalReference(Isolate::k_context_address));
sw(cp, MemOperand(t0));
// Setup argc and the builtin function in callee-saved registers.
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
// Clear top frame.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address));
sw(zero_reg, MemOperand(t0));
// Restore current context from top and clear it in debug mode.
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+ LoadExternalReference(t0, ExternalReference(Isolate::k_context_address));
lw(cp, MemOperand(t0));
#ifdef DEBUG
sw(a3, MemOperand(t0));
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- i::Bootstrapper::NativesSourceLookup(i);
+ i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- i::Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
context.Dispose();
CppByteSink sink(argv[1]);
void HeapObject::VerifyHeapPointer(Object* p) {
ASSERT(p->IsHeapObject());
- ASSERT(Heap::Contains(HeapObject::cast(p)));
+ ASSERT(HEAP->Contains(HeapObject::cast(p)));
}
map()->NextFreePropertyIndex()));
}
ASSERT(map()->has_fast_elements() ==
- (elements()->map() == Heap::fixed_array_map() ||
- elements()->map() == Heap::fixed_cow_array_map()));
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
}
void Map::MapVerify() {
- ASSERT(!Heap::InNewSpace(this));
+ ASSERT(!HEAP->InNewSpace(this));
ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
ASSERT(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
- instance_size() < Heap::Capacity()));
+ instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
}
void Map::SharedMapVerify() {
MapVerify();
ASSERT(is_shared());
- ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
+ ASSERT_EQ(GetHeap()->empty_descriptor_array(), instance_descriptors());
ASSERT_EQ(0, pre_allocated_property_fields());
ASSERT_EQ(0, unused_property_fields());
ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
if (IsSymbol()) {
- CHECK(!Heap::InNewSpace(this));
+ CHECK(!HEAP->InNewSpace(this));
}
}
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- ASSERT(number == Heap::nan_value());
+ ASSERT(number == HEAP->nan_value());
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
int len = e->length();
+ Heap* heap = HEAP;
for (int i = 0; i < len; i++) {
- if (e->get(i) == Heap::the_hole_value()) holes++;
+ if (e->get(i) == heap->the_hole_value()) holes++;
}
info->number_of_fast_used_elements_ += len - holes;
info->number_of_fast_unused_elements_ += holes;
#include "conversions-inl.h"
#include "heap.h"
#include "memory.h"
+#include "isolate.h"
#include "property.h"
#include "spaces.h"
type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(this, offset, mode); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); \
+ }
+
+
+// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
+#define ACCESSORS_GCSAFE(holder, name, type, offset) \
+ type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode); \
}
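// For illustration, instantiating the macro for a hypothetical holder and
// field, ACCESSORS_GCSAFE(Foo, bar, Object, kBarOffset) expands to roughly:

Object* Foo::bar() { return Object::cast(READ_FIELD(this, kBarOffset)); }
void Foo::set_bar(Object* value, WriteBarrierMode mode) {
  WRITE_FIELD(this, kBarOffset, value);
  // HEAP reads the isolate from TLS, so this works even while object maps
  // are unsafe to read (e.g. during GC), unlike GetHeap().
  CONDITIONAL_WRITE_BARRIER(HEAP, this, kBarOffset, mode);
}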
bool MaybeObject::IsTheHole() {
- return this == Heap::the_hole_value();
+ return !IsFailure() && ToObjectUnchecked()->IsTheHole();
}
bool Object::IsContext() {
- return Object::IsHeapObject()
- && (HeapObject::cast(this)->map() == Heap::context_map() ||
- HeapObject::cast(this)->map() == Heap::catch_context_map() ||
- HeapObject::cast(this)->map() == Heap::global_context_map());
+ if (Object::IsHeapObject()) {
+ Heap* heap = HeapObject::cast(this)->GetHeap();
+ return (HeapObject::cast(this)->map() == heap->context_map() ||
+ HeapObject::cast(this)->map() == heap->catch_context_map() ||
+ HeapObject::cast(this)->map() == heap->global_context_map());
+ }
+ return false;
}
bool Object::IsCatchContext() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::catch_context_map();
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->catch_context_map();
}
bool Object::IsGlobalContext() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::global_context_map();
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->global_context_map();
}
bool Object::IsOddball() {
+ ASSERT(HEAP->is_safe_to_read_maps());
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
}
bool Object::IsBoolean() {
- return IsTrue() || IsFalse();
+ return IsOddball() &&
+ ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}
bool Object::IsHashTable() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::hash_table_map();
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->hash_table_map();
}
bool Object::IsDictionary() {
- return IsHashTable() && this != Heap::symbol_table();
+ return IsHashTable() && this !=
+ HeapObject::cast(this)->GetHeap()->symbol_table();
}
bool Object::IsSymbolTable() {
- return IsHashTable() && this == Heap::raw_unchecked_symbol_table();
+ return IsHashTable() && this ==
+ HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
}
bool Object::IsUndefined() {
- return this == Heap::undefined_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
}
bool Object::IsNull() {
- return this == Heap::null_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
+}
+
+
+bool Object::IsTheHole() {
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
}
bool Object::IsTrue() {
- return this == Heap::true_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}
bool Object::IsFalse() {
- return this == Heap::false_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
}
bool Object::IsArgumentsMarker() {
- return this == Heap::arguments_marker();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
}
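// For illustration: identity checks against process-global roots (e.g.
// this == Heap::true_value()) break once every isolate has its own
// oddballs, so the predicates above compare a kind byte stored on the
// Oddball itself. A sketch of the declarations inside class Oddball that
// these checks imply (the concrete values are assumptions, except that
// kFalse and kTrue must be the only kinds that are zero under
// kNotBooleanMask):

static const byte kFalse = 0;
static const byte kTrue = 1;
static const byte kNotBooleanMask = ~1;  // Zero under this mask <=> boolean.
static const byte kTheHole = 2;
static const byte kNull = 3;
static const byte kArgumentMarker = 4;
static const byte kUndefined = 5;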
}
-
MaybeObject* Object::ToSmi() {
if (IsSmi()) return this;
if (IsHeapNumber()) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
- ASSERT(Heap::IsAllocationAllowed());
+ ASSERT(HEAP->IsAllocationAllowed());
return GetElementWithReceiver(this, index);
}
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-
+// TODO(isolates): Pass heap into these macros.
#define WRITE_BARRIER(object, offset) \
- Heap::RecordWrite(object->address(), offset);
+ object->GetHeap()->RecordWrite(object->address(), offset);
// CONDITIONAL_WRITE_BARRIER must be issued after the actual
// write due to the assert validating the written value.
-#define CONDITIONAL_WRITE_BARRIER(object, offset, mode) \
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
if (mode == UPDATE_WRITE_BARRIER) { \
- Heap::RecordWrite(object->address(), offset); \
+ heap->RecordWrite(object->address(), offset); \
} else { \
ASSERT(mode == SKIP_WRITE_BARRIER); \
- ASSERT(Heap::InNewSpace(object) || \
- !Heap::InNewSpace(READ_FIELD(object, offset)) || \
+ ASSERT(heap->InNewSpace(object) || \
+ !heap->InNewSpace(READ_FIELD(object, offset)) || \
Page::FromAddress(object->address())-> \
IsRegionDirty(object->address() + offset)); \
}
#endif
+Heap* HeapObject::GetHeap() {
+  // During GC, the map pointer in HeapObject is used in ways that prevent
+  // us from retrieving the Heap from the map. Assert that we are not in a
+  // GC; GC code must be written so that it never needs to pull the heap
+  // out of the map.
+ ASSERT(HEAP->is_safe_to_read_maps());
+ return map()->heap();
+}
+
+
+Isolate* HeapObject::GetIsolate() {
+ Isolate* i = GetHeap()->isolate();
+ ASSERT(i == Isolate::Current());
+ return i;
+}
+
+
Map* HeapObject::map() {
return map_word().ToMap();
}
void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
ASSERT(map()->has_fast_elements() ==
- (value->map() == Heap::fixed_array_map() ||
- value->map() == Heap::fixed_cow_array_map()));
+ (value->map() == GetHeap()->fixed_array_map() ||
+ value->map() == GetHeap()->fixed_cow_array_map()));
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsExternalArray());
WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
}
void JSObject::initialize_properties() {
- ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
- WRITE_FIELD(this, kPropertiesOffset, Heap::empty_fixed_array());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
}
void JSObject::initialize_elements() {
ASSERT(map()->has_fast_elements());
- ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
- WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
}
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+byte Oddball::kind() {
+ return READ_BYTE_FIELD(this, kKindOffset);
+}
+
+
+void Oddball::set_kind(byte value) {
+ WRITE_BYTE_FIELD(this, kKindOffset, value);
+}
+
+
Object* JSGlobalPropertyCell::value() {
return READ_FIELD(this, kValueOffset);
}
ASSERT(index < 0);
int offset = map()->instance_size() + (index * kPointerSize);
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
return value;
}
void JSObject::InitializeBody(int object_size, Object* value) {
- ASSERT(!value->IsHeapObject() || !Heap::InNewSpace(value));
+ ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
void Struct::InitializeBody(int object_size) {
- Object* value = Heap::undefined_value();
+ Object* value = GetHeap()->undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
void FixedArray::set(int index, Object* value) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
- if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
+ if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
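// For illustration: GetWriteBarrierMode must be used under an
// AssertNoAllocation scope so that no GC can move the object and
// invalidate the answer. A sketch of the typical caller pattern (the
// helper name is hypothetical; FixedArray::set with an explicit mode
// appears below):

static void CopyBackingStore(FixedArray* from, FixedArray* to, int len) {
  AssertNoAllocation no_gc;  // Guarantees the mode stays valid below.
  WriteBarrierMode mode = to->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) {
    // Skips the remembered-set update when 'to' is in new space.
    to->set(i, from->get(i), mode);
  }
}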
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
}
void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
- ASSERT(array->map() != Heap::raw_unchecked_fixed_cow_array_map());
+ ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!HEAP->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
+ set_undefined(GetHeap(), index);
+}
+
+
+void FixedArray::set_undefined(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::undefined_value()));
+ ASSERT(!heap->InNewSpace(heap->undefined_value()));
WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- Heap::undefined_value());
+ heap->undefined_value());
}
void FixedArray::set_null(int index) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ set_null(GetHeap(), index);
+}
+
+
+void FixedArray::set_null(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value());
+ ASSERT(!heap->InNewSpace(heap->null_value()));
+ WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
}
void FixedArray::set_the_hole(int index) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::the_hole_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::the_hole_value());
+ ASSERT(!HEAP->InNewSpace(HEAP->the_hole_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->the_hole_value());
}
}
-void FixedArray::set_unchecked(int index,
+void FixedArray::set_unchecked(Heap* heap,
+ int index,
Object* value,
WriteBarrierMode mode) {
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
}
-void FixedArray::set_null_unchecked(int index) {
+void FixedArray::set_null_unchecked(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value());
+ ASSERT(!HEAP->InNewSpace(heap->null_value()));
+ WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
}
bool DescriptorArray::IsEmpty() {
- ASSERT(this == Heap::empty_descriptor_array() ||
- this->length() > 2);
- return this == Heap::empty_descriptor_array();
+ ASSERT(this->length() > kFirstIndex ||
+ this == HEAP->empty_descriptor_array());
+ return length() <= kFirstIndex;
}
int DescriptorArray::SearchWithCache(String* name) {
- int number = DescriptorLookupCache::Lookup(this, name);
+ int number = GetIsolate()->descriptor_lookup_cache()->Lookup(this, name);
if (number == DescriptorLookupCache::kAbsent) {
number = Search(name);
- DescriptorLookupCache::Update(this, name, number);
+ GetIsolate()->descriptor_lookup_cache()->Update(this, name, number);
}
return number;
}
ASSERT(descriptor_number < number_of_descriptors());
// Make sure none of the elements in desc are in new space.
- ASSERT(!Heap::InNewSpace(desc->GetKey()));
- ASSERT(!Heap::InNewSpace(desc->GetValue()));
+ ASSERT(!HEAP->InNewSpace(desc->GetKey()));
+ ASSERT(!HEAP->InNewSpace(desc->GetValue()));
fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
FixedArray* content_array = GetContentArray();
}
+template<typename Shape, typename Key>
+int HashTable<Shape, Key>::FindEntry(Key key) {
+ return FindEntry(GetIsolate(), key);
+}
+
+
+// Find the entry for the given key; returns kNotFound if it is absent.
+template<typename Shape, typename Key>
+int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ while (true) {
+ Object* element = KeyAt(entry);
+ if (element == isolate->heap()->undefined_value()) break; // Empty entry.
+ if (element != isolate->heap()->null_value() &&
+ Shape::IsMatch(key, element)) return entry;
+ entry = NextProbe(entry, count++, capacity);
+ }
+ return kNotFound;
+}
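// For illustration: the loop above stops at an undefined slot (never
// used) and steps over null slots (deleted entries). A sketch of the
// probing helpers it relies on, assuming power-of-two capacities and
// triangular probing (the exact scheme is an assumption of this sketch):

static inline uint32_t FirstProbe(uint32_t hash, uint32_t size) {
  return hash & (size - 1);  // size is a power of two.
}

static inline uint32_t NextProbe(uint32_t last, uint32_t number,
                                 uint32_t size) {
  // Adding 1, 2, 3, ... visits every slot once before repeating.
  return (last + number) & (size - 1);
}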
+
+
bool NumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return false;
void ConsString::set_first(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kFirstOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
}
void ConsString::set_second(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kSecondOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
}
int cache_size = size();
Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
MemsetPointer(entries_start,
- Heap::the_hole_value(),
+ GetHeap()->the_hole_value(),
cache_size - kEntriesIndex);
MakeZeroSize();
}
}
+Heap* Map::heap() {
+  // NOTE: The address() helper is deliberately not used here; skipping
+  // it saves one instruction.
+ Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
+}
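// For illustration: this is what lets any heap object reach its owning
// Heap (and from there its Isolate) in a few loads, replacing global
// statics: object address -> page header -> heap_ back-pointer. A sketch
// of the page lookup it relies on, assuming page-aligned allocation (the
// body shown here is an assumption; only the declaration is confirmed by
// this patch). Inside class Page:

static Page* FromAddress(Address a) {
  // Every object on a page shares that page's header: mask off the low
  // bits of the object's address to get the page start.
  return reinterpret_cast<Page*>(
      reinterpret_cast<uintptr_t>(a) & ~kPageAlignmentMask);
}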
+
+
Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
return HeapObject::
FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
ASSERT(value->IsNull() || value->IsJSObject());
WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kPrototypeOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
}
}
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(true);
- Counters::map_slow_to_fast_elements.Increment();
+ COUNTERS->map_slow_to_fast_elements()->Increment();
return new_map;
}
}
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(false);
- Counters::map_fast_to_slow_elements.Increment();
+ COUNTERS->map_fast_to_slow_elements()->Increment();
return new_map;
}
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(false);
new_map->set_has_external_array_elements(true);
- Counters::map_to_external_array_elements.Increment();
+ COUNTERS->map_to_external_array_elements()->Increment();
return new_map;
}
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
+ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
+ kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
#endif
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
- return initial_map() != Heap::undefined_value();
+ return initial_map() != HEAP->undefined_value();
}
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
+ ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
}
void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
WriteBarrierMode mode) {
WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(this, kScopeInfoOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
}
bool SharedFunctionInfo::is_compiled() {
- return code() != Builtins::builtin(Builtins::LazyCompile);
+ return code() !=
+ Isolate::Current()->builtins()->builtin(Builtins::LazyCompile);
}
bool JSFunction::IsMarkedForLazyRecompilation() {
- return code() == Builtins::builtin(Builtins::LazyRecompile);
+ return code() == GetIsolate()->builtins()->builtin(Builtins::LazyRecompile);
}
void JSFunction::set_code(Code* value) {
// Skip the write barrier because code is never in new space.
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!HEAP->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
void JSFunction::set_context(Object* value) {
- ASSERT(value == Heap::undefined_value() || value->IsContext());
+ ASSERT(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
WRITE_BARRIER(this, kContextOffset);
}
bool JSFunction::is_compiled() {
- return code() != Builtins::builtin(Builtins::LazyCompile);
+ return code() != GetIsolate()->builtins()->builtin(Builtins::LazyCompile);
}
Code* value) {
ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!HEAP->InNewSpace(value));
}
JSObject::ElementsKind JSObject::GetElementsKind() {
if (map()->has_fast_elements()) {
- ASSERT(elements()->map() == Heap::fixed_array_map() ||
- elements()->map() == Heap::fixed_cow_array_map());
+ ASSERT(elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map());
return FAST_ELEMENTS;
}
HeapObject* array = elements();
ASSERT(array->IsDictionary());
return DICTIONARY_ELEMENTS;
}
+ ASSERT(!map()->has_fast_elements());
if (array->IsExternalArray()) {
switch (array->map()->instance_type()) {
case EXTERNAL_BYTE_ARRAY_TYPE:
MaybeObject* JSObject::EnsureWritableFastElements() {
ASSERT(HasFastElements());
FixedArray* elems = FixedArray::cast(elements());
- if (elems->map() != Heap::fixed_cow_array_map()) return elems;
+ Isolate* isolate = GetIsolate();
+ if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
Object* writable_elems;
- { MaybeObject* maybe_writable_elems =
- Heap::CopyFixedArrayWithMap(elems, Heap::fixed_array_map());
+ { MaybeObject* maybe_writable_elems = isolate->heap()->CopyFixedArrayWithMap(
+ elems, isolate->heap()->fixed_array_map());
if (!maybe_writable_elems->ToObject(&writable_elems)) {
return maybe_writable_elems;
}
}
set_elements(FixedArray::cast(writable_elems));
- Counters::cow_arrays_converted.Increment();
+ isolate->counters()->cow_arrays_converted()->Increment();
return writable_elems;
}
Object* JSObject::BypassGlobalProxy() {
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return Heap::undefined_value();
+ if (proto->IsNull()) return GetHeap()->undefined_value();
ASSERT(proto->IsJSGlobalObject());
return proto;
}
bool JSObject::HasHiddenPropertiesObject() {
ASSERT(!IsJSGlobalProxy());
return GetPropertyAttributePostInterceptor(this,
- Heap::hidden_symbol(),
+ GetHeap()->hidden_symbol(),
false) != ABSENT;
}
// object.
Object* result =
GetLocalPropertyPostInterceptor(this,
- Heap::hidden_symbol(),
+ GetHeap()->hidden_symbol(),
&attributes)->ToObjectUnchecked();
return result;
}
MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
ASSERT(!IsJSGlobalProxy());
- return SetPropertyPostInterceptor(Heap::hidden_symbol(),
+ return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
hidden_obj,
DONT_ENUM,
kNonStrictMode);
}
-void Map::ClearCodeCache() {
+bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
+ ASSERT(other->IsNumber());
+ return key == static_cast<uint32_t>(other->Number());
+}
+
+
+uint32_t NumberDictionaryShape::Hash(uint32_t key) {
+ return ComputeIntegerHash(key);
+}
+
+
+uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
+ ASSERT(other->IsNumber());
+ return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
+}
+
+
+MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
+ return Isolate::Current()->heap()->NumberFromUint32(key);
+}
+
+
+bool StringDictionaryShape::IsMatch(String* key, Object* other) {
+  // Every key in a hash table has had its hash computed. Use that to
+  // fail fast on a hash mismatch before the full string comparison.
+ if (key->Hash() != String::cast(other)->Hash()) return false;
+ return key->Equals(String::cast(other));
+}
+
+
+uint32_t StringDictionaryShape::Hash(String* key) {
+ return key->Hash();
+}
+
+
+uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
+ return String::cast(other)->Hash();
+}
+
+
+MaybeObject* StringDictionaryShape::AsObject(String* key) {
+ return key;
+}
+
+
+void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
- ASSERT(!Heap::InNewSpace(Heap::raw_unchecked_empty_fixed_array()));
- WRITE_FIELD(this, kCodeCacheOffset, Heap::raw_unchecked_empty_fixed_array());
+ ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
+ WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
}
// constantly growing.
Expand(required_size + (required_size >> 3));
  // It's a performance benefit to keep a frequently used array in new space.
- } else if (!Heap::new_space()->Contains(elts) &&
+ } else if (!GetHeap()->new_space()->Contains(elts) &&
required_size < kArraySizeThatFitsComfortablyInNewSpace) {
// Expand will allocate a new backing store in new space even if the size
// we asked for isn't larger than what we had before.
MaybeObject* FixedArray::Copy() {
if (length() == 0) return this;
- return Heap::CopyFixedArray(this);
+ return GetHeap()->CopyFixedArray(this);
+}
+
+
+Relocatable::Relocatable(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ isolate_ = isolate;
+ prev_ = isolate->relocatable_top();
+ isolate->set_relocatable_top(this);
+}
+
+
+Relocatable::~Relocatable() {
+ ASSERT(isolate_ == Isolate::Current());
+ ASSERT_EQ(isolate_->relocatable_top(), this);
+ isolate_->set_relocatable_top(prev_);
}
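// For illustration: Relocatable threads stack-allocated helpers onto a
// per-isolate list (instead of a process-global one) so the GC can find
// and update them. A usage sketch with a hypothetical subclass:

class ScopedStringTracker : public Relocatable {
 public:
  explicit ScopedStringTracker(Isolate* isolate) : Relocatable(isolate) {}
};

static void Example(Isolate* isolate) {
  ScopedStringTracker tracker(isolate);  // Pushed onto relocatable_top().
  // ... a GC here can walk this isolate's Relocatable chain ...
}  // ~Relocatable pops it; the ASSERT_EQ above enforces LIFO nesting.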
template<typename StaticVisitor>
class BodyVisitorBase : public AllStatic {
public:
- INLINE(static void IteratePointers(HeapObject* object,
+ INLINE(static void IteratePointers(Heap* heap,
+ HeapObject* object,
int start_offset,
int end_offset)) {
Object** start_slot = reinterpret_cast<Object**>(object->address() +
start_offset);
Object** end_slot = reinterpret_cast<Object**>(object->address() +
end_offset);
- StaticVisitor::VisitPointers(start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
}
};
static inline ReturnType Visit(Map* map, HeapObject* object) {
int object_size = BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- object, BodyDescriptor::kStartOffset, object_size);
+ map->heap(),
+ object,
+ BodyDescriptor::kStartOffset,
+ object_size);
return static_cast<ReturnType>(object_size);
}
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- object, BodyDescriptor::kStartOffset, object_size);
+ map->heap(),
+ object,
+ BodyDescriptor::kStartOffset,
+ object_size);
return static_cast<ReturnType>(object_size);
}
};
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
BodyVisitorBase<StaticVisitor>::IteratePointers(
- object, BodyDescriptor::kStartOffset, BodyDescriptor::kEndOffset);
+ map->heap(),
+ object,
+ BodyDescriptor::kStartOffset,
+ BodyDescriptor::kEndOffset);
return static_cast<ReturnType>(BodyDescriptor::kSize);
}
};
return table_.GetVisitor(map)(map, obj);
}
- static inline void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(p);
+ static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
}
private:
template<typename StaticVisitor>
-void Code::CodeIterateBody() {
+void Code::CodeIterateBody(Heap* heap) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
RelocIterator it(this, mode_mask);
StaticVisitor::VisitPointer(
+ heap,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
StaticVisitor::VisitPointer(
+ heap,
reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>();
+ it.rinfo()->template Visit<StaticVisitor>(heap);
}
}
#include "disassembler.h"
#endif
-
namespace v8 {
namespace internal {
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(constructor);
+ { MaybeObject* maybe_result =
+ constructor->GetHeap()->AllocateJSObject(constructor);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSValue::cast(result)->set_value(value);
MaybeObject* Object::ToObject() {
- Context* global_context = Top::context()->global_context();
if (IsJSObject()) {
return this;
} else if (IsNumber()) {
+ Isolate* isolate = Isolate::Current();
+ Context* global_context = isolate->context()->global_context();
return CreateJSValue(global_context->number_function(), this);
} else if (IsBoolean()) {
+ Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+ Context* global_context = isolate->context()->global_context();
return CreateJSValue(global_context->boolean_function(), this);
} else if (IsString()) {
+ Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+ Context* global_context = isolate->context()->global_context();
return CreateJSValue(global_context->string_function(), this);
}
Object* Object::ToBoolean() {
- if (IsTrue()) return Heap::true_value();
- if (IsFalse()) return Heap::false_value();
+ if (IsTrue()) return this;
+ if (IsFalse()) return this;
if (IsSmi()) {
- return Heap::ToBoolean(Smi::cast(this)->value() != 0);
+ return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0);
+ }
+ if (IsUndefined() || IsNull()) {
+ return HeapObject::cast(this)->GetHeap()->false_value();
}
- if (IsUndefined() || IsNull()) return Heap::false_value();
// Undetectable object is false
if (IsUndetectableObject()) {
- return Heap::false_value();
+ return HeapObject::cast(this)->GetHeap()->false_value();
}
if (IsString()) {
- return Heap::ToBoolean(String::cast(this)->length() != 0);
+ return HeapObject::cast(this)->GetHeap()->ToBoolean(
+ String::cast(this)->length() != 0);
}
if (IsHeapNumber()) {
return HeapNumber::cast(this)->HeapNumberToBoolean();
}
- return Heap::true_value();
+ return Isolate::Current()->heap()->true_value();
}
void Object::Lookup(String* name, LookupResult* result) {
if (IsJSObject()) return JSObject::cast(this)->Lookup(name, result);
Object* holder = NULL;
- Context* global_context = Top::context()->global_context();
if (IsString()) {
+ Heap* heap = HeapObject::cast(this)->GetHeap();
+ Context* global_context = heap->isolate()->context()->global_context();
holder = global_context->string_function()->instance_prototype();
} else if (IsNumber()) {
+ Heap* heap = Isolate::Current()->heap();
+ Context* global_context = heap->isolate()->context()->global_context();
holder = global_context->number_function()->instance_prototype();
} else if (IsBoolean()) {
+ Heap* heap = HeapObject::cast(this)->GetHeap();
+ Context* global_context = heap->isolate()->context()->global_context();
holder = global_context->boolean_function()->instance_prototype();
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
Object* structure,
String* name,
Object* holder) {
+ Isolate* isolate = name->GetIsolate();
  // To accommodate both the old and the new API we switch on the
// data structure used to store the callbacks. Eventually proxy
// callbacks should be phased out.
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
MaybeObject* value = (callback->getter)(receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return value;
}
JSObject* self = JSObject::cast(receiver);
JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
- LOG(ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(data->data(), self, holder_handle);
+ LOG(isolate, ApiNamedPropertyAccess("load", self, name));
+ CustomArguments args(isolate, data->data(), self, holder_handle);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = call_fun(v8::Utils::ToLocal(key), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
- if (result.IsEmpty()) return Heap::undefined_value();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.IsEmpty()) {
+ return isolate->heap()->undefined_value();
+ }
return *v8::Utils::OpenHandle(*result);
}
JSFunction::cast(getter));
}
// Getter is not a function.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
UNREACHABLE();
Handle<JSFunction> fun(JSFunction::cast(getter));
Handle<Object> self(receiver);
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = fun->GetHeap()->isolate()->debug();
// Handle stepping into a getter if step into is active.
- if (Debug::StepInActive()) {
- Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
}
#endif
bool has_pending_exception;
LookupResult* result,
String* name,
PropertyAttributes* attributes) {
+ Heap* heap = name->GetHeap();
if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// No accessible property found.
*attributes = ABSENT;
- Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return Heap::undefined_value();
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+ return heap->undefined_value();
}
LookupResult* result,
String* name,
bool continue_search) {
+ Heap* heap = name->GetHeap();
if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
}
}
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return ABSENT;
}
Object* value,
PropertyDetails details) {
ASSERT(!HasFastProperties());
+ Heap* heap = name->GetHeap();
int entry = property_dictionary()->FindEntry(name);
if (entry == StringDictionary::kNotFound) {
Object* store_value = value;
if (IsGlobalObject()) {
- { MaybeObject* maybe_store_value =
- Heap::AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) {
- return maybe_store_value;
- }
- }
+ MaybeObject* maybe_store_value =
+ heap->AllocateJSGlobalPropertyCell(value);
+ if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
Object* dict;
{ MaybeObject* maybe_dict =
MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
ASSERT(!HasFastProperties());
+ Heap* heap = GetHeap();
StringDictionary* dictionary = property_dictionary();
int entry = dictionary->FindEntry(name);
if (entry != StringDictionary::kNotFound) {
if (IsGlobalObject()) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsDontDelete()) {
- if (mode != FORCE_DELETION) return Heap::false_value();
+ if (mode != FORCE_DELETION) return heap->false_value();
// When forced to delete global properties, we have to make a
// map change to invalidate any ICs that think they can load
// from the DontDelete cell without checking if it contains
}
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(Heap::the_hole_value());
+ cell->set_value(heap->the_hole_value());
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
return dictionary->DeleteProperty(entry, mode);
}
}
- return Heap::true_value();
+ return heap->true_value();
}
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
+ Heap* heap = name->GetHeap();
// Traverse the prototype chain from the current object (this) to
// the holder and check for access rights. This avoids traversing the
// objects more than once in case of interceptors, because the
// holder will always be the interceptor holder and the search may
// only continue with a current object just after the interceptor
// holder in the prototype chain.
- Object* last = result->IsProperty() ? result->holder() : Heap::null_value();
+ Object* last = result->IsProperty() ? result->holder() : heap->null_value();
for (Object* current = this; true; current = current->GetPrototype()) {
if (current->IsAccessCheckNeeded()) {
// Check if we're allowed to read from the current object. Note
// property from the current object, we still check that we have
// access to it.
JSObject* checked = JSObject::cast(current);
- if (!Top::MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+ if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
return checked->GetPropertyWithFailedAccessCheck(receiver,
result,
name,
if (!result->IsProperty()) {
*attributes = ABSENT;
- return Heap::undefined_value();
+ return heap->undefined_value();
}
*attributes = result->GetAttributes();
Object* value;
case NORMAL:
value = holder->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? Heap::undefined_value() : value;
+ return value->IsTheHole() ? heap->undefined_value() : value;
case FIELD:
value = holder->FastPropertyAt(result->GetFieldIndex());
ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? Heap::undefined_value() : value;
+ return value->IsTheHole() ? heap->undefined_value() : value;
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS:
}
Object* holder = NULL;
- Context* global_context = Top::context()->global_context();
+ Context* global_context = Isolate::Current()->context()->global_context();
if (IsString()) {
holder = global_context->string_function()->instance_prototype();
} else if (IsNumber()) {
} else {
// Undefined and null have no indexed properties.
ASSERT(IsUndefined() || IsNull());
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
Object* Object::GetPrototype() {
// The object is either a number, a string, a boolean, or a real JS object.
if (IsJSObject()) return JSObject::cast(this)->map()->prototype();
- Context* context = Top::context()->global_context();
+ Heap* heap = Isolate::Current()->heap();
+ Context* context = heap->isolate()->context()->global_context();
if (IsNumber()) return context->number_function()->instance_prototype();
if (IsString()) return context->string_function()->instance_prototype();
if (IsBoolean()) {
return context->boolean_function()->instance_prototype();
} else {
- return Heap::null_value();
+ return heap->null_value();
}
}
// allowed. This is to avoid an assertion failure when allocating.
// Flattening strings is the only case where we always allow
// allocation because no GC is performed if the allocation fails.
- if (!Heap::IsAllocationAllowed()) return this;
+ if (!HEAP->IsAllocationAllowed()) return this;
#endif
+ Heap* heap = GetHeap();
switch (StringShape(this).representation_tag()) {
case kConsStringTag: {
ConsString* cs = ConsString::cast(this);
// There's little point in putting the flat string in new space if the
// cons string is in old space. It can never get GCed until there is
// an old space GC.
- PretenureFlag tenure = Heap::InNewSpace(this) ? pretenure : TENURED;
+ PretenureFlag tenure = heap->InNewSpace(this) ? pretenure : TENURED;
int len = length();
Object* object;
String* result;
if (IsAsciiRepresentation()) {
- { MaybeObject* maybe_object = Heap::AllocateRawAsciiString(len, tenure);
+ { MaybeObject* maybe_object = heap->AllocateRawAsciiString(len, tenure);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
result = String::cast(object);
len - first_length);
} else {
{ MaybeObject* maybe_object =
- Heap::AllocateRawTwoByteString(len, tenure);
+ heap->AllocateRawTwoByteString(len, tenure);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
result = String::cast(object);
len - first_length);
}
cs->set_first(result);
- cs->set_second(Heap::empty_string());
+ cs->set_second(heap->empty_string());
return result;
}
default:
resource->length() * sizeof(smart_chars[0])) == 0);
}
#endif // DEBUG
-
+ Heap* heap = GetHeap();
int size = this->Size(); // Byte size of the original string.
if (size < ExternalString::kSize) {
// The string is too small to fit an external String in its place. This can
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
this->set_map(is_ascii ?
- Heap::external_string_with_ascii_data_map() :
- Heap::external_string_map());
+ heap->external_string_with_ascii_data_map() :
+ heap->external_string_map());
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_length(length);
self->set_hash_field(hash_field);
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into an external symbol.
this->set_map(is_ascii ?
- Heap::external_symbol_with_ascii_data_map() :
- Heap::external_symbol_map());
+ heap->external_symbol_with_ascii_data_map() :
+ heap->external_symbol_map());
}
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
- Heap::CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
return true;
}
resource->length() * sizeof(smart_chars[0])) == 0);
}
#endif // DEBUG
-
+ Heap* heap = GetHeap();
int size = this->Size(); // Byte size of the original string.
if (size < ExternalString::kSize) {
// The string is too small to fit an external String in its place. This can
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(Heap::external_ascii_string_map());
+ this->set_map(heap->external_ascii_string_map());
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_length(length);
self->set_hash_field(hash_field);
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into an external symbol.
- this->set_map(Heap::external_ascii_symbol_map());
+ this->set_map(heap->external_ascii_symbol_map());
}
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
- Heap::CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
return true;
}
// All other JSObjects are rather similar to each other (JSObject,
// JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
default: {
+ Heap* heap = GetHeap();
Object* constructor = map()->constructor();
bool printed = false;
if (constructor->IsHeapObject() &&
- !Heap::Contains(HeapObject::cast(constructor))) {
+ !heap->Contains(HeapObject::cast(constructor))) {
accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
} else {
bool global_object = IsJSGlobalProxy();
if (constructor->IsJSFunction()) {
- if (!Heap::Contains(JSFunction::cast(constructor)->shared())) {
+ if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
} else {
Object* constructor_name =
void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
- // if (!Heap::InNewSpace(this)) PrintF("*", this);
- if (!Heap::Contains(this)) {
+ // if (!HEAP->InNewSpace(this)) PrintF("*", this);
+ Heap* heap = GetHeap();
+ if (!heap->Contains(this)) {
accumulator->Add("!!!INVALID POINTER!!!");
return;
}
- if (!Heap::Contains(map())) {
+ if (!heap->Contains(map())) {
accumulator->Add("!!!INVALID MAP!!!");
return;
}
if (u.bits.exp == 2047) {
// Detect NaN for IEEE double precision floating point.
if ((u.bits.man_low | u.bits.man_high) != 0)
- return Heap::false_value();
+ return GetHeap()->false_value();
}
if (u.bits.exp == 0) {
// Detect +0, and -0 for IEEE double precision floating point.
if ((u.bits.man_low | u.bits.man_high) == 0)
- return Heap::false_value();
+ return GetHeap()->false_value();
}
- return Heap::true_value();
+ return GetHeap()->true_value();
}
String* JSObject::class_name() {
if (IsJSFunction()) {
- return Heap::function_class_symbol();
+ return GetHeap()->function_class_symbol();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
return String::cast(constructor->shared()->instance_class_name());
}
// If the constructor is not present, return "Object".
- return Heap::Object_symbol();
+ return GetHeap()->Object_symbol();
}
if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
}
// If the constructor is not present, return "Object".
- return Heap::Object_symbol();
+ return GetHeap()->Object_symbol();
}
// Normalize the object if the name is an actual string (not the
// hidden symbols) and is not a real identifier.
+ Isolate* isolate = GetHeap()->isolate();
StringInputBuffer buffer(name);
- if (!ScannerConstants::IsIdentifier(&buffer)
- && name != Heap::hidden_symbol()) {
+ if (!isolate->scanner_constants()->IsIdentifier(&buffer)
+ && name != HEAP->hidden_symbol()) {
Object* obj;
{ MaybeObject* maybe_obj =
NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
// global object_function's map and there is not a transition for name.
bool allow_map_transition =
!old_descriptors->Contains(name) &&
- (Top::context()->global_context()->object_function()->map() != map());
+ (isolate->context()->global_context()->object_function()->
+ map() != map());
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
String* name,
JSFunction* function,
PropertyAttributes attributes) {
- ASSERT(!Heap::InNewSpace(function));
+ Heap* heap = GetHeap();
+ ASSERT(!heap->InNewSpace(function));
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
// If the old map is the global object map (from new Object()),
// then transitions are not added to it, so we are done.
- if (old_map == Top::context()->global_context()->object_function()->map()) {
+ if (old_map == heap->isolate()->context()->global_context()->
+ object_function()->map()) {
return function;
}
Object* value,
PropertyAttributes attributes) {
ASSERT(!HasFastProperties());
+ Heap* heap = GetHeap();
StringDictionary* dict = property_dictionary();
Object* store_value = value;
if (IsGlobalObject()) {
return value;
}
{ MaybeObject* maybe_store_value =
- Heap::AllocateJSGlobalPropertyCell(value);
+ heap->AllocateJSGlobalPropertyCell(value);
if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
JSGlobalPropertyCell::cast(store_value)->set_value(value);
Object* value,
PropertyAttributes attributes) {
ASSERT(!IsJSGlobalProxy());
+ Heap* heap = GetHeap();
if (!map()->is_extensible()) {
Handle<Object> args[1] = {Handle<String>(name)};
- return Top::Throw(*Factory::NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ return heap->isolate()->Throw(
+ *FACTORY->NewTypeError("object_not_extensible", HandleVector(args, 1)));
}
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
if (map()->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
+ if (value->IsJSFunction() && !heap->InNewSpace(value)) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
return result;
}
// Do not add transitions to the map of "new Object()".
- if (map() == Top::context()->global_context()->object_function()->map()) {
+ if (map() == GetHeap()->isolate()->context()->global_context()->
+ object_function()->map()) {
return result;
}
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
- HandleScope scope;
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
Handle<JSObject> this_handle(this);
Handle<String> name_handle(name);
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::NamedPropertySetter setter =
v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
Handle<Object> value_unhole(value->IsTheHole() ?
- Heap::undefined_value() :
- value);
+ isolate->heap()->undefined_value() :
+ value,
+ isolate);
result = setter(v8::Utils::ToLocal(name_handle),
v8::Utils::ToLocal(value_unhole),
info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
}
MaybeObject* raw_result =
*value_handle,
attributes,
strict_mode);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
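
Editor's note: this hunk shows the handle-side half of the migration: HandleScope takes the isolate, and Handle construction gains an isolate argument so the slot is carved out of that isolate's handle storage. The sketch below models why both need the isolate; the types are hypothetical minimal stand-ins, not V8's real handle implementation.

#include <cassert>
#include <cstddef>
#include <vector>

class Isolate {
 public:
  std::vector<void*>* handle_block() { return &handle_block_; }
 private:
  std::vector<void*> handle_block_;  // per-isolate, not process-wide
};

template <typename T>
class Handle {
 public:
  Handle(T* value, Isolate* isolate)
      : isolate_(isolate), index_(isolate->handle_block()->size()) {
    isolate->handle_block()->push_back(value);  // slot in this isolate only
  }
  T* operator*() const {
    return static_cast<T*>((*isolate_->handle_block())[index_]);
  }
 private:
  Isolate* isolate_;
  std::size_t index_;
};

class HandleScope {
 public:
  explicit HandleScope(Isolate* isolate)
      : isolate_(isolate), mark_(isolate->handle_block()->size()) {}
  ~HandleScope() { isolate_->handle_block()->resize(mark_); }  // unwind
 private:
  Isolate* isolate_;
  std::size_t mark_;
};

int main() {
  Isolate isolate;
  {
    HandleScope scope(&isolate);
    int object = 42;
    Handle<int> h(&object, &isolate);
    assert(*h == &object);
  }
  // Leaving the scope released every handle it created in this isolate.
  assert(isolate.handle_block()->empty());
  return 0;
}
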
String* name,
Object* value,
JSObject* holder) {
- HandleScope scope;
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually proxy
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
MaybeObject* obj = (callback->setter)(this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (obj->IsFailure()) return obj;
return *value_handle;
}
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
Handle<String> key(name);
- LOG(ApiNamedPropertyAccess("store", this, name));
- CustomArguments args(data->data(), this, JSObject::cast(holder));
+ LOG(isolate, ApiNamedPropertyAccess("store", this, name));
+ CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
call_fun(v8::Utils::ToLocal(key),
v8::Utils::ToLocal(value_handle),
info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
}
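
Editor's note: the "Leaving JavaScript" blocks above now construct VMState with an isolate because the JS/EXTERNAL state flag has moved from a process-wide global onto the isolate. A compilable sketch of that RAII guard, with stub types rather than the real API:

#include <cstdio>

enum StateTag { JS, EXTERNAL };

class Isolate {
 public:
  StateTag current_vm_state = JS;
};

class VMState {
 public:
  VMState(Isolate* isolate, StateTag tag)
      : isolate_(isolate), previous_(isolate->current_vm_state) {
    isolate_->current_vm_state = tag;  // entering the embedder callback
  }
  ~VMState() { isolate_->current_vm_state = previous_; }  // leaving it
 private:
  Isolate* isolate_;
  StateTag previous_;
};

int main() {
  Isolate isolate;
  {
    VMState state(&isolate, EXTERNAL);  // as around the setter call above
    std::printf("in callback:  %d\n", isolate.current_vm_state == EXTERNAL);
  }
  std::printf("after return: %d\n", isolate.current_vm_state == JS);
  return 0;
}
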
return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
} else {
Handle<String> key(name);
- Handle<Object> holder_handle(holder);
+ Handle<Object> holder_handle(holder, isolate);
Handle<Object> args[2] = { key, holder_handle };
- return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2)));
}
}
MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
Object* value) {
- Handle<Object> value_handle(value);
- Handle<JSFunction> fun(JSFunction::cast(setter));
- Handle<JSObject> self(this);
+ Isolate* isolate = GetIsolate();
+ Handle<Object> value_handle(value, isolate);
+ Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
+ Handle<JSObject> self(this, isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
- if (Debug::StepInActive()) {
- Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
}
#endif
bool has_pending_exception;
void JSObject::LookupCallbackSetterInPrototypes(String* name,
LookupResult* result) {
+ Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
- pt != Heap::null_value();
+ pt != heap->null_value();
pt = pt->GetPrototype()) {
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty()) {
MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
Object* value,
bool* found) {
+ Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
- pt != Heap::null_value();
+ pt != heap->null_value();
pt = pt->GetPrototype()) {
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
}
}
*found = false;
- return Heap::the_hole_value();
+ return heap->the_hole_value();
}
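
Editor's note: the prototype-chain loops in these hunks all hoist a single GetHeap() call ahead of the loop and compare the per-heap null sentinel on each iteration, instead of re-resolving a static Heap on every pass. A small sketch of that hoisting pattern, with hypothetical stub types standing in for V8's:

#include <cstdio>

class Heap {
 public:
  void* null_value() { return &null_; }  // per-heap null sentinel
 private:
  int null_;
};

class JSObject {
 public:
  JSObject(Heap* heap, void* prototype) : heap_(heap), prototype_(prototype) {}
  Heap* GetHeap() const { return heap_; }
  void* GetPrototype() const { return prototype_; }
 private:
  Heap* heap_;
  void* prototype_;
};

int ChainLength(JSObject* start) {
  Heap* heap = start->GetHeap();  // hoisted once, as in the patched loops
  int n = 0;
  for (void* pt = start; pt != heap->null_value();
       pt = static_cast<JSObject*>(pt)->GetPrototype()) {
    ++n;
  }
  return n;
}

int main() {
  Heap heap;
  JSObject base(&heap, heap.null_value());
  JSObject derived(&heap, &base);
  std::printf("chain length: %d\n", ChainLength(&derived));  // prints 2
  return 0;
}
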
String* name,
LookupResult* result) {
DescriptorArray* descriptors = instance_descriptors();
- int number = DescriptorLookupCache::Lookup(descriptors, name);
+ DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+ int number = cache->Lookup(descriptors, name);
if (number == DescriptorLookupCache::kAbsent) {
number = descriptors->Search(name);
- DescriptorLookupCache::Update(descriptors, name, number);
+ cache->Update(descriptors, name, number);
}
if (number != DescriptorArray::kNotFound) {
result->DescriptorResult(holder, descriptors->GetDetails(number), number);
void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
LookupResult* result) {
+ Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
- pt != Heap::null_value();
+ pt != heap->null_value();
pt = JSObject::cast(pt)->GetPrototype()) {
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty() && (result->type() != INTERCEPTOR)) return;
String* name,
Object* value,
bool check_prototype) {
+ Heap* heap = GetHeap();
if (check_prototype && !result->IsProperty()) {
LookupCallbackSetterInPrototypes(name, result);
}
HandleScope scope;
Handle<Object> value_handle(value);
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return *value_handle;
}
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
+ Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
// reallocating them.
if (!name->IsSymbol() && name->length() <= 2) {
Object* symbol_version;
- { MaybeObject* maybe_symbol_version = Heap::LookupSymbol(name);
+ { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name);
if (maybe_symbol_version->ToObject(&symbol_version)) {
name = String::cast(symbol_version);
}
// Check access rights if needed.
if (IsAccessCheckNeeded()
- && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(result, name, value, true);
}
Handle<String> key(name);
Handle<Object> holder(this);
Handle<Object> args[2] = { key, holder };
- return Top::Throw(*Factory::NewTypeError("strict_read_only_property",
- HandleVector(args, 2)));
+ return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, 2)));
} else {
return value;
}
ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
JSFunction* function =
JSFunction::cast(target_descriptors->GetValue(number));
- ASSERT(!Heap::InNewSpace(function));
+ ASSERT(!HEAP->InNewSpace(function));
if (value == function) {
set_map(target_map);
return value;
String* name,
Object* value,
PropertyAttributes attributes) {
+ Heap* heap = GetHeap();
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
LocalLookup(name, &result);
// Check access rights if needed.
if (IsAccessCheckNeeded()
- && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(&result, name, value, false);
}
if (continue_search) {
// Continue searching via the prototype chain.
Object* pt = GetPrototype();
- if (pt != Heap::null_value()) {
+ if (!pt->IsNull()) {
return JSObject::cast(pt)->
GetPropertyAttributeWithReceiver(receiver, name);
}
JSObject* receiver,
String* name,
bool continue_search) {
+ Isolate* isolate = GetIsolate();
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- CustomArguments args(interceptor->data(), receiver, this);
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
- LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = query(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) {
} else if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) return DONT_ENUM;
LookupResult* result,
String* name,
bool continue_search) {
+ Heap* heap = GetHeap();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+ !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
return GetPropertyAttributeWithFailedAccessCheck(receiver,
result,
name,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
set(index, result);
- Counters::normalized_maps.Increment();
+ COUNTERS->normalized_maps()->Increment();
return result;
}
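
Editor's note: Counters::normalized_maps.Increment() becomes a call through a counters object, written COUNTERS->... here and isolate->counters()->... in later hunks; reading COUNTERS as shorthand for the current isolate's counters() is an assumption, since its definition is not in this excerpt. The effect either way is that two isolates stop sharing one tally, as this stub sketch shows:

#include <cstdio>

class StatsCounter {
 public:
  void Increment() { ++count_; }
  int count() const { return count_; }
 private:
  int count_ = 0;
};

class Counters {
 public:
  StatsCounter* normalized_maps() { return &normalized_maps_; }
 private:
  StatsCounter normalized_maps_;
};

class Isolate {
 public:
  Counters* counters() { return &counters_; }
 private:
  Counters counters_;  // one set of counters per isolate
};

int main() {
  Isolate a, b;
  a.counters()->normalized_maps()->Increment();
  // With the old static Counters both lines would read 1.
  std::printf("a=%d b=%d\n",
              a.counters()->normalized_maps()->count(),
              b.counters()->normalized_maps()->count());
  return 0;
}
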
UNIQUE_NORMALIZED_MAP);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- Counters::normalized_maps.Increment();
+ COUNTERS->normalized_maps()->Increment();
set_map(Map::cast(obj));
}
// The global object is always normalized.
ASSERT(!IsGlobalObject());
-
// JSGlobalProxy must never be normalized
ASSERT(!IsJSGlobalProxy());
+ Heap* heap = GetHeap();
+
// Allocate new content.
int property_count = map()->NumberOfDescribedProperties();
if (expected_additional_properties > 0) {
int index = map()->instance_descriptors()->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index);
- { MaybeObject* maybe_obj = Top::context()->global_context()->
- normalized_map_cache()->Get(this, mode);
+ { MaybeObject* maybe_obj = heap->isolate()->context()->global_context()->
+ normalized_map_cache()->Get(this, mode);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
int new_instance_size = new_map->instance_size();
int instance_size_delta = map()->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- Heap::CreateFillerObjectAt(this->address() + new_instance_size,
+ heap->CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta);
set_map(new_map);
+ map()->set_instance_descriptors(heap->empty_descriptor_array());
set_properties(dictionary);
- Counters::props_to_dictionary.Increment();
+ heap->isolate()->counters()->props_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
set_map(new_map);
set_elements(dictionary);
- Counters::elements_to_dictionary.Increment();
+ new_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
+ Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
DeleteMode mode) {
// Check local property, ignore interceptor.
+ Heap* heap = GetHeap();
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
- if (!result.IsProperty()) return Heap::true_value();
+ if (!result.IsProperty()) return heap->true_value();
// Normalize object if needed.
Object* obj;
MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
- HandleScope scope;
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<String> name_handle(name);
Handle<JSObject> this_handle(this);
if (!interceptor->deleter()->IsUndefined()) {
v8::NamedPropertyDeleter deleter =
v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
- LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = deleter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
return *v8::Utils::OpenHandle(*result);
}
MaybeObject* raw_result =
this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
DeleteMode mode) {
+ Heap* heap = GetHeap();
ASSERT(!HasExternalArrayElements());
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
UNREACHABLE();
break;
}
- return Heap::true_value();
+ return heap->true_value();
}
MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return Heap::false_value();
+ if (interceptor->deleter()->IsUndefined()) return heap->false_value();
v8::IndexedPropertyDeleter deleter =
v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
Handle<JSObject> this_handle(this);
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = deleter(index, info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
return *v8::Utils::OpenHandle(*result);
}
MaybeObject* raw_result =
this_handle->DeleteElementPostInterceptor(index, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return Heap::false_value();
+ !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+ return isolate->heap()->false_value();
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return Heap::false_value();
+ if (proto->IsNull()) return isolate->heap()->false_value();
ASSERT(proto->IsJSGlobalObject());
return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
}
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
Object* result = dictionary->DeleteProperty(entry, mode);
- if (mode == STRICT_DELETION && result == Heap::false_value()) {
+ if (mode == STRICT_DELETION && result ==
+ isolate->heap()->false_value()) {
// In strict mode, deleting a non-configurable property throws an
// exception. dictionary->DeleteProperty will return false_value()
// if a non-configurable property is being deleted.
HandleScope scope;
- Handle<Object> i = Factory::NewNumberFromUint(index);
+ Handle<Object> i = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[2] = { i, Handle<Object>(this) };
- return Top::Throw(*Factory::NewTypeError("strict_delete_property",
- HandleVector(args, 2)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_delete_property", HandleVector(args, 2)));
}
}
break;
UNREACHABLE();
break;
}
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
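
Editor's note: the throw path in these hunks rewrites Top::Throw(*Factory::NewTypeError(...)) as isolate->Throw(*isolate->factory()->NewTypeError(...)), so the pending exception is recorded on the isolate rather than in global state. A stub-typed sketch of that shape (the real Throw returns a failure object; the nullptr here is just a stand-in sentinel):

#include <cstdio>
#include <string>

struct Object { std::string message; };

class Factory {
 public:
  Object* NewTypeError(const std::string& type) {
    last_error_ = Object{type};
    return &last_error_;
  }
 private:
  Object last_error_;
};

class Isolate {
 public:
  Factory* factory() { return &factory_; }
  Object* Throw(Object* error) {
    pending_exception_ = error;  // recorded per isolate, not globally
    return nullptr;              // callers propagate a failure sentinel
  }
  Object* pending_exception() const { return pending_exception_; }
 private:
  Factory factory_;
  Object* pending_exception_ = nullptr;
};

int main() {
  Isolate isolate;
  isolate.Throw(isolate.factory()->NewTypeError("strict_delete_property"));
  std::printf("pending: %s\n", isolate.pending_exception()->message.c_str());
  return 0;
}
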
MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
// ECMA-262, 3rd, 8.6.2.5
ASSERT(name->IsString());
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return Heap::false_value();
+ !isolate->MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+ return isolate->heap()->false_value();
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return Heap::false_value();
+ if (proto->IsNull()) return isolate->heap()->false_value();
ASSERT(proto->IsJSGlobalObject());
return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
}
} else {
LookupResult result;
LocalLookup(name, &result);
- if (!result.IsProperty()) return Heap::true_value();
+ if (!result.IsProperty()) return isolate->heap()->true_value();
// Ignore attributes if forcing a deletion.
if (result.IsDontDelete() && mode != FORCE_DELETION) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
- return Top::Throw(*Factory::NewTypeError("strict_delete_property",
- HandleVector(args, 2)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_delete_property", HandleVector(args, 2)));
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
// Check for interceptor.
if (result.type() == INTERCEPTOR) {
// Check whether this object references another object.
bool JSObject::ReferencesObject(Object* obj) {
+ Heap* heap = GetHeap();
AssertNoAllocation no_alloc;
// Is the object the constructor for this object?
// Check if the object is among the named properties.
Object* key = SlowReverseLookup(obj);
- if (key != Heap::undefined_value()) {
+ if (!key->IsUndefined()) {
return true;
}
}
case DICTIONARY_ELEMENTS: {
key = element_dictionary()->SlowReverseLookup(obj);
- if (key != Heap::undefined_value()) {
+ if (!key->IsUndefined()) {
return true;
}
break;
if (IsJSFunction()) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
+ heap->isolate()->context()->global_context()->
+ arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
MaybeObject* JSObject::PreventExtensions() {
+ Isolate* isolate = GetIsolate();
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, Heap::undefined_value(), v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- return Heap::false_value();
+ !isolate->MayNamedAccess(this,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+ return isolate->heap()->false_value();
}
if (IsJSGlobalProxy()) {
// - This object has no elements.
// - No prototype has enumerable properties/elements.
bool JSObject::IsSimpleEnum() {
+ Heap* heap = GetHeap();
for (Object* o = this;
- o != Heap::null_value();
+ o != heap->null_value();
o = JSObject::cast(o)->GetPrototype()) {
JSObject* curr = JSObject::cast(o);
if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
void JSObject::LocalLookup(String* name, LookupResult* result) {
ASSERT(name->IsString());
+ Heap* heap = GetHeap();
+
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
}
// Check __proto__ before interceptor.
- if (name->Equals(Heap::Proto_symbol()) && !IsJSContextExtensionObject()) {
+ if (name->Equals(heap->Proto_symbol()) &&
+ !IsJSContextExtensionObject()) {
result->ConstantResult(this);
return;
}
// Check for lookup interceptor except when bootstrapping.
- if (HasNamedInterceptor() && !Bootstrapper::IsActive()) {
+ if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
result->InterceptorResult(this);
return;
}
void JSObject::Lookup(String* name, LookupResult* result) {
// Ecma-262 3rd 8.6.2.4
+ Heap* heap = GetHeap();
for (Object* current = this;
- current != Heap::null_value();
+ current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookup(name, result);
if (result->IsProperty()) return;
// Search object and its prototype chain for callback properties.
void JSObject::LookupCallback(String* name, LookupResult* result) {
+ Heap* heap = GetHeap();
for (Object* current = this;
- current != Heap::null_value();
+ current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty() && result->type() == CALLBACKS) return;
MaybeObject* JSObject::DefineGetterSetter(String* name,
PropertyAttributes attributes) {
+ Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
name->TryFlatten();
if (!CanSetCallback(name)) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
uint32_t index = 0;
case EXTERNAL_FLOAT_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
- return Heap::undefined_value();
+ return heap->undefined_value();
case DICTIONARY_ELEMENTS: {
// Lookup the index.
NumberDictionary* dictionary = element_dictionary();
if (entry != NumberDictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsReadOnly()) return Heap::undefined_value();
+ if (details.IsReadOnly()) return heap->undefined_value();
if (details.type() == CALLBACKS) {
if (result->IsFixedArray()) {
return result;
LookupResult result;
LocalLookup(name, &result);
if (result.IsProperty()) {
- if (result.IsReadOnly()) return Heap::undefined_value();
+ if (result.IsReadOnly()) return heap->undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
// Need to preserve old getters/setters.
// Allocate the fixed array to hold getter and setter.
Object* structure;
- { MaybeObject* maybe_structure = Heap::AllocateFixedArray(2, TENURED);
+ { MaybeObject* maybe_structure = heap->AllocateFixedArray(2, TENURED);
if (!maybe_structure->ToObject(&structure)) return maybe_structure;
}
bool JSObject::CanSetCallback(String* name) {
ASSERT(!IsAccessCheckNeeded()
- || Top::MayNamedAccess(this, name, v8::ACCESS_SET));
+ || Isolate::Current()->MayNamedAccess(this, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or its prototype chain.
Object* fun,
PropertyAttributes attributes) {
ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+ Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return Heap::undefined_value();
+ !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return isolate->heap()->undefined_value();
}
if (IsJSGlobalProxy()) {
MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
+ Isolate* isolate = GetIsolate();
String* name = String::cast(info->name());
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return Heap::undefined_value();
+ !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return isolate->heap()->undefined_value();
}
if (IsJSGlobalProxy()) {
name->TryFlatten();
if (!CanSetCallback(name)) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element) {
- if (IsJSArray()) return Heap::undefined_value();
+ if (IsJSArray()) return isolate->heap()->undefined_value();
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
case EXTERNAL_FLOAT_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
case DICTIONARY_ELEMENTS:
break;
default:
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
Object* ok;
{ MaybeObject* maybe_ok =
Object* JSObject::LookupAccessor(String* name, bool is_getter) {
+ Heap* heap = GetHeap();
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return Heap::undefined_value();
+ !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return heap->undefined_value();
}
// Make the lookup and include prototypes.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
- obj != Heap::null_value();
+ obj != heap->null_value();
obj = JSObject::cast(obj)->GetPrototype()) {
JSObject* js_object = JSObject::cast(obj);
if (js_object->HasDictionaryElements()) {
}
} else {
for (Object* obj = this;
- obj != Heap::null_value();
+ obj != heap->null_value();
obj = JSObject::cast(obj)->GetPrototype()) {
LookupResult result;
JSObject::cast(obj)->LocalLookup(name, &result);
if (result.IsProperty()) {
- if (result.IsReadOnly()) return Heap::undefined_value();
+ if (result.IsReadOnly()) return heap->undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
if (obj->IsFixedArray()) {
}
}
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
Object* JSObject::SlowReverseLookup(Object* value) {
+ Heap* heap = GetHeap();
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
}
}
}
- return Heap::undefined_value();
+ return heap->undefined_value();
} else {
return property_dictionary()->SlowReverseLookup(value);
}
MaybeObject* Map::CopyDropDescriptors() {
+ Heap* heap = GetHeap();
Object* result;
{ MaybeObject* maybe_result =
- Heap::AllocateMap(instance_type(), instance_size());
+ heap->AllocateMap(instance_type(), instance_size());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Map::cast(result)->set_prototype(prototype());
// pointing to the same transition which is bad because the garbage
// collector relies on being able to reverse pointers from transitions
// to maps. If properties need to be retained use CopyDropTransitions.
- Map::cast(result)->set_instance_descriptors(Heap::empty_descriptor_array());
+ Map::cast(result)->set_instance_descriptors(
+ heap->empty_descriptor_array());
// Please note instance_type and instance_size are set when allocated.
Map::cast(result)->set_inobject_properties(inobject_properties());
Map::cast(result)->set_unused_property_fields(unused_property_fields());
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
Map::cast(result)->set_is_shared(false);
- Map::cast(result)->ClearCodeCache();
+ Map::cast(result)->ClearCodeCache(heap);
return result;
}
Object* result;
{ MaybeObject* maybe_result =
- Heap::AllocateMap(instance_type(), new_instance_size);
+ GetHeap()->AllocateMap(instance_type(), new_instance_size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateCodeCache();
+ { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
set_code_cache(result);
if (!code_cache()->IsFixedArray()) {
return CodeCache::cast(code_cache())->Lookup(name, flags);
} else {
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
}
void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
Map* current = this;
- while (current != Heap::meta_map()) {
+ Map* meta_map = heap()->meta_map();
+ while (current != meta_map) {
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
*RawField(current, Map::kInstanceDescriptorsOffset));
- if (d == Heap::empty_descriptor_array()) {
+ if (d == heap()->empty_descriptor_array()) {
Map* prev = current->map();
- current->set_map(Heap::meta_map());
+ current->set_map(meta_map);
callback(current, data);
current = prev;
continue;
}
}
if (!map_done) continue;
- *map_or_index_field = Heap::fixed_array_map();
+ *map_or_index_field = heap()->fixed_array_map();
Map* prev = current->map();
- current->set_map(Heap::meta_map());
+ current->set_map(meta_map);
callback(current, data);
current = prev;
}
Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
+ Heap* heap = GetHeap();
FixedArray* cache = default_cache();
int length = cache->length();
for (int i = 0; i < length; i += kCodeCacheEntrySize) {
}
}
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
return cache->Lookup(name, flags);
} else {
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
}
MUST_USE_RESULT MaybeObject* AsObject() {
ASSERT(code_ != NULL);
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(2);
+ { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
CodeCacheHashTableKey key(name, flags);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
void CodeCacheHashTable::RemoveByIndex(int index) {
ASSERT(index >= 0);
- set(EntryToIndex(index), Heap::null_value());
- set(EntryToIndex(index) + 1, Heap::null_value());
+ Heap* heap = GetHeap();
+ set(EntryToIndex(index), heap->null_value());
+ set(EntryToIndex(index) + 1, heap->null_value());
ElementRemoved();
}
MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
+ Heap* heap = GetHeap();
ASSERT(!array->HasExternalArrayElements());
switch (array->GetElementsKind()) {
case JSObject::FAST_ELEMENTS:
// Allocate a temporary fixed array.
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateFixedArray(size);
+ { MaybeObject* maybe_object = heap->AllocateFixedArray(size);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* key_array = FixedArray::cast(object);
UNREACHABLE();
}
UNREACHABLE();
- return Heap::null_value(); // Failure case needs to "return" a value.
+ return heap->null_value(); // Failure case needs to "return" a value.
}
MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
+ Heap* heap = GetHeap();
int len0 = length();
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
// Allocate the result
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(len0 + extra);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(len0 + extra);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
// Fill in the content
MaybeObject* FixedArray::CopySize(int new_length) {
- if (new_length == 0) return Heap::empty_fixed_array();
+ Heap* heap = GetHeap();
+ if (new_length == 0) return heap->empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(new_length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* result = FixedArray::cast(obj);
MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) {
+ Heap* heap = Isolate::Current()->heap();
if (number_of_descriptors == 0) {
- return Heap::empty_descriptor_array();
+ return heap->empty_descriptor_array();
}
// Allocate the array of keys.
Object* array;
{ MaybeObject* maybe_array =
- Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
+ heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
if (!maybe_array->ToObject(&array)) return maybe_array;
}
// Do not use DescriptorArray::cast on incomplete object.
// Allocate the content array and set it in the descriptor array.
{ MaybeObject* maybe_array =
- Heap::AllocateFixedArray(number_of_descriptors << 1);
+ heap->AllocateFixedArray(number_of_descriptors << 1);
if (!maybe_array->ToObject(&array)) return maybe_array;
}
result->set(kContentArrayIndex, array);
MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- return Heap::AllocateFixedArray(LengthFor(deopt_entry_count),
+ return HEAP->AllocateFixedArray(LengthFor(deopt_entry_count),
pretenure);
}
MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return Heap::empty_fixed_array();
- return Heap::AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
+ if (number_of_deopt_points == 0) return HEAP->empty_fixed_array();
+ return HEAP->AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
pretenure);
}
#endif
-static StaticResource<StringInputBuffer> string_input_buffer;
-
-
bool String::LooksValid() {
- if (!Heap::Contains(this)) return false;
+ if (!Isolate::Current()->heap()->Contains(this)) return false;
return true;
}
// doesn't make Utf8Length faster, but it is very likely that
// the string will be accessed later (for example by WriteUtf8)
// so it's still a good idea.
+ Heap* heap = GetHeap();
TryFlatten();
- Access<StringInputBuffer> buffer(&string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ heap->isolate()->objects_string_input_buffer());
buffer->Reset(0, this);
int result = 0;
while (buffer->has_more())
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
return SmartPointer<char>(NULL);
}
+ Heap* heap = GetHeap();
// Negative length means up to the end of the string.
if (length < 0) length = kMaxInt - offset;
// Compute the size of the UTF-8 string. Start at the specified offset.
- Access<StringInputBuffer> buffer(&string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ heap->isolate()->objects_string_input_buffer());
buffer->Reset(offset, this);
int character_position = offset;
int utf8_bytes = 0;
SmartPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
ASSERT(NativeAllocationChecker::allocation_allowed());
-
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
return SmartPointer<uc16>();
}
+ Heap* heap = GetHeap();
- Access<StringInputBuffer> buffer(&string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ heap->isolate()->objects_string_input_buffer());
buffer->Reset(this);
uc16* result = NewArray<uc16>(length() + 1);
}
-Relocatable* Relocatable::top_ = NULL;
-
-
void Relocatable::PostGarbageCollectionProcessing() {
- Relocatable* current = top_;
+ Isolate* isolate = Isolate::Current();
+ Relocatable* current = isolate->relocatable_top();
while (current != NULL) {
current->PostGarbageCollection();
current = current->prev_;
// Reserve space for statics needing saving and restoring.
int Relocatable::ArchiveSpacePerThread() {
- return sizeof(top_);
+ return sizeof(Isolate::Current()->relocatable_top());
}
// Archive statics that are thread local.
char* Relocatable::ArchiveState(char* to) {
- *reinterpret_cast<Relocatable**>(to) = top_;
- top_ = NULL;
+ Isolate* isolate = Isolate::Current();
+ *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
+ isolate->set_relocatable_top(NULL);
return to + ArchiveSpacePerThread();
}
// Restore statics that are thread local.
char* Relocatable::RestoreState(char* from) {
- top_ = *reinterpret_cast<Relocatable**>(from);
+ Isolate* isolate = Isolate::Current();
+ isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
return from + ArchiveSpacePerThread();
}
void Relocatable::Iterate(ObjectVisitor* v) {
- Iterate(v, top_);
+ Isolate* isolate = Isolate::Current();
+ Iterate(v, isolate->relocatable_top());
}
}
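
Editor's note: the Relocatable hunks retire the file-static top_ in favor of a list head stored on the isolate, with ArchiveState/RestoreState parking that head in a per-thread buffer across thread switches. The sketch below passes the isolate explicitly for self-containment, where the patch reaches it via Isolate::Current(); everything else is a simplified stand-in.

#include <cassert>
#include <cstring>

class Relocatable;

class Isolate {
 public:
  Relocatable* relocatable_top() const { return top_; }
  void set_relocatable_top(Relocatable* top) { top_ = top; }
 private:
  Relocatable* top_ = nullptr;
};

class Relocatable {
 public:
  explicit Relocatable(Isolate* isolate) : isolate_(isolate) {
    prev_ = isolate->relocatable_top();  // push onto the isolate's list
    isolate->set_relocatable_top(this);
  }
  ~Relocatable() { isolate_->set_relocatable_top(prev_); }  // pop

  static int ArchiveSpacePerThread() { return sizeof(Relocatable*); }

  // Save the outgoing thread's list head and clear it on the isolate.
  static char* ArchiveState(Isolate* isolate, char* to) {
    Relocatable* top = isolate->relocatable_top();
    std::memcpy(to, &top, sizeof(top));
    isolate->set_relocatable_top(nullptr);
    return to + ArchiveSpacePerThread();
  }
  // Reinstall the saved head for the incoming thread.
  static char* RestoreState(Isolate* isolate, char* from) {
    Relocatable* top;
    std::memcpy(&top, from, sizeof(top));
    isolate->set_relocatable_top(top);
    return from + ArchiveSpacePerThread();
  }

 private:
  Isolate* isolate_;
  Relocatable* prev_;
};

int main() {
  Isolate isolate;
  char buffer[sizeof(Relocatable*)];
  Relocatable r(&isolate);
  Relocatable::ArchiveState(&isolate, buffer);
  assert(isolate.relocatable_top() == nullptr);  // parked for thread switch
  Relocatable::RestoreState(&isolate, buffer);
  assert(isolate.relocatable_top() == &r);
  return 0;
}
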
-FlatStringReader::FlatStringReader(Handle<String> str)
- : str_(str.location()),
+FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
+ : Relocatable(isolate),
+ str_(str.location()),
length_(str->length()) {
PostGarbageCollection();
}
-FlatStringReader::FlatStringReader(Vector<const char> input)
- : str_(0),
+FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
+ : Relocatable(isolate),
+ str_(0),
is_ascii_(true),
length_(input.length()),
start_(input.start()) { }
}
-static StringInputBuffer string_compare_buffer_b;
-
-
template <typename IteratorA>
-static inline bool CompareStringContentsPartial(IteratorA* ia, String* b) {
+static inline bool CompareStringContentsPartial(Isolate* isolate,
+ IteratorA* ia,
+ String* b) {
if (b->IsFlat()) {
if (b->IsAsciiRepresentation()) {
VectorIterator<char> ib(b->ToAsciiVector());
return CompareStringContents(ia, &ib);
}
} else {
- string_compare_buffer_b.Reset(0, b);
- return CompareStringContents(ia, &string_compare_buffer_b);
+ isolate->objects_string_compare_buffer_b()->Reset(0, b);
+ return CompareStringContents(ia,
+ isolate->objects_string_compare_buffer_b());
}
}
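
Editor's note: the hunks above delete the file-static string_compare_buffer_a/_b scratch buffers; they become per-isolate objects reached through accessors, and helpers that used them now take the isolate explicitly. A stub-typed sketch of that shape (not the real StringInputBuffer):

#include <cstdio>
#include <string>

class StringInputBuffer {
 public:
  void Reset(int offset, const std::string* s) { s_ = s; pos_ = offset; }
  bool has_more() const { return s_ && pos_ < static_cast<int>(s_->size()); }
  char GetNext() { return (*s_)[pos_++]; }
 private:
  const std::string* s_ = nullptr;
  int pos_ = 0;
};

class Isolate {
 public:
  StringInputBuffer* objects_string_compare_buffer_a() { return &a_; }
  StringInputBuffer* objects_string_compare_buffer_b() { return &b_; }
 private:
  StringInputBuffer a_, b_;  // one scratch pair per isolate, no global sharing
};

// The helper now names the isolate instead of reaching for a file-static.
static bool EqualsSlow(Isolate* isolate,
                       const std::string& a, const std::string& b) {
  StringInputBuffer* ia = isolate->objects_string_compare_buffer_a();
  StringInputBuffer* ib = isolate->objects_string_compare_buffer_b();
  ia->Reset(0, &a);
  ib->Reset(0, &b);
  while (ia->has_more() && ib->has_more()) {
    if (ia->GetNext() != ib->GetNext()) return false;
  }
  return !ia->has_more() && !ib->has_more();
}

int main() {
  Isolate isolate;
  std::printf("%d\n", EqualsSlow(&isolate, "abc", "abc"));
  std::printf("%d\n", EqualsSlow(&isolate, "abc", "abd"));
  return 0;
}
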
-static StringInputBuffer string_compare_buffer_a;
-
-
bool String::SlowEquals(String* other) {
+ Heap* heap = GetHeap();
+
// Fast check: negative check with lengths.
int len = length();
if (len != other->length()) return false;
Vector<const char>(str2, len));
}
+ Isolate* isolate = heap->isolate();
if (lhs->IsFlat()) {
if (lhs->IsAsciiRepresentation()) {
Vector<const char> vec1 = lhs->ToAsciiVector();
}
} else {
VectorIterator<char> buf1(vec1);
- string_compare_buffer_b.Reset(0, rhs);
- return CompareStringContents(&buf1, &string_compare_buffer_b);
+ isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
+ return CompareStringContents(&buf1,
+ isolate->objects_string_compare_buffer_b());
}
} else {
Vector<const uc16> vec1 = lhs->ToUC16Vector();
}
} else {
VectorIterator<uc16> buf1(vec1);
- string_compare_buffer_b.Reset(0, rhs);
- return CompareStringContents(&buf1, &string_compare_buffer_b);
+ isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
+ return CompareStringContents(&buf1,
+ isolate->objects_string_compare_buffer_b());
}
}
} else {
- string_compare_buffer_a.Reset(0, lhs);
- return CompareStringContentsPartial(&string_compare_buffer_a, rhs);
+ isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
+ return CompareStringContentsPartial(isolate,
+ isolate->objects_string_compare_buffer_a(), rhs);
}
}
if (StringShape(this).IsSymbol()) return false;
Map* map = this->map();
- if (map == Heap::string_map()) {
- this->set_map(Heap::undetectable_string_map());
+ Heap* heap = map->GetHeap();
+ if (map == heap->string_map()) {
+ this->set_map(heap->undetectable_string_map());
return true;
- } else if (map == Heap::ascii_string_map()) {
- this->set_map(Heap::undetectable_ascii_string_map());
+ } else if (map == heap->ascii_string_map()) {
+ this->set_map(heap->undetectable_ascii_string_map());
return true;
}
// Rest cannot be marked as undetectable
bool String::IsEqualTo(Vector<const char> str) {
+ Isolate* isolate = GetIsolate();
int slen = length();
Access<ScannerConstants::Utf8Decoder>
- decoder(ScannerConstants::utf8_decoder());
+ decoder(isolate->scanner_constants()->utf8_decoder());
decoder->Reset(str.start(), str.length());
int i;
for (i = 0; i < slen && decoder->has_more(); i++) {
MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
+ Heap* heap = GetHeap();
if (start == 0 && end == length()) return this;
- MaybeObject* result = Heap::AllocateSubString(this, start, end, pretenure);
+ MaybeObject* result = heap->AllocateSubString(this, start, end, pretenure);
return result;
}
}
-void Map::ClearNonLiveTransitions(Object* real_prototype) {
+void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
// Live DescriptorArray objects will be marked, so we must use
// low-level accessors to get and modify their data.
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
*RawField(this, Map::kInstanceDescriptorsOffset));
- if (d == Heap::raw_unchecked_empty_descriptor_array()) return;
+ if (d == heap->raw_unchecked_empty_descriptor_array()) return;
Smi* NullDescriptorDetails =
PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
FixedArray* contents = reinterpret_cast<FixedArray*>(
if (!target->IsMarked()) {
ASSERT(target->IsMap());
contents->set_unchecked(i + 1, NullDescriptorDetails);
- contents->set_null_unchecked(i);
+ contents->set_null_unchecked(heap, i);
ASSERT(target->prototype() == this ||
target->prototype() == real_prototype);
// Getter prototype() is read-only, set_prototype() has side effects.
ASSERT(is_compiled() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
- ReplaceCode(Builtins::builtin(Builtins::LazyRecompile));
+ Builtins* builtins = GetIsolate()->builtins();
+ ReplaceCode(builtins->builtin(Builtins::LazyRecompile));
}
Object* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSObject());
-
+ Heap* heap = GetHeap();
if (has_initial_map()) {
initial_map()->set_prototype(value);
} else {
// prototype is put into the initial map where it belongs.
set_prototype_or_initial_map(value);
}
- Heap::ClearInstanceofCache();
+ heap->ClearInstanceofCache();
return value;
}
// used for constructing objects to the original object prototype.
// See ECMA-262 13.2.2.
if (!value->IsJSObject()) {
+ Heap* heap = GetHeap();
// Copy the map so this does not affect unrelated functions.
// Remove map transitions because they point to maps with a
// different prototype.
map()->set_constructor(value);
map()->set_non_instance_prototype(true);
construct_prototype =
- Top::context()->global_context()->initial_object_prototype();
+ heap->isolate()->context()->global_context()->
+ initial_object_prototype();
} else {
map()->set_non_instance_prototype(false);
}
ASSERT(shared()->strict_mode() || map() == global_context->function_map());
set_map(no_prototype_map);
- set_prototype_or_initial_map(Heap::the_hole_value());
+ set_prototype_or_initial_map(GetHeap()->the_hole_value());
return this;
}
}
-MaybeObject* Oddball::Initialize(const char* to_string, Object* to_number) {
+MaybeObject* Oddball::Initialize(const char* to_string,
+ Object* to_number,
+ byte kind) {
Object* symbol;
- { MaybeObject* maybe_symbol = Heap::LookupAsciiSymbol(to_string);
+ { MaybeObject* maybe_symbol =
+ Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
}
set_to_string(String::cast(symbol));
set_to_number(to_number);
+ set_kind(kind);
return this;
}
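
Editor's note: Oddball::Initialize gains a kind byte here, so each oddball records which singleton it is instead of being identified only by pointer comparison against one heap's roots. The kind constants are not shown in this excerpt, so the ones in this sketch are hypothetical, as are all the stub types:

#include <cstdio>

typedef unsigned char byte;

class Oddball {
 public:
  // Hypothetical kind values mirroring the patch's extra parameter.
  static const byte kUndefined = 0;
  static const byte kNull = 1;
  static const byte kTrue = 2;

  void Initialize(const char* to_string, double to_number, byte kind) {
    to_string_ = to_string;
    to_number_ = to_number;
    kind_ = kind;  // stamped once at heap setup, queried without a Heap*
  }
  byte kind() const { return kind_; }
  const char* to_string() const { return to_string_; }
  double to_number() const { return to_number_; }

 private:
  const char* to_string_;
  double to_number_;
  byte kind_;
};

int main() {
  Oddball undefined_value;
  undefined_value.Initialize("undefined", 0.0 / 0.0, Oddball::kUndefined);
  std::printf("%s kind=%d\n",
              undefined_value.to_string(), undefined_value.kind());
  return 0;
}
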
Object* SharedFunctionInfo::GetSourceCode() {
- if (!HasSourceCode()) return Heap::undefined_value();
- HandleScope scope;
+ Isolate* isolate = GetIsolate();
+ if (!HasSourceCode()) return isolate->heap()->undefined_value();
+ HandleScope scope(isolate);
Object* source = Script::cast(script())->source();
- return *SubString(Handle<String>(String::cast(source)),
+ return *SubString(Handle<String>(String::cast(source), isolate),
start_position(), end_position());
}
bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
+ Heap* heap = GetHeap();
+
// Check the basic conditions for generating inline constructor code.
if (!FLAG_inline_new
|| !has_only_simple_this_property_assignments()
// Traverse the proposed prototype chain looking for setters for properties of
// the same names as are set by the inline constructor.
for (Object* obj = prototype;
- obj != Heap::null_value();
+ obj != heap->null_value();
obj = obj->GetPrototype()) {
JSObject* js_object = JSObject::cast(obj);
for (int i = 0; i < this_property_assignments_count(); i++) {
void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
+ Heap* heap = GetHeap();
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
false));
- set_this_property_assignments(Heap::undefined_value());
+ set_this_property_assignments(heap->undefined_value());
set_this_property_assignments_count(0);
}
set_construction_count(kGenerousAllocationCount);
}
set_initial_map(map);
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ Builtins* builtins = map->heap()->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::JSConstructStubGeneric),
construct_stub());
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+ set_construct_stub(builtins->builtin(Builtins::JSConstructStubCountdown));
}
// then StartInobjectTracking will be called again the next time the
// constructor is called. The countdown will continue and (possibly after
// several more GCs) CompleteInobjectSlackTracking will eventually be called.
- set_initial_map(Heap::raw_unchecked_undefined_value());
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ set_initial_map(map->heap()->raw_unchecked_undefined_value());
+ Builtins* builtins = map->heap()->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::JSConstructStubCountdown),
*RawField(this, kConstructStubOffset));
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ set_construct_stub(builtins->builtin(Builtins::JSConstructStubGeneric));
// It is safe to clear the flag: it will be set again if the map is live.
set_live_objects_may_exist(false);
}
// Resume inobject slack tracking.
set_initial_map(map);
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ Builtins* builtins = map->heap()->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::JSConstructStubGeneric),
*RawField(this, kConstructStubOffset));
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+ set_construct_stub(builtins->builtin(Builtins::JSConstructStubCountdown));
// The map survived the gc, so there may be objects referencing it.
set_live_objects_may_exist(true);
}
ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
Map* map = Map::cast(initial_map());
- set_initial_map(Heap::undefined_value());
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ Heap* heap = map->heap();
+ set_initial_map(heap->undefined_value());
+ Builtins* builtins = heap->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::JSConstructStubCountdown),
construct_stub());
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ set_construct_stub(builtins->builtin(Builtins::JSConstructStubGeneric));
int slack = map->unused_property_fields();
map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
void Code::InvalidateRelocation() {
- HandleScope scope;
- set_relocation_info(Heap::empty_byte_array());
+ set_relocation_info(GetHeap()->empty_byte_array());
}
MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
int length) {
+ Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* elems = FixedArray::cast(obj);
MaybeObject* JSArray::Initialize(int capacity) {
+ Heap* heap = GetHeap();
ASSERT(capacity >= 0);
set_length(Smi::FromInt(0));
FixedArray* new_elements;
if (capacity == 0) {
- new_elements = Heap::empty_fixed_array();
+ new_elements = heap->empty_fixed_array();
} else {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
new_elements = FixedArray::cast(obj);
Handle<FixedArray> old_backing(FixedArray::cast(elements()));
int old_size = old_backing->length();
int new_size = required_size > old_size ? required_size : old_size;
- Handle<FixedArray> new_backing = Factory::NewFixedArray(new_size);
+ Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
// Can't use 'this' any more because we may have had a GC!
for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
self->SetContent(*new_backing);
}
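// FACTORY here (like the HEAP and COUNTERS macros used further down)
// presumably resolves through the current isolate; a plausible set of
// definitions, stated as an assumption rather than quoted from the patch:
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define COUNTERS (v8::internal::Isolate::Current()->counters())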
-static Failure* ArrayLengthRangeError() {
+static Failure* ArrayLengthRangeError(Heap* heap) {
HandleScope scope;
- return Top::Throw(*Factory::NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ return heap->isolate()->Throw(
+ *FACTORY->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
}
MaybeObject* JSObject::SetElementsLength(Object* len) {
+ Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
Object* smi_length = Smi::FromInt(0);
if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
const int value = Smi::cast(smi_length)->value();
- if (value < 0) return ArrayLengthRangeError();
+ if (value < 0) return ArrayLengthRangeError(heap);
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
int old_capacity = FixedArray::cast(elements())->length();
if (len->ToArrayIndex(&length)) {
return SetSlowElements(len);
} else {
- return ArrayLengthRangeError();
+ return ArrayLengthRangeError(heap);
}
}
// len is not a number, so make the array size one and
// set its only element to len.
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(1);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(1);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray::cast(obj)->set(0, len);
MaybeObject* JSObject::SetPrototype(Object* value,
bool skip_hidden_prototypes) {
+ Heap* heap = GetHeap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
if (!value->IsJSObject() && !value->IsNull()) return value;
// prototype cycles are prevented.
// It is sufficient to validate that the receiver is not in the new prototype
// chain.
- for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
+ for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
if (JSObject::cast(pt) == this) {
// Cycle detected.
HandleScope scope;
- return Top::Throw(*Factory::NewError("cyclic_proto",
- HandleVector<Object>(NULL, 0)));
+ return heap->isolate()->Throw(
+ *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
}
}
Map::cast(new_map)->set_prototype(value);
real_receiver->set_map(Map::cast(new_map));
- Heap::ClearInstanceofCache();
+ heap->ClearInstanceofCache();
return value;
}
if (this->IsStringObjectWithCharacterAt(index)) return true;
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return false;
+ if (pt->IsNull()) return false;
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
+ Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
- CustomArguments args(interceptor->data(), receiver, this);
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = query(index, info);
}
if (!result.IsEmpty()) {
} else if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(index, info);
}
if (!result.IsEmpty()) return true;
JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
+ Heap* heap = GetHeap();
+
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return UNDEFINED_ELEMENT;
}
bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
+ Heap* heap = GetHeap();
+
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return false;
}
if (this->IsStringObjectWithCharacterAt(index)) return true;
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return false;
+ if (pt->IsNull()) return false;
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
+ Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(this);
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
if (!interceptor->setter()->IsUndefined()) {
v8::IndexedPropertySetter setter =
v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = setter(index, v8::Utils::ToLocal(value_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
}
MaybeObject* raw_result =
*value_handle,
strict_mode,
check_prototype);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
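// RETURN_IF_SCHEDULED_EXCEPTION now names the isolate explicitly instead of
// consulting a global. A sketch of the likely expansion; the helper names
// (has_scheduled_exception, PromoteScheduledException) are assumptions:
#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)          \
  do {                                                  \
    Isolate* __isolate__ = (isolate);                   \
    if (__isolate__->has_scheduled_exception()) {       \
      return __isolate__->PromoteScheduledException();  \
    }                                                   \
  } while (false)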
Object* structure,
uint32_t index,
Object* holder) {
+ Isolate* isolate = GetIsolate();
ASSERT(!structure->IsProxy());
// api style callbacks.
AccessorInfo* data = AccessorInfo::cast(structure);
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSObject> self(JSObject::cast(receiver));
Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<Object> number = Factory::NewNumberFromUint(index);
- Handle<String> key(Factory::NumberToString(number));
- LOG(ApiNamedPropertyAccess("load", *self, *key));
- CustomArguments args(data->data(), *self, *holder_handle);
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<String> key(isolate->factory()->NumberToString(number));
+ LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
+ CustomArguments args(isolate, data->data(), *self, *holder_handle);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = call_fun(v8::Utils::ToLocal(key), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
- if (result.IsEmpty()) return Heap::undefined_value();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.IsEmpty()) return isolate->heap()->undefined_value();
return *v8::Utils::OpenHandle(*result);
}
JSFunction::cast(getter));
}
// Getter is not a function.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
UNREACHABLE();
uint32_t index,
Object* value,
JSObject* holder) {
- HandleScope scope;
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually proxy
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
- Handle<Object> number = Factory::NewNumberFromUint(index);
- Handle<String> key(Factory::NumberToString(number));
- LOG(ApiNamedPropertyAccess("store", this, *key));
- CustomArguments args(data->data(), this, JSObject::cast(holder));
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<String> key(isolate->factory()->NumberToString(number));
+ LOG(isolate, ApiNamedPropertyAccess("store", this, *key));
+ CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
call_fun(v8::Utils::ToLocal(key),
v8::Utils::ToLocal(value_handle),
info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
}
if (setter->IsJSFunction()) {
return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
} else {
- Handle<Object> holder_handle(holder);
- Handle<Object> key(Factory::NewNumberFromUint(index));
+ Handle<Object> holder_handle(holder, isolate);
+ Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder_handle };
- return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2)));
}
}
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
+ Heap* heap = GetHeap();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+ !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
HandleScope scope;
Handle<Object> value_handle(value);
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return *value_handle;
}
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
+ Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
case FAST_ELEMENTS:
// Fast case.
// If the put fails in strict mode, throw an exception.
if (!dictionary->ValueAtPut(entry, value) &&
strict_mode == kStrictMode) {
- Handle<Object> number(Factory::NewNumberFromUint(index));
+ Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
Handle<Object> holder(this);
Handle<Object> args[2] = { number, holder };
- return Top::Throw(
- *Factory::NewTypeError("strict_read_only_property",
- HandleVector(args, 2)));
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("strict_read_only_property",
+ HandleVector(args, 2)));
}
}
} else {
// When we set the is_extensible flag to false we always force
// the element into dictionary mode (and force them to stay there).
if (!map()->is_extensible()) {
- Handle<Object> number(Factory::NewNumberFromUint(index));
- Handle<String> index_string(Factory::NumberToString(number));
+ Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
+ Handle<String> index_string(
+ isolate->factory()->NumberToString(number));
Handle<Object> args[1] = { index_string };
- return Top::Throw(*Factory::NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("object_not_extensible",
+ HandleVector(args, 1)));
}
Object* result;
{ MaybeObject* maybe_result = dictionary->AtNumberPut(index, value);
// All possible cases have been handled above. Add a return to avoid the
// complaints from the compiler.
UNREACHABLE();
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
if (index >= old_len && index != 0xffffffff) {
Object* len;
{ MaybeObject* maybe_len =
- Heap::NumberFromDouble(static_cast<double>(index) + 1);
+ GetHeap()->NumberFromDouble(static_cast<double>(index) + 1);
if (!maybe_len->ToObject(&len)) return maybe_len;
}
set_length(len);
MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
uint32_t index) {
+ Heap* heap = GetHeap();
// Get element works for both JSObject and JSArray since
// JSArray::length cannot change.
switch (GetElementsKind()) {
// Continue searching via the prototype chain.
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return Heap::undefined_value();
+ if (pt->IsNull()) return heap->undefined_value();
return pt->GetElementWithReceiver(receiver, index);
}
MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
+ Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<Object> this_handle(receiver);
Handle<JSObject> holder_handle(this);
if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- CustomArguments args(interceptor->data(), receiver, this);
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(index, info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
}
MaybeObject* raw_result =
holder_handle->GetElementPostInterceptor(*this_handle, index);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
uint32_t index) {
+ Heap* heap = GetHeap();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_GET)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return Heap::undefined_value();
+ !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+ return heap->undefined_value();
}
if (HasIndexedInterceptor()) {
}
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return Heap::undefined_value();
+ if (pt == heap->null_value()) return heap->undefined_value();
return pt->GetElementWithReceiver(receiver, index);
}
ExternalIntArray* array = ExternalIntArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
int32_t value = array->get(index);
- return Heap::NumberFromInt32(value);
+ return GetHeap()->NumberFromInt32(value);
}
break;
}
ExternalUnsignedIntArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
uint32_t value = array->get(index);
- return Heap::NumberFromUint32(value);
+ return GetHeap()->NumberFromUint32(value);
}
break;
}
ExternalFloatArray* array = ExternalFloatArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
float value = array->get(index);
- return Heap::AllocateHeapNumber(value);
+ return GetHeap()->AllocateHeapNumber(value);
}
break;
}
UNREACHABLE();
break;
}
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
JSObject* receiver,
String* name,
PropertyAttributes* attributes) {
+ Heap* heap = GetHeap();
// Check local property in holder, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
// Continue searching via the prototype chain.
Object* pt = GetPrototype();
*attributes = ABSENT;
- if (pt == Heap::null_value()) return Heap::undefined_value();
+ if (pt->IsNull()) return heap->undefined_value();
return pt->GetPropertyWithReceiver(receiver, name, attributes);
}
JSObject* receiver,
String* name,
PropertyAttributes* attributes) {
+ Heap* heap = GetHeap();
// Check local property in holder, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
if (result.IsProperty()) {
return GetProperty(receiver, &result, name, attributes);
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
JSObject* receiver,
String* name,
PropertyAttributes* attributes) {
+ Isolate* isolate = GetIsolate();
InterceptorInfo* interceptor = GetNamedInterceptor();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- CustomArguments args(interceptor->data(), receiver, this);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
*attributes = NONE;
return *v8::Utils::OpenHandle(*result);
*receiver_handle,
*name_handle,
attributes);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
bool JSObject::HasRealNamedProperty(String* key) {
+ Heap* heap = GetHeap();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ !heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return false;
}
bool JSObject::HasRealElementProperty(uint32_t index) {
+ Heap* heap = GetHeap();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return false;
}
}
// All possibilities have been handled above already.
UNREACHABLE();
- return Heap::null_value();
+ return heap->null_value();
}
bool JSObject::HasRealNamedCallbackProperty(String* key) {
+ Heap* heap = GetHeap();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ !heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return false;
}
}
-bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
- return key == static_cast<uint32_t>(other->Number());
-}
-
-
-uint32_t NumberDictionaryShape::Hash(uint32_t key) {
- return ComputeIntegerHash(key);
-}
-
-
-uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
-}
-
-
-MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
- return Heap::NumberFromUint32(key);
-}
-
-
-bool StringDictionaryShape::IsMatch(String* key, Object* other) {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (key->Hash() != String::cast(other)->Hash()) return false;
- return key->Equals(String::cast(other));
-}
-
-
-uint32_t StringDictionaryShape::Hash(String* key) {
- return key->Hash();
-}
-
-
-uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
- return String::cast(other)->Hash();
-}
-
-
-MaybeObject* StringDictionaryShape::AsObject(String* key) {
- return key;
-}
-
-
// StringKey simply carries a string object as key.
class StringKey : public HashTableKey {
public:
MUST_USE_RESULT MaybeObject* AsObject() {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(3);
+ { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(3);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return Heap::AllocateSymbol(string_, chars_, hash_field_);
+ return Isolate::Current()->heap()->AllocateSymbol(
+ string_, chars_, hash_field_);
}
Vector<const char> string_;
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return Heap::AllocateAsciiSymbol(string_, hash_field_);
+ return HEAP->AllocateAsciiSymbol(string_, hash_field_);
}
};
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return Heap::AllocateTwoByteSymbol(string_, hash_field_);
+ return HEAP->AllocateTwoByteSymbol(string_, hash_field_);
}
};
// SymbolKey carries a string/symbol object as key.
class SymbolKey : public HashTableKey {
public:
- explicit SymbolKey(String* string) : string_(string) { }
+ explicit SymbolKey(String* string)
+ : string_(string) { }
bool IsMatch(Object* string) {
return String::cast(string)->Equals(string_);
// Attempt to flatten the string, so that symbols will most often
// be flat strings.
string_ = string_->TryFlattenGetString();
+ Heap* heap = string_->GetHeap();
// Transform string to symbol if possible.
- Map* map = Heap::SymbolMapForString(string_);
+ Map* map = heap->SymbolMapForString(string_);
if (map != NULL) {
string_->set_map(map);
ASSERT(string_->IsSymbol());
}
// Otherwise allocate a new symbol.
StringInputBuffer buffer(string_);
- return Heap::AllocateInternalSymbol(&buffer,
+ return heap->AllocateInternalSymbol(&buffer,
string_->length(),
string_->hash_field());
}
}
Object* obj;
- { MaybeObject* maybe_obj =
- Heap::AllocateHashTable(EntryToIndex(capacity), pretenure);
+ { MaybeObject* maybe_obj = Isolate::Current()->heap()->
+ AllocateHashTable(EntryToIndex(capacity), pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HashTable::cast(obj)->SetNumberOfElements(0);
}
-// Find entry for key otherwise return kNotFound.
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Key key) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- while (true) {
- Object* element = KeyAt(entry);
- if (element->IsUndefined()) break; // Empty entry.
- if (!element->IsNull() && Shape::IsMatch(key, element)) return entry;
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-
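// Judging by the declarations added to HashTable further down, the templated
// FindEntry reappears out of line with an explicit Isolate parameter. A
// sketch that preserves the deleted probing logic, comparing entries against
// the passed-in isolate's sentinel values (an assumption):
template<typename Shape, typename Key>
int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
  uint32_t capacity = Capacity();
  uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
  uint32_t count = 1;
  // EnsureCapacity will guarantee the hash table is never full.
  while (true) {
    Object* element = KeyAt(entry);
    if (element == isolate->heap()->undefined_value()) break;  // Empty entry.
    if (element != isolate->heap()->null_value() &&
        Shape::IsMatch(key, element)) {
      return entry;
    }
    entry = NextProbe(entry, count++, capacity);
  }
  return kNotFound;
}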
// Find entry for key otherwise return kNotFound.
int StringDictionary::FindEntry(String* key) {
if (!key->IsSymbol()) {
template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
+ Heap* heap = GetHeap();
int capacity = Capacity();
int nof = NumberOfElements() + n;
int nod = NumberOfDeletedElements();
const int kMinCapacityForPretenure = 256;
bool pretenure =
- (capacity > kMinCapacityForPretenure) && !Heap::InNewSpace(this);
+ (capacity > kMinCapacityForPretenure) && !heap->InNewSpace(this);
Object* obj;
{ MaybeObject* maybe_obj =
Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
// Collates undefined and nonexistent elements below limit from position
// zero of the elements. The object stays in dictionary mode.
MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
+ Heap* heap = GetHeap();
ASSERT(HasDictionaryElements());
// Must stay in dictionary mode, either because of requires_slow_elements,
// or because we are not going to sort (and therefore compact) all of the
if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
// Allocate space for result before we start mutating the object.
Object* new_double;
- { MaybeObject* maybe_new_double = Heap::AllocateHeapNumber(0.0);
+ { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
}
result_double = HeapNumber::cast(new_double);
// allocation. Bailout.
return Smi::FromInt(-1);
}
- new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details)->
+ new_dict->AddNumberEntry(pos, heap->undefined_value(), no_details)->
ToObjectUnchecked();
pos++;
undefs--;
// If the object is in dictionary mode, it is converted to fast elements
// mode.
MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
+ Heap* heap = GetHeap();
ASSERT(!HasExternalArrayElements());
if (HasDictionaryElements()) {
}
Map* new_map = Map::cast(obj);
- PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED: TENURED;
+ PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
Object* new_array;
{ MaybeObject* maybe_new_array =
- Heap::AllocateFixedArray(dict->NumberOfElements(), tenure);
+ heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
}
FixedArray* fast_elements = FixedArray::cast(new_array);
// Pessimistically allocate space for return value before
// we start mutating the array.
Object* new_double;
- { MaybeObject* maybe_new_double = Heap::AllocateHeapNumber(0.0);
+ { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
}
result_double = HeapNumber::cast(new_double);
template<typename ExternalArrayClass, typename ValueType>
-static MaybeObject* ExternalArrayIntSetter(ExternalArrayClass* receiver,
+static MaybeObject* ExternalArrayIntSetter(Heap* heap,
+ ExternalArrayClass* receiver,
uint32_t index,
Object* value) {
ValueType cast_value = 0;
}
receiver->set(index, cast_value);
}
- return Heap::NumberFromInt32(cast_value);
+ return heap->NumberFromInt32(cast_value);
}
MaybeObject* ExternalByteArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalByteArray, int8_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalShortArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalShortArray, int16_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalIntArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalIntArray, int32_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
uint32_t cast_value = 0;
+ Heap* heap = GetHeap();
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
}
set(index, cast_value);
}
- return Heap::NumberFromUint32(cast_value);
+ return heap->NumberFromUint32(cast_value);
}
MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
float cast_value = 0;
+ Heap* heap = GetHeap();
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
}
set(index, cast_value);
}
- return Heap::AllocateHeapNumber(cast_value);
+ return heap->AllocateHeapNumber(cast_value);
}
MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
ASSERT(!HasFastProperties());
+ Heap* heap = GetHeap();
int entry = property_dictionary()->FindEntry(name);
if (entry == StringDictionary::kNotFound) {
Object* cell;
{ MaybeObject* maybe_cell =
- Heap::AllocateJSGlobalPropertyCell(Heap::the_hole_value());
+ heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
if (!maybe_cell->ToObject(&cell)) return maybe_cell;
}
PropertyDetails details(NONE, NORMAL);
Object* CompilationCacheTable::Lookup(String* src) {
+ Heap* heap = GetHeap();
StringKey key(src);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return heap->undefined_value();
return get(EntryToIndex(entry) + 1);
}
StrictModeFlag strict_mode) {
StringSharedKey key(src, context->closure()->shared(), strict_mode);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
Object* CompilationCacheTable::LookupRegExp(String* src,
JSRegExp::Flags flags) {
+ Heap* heap = GetHeap();
RegExpKey key(src, flags);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return heap->undefined_value();
return get(EntryToIndex(entry) + 1);
}
void CompilationCacheTable::Remove(Object* value) {
+ Object* null_value = GetHeap()->null_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
if (get(value_index) == value) {
- fast_set(this, entry_index, Heap::null_value());
- fast_set(this, value_index, Heap::null_value());
+ fast_set(this, entry_index, null_value);
+ fast_set(this, value_index, null_value);
ElementRemoved();
}
}
Object* MapCache::Lookup(FixedArray* array) {
+ Heap* heap = GetHeap();
SymbolsKey key(array);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return heap->undefined_value();
return get(EntryToIndex(entry) + 1);
}
template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
+ Heap* heap = Dictionary<Shape, Key>::GetHeap();
int length = HashTable<Shape, Key>::NumberOfElements();
// Allocate and initialize iteration order array.
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* iteration_order = FixedArray::cast(obj);
}
// Allocate array with enumeration order.
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* enumeration_order = FixedArray::cast(obj);
// Do nothing if the interval [from, to) is empty.
if (from >= to) return;
+ Heap* heap = GetHeap();
int removed_entries = 0;
- Object* sentinel = Heap::null_value();
+ Object* sentinel = heap->null_value();
int capacity = Capacity();
for (int i = 0; i < capacity; i++) {
Object* key = KeyAt(i);
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
JSObject::DeleteMode mode) {
+ Heap* heap = Dictionary<Shape, Key>::GetHeap();
PropertyDetails details = DetailsAt(entry);
// Ignore attributes if forcing a deletion.
if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
- return Heap::false_value();
+ return heap->false_value();
}
- SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));
+ SetEntry(entry, heap->null_value(), heap->null_value(), Smi::FromInt(0));
HashTable<Shape, Key>::ElementRemoved();
- return Heap::true_value();
+ return heap->true_value();
}
// Backwards lookup (slow).
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
+ Heap* heap = Dictionary<Shape, Key>::GetHeap();
int capacity = HashTable<Shape, Key>::Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
if (e == value) return k;
}
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
MaybeObject* StringDictionary::TransformPropertiesToFastFor(
JSObject* obj, int unused_property_fields) {
+ Heap* heap = GetHeap();
// Make sure we preserve dictionary representation if there are too many
// descriptors.
if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
ASSERT(type != FIELD);
instance_descriptor_length++;
if (type == NORMAL &&
- (!value->IsJSFunction() || Heap::InNewSpace(value))) {
+ (!value->IsJSFunction() || heap->InNewSpace(value))) {
number_of_fields += 1;
}
}
// Allocate the fixed array for the fields.
Object* fields;
{ MaybeObject* maybe_fields =
- Heap::AllocateFixedArray(number_of_allocated_fields);
+ heap->AllocateFixedArray(number_of_allocated_fields);
if (!maybe_fields->ToObject(&fields)) return maybe_fields;
}
Object* value = ValueAt(i);
// Ensure the key is a symbol before writing into the instance descriptor.
Object* key;
- { MaybeObject* maybe_key = Heap::LookupSymbol(String::cast(k));
+ { MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
if (!maybe_key->ToObject(&key)) return maybe_key;
}
PropertyDetails details = DetailsAt(i);
PropertyType type = details.type();
- if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
+ if (value->IsJSFunction() && !heap->InNewSpace(value)) {
ConstantFunctionDescriptor d(String::cast(key),
JSFunction::cast(value),
details.attributes(),
// Get the break point info object for this code position.
Object* DebugInfo::GetBreakPointInfo(int code_position) {
+ Heap* heap = GetHeap();
// Find the index of the break point info object for this code position.
int index = GetBreakPointInfoIndex(code_position);
// Return the break point info object if any.
- if (index == kNoBreakPointInfo) return Heap::undefined_value();
+ if (index == kNoBreakPointInfo) return heap->undefined_value();
return BreakPointInfo::cast(break_points()->get(index));
}
int source_position,
int statement_position,
Handle<Object> break_point_object) {
+ Isolate* isolate = Isolate::Current();
Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
if (!break_point_info->IsUndefined()) {
BreakPointInfo::SetBreakPoint(
Handle<FixedArray> old_break_points =
Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
Handle<FixedArray> new_break_points =
- Factory::NewFixedArray(old_break_points->length() +
- Debug::kEstimatedNofBreakPointsInFunction);
+ isolate->factory()->NewFixedArray(
+ old_break_points->length() +
+ Debug::kEstimatedNofBreakPointsInFunction);
debug_info->set_break_points(*new_break_points);
for (int i = 0; i < old_break_points->length(); i++) {
ASSERT(index != kNoBreakPointInfo);
// Allocate new BreakPointInfo object and set the break point.
- Handle<BreakPointInfo> new_break_point_info =
- Handle<BreakPointInfo>::cast(Factory::NewStruct(BREAK_POINT_INFO_TYPE));
+ Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
+ isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
new_break_point_info->set_code_position(Smi::FromInt(code_position));
new_break_point_info->set_source_position(Smi::FromInt(source_position));
new_break_point_info->
set_statement_position(Smi::FromInt(statement_position));
- new_break_point_info->set_break_point_objects(Heap::undefined_value());
+ new_break_point_info->set_break_point_objects(
+ isolate->heap()->undefined_value());
BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
debug_info->break_points()->set(index, *new_break_point_info);
}
// Get the break point objects for a code position.
Object* DebugInfo::GetBreakPointObjects(int code_position) {
+ Heap* heap = GetHeap();
Object* break_point_info = GetBreakPointInfo(code_position);
if (break_point_info->IsUndefined()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
return BreakPointInfo::cast(break_point_info)->break_point_objects();
}
Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
Handle<Object> break_point_object) {
- if (debug_info->break_points()->IsUndefined()) return Heap::undefined_value();
+ Heap* heap = Isolate::Current()->heap();
+ if (debug_info->break_points()->IsUndefined()) return heap->undefined_value();
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (!debug_info->break_points()->get(i)->IsUndefined()) {
Handle<BreakPointInfo> break_point_info =
}
}
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
+ Isolate* isolate = Isolate::Current();
// If there are no break points just ignore.
if (break_point_info->break_point_objects()->IsUndefined()) return;
// If there is a single break point clear it if it is the same.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
if (break_point_info->break_point_objects() == *break_point_object) {
- break_point_info->set_break_point_objects(Heap::undefined_value());
+ break_point_info->set_break_point_objects(
+ isolate->heap()->undefined_value());
}
return;
}
Handle<FixedArray>(
FixedArray::cast(break_point_info->break_point_objects()));
Handle<FixedArray> new_array =
- Factory::NewFixedArray(old_array->length() - 1);
+ isolate->factory()->NewFixedArray(old_array->length() - 1);
int found_count = 0;
for (int i = 0; i < old_array->length(); i++) {
if (old_array->get(i) == *break_point_object) {
if (break_point_info->break_point_objects() == *break_point_object) return;
// If there was one break point object before replace with array.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> array = Factory::NewFixedArray(2);
+ Handle<FixedArray> array = FACTORY->NewFixedArray(2);
array->set(0, break_point_info->break_point_objects());
array->set(1, *break_point_object);
break_point_info->set_break_point_objects(*array);
Handle<FixedArray>(
FixedArray::cast(break_point_info->break_point_objects()));
Handle<FixedArray> new_array =
- Factory::NewFixedArray(old_array->length() + 1);
+ FACTORY->NewFixedArray(old_array->length() + 1);
for (int i = 0; i < old_array->length(); i++) {
// If the break point was there before just ignore.
if (old_array->get(i) == *break_point_object) return;
// Oddball testing.
INLINE(bool IsUndefined());
INLINE(bool IsNull());
+ INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
enum Type {
RETRY_AFTER_GC = 0,
EXCEPTION = 1, // Returning this marker tells the real exception
- // is in Top::pending_exception.
+ // is in Isolate::pending_exception.
INTERNAL_ERROR = 2,
OUT_OF_MEMORY_EXCEPTION = 3
};
inline MapWord map_word();
inline void set_map_word(MapWord map_word);
+ // The Heap this object was allocated in. Also used to access the Isolate.
+ // This method cannot be used during GC; it ASSERTs this.
+ inline Heap* GetHeap();
+ // Convenience method to get the current isolate. It may only be called
+ // when its result is the same as Isolate::Current(); it ASSERTs this.
+ // See also the comment for GetHeap.
+ inline Isolate* GetIsolate();
+
// Converts an address to a HeapObject pointer.
static inline HeapObject* FromAddress(Address address);
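// One plausible implementation of the two accessors, routing through the
// object's map since this patch also gives Map a heap() accessor; a sketch,
// not necessarily the actual definition:
Heap* HeapObject::GetHeap() {
  return map()->heap();
}

Isolate* HeapObject::GetIsolate() {
  return GetHeap()->isolate();
}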
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
+ // TODO(isolates): duplicate.
+ inline void set_undefined(Heap* heap, int index);
inline void set_null(int index);
+ // TODO(isolates): duplicate.
+ inline void set_null(Heap* heap, int index);
inline void set_the_hole(int index);
// Setters with less debug checks for the GC to use.
inline void set_unchecked(int index, Smi* value);
- inline void set_null_unchecked(int index);
- inline void set_unchecked(int index, Object* value, WriteBarrierMode mode);
+ inline void set_null_unchecked(Heap* heap, int index);
+ inline void set_unchecked(Heap* heap, int index, Object* value,
+ WriteBarrierMode mode);
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
// Returns the number of descriptors in the array.
int number_of_descriptors() {
- return IsEmpty() ? 0 : length() - kFirstIndex;
+ ASSERT(length() > kFirstIndex || IsEmpty());
+ int len = length();
+ return len <= kFirstIndex ? 0 : len - kFirstIndex;
}
int NextEnumerationIndex() {
(FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
// Find entry for key otherwise return kNotFound.
- int FindEntry(Key key);
+ inline int FindEntry(Key key);
+ int FindEntry(Isolate* isolate, Key key);
protected:
class SymbolTableShape {
public:
- static bool IsMatch(HashTableKey* key, Object* value) {
+ static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
}
- static uint32_t Hash(HashTableKey* key) {
+ static inline uint32_t Hash(HashTableKey* key) {
return key->Hash();
}
- static uint32_t HashForObject(HashTableKey* key, Object* object) {
+ static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
return key->AsObject();
}
class MapCacheShape {
public:
- static bool IsMatch(HashTableKey* key, Object* value) {
+ static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
}
- static uint32_t Hash(HashTableKey* key) {
+ static inline uint32_t Hash(HashTableKey* key) {
return key->Hash();
}
- static uint32_t HashForObject(HashTableKey* key, Object* object) {
+ static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
return key->AsObject();
}
inline void CodeIterateBody(ObjectVisitor* v);
template<typename StaticVisitor>
- inline void CodeIterateBody();
+ inline void CodeIterateBody(Heap* heap);
#ifdef OBJECT_PRINT
inline void CodePrint() {
CodePrint(stdout);
// Code cache operations.
// Clears the code cache.
- inline void ClearCodeCache();
+ inline void ClearCodeCache(Heap* heap);
// Update code cache.
MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
// Also, restore the original prototype on the targets of these
// transitions, so that we do not process this map again while
// following back pointers.
- void ClearNonLiveTransitions(Object* real_prototype);
+ void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
// Dispatched behavior.
#ifdef OBJECT_PRINT
inline int visitor_id();
inline void set_visitor_id(int visitor_id);
+ // Returns the heap this map belongs to.
+ inline Heap* heap();
+
typedef void (*TraverseCallback)(Map* map, void* data);
void TraverseTransitionTree(TraverseCallback callback, void* data);
// iterating or updating after gc.
class Relocatable BASE_EMBEDDED {
public:
- inline Relocatable() : prev_(top_) { top_ = this; }
- virtual ~Relocatable() {
- ASSERT_EQ(top_, this);
- top_ = prev_;
- }
+ explicit inline Relocatable(Isolate* isolate);
+ inline virtual ~Relocatable();
virtual void IterateInstance(ObjectVisitor* v) { }
virtual void PostGarbageCollection() { }
static void Iterate(ObjectVisitor* v, Relocatable* top);
static char* Iterate(ObjectVisitor* v, char* t);
private:
- static Relocatable* top_;
+ Isolate* isolate_;
Relocatable* prev_;
};
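// The former static top_ presumably moves into the Isolate. A sketch of the
// out-of-lined constructor and destructor, where relocatable_top() and
// set_relocatable_top() are hypothetical accessors on Isolate:
inline Relocatable::Relocatable(Isolate* isolate)
    : isolate_(isolate), prev_(isolate->relocatable_top()) {
  isolate_->set_relocatable_top(this);
}

inline Relocatable::~Relocatable() {
  ASSERT_EQ(isolate_->relocatable_top(), this);
  isolate_->set_relocatable_top(prev_);
}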
// must be valid as long as the reader is being used.
class FlatStringReader : public Relocatable {
public:
- explicit FlatStringReader(Handle<String> str);
- explicit FlatStringReader(Vector<const char> input);
+ FlatStringReader(Isolate* isolate, Handle<String> str);
+ FlatStringReader(Isolate* isolate, Vector<const char> input);
void PostGarbageCollection();
inline uc32 Get(int index);
int length() { return length_; }
// [to_number]: Cached to_number computed at startup.
DECL_ACCESSORS(to_number, Object)
+ inline byte kind();
+ inline void set_kind(byte kind);
+
// Casting.
static inline Oddball* cast(Object* obj);
// Initialize the fields.
MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
- Object* to_number);
+ Object* to_number,
+ byte kind);
// Layout description.
static const int kToStringOffset = HeapObject::kHeaderSize;
static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kSize = kToNumberOffset + kPointerSize;
+ static const int kKindOffset = kToNumberOffset + kPointerSize;
+ static const int kSize = kKindOffset + kPointerSize;
+
+ static const byte kFalse = 0;
+ static const byte kTrue = 1;
+ static const byte kNotBooleanMask = ~1;
+ static const byte kTheHole = 2;
+ static const byte kNull = 3;
+ static const byte kArgumentMarker = 4;
+ static const byte kUndefined = 5;
+ static const byte kOther = 6;
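// Because kFalse (0) and kTrue (1) are the only kind values with no bits
// set under kNotBooleanMask (~1), a boolean test reduces to a single mask
// and compare. An illustrative predicate (the helper name is hypothetical):
static bool IsBooleanKind(byte kind) {
  return (kind & kNotBooleanMask) == 0;  // True only for kFalse and kTrue.
}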
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
: materialized_literal_count_(0),
expected_property_count_(0),
only_simple_this_property_assignments_(false),
- this_property_assignments_(Factory::empty_fixed_array()),
+ this_property_assignments_(
+ Isolate::Current()->factory()->empty_fixed_array()),
loop_count_(0),
variable_(variable),
parent_(*variable) {
if (static_cast<unsigned>(symbol_id)
>= static_cast<unsigned>(symbol_cache_.length())) {
if (scanner().is_literal_ascii()) {
- return Factory::LookupAsciiSymbol(scanner().literal_ascii_string());
+ return isolate()->factory()->LookupAsciiSymbol(
+ scanner().literal_ascii_string());
} else {
- return Factory::LookupTwoByteSymbol(scanner().literal_uc16_string());
+ return isolate()->factory()->LookupTwoByteSymbol(
+ scanner().literal_uc16_string());
}
}
return LookupCachedSymbol(symbol_id);
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
if (scanner().is_literal_ascii()) {
- result = Factory::LookupAsciiSymbol(scanner().literal_ascii_string());
+ result = isolate()->factory()->LookupAsciiSymbol(
+ scanner().literal_ascii_string());
} else {
- result = Factory::LookupTwoByteSymbol(scanner().literal_uc16_string());
+ result = isolate()->factory()->LookupTwoByteSymbol(
+ scanner().literal_uc16_string());
}
symbol_cache_.at(symbol_id) = result;
return result;
}
- Counters::total_preparse_symbols_skipped.Increment();
+ COUNTERS->total_preparse_symbols_skipped()->Increment();
return result;
}
bool allow_natives_syntax,
v8::Extension* extension,
ScriptDataImpl* pre_data)
- : symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
+ : isolate_(script->GetIsolate()),
+ symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
script_(script),
- scanner_(),
+ scanner_(isolate_),
top_scope_(NULL),
with_nesting_level_(0),
temp_scope_(NULL),
StrictModeFlag strict_mode) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(&Counters::parse);
- Counters::total_parse_size.Increment(source->length());
+ HistogramTimerScope timer(COUNTERS->parse());
+ COUNTERS->total_parse_size()->Increment(source->length());
fni_ = new FuncNameInferrer();
// Initialize parser state.
in_global_context
? Scope::GLOBAL_SCOPE
: Scope::EVAL_SCOPE;
- Handle<String> no_name = Factory::empty_symbol();
+ Handle<String> no_name = isolate()->factory()->empty_symbol();
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, type, inside_with());
false,
temp_scope.ContainsLoops());
} else if (stack_overflow_) {
- Top::StackOverflow();
+ isolate()->StackOverflow();
}
}
FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(&Counters::parse_lazy);
+ HistogramTimerScope timer(COUNTERS->parse_lazy());
Handle<String> source(String::cast(script_->source()));
- Counters::total_parse_size.Increment(source->length());
+ COUNTERS->total_parse_size()->Increment(source->length());
Handle<SharedFunctionInfo> shared_info = info->shared_info();
// Initialize parser state.
{
// Parse the function literal.
- Handle<String> no_name = Factory::empty_symbol();
+ Handle<String> no_name = isolate()->factory()->empty_symbol();
Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
if (!info->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info, scope);
// not safe to do before scope has been deleted.
if (result == NULL) {
zone_scope->DeleteOnExit();
- if (stack_overflow_) Top::StackOverflow();
+ if (stack_overflow_) isolate()->StackOverflow();
} else {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
MessageLocation location(script_,
source_location.beg_pos,
source_location.end_pos);
- Handle<FixedArray> elements = Factory::NewFixedArray(args.length());
+ Factory* factory = isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = Factory::NewStringFromUtf8(CStrVector(args[i]));
+ Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
elements->set(i, *arg_string);
}
- Handle<JSArray> array = Factory::NewJSArrayWithElements(elements);
- Handle<Object> result = Factory::NewSyntaxError(type, array);
- Top::Throw(*result, &location);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(type, array);
+ isolate()->Throw(*result, &location);
}
MessageLocation location(script_,
source_location.beg_pos,
source_location.end_pos);
- Handle<FixedArray> elements = Factory::NewFixedArray(args.length());
+ Factory* factory = isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
elements->set(i, *args[i]);
}
- Handle<JSArray> array = Factory::NewJSArrayWithElements(elements);
- Handle<Object> result = Factory::NewSyntaxError(type, array);
- Top::Throw(*result, &location);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(type, array);
+ isolate()->Throw(*result, &location);
}
// form this.x = y;
Handle<FixedArray> GetThisPropertyAssignments() {
if (names_ == NULL) {
- return Factory::empty_fixed_array();
+ return FACTORY->empty_fixed_array();
}
ASSERT(names_ != NULL);
ASSERT(assigned_arguments_ != NULL);
ASSERT_EQ(names_->length(), assigned_arguments_->length());
ASSERT_EQ(names_->length(), assigned_constants_->length());
Handle<FixedArray> assignments =
- Factory::NewFixedArray(names_->length() * 3);
+ FACTORY->NewFixedArray(names_->length() * 3);
for (int i = 0; i < names_->length(); i++) {
assignments->set(i * 3, *names_->at(i));
assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
uint32_t dummy;
if (literal != NULL &&
literal->handle()->IsString() &&
- !String::cast(*(literal->handle()))->Equals(Heap::Proto_symbol()) &&
+ !String::cast(*(literal->handle()))->Equals(HEAP->Proto_symbol()) &&
!String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
Handle<String> key = Handle<String>::cast(literal->handle());
EnsureAllocation();
names_->Add(name);
assigned_arguments_->Add(index);
- assigned_constants_->Add(Factory::undefined_value());
+ assigned_constants_->Add(FACTORY->undefined_value());
}
void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
// Check "use strict" directive (ES5 14.1).
if (!top_scope_->is_strict_mode() &&
- directive->Equals(Heap::use_strict()) &&
+ directive->Equals(isolate()->heap()->use_strict()) &&
token_loc.end_pos - token_loc.beg_pos ==
- Heap::use_strict()->length() + 2) {
+ isolate()->heap()->use_strict()->length() + 2) {
top_scope_->EnableStrictMode();
// "use strict" is the only directive for now.
directive_prologue = false;
var->mode() == Variable::CONST);
const char* type = (var->mode() == Variable::VAR) ? "var" : "const";
Handle<String> type_string =
- Factory::NewStringFromUtf8(CStrVector(type), TENURED);
+ isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
Expression* expression =
- NewThrowTypeError(Factory::redeclaration_symbol(),
+ NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
type_string, name);
top_scope_->SetIllegalRedeclaration(expression);
}
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared =
- Factory::NewSharedFunctionInfo(name, literals, code,
+ isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
shared->set_construct_stub(*construct_stub);
}
static bool IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(Factory::eval_symbol()) ||
- string.is_identical_to(Factory::arguments_symbol());
+ return string.is_identical_to(FACTORY->eval_symbol()) ||
+ string.is_identical_to(FACTORY->arguments_symbol());
}
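The hunks above all perform one mechanical migration: process-wide statics (Factory::, Heap::, Top::, Counters::) become accessors on the owning Isolate, with the FACTORY/HEAP/COUNTERS macros standing in for Isolate::Current() where no isolate pointer is in scope. A minimal sketch of the pattern, under hypothetical names (MyVisitor is not V8 code):

class MyVisitor {
 public:
  explicit MyVisitor(Isolate* isolate) : isolate_(isolate) {}

  Handle<String> EmptyName() {
    // Before: Factory::empty_symbol() reached a process-wide singleton.
    // After: allocation is routed through this object's own isolate.
    return isolate_->factory()->empty_symbol();
  }

 private:
  Isolate* isolate_;  // cached pointer, no TLS lookup per call
};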
// If the variable declaration declares exactly one non-const
// the number of arguments (1 or 2).
initialize =
new CallRuntime(
- Factory::InitializeConstGlobal_symbol(),
+ isolate()->factory()->InitializeConstGlobal_symbol(),
Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
arguments);
} else {
// the number of arguments (2 or 3).
initialize =
new CallRuntime(
- Factory::InitializeVarGlobal_symbol(),
+ isolate()->factory()->InitializeVarGlobal_symbol(),
Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
arguments);
}
//
// To be consistent with KJS we report the syntax error at runtime.
if (!top_scope_->is_function_scope()) {
- Handle<String> type = Factory::illegal_return_symbol();
+ Handle<String> type = isolate()->factory()->illegal_return_symbol();
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
return new ExpressionStatement(throw_error);
}
if (peek() == Token::LBRACE) {
// Allocate a temporary for holding the finally state while
// executing the finally block.
- catch_var = top_scope_->NewTemporary(Factory::catch_var_symbol());
+ catch_var =
+ top_scope_->NewTemporary(isolate()->factory()->catch_var_symbol());
Literal* name_literal = new Literal(name);
VariableProxy* catch_var_use = new VariableProxy(catch_var);
Expression* obj = new CatchExtensionObject(name_literal, catch_var_use);
// error here but for compatibility with JSC we choose to report
// the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_for_in_symbol();
expression = NewThrowReferenceError(type);
}
ForInStatement* loop = new ForInStatement(labels);
// for compatibility with JSC we choose to report the error at
// runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_assignment_symbol();
expression = NewThrowReferenceError(type);
}
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_prefix_op_symbol();
expression = NewThrowReferenceError(type);
}
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_postfix_op_symbol();
expression = NewThrowReferenceError(type);
}
// is called without a receiver and it refers to the original eval
// function.
VariableProxy* callee = result->AsVariableProxy();
- if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
+ if (callee != NULL &&
+ callee->IsVariable(isolate()->factory()->eval_symbol())) {
Handle<String> name = callee->name();
Variable* var = top_scope_->Lookup(name);
if (var == NULL) {
case Token::NULL_LITERAL:
Consume(Token::NULL_LITERAL);
- result = new Literal(Factory::null_value());
+ result = new Literal(isolate()->factory()->null_value());
break;
case Token::TRUE_LITERAL:
Consume(Token::TRUE_LITERAL);
- result = new Literal(Factory::true_value());
+ result = new Literal(isolate()->factory()->true_value());
break;
case Token::FALSE_LITERAL:
Consume(Token::FALSE_LITERAL);
- result = new Literal(Factory::false_value());
+ result = new Literal(isolate()->factory()->false_value());
break;
case Token::IDENTIFIER:
// Allocate a fixed array with all the literals.
Handle<FixedArray> literals =
- Factory::NewFixedArray(values->length(), TENURED);
+ isolate()->factory()->NewFixedArray(values->length(), TENURED);
// Fill in the literals.
bool is_simple = true;
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0) {
- literals->set_map(Heap::fixed_cow_array_map());
+ literals->set_map(isolate()->heap()->fixed_cow_array_map());
}
return new ArrayLiteral(literals, values,
Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
ASSERT(IsCompileTimeValue(expression));
- Handle<FixedArray> result = Factory::NewFixedArray(2, TENURED);
+ Handle<FixedArray> result = FACTORY->NewFixedArray(2, TENURED);
ObjectLiteral* object_literal = expression->AsObjectLiteral();
if (object_literal != NULL) {
ASSERT(object_literal->is_simple());
if (CompileTimeValue::IsCompileTimeValue(expression)) {
return CompileTimeValue::GetValue(expression);
}
- return Factory::undefined_value();
+ return isolate()->factory()->undefined_value();
}
// Defined in ast.cc
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
if (name->AsArrayIndex(&hash)) {
- Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
+ Handle<Object> key_handle = FACTORY->NewNumberFromUint(hash);
key = key_handle.location();
map = &elems;
} else {
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
- Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+ Handle<String> name = FACTORY->NewStringFromAscii(CStrVector(str));
key = name.location();
hash = name->Hash();
map = &props;
next == Token::STRING || is_keyword) {
Handle<String> name;
if (is_keyword) {
- name = Factory::LookupAsciiSymbol(Token::String(next));
+ name = isolate_->factory()->LookupAsciiSymbol(Token::String(next));
} else {
name = GetSymbol(CHECK_OK);
}
// Computation of literal_index must happen before pre parse bailout.
int literal_index = temp_scope_->NextMaterializedLiteralIndex();
- Handle<FixedArray> constant_properties =
- Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
+ Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
+ number_of_boilerplate_properties * 2, TENURED);
bool is_simple = true;
bool fast_elements = true;
// this is the actual function name, otherwise this is the name of the
// variable declared and initialized with the function (expression). In
// that case, we don't have a function name (it's empty).
- Handle<String> name = is_named ? var_name : Factory::empty_symbol();
+ Handle<String> name =
+ is_named ? var_name : isolate()->factory()->empty_symbol();
// The function name, if any.
- Handle<String> function_name = Factory::empty_symbol();
+ Handle<String> function_name = isolate()->factory()->empty_symbol();
if (is_named && (type == EXPRESSION || type == NESTED)) {
function_name = name;
}
// End position greater than end of stream is safe, and hard to check.
ReportInvalidPreparseData(name, CHECK_OK);
}
- Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
+ COUNTERS->total_preparse_skipped()->Increment(
+ end_pos - function_block_pos);
// Seek to position just before terminal '}'.
scanner().SeekForward(end_pos - 1);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
- this_property_assignments = Factory::empty_fixed_array();
+ this_property_assignments = isolate()->factory()->empty_fixed_array();
Expect(Token::RBRACE, CHECK_OK);
} else {
ParseSourceElements(body, Token::RBRACE, CHECK_OK);
top_scope_->ForceEagerCompilation();
}
- Runtime::Function* function = Runtime::FunctionForSymbol(name);
+ const Runtime::Function* function = Runtime::FunctionForSymbol(name);
// Check for built-in IS_VAR macro.
if (function != NULL &&
Literal* Parser::GetLiteralUndefined() {
- return new Literal(Factory::undefined_value());
+ return new Literal(isolate()->factory()->undefined_value());
}
Literal* Parser::GetLiteralTheHole() {
- return new Literal(Factory::the_hole_value());
+ return new Literal(isolate()->factory()->the_hole_value());
}
Literal* Parser::NewNumberLiteral(double number) {
- return new Literal(Factory::NewNumber(number, TENURED));
+ return new Literal(isolate()->factory()->NewNumber(number, TENURED));
}
Expression* Parser::NewThrowReferenceError(Handle<String> type) {
- return NewThrowError(Factory::MakeReferenceError_symbol(),
+ return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(),
type, HandleVector<Object>(NULL, 0));
}
Handle<Object> first) {
int argc = first.is_null() ? 0 : 1;
Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
- return NewThrowError(Factory::MakeSyntaxError_symbol(), type, arguments);
+ return NewThrowError(
+ isolate()->factory()->MakeSyntaxError_symbol(), type, arguments);
}
Handle<Object> elements[] = { first, second };
Vector< Handle<Object> > arguments =
HandleVector<Object>(elements, ARRAY_SIZE(elements));
- return NewThrowError(Factory::MakeTypeError_symbol(), type, arguments);
+ return NewThrowError(
+ isolate()->factory()->MakeTypeError_symbol(), type, arguments);
}
Handle<String> type,
Vector< Handle<Object> > arguments) {
int argc = arguments.length();
- Handle<FixedArray> elements = Factory::NewFixedArray(argc, TENURED);
+ Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc,
+ TENURED);
for (int i = 0; i < argc; i++) {
Handle<Object> element = arguments[i];
if (!element.is_null()) {
elements->set(i, *element);
}
}
- Handle<JSArray> array = Factory::NewJSArrayWithElements(elements, TENURED);
+ Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
+ TENURED);
ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
args->Add(new Literal(type));
if (result.is_null() || scanner_.Next() != Token::EOS) {
if (stack_overflow_) {
// Scanner failed.
- Top::StackOverflow();
+ isolate()->StackOverflow();
} else {
// Parse failed. Scanner's current token is the unexpected token.
Token::Value token = scanner_.current_token();
}
Scanner::Location source_location = scanner_.location();
- MessageLocation location(Factory::NewScript(script),
+ Factory* factory = isolate()->factory();
+ MessageLocation location(factory->NewScript(script),
source_location.beg_pos,
source_location.end_pos);
Handle<JSArray> array;
if (name_opt == NULL) {
- array = Factory::NewJSArray(0);
+ array = factory->NewJSArray(0);
} else {
- Handle<String> name = Factory::NewStringFromUtf8(CStrVector(name_opt));
- Handle<FixedArray> element = Factory::NewFixedArray(1);
+ Handle<String> name = factory->NewStringFromUtf8(CStrVector(name_opt));
+ Handle<FixedArray> element = factory->NewFixedArray(1);
element->set(0, *name);
- array = Factory::NewJSArrayWithElements(element);
+ array = factory->NewJSArrayWithElements(element);
}
- Handle<Object> result = Factory::NewSyntaxError(message, array);
- Top::Throw(*result, &location);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
+ isolate()->Throw(*result, &location);
return Handle<Object>::null();
}
}
Handle<String> JsonParser::GetString() {
int literal_length = scanner_.literal_length();
if (literal_length == 0) {
- return Factory::empty_string();
+ return isolate()->factory()->empty_string();
}
if (scanner_.is_literal_ascii()) {
- return Factory::NewStringFromAscii(scanner_.literal_ascii_string());
+ return isolate()->factory()->NewStringFromAscii(
+ scanner_.literal_ascii_string());
} else {
- return Factory::NewStringFromTwoByte(scanner_.literal_uc16_string());
+ return isolate()->factory()->NewStringFromTwoByte(
+ scanner_.literal_uc16_string());
}
}
case Token::STRING:
return GetString();
case Token::NUMBER:
- return Factory::NewNumber(scanner_.number());
+ return isolate()->factory()->NewNumber(scanner_.number());
case Token::FALSE_LITERAL:
- return Factory::false_value();
+ return isolate()->factory()->false_value();
case Token::TRUE_LITERAL:
- return Factory::true_value();
+ return isolate()->factory()->true_value();
case Token::NULL_LITERAL:
- return Factory::null_value();
+ return isolate()->factory()->null_value();
case Token::LBRACE:
return ParseJsonObject();
case Token::LBRACK:
// Parse a JSON object. Scanner must be right after '{' token.
Handle<Object> JsonParser::ParseJsonObject() {
Handle<JSFunction> object_constructor(
- Top::global_context()->object_function());
- Handle<JSObject> json_object = Factory::NewJSObject(object_constructor);
+ isolate()->global_context()->object_function());
+ Handle<JSObject> json_object =
+ isolate()->factory()->NewJSObject(object_constructor);
if (scanner_.peek() == Token::RBRACE) {
scanner_.Next();
} else {
- if (StackLimitCheck().HasOverflowed()) {
+ if (StackLimitCheck(isolate()).HasOverflowed()) {
stack_overflow_ = true;
return Handle<Object>::null();
}
uint32_t index;
if (key->AsArrayIndex(&index)) {
SetOwnElement(json_object, index, value, kNonStrictMode);
- } else if (key->Equals(Heap::Proto_symbol())) {
+ } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
// We can't remove the __proto__ accessor since it's hardcoded
// in several places. Instead go along and add the value as
// the prototype of the created object if possible.
if (token == Token::RBRACK) {
scanner_.Next();
} else {
- if (StackLimitCheck().HasOverflowed()) {
+ if (StackLimitCheck(isolate()).HasOverflowed()) {
stack_overflow_ = true;
return Handle<Object>::null();
}
// Allocate a fixed array with all the elements.
Handle<FixedArray> fast_elements =
- Factory::NewFixedArray(elements.length());
+ isolate()->factory()->NewFixedArray(elements.length());
for (int i = 0, n = elements.length(); i < n; i++) {
fast_elements->set(i, *elements[i]);
}
- return Factory::NewJSArrayWithElements(fast_elements);
+ return isolate()->factory()->NewJSArrayWithElements(fast_elements);
}
// ----------------------------------------------------------------------------
RegExpParser::RegExpParser(FlatStringReader* in,
Handle<String>* error,
bool multiline)
- : error_(error),
- captures_(NULL),
- in_(in),
- current_(kEndMarker),
- next_pos_(0),
- capture_count_(0),
- has_more_(true),
- multiline_(multiline),
- simple_(false),
- contains_anchor_(false),
- is_scanned_for_captures_(false),
- failed_(false) {
+ : isolate_(Isolate::Current()),
+ error_(error),
+ captures_(NULL),
+ in_(in),
+ current_(kEndMarker),
+ next_pos_(0),
+ capture_count_(0),
+ has_more_(true),
+ multiline_(multiline),
+ simple_(false),
+ contains_anchor_(false),
+ is_scanned_for_captures_(false),
+ failed_(false) {
Advance();
}
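A note on the constructor change above: RegExpParser resolves Isolate::Current() once and caches it in isolate_, so hot paths such as Advance() pay a plain member load instead of a thread-local lookup per character. The same caching pattern in isolation (Walker is hypothetical, not V8 code):

class Walker {
 public:
  Walker() : isolate_(Isolate::Current()) {}  // one TLS lookup, up front

  void Step() {
    // Hot path: uses the cached field, mirroring RegExpParser::Advance().
    if (StackLimitCheck(isolate_).HasOverflowed()) return;
    // ... consume one input position ...
  }

 private:
  Isolate* isolate_;
};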
void RegExpParser::Advance() {
if (next_pos_ < in()->length()) {
- StackLimitCheck check;
+ StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
- ReportError(CStrVector(Top::kStackOverflowMessage));
- } else if (Zone::excess_allocation()) {
+ ReportError(CStrVector(Isolate::kStackOverflowMessage));
+ } else if (isolate()->zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
current_ = in()->Get(next_pos_);
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
- *error_ = Factory::NewStringFromAscii(message, NOT_TENURED);
+ *error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
// Zip to the end to make sure that no more input is read.
current_ = kEndMarker;
next_pos_ = in()->length();
}
}
-class SourceCharacter {
- public:
- static bool Is(uc32 c) {
- switch (c) {
- // case ']': case '}':
- // In spidermonkey and jsc these are treated as source characters
- // so we do too.
- case '^': case '$': case '\\': case '.': case '*': case '+':
- case '?': case '(': case ')': case '[': case '{': case '|':
- case RegExpParser::kEndMarker:
- return false;
- default:
- return true;
- }
- }
-};
-
-
-static unibrow::Predicate<SourceCharacter> source_character;
-
-
-static inline bool IsSourceCharacter(uc32 c) {
- return source_character.get(c);
-}
#ifdef DEBUG
// Currently only used in an ASSERT.
static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
bool allow_lazy,
ParserRecorder* recorder) {
- V8JavaScriptScanner scanner;
+ Isolate* isolate = Isolate::Current();
+ V8JavaScriptScanner scanner(isolate);
scanner.Initialize(source);
- intptr_t stack_limit = StackGuard::real_climit();
+ intptr_t stack_limit = isolate->stack_guard()->real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
recorder,
allow_lazy,
stack_limit)) {
- Top::StackOverflow();
+ isolate->StackOverflow();
return NULL;
}
DeleteArray(args[i]);
}
DeleteArray(args.start());
- ASSERT(Top::has_pending_exception());
+ ASSERT(info->isolate()->has_pending_exception());
} else {
Handle<String> source = Handle<String>(String::cast(script->source()));
result = parser.ParseProgram(source,
int disjunction_capture_index_;
};
+ Isolate* isolate() { return isolate_; }
+
uc32 current() { return current_; }
bool has_more() { return has_more_; }
bool has_next() { return next_pos_ < in()->length(); }
FlatStringReader* in() { return in_; }
void ScanForCaptures();
+ Isolate* isolate_;
Handle<String>* error_;
ZoneList<RegExpCapture*>* captures_;
FlatStringReader* in_;
PARSE_EAGERLY
};
+ Isolate* isolate() { return isolate_; }
+
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(Handle<String> source,
bool in_global_context,
if (stack_overflow_) {
return Token::ILLEGAL;
}
- if (StackLimitCheck().HasOverflowed()) {
+ if (StackLimitCheck(isolate()).HasOverflowed()) {
// Any further calls to Next or peek will return the illegal token.
// The current call must return the next token, which might already
// have been peek'ed.
Handle<String> LiteralString(PretenureFlag tenured) {
if (scanner().is_literal_ascii()) {
- return Factory::NewStringFromAscii(scanner().literal_ascii_string(),
- tenured);
+ return isolate_->factory()->NewStringFromAscii(
+ scanner().literal_ascii_string(), tenured);
} else {
- return Factory::NewStringFromTwoByte(scanner().literal_uc16_string(),
- tenured);
+ return isolate_->factory()->NewStringFromTwoByte(
+ scanner().literal_uc16_string(), tenured);
}
}
Handle<String> NextLiteralString(PretenureFlag tenured) {
if (scanner().is_next_literal_ascii()) {
- return Factory::NewStringFromAscii(scanner().next_literal_ascii_string(),
- tenured);
+ return isolate_->factory()->NewStringFromAscii(
+ scanner().next_literal_ascii_string(), tenured);
} else {
- return Factory::NewStringFromTwoByte(scanner().next_literal_uc16_string(),
- tenured);
+ return isolate_->factory()->NewStringFromTwoByte(
+ scanner().next_literal_uc16_string(), tenured);
}
}
Handle<String> type,
Vector< Handle<Object> > arguments);
+ Isolate* isolate_;
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
}
private:
- JsonParser() { }
+ JsonParser() : isolate_(Isolate::Current()), scanner_(isolate_) { }
~JsonParser() { }
+ Isolate* isolate() { return isolate_; }
+
// Parse a string containing a single JSON value.
Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
// Parse a single JSON value from input (grammar production JSONValue).
// Converts the currently parsed literal to a JavaScript String.
Handle<String> GetString();
+ Isolate* isolate_;
JsonScanner scanner_;
bool stack_overflow_;
};
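Usage sketch for the private JsonParser constructor above. Only the constructor is visible in this hunk, so the entry point below is assumed, not confirmed by the patch; the point is that construction binds Isolate::Current() and the scanner, after which everything allocates through isolate()->factory():

// static (assumed entry point; the hunk shows only the private constructor)
Handle<Object> JsonParser::Parse(Handle<String> script,
                                 UC16CharacterStream* source) {
  JsonParser parser;                        // binds Isolate::Current()
  return parser.ParseJson(script, source);  // allocates via parser.isolate()
}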
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
}
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name("v8:<unknown>");
}
-
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate, const char* name)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name(name);
}
// one) so we initialize it here too.
thread->thread_handle_data()->thread_ = pthread_self();
ASSERT(thread->IsValid());
+ Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
}
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
#include "v8.h"
#include "platform.h"
-#include "top.h"
#include "v8threads.h"
#include "vm-state-inl.h"
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(i::Isolate::Current(),
+ StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+ i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
- LOG(SharedLibraryEvent(lib_name, start, end));
+ LOG(isolate, SharedLibraryEvent(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
}
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name("v8:<unknown>");
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate, const char* name)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name(name);
}
0, 0, 0);
thread->thread_handle_data()->thread_ = pthread_self();
ASSERT(thread->IsValid());
+ Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
#ifdef ENABLE_LOGGING_AND_PROFILING
-static Sampler* active_sampler_ = NULL;
-static int vm_tid_ = 0;
-
-
#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.
#ifndef V8_HOST_ARCH_MIPS
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
- if (vm_tid_ != GetThreadID()) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = Top::current_vm_state();
+ sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
// Implement this on MIPS.
UNIMPLEMENTED();
#endif
- active_sampler_->SampleStack(sample);
- active_sampler_->Tick(sample);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
#endif
}
class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ int vm_tid() const { return vm_tid_; }
+
+ private:
+ const int vm_tid_;
+};
+
+
+class SignalSender : public Thread {
public:
enum SleepInterval {
- FULL_INTERVAL,
- HALF_INTERVAL
+ HALF_INTERVAL,
+ FULL_INTERVAL
};
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- signal_handler_installed_(false),
- vm_tgid_(getpid()),
- signal_sender_launched_(false) {
+ explicit SignalSender(int interval)
+ : Thread(NULL), vm_tgid_(getpid()), interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Install a signal handler.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+ // Start a thread that sends SIGPROF signal to VM threads.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+
+ // Restore the old signal handler.
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
}
- void SignalSender() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
- SendProfilingSignal();
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state = SamplerRegistry::GetState();
+ while (state != SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
- RuntimeProfiler::NotifyTick();
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
} else {
- if (sampler_->IsProfiling()) SendProfilingSignal();
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
Sleep(FULL_INTERVAL);
}
}
}
- void SendProfilingSignal() {
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
- syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+ syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
}
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
- useconds_t interval = sampler_->interval_ * 1000 - 100;
+ useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
USE(result);
}
- Sampler* sampler_;
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- int vm_tgid_;
- bool signal_sender_launched_;
- pthread_t signal_sender_thread_;
+ const int vm_tgid_;
+ const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-static void* SenderEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->SignalSender();
- return 0;
-}
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
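In brief, the change above replaces one signal-sender pthread per sampler with a single process-wide SignalSender thread serving every sampler in SamplerRegistry, with mutex_ guarding the shared statics. Seen from a sampler, the lifecycle collapses to this (MyTickSampler is a hypothetical Sampler subclass providing DoSampleStack):

Sampler* sampler = new MyTickSampler(isolate, 1 /* interval in ms */);
sampler->Start();  // registers with SamplerRegistry; the first Start()
                   // installs the SIGPROF handler and spawns SignalSender
// ... profiling runs ...
sampler->Stop();   // deregisters; the last Stop() joins SignalSender and
                   // restores the previous SIGPROF handler
delete sampler;    // the destructor now ASSERTs the sampler is inactive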
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
- ASSERT(!data_->signal_sender_launched_);
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
ASSERT(!IsActive());
- vm_tid_ = GetThreadID();
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- data_->signal_handler_installed_ =
- sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;
-
- // Start a thread that sends SIGPROF signal to VM thread.
- // Sending the signal ourselves instead of relying on itimer provides
- // much better accuracy.
SetActive(true);
- if (pthread_create(
- &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
- data_->signal_sender_launched_ = true;
- }
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for signal sender termination (it will exit after setting
- // active_ to false).
- if (data_->signal_sender_launched_) {
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->signal_sender_thread_, NULL);
- data_->signal_sender_launched_ = false;
- }
-
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
}
-
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
MAP_PRIVATE | MAP_ANON,
kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
if (code_ptr == NULL) continue;
const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
+ LOG(Isolate::Current(),
+ SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
#endif // ENABLE_LOGGING_AND_PROFILING
}
}
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name("v8:<unknown>");
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate, const char* name)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name(name);
}
thread->thread_handle_data()->thread_ = pthread_self();
SetThreadName(thread->name());
ASSERT(thread->IsValid());
+ Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
class Sampler::PlatformData : public Malloced {
public:
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- task_self_(mach_task_self()),
- profiled_thread_(0),
- sampler_thread_(0) {
+ PlatformData() : profiled_thread_(mach_thread_self()) {}
+
+ ~PlatformData() {
+ // Deallocate Mach port for thread.
+ mach_port_deallocate(mach_task_self(), profiled_thread_);
}
- Sampler* sampler_;
+ thread_act_t profiled_thread() { return profiled_thread_; }
+
+ private:
// Note: for profiled_thread_ Mach primitives are used instead of PThread's
// because the latter doesn't provide the required thread manipulation primitives.
// For details, consult "Mac OS X Internals" book, Section 7.3.
- mach_port_t task_self_;
thread_act_t profiled_thread_;
- pthread_t sampler_thread_;
- RuntimeProfilerRateLimiter rate_limiter_;
+};
- // Sampler thread handler.
- void Runner() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- Sample();
- OS::Sleep(sampler_->interval_);
+class SamplerThread : public Thread {
+ public:
+ explicit SamplerThread(int interval) : Thread(NULL), interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ instance_ = new SamplerThread(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state = SamplerRegistry::GetState();
+ while (state != SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
+ OS::Sleep(interval_);
+ state = SamplerRegistry::GetState();
}
}
- void Sample() {
- if (sampler_->IsProfiling()) {
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+ static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ if (!sampler->IsProfiling()) return;
+ SamplerThread* sampler_thread =
+ reinterpret_cast<SamplerThread*>(raw_sampler_thread);
+ sampler_thread->SampleContext(sampler);
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SampleContext(Sampler* sampler) {
+ thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
- if (KERN_SUCCESS != thread_suspend(profiled_thread_)) return;
+ if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+ thread_state_flavor_t flavor = x86_THREAD_STATE64;
+ x86_thread_state64_t state;
+ mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
#else
#define REGISTER_FIELD(name) r ## name
#endif // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+ thread_state_flavor_t flavor = i386_THREAD_STATE;
+ i386_thread_state_t state;
+ mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
#else
#error Unsupported Mac OS X host architecture.
#endif // V8_HOST_ARCH
- if (thread_get_state(profiled_thread_,
- flavor,
- reinterpret_cast<natural_t*>(&state),
- &count) == KERN_SUCCESS) {
- sample->state = Top::current_vm_state();
- sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
- sampler_->SampleStack(sample);
- sampler_->Tick(sample);
- }
- thread_resume(profiled_thread_);
+ if (thread_get_state(profiled_thread,
+ flavor,
+ reinterpret_cast<natural_t*>(&state),
+ &count) == KERN_SUCCESS) {
+ sample->state = sampler->isolate()->current_vm_state();
+ sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+ sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+ sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ thread_resume(profiled_thread);
}
+
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SamplerThread* instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
#undef REGISTER_FIELD
-// Entry point for sampler thread.
-static void* SamplerEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->Runner();
- return 0;
-}
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+SamplerThread* SamplerThread::instance_ = NULL;
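A note on SampleContext above, since the constraint is easy to miss: between thread_suspend and thread_resume the sampler thread must not allocate or take locks, because the suspended thread may hold them and the process would deadlock; this is why the fallback TickSample lives on the stack. The core sequence, reduced (x86-64 field names under __DARWIN_UNIX03; profiled_thread and sample come from the surrounding code):

thread_state_flavor_t flavor = x86_THREAD_STATE64;
x86_thread_state64_t state;
mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
if (thread_suspend(profiled_thread) != KERN_SUCCESS) return;
// No allocation, no locks from here until thread_resume().
if (thread_get_state(profiled_thread, flavor,
                     reinterpret_cast<natural_t*>(&state),
                     &count) == KERN_SUCCESS) {
  sample->pc = reinterpret_cast<Address>(state.__rip);
  sample->sp = reinterpret_cast<Address>(state.__rsp);
  sample->fp = reinterpret_cast<Address>(state.__rbp);
}
thread_resume(profiled_thread);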
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // Do not start multiple threads for the same sampler.
ASSERT(!IsActive());
- data_->profiled_thread_ = mach_thread_self();
-
- // Create sampler thread with high priority.
- // According to POSIX spec, when SCHED_FIFO policy is used, a thread
- // runs until it exits or blocks.
- pthread_attr_t sched_attr;
- sched_param fifo_param;
- pthread_attr_init(&sched_attr);
- pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED);
- pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO);
- fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
- pthread_attr_setschedparam(&sched_attr, &fifo_param);
-
SetActive(true);
- pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
+ SamplerThread::AddActiveSampler(this);
}
void Sampler::Stop() {
- // Seting active to false triggers termination of the sampler
- // thread.
+ ASSERT(IsActive());
+ SamplerThread::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for sampler thread to terminate.
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->sampler_thread_, NULL);
-
- // Deallocate Mach port for thread.
- mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
}
#endif // ENABLE_LOGGING_AND_PROFILING
}
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name("v8:<unknown>");
UNIMPLEMENTED();
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate, const char* name)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name(name);
UNIMPLEMENTED();
}
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
}
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name("v8:<unknown>");
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate, const char* name)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name(name);
}
// one) so we initialize it here too.
thread->thread_handle_data()->thread_ = pthread_self();
ASSERT(thread->IsValid());
+ Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
};
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
}
-const char* OS::LogFileOpenMode = "w";
+const char* const OS::LogFileOpenMode = "w";
void OS::Print(const char* format, ...) {
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
}
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name("v8:<unknown>");
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate, const char* name)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
set_name(name);
}
// one) so we initialize it here too.
thread->thread_handle_data()->thread_ = pthread_self();
ASSERT(thread->IsValid());
+ Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
}
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
return ceil(x);
}
+
+static Mutex* limit_mutex = NULL;
+
+
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srand(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
// Open log file in binary mode to avoid \n -> \r\n conversion.
-const char* OS::LogFileOpenMode = "wb";
+const char* const OS::LogFileOpenMode = "wb";
// Print (debug) message to console.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
// For executable pages, try to randomize the allocation address
if (prot == PAGE_EXECUTE_READWRITE &&
msize >= static_cast<size_t>(Page::kPageSize)) {
- address = (V8::RandomPrivate() << kPageSizeBits)
+ address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits)
| kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
}
mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
if (mbase == NULL) {
- LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
}
if (err != ERROR_MOD_NOT_FOUND &&
err != ERROR_INVALID_HANDLE) return false;
}
- LOG(SharedLibraryEvent(
+ LOG(i::Isolate::Current(),
+ SharedLibraryEvent(
module_entry.szExePath,
reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
thread->thread_handle_data()->tid_ = GetCurrentThreadId();
+ Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return 0;
}
// Initialize a Win32 thread object. The thread has an invalid thread
// handle until it is started.
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
data_ = new PlatformData(kNoThread);
set_name("v8:<unknown>");
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(Isolate* isolate, const char* name)
+ : ThreadHandle(ThreadHandle::INVALID),
+ isolate_(isolate) {
data_ = new PlatformData(kNoThread);
set_name(name);
}
// ----------------------------------------------------------------------------
// Win32 profiler support.
-//
-// On win32 we use a sampler thread with high priority to sample the program
-// counter for the profiled thread.
class Sampler::PlatformData : public Malloced {
public:
- explicit PlatformData(Sampler* sampler) {
- sampler_ = sampler;
- sampler_thread_ = INVALID_HANDLE_VALUE;
- profiled_thread_ = INVALID_HANDLE_VALUE;
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. Using GetThreadHandle() will
+ // not work in this case. We're using OpenThread because DuplicateHandle
+ // for some reason doesn't work in Chrome's sandbox.
+ PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId())) {}
+
+ ~PlatformData() {
+ if (profiled_thread_ != NULL) {
+ CloseHandle(profiled_thread_);
+ profiled_thread_ = NULL;
+ }
}
- Sampler* sampler_;
- HANDLE sampler_thread_;
+ HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
HANDLE profiled_thread_;
- RuntimeProfilerRateLimiter rate_limiter_;
+};
+
- // Sampler thread handler.
- void Runner() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- Sample();
- Sleep(sampler_->interval_);
+class SamplerThread : public Thread {
+ public:
+ explicit SamplerThread(int interval) : Thread(NULL), interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ instance_ = new SamplerThread(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
}
}
- void Sample() {
- if (sampler_->IsProfiling()) {
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+ }
+ }
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state = SamplerRegistry::GetState();
+ while (state != SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
+ OS::Sleep(interval_);
+ }
+ }
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
- sample->state = Top::current_vm_state();
+ static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ if (!sampler->IsProfiling()) return;
+ SamplerThread* sampler_thread =
+ reinterpret_cast<SamplerThread*>(raw_sampler_thread);
+ sampler_thread->SampleContext(sampler);
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SampleContext(Sampler* sampler) {
+ HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
+ if (profiled_thread == NULL) return;
+
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread_, &context) != 0) {
+ static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+ sample->state = sampler->isolate()->current_vm_state();
+
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
+ sample->pc = reinterpret_cast<Address>(context.Rip);
+ sample->sp = reinterpret_cast<Address>(context.Rsp);
+ sample->fp = reinterpret_cast<Address>(context.Rbp);
#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
+ sample->pc = reinterpret_cast<Address>(context.Eip);
+ sample->sp = reinterpret_cast<Address>(context.Esp);
+ sample->fp = reinterpret_cast<Address>(context.Ebp);
#endif
- sampler_->SampleStack(sample);
- sampler_->Tick(sample);
- }
- ResumeThread(profiled_thread_);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ ResumeThread(profiled_thread);
}
+
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SamplerThread* instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
-// Entry point for sampler thread.
-static unsigned int __stdcall SamplerEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->Runner();
- return 0;
-}
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+SamplerThread* SamplerThread::instance_ = NULL;
-// Initialize a profile sampler.
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
-// Start profiling.
void Sampler::Start() {
- // Do not start multiple threads for the same sampler.
ASSERT(!IsActive());
-
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId());
- BOOL ok = data_->profiled_thread_ != NULL;
- if (!ok) return;
-
- // Start sampler thread.
- unsigned int tid;
SetActive(true);
- data_->sampler_thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(NULL, 0, SamplerEntry, data_, 0, &tid));
- // Set thread to high priority to increase sampling accuracy.
- SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
+ SamplerThread::AddActiveSampler(this);
}
-// Stop profiling.
void Sampler::Stop() {
- // Seting active to false triggers termination of the sampler
- // thread.
+ ASSERT(IsActive());
+ SamplerThread::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for sampler thread to terminate.
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- WaitForSingleObject(data_->sampler_thread_, INFINITE);
-
- // Release the thread handles
- CloseHandle(data_->sampler_thread_);
- CloseHandle(data_->profiled_thread_);
}
-
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
static bool Remove(const char* path);
// Log file open mode is platform-dependent due to line ends issues.
- static const char* LogFileOpenMode;
+ static const char* const LogFileOpenMode;
// Print output to console. This is mostly used for debugging output.
// On platforms that have standard terminal output, the output
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};
- // Create new thread.
- Thread();
- explicit Thread(const char* name);
+ // Create new thread (with a value for storing in the TLS isolate field).
+ explicit Thread(Isolate* isolate);
+ Thread(Isolate* isolate, const char* name);
virtual ~Thread();
// Start new thread by calling the Run() method in the new thread.
// A hint to the scheduler to let another thread run.
static void YieldCPU();
+ Isolate* isolate() const { return isolate_; }
+
// The thread name length is limited to 16 based on Linux's implementation of
// prctl().
static const int kMaxThreadNameLength = 16;
class PlatformData;
PlatformData* data_;
-
+ Isolate* isolate_;
char name_[kMaxThreadNameLength];
DISALLOW_COPY_AND_ASSIGN(Thread);
// ----------------------------------------------------------------------------
-// ScopedLock
+// ScopedLock/ScopedUnlock
//
-// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
-// of a mutex.
+// Stack-allocated ScopedLocks/ScopedUnlocks provide block-scoped
+// locking and unlocking of a mutex.
class ScopedLock {
public:
explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
+ ASSERT(mutex_ != NULL);
mutex_->Lock();
}
~ScopedLock() {
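The hunk is cut off here, but the RAII contract is the point of the comment above; a usage sketch with hypothetical names (samplers_mutex, active_samplers) standing in for the guarded state:

{
  ScopedLock lock(samplers_mutex);  // Lock() runs in the constructor
  active_samplers->Add(sampler);    // hypothetical state under the mutex
}  // ~ScopedLock() unlocks on every path out of the block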
class Sampler {
public:
// Initialize sampler.
- explicit Sampler(int interval);
+ Sampler(Isolate* isolate, int interval);
virtual ~Sampler();
+ int interval() const { return interval_; }
+
// Performs stack sampling.
void SampleStack(TickSample* sample) {
DoSampleStack(sample);
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
+ Isolate* isolate() { return isolate_; }
+
// Used in tests to make sure that stack sampling is performed.
int samples_taken() const { return samples_taken_; }
void ResetSamplesTaken() { samples_taken_ = 0; }
class PlatformData;
PlatformData* data() { return data_; }
+ PlatformData* platform_data() { return data_; }
+
protected:
virtual void DoSampleStack(TickSample* sample) = 0;
void SetActive(bool value) { NoBarrier_Store(&active_, value); }
void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
+ Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
+
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
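A minimal sketch of a concrete sampler under the new constructor; the class name and sampling body are illustrative, not part of this patch:

class ProfilerSampler : public Sampler {
 public:
  ProfilerSampler(Isolate* isolate, int interval)
      : Sampler(isolate, interval) {}  // each sampler is bound to one isolate

 protected:
  virtual void DoSampleStack(TickSample* sample) {
    // Record pc/sp/fp of the profiled thread; the real work is
    // platform-specific and lives in the per-OS platform files.
  }
};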
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+/*
+TODO(isolates): I include v8.h instead of these because we need Isolate and
+some classes (NativeAllocationChecker) have moved into isolate.h.
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
#include "allocation.h"
+#include "allocation-inl.h"
#include "utils.h"
#include "list-inl.h"
-#include "hashmap.h"
+#include "hashmap.h"
+*/
+
#include "preparse-data.h"
+
namespace v8 {
namespace internal {
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../include/v8-preparser.h"
+
+#include "v8.h"
+
#include "globals.h"
#include "checks.h"
#include "allocation.h"
class StandAloneJavaScriptScanner : public JavaScriptScanner {
public:
+ StandAloneJavaScriptScanner()
+ : JavaScriptScanner(Isolate::Current()) { }
+
void Initialize(UC16CharacterStream* source) {
source_ = source;
Init();
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+/*
+TODO(isolates): I include v8.h instead of these because we need Isolate and
+some classes (NativeAllocationChecker) have moved into isolate.h.
#include "../include/v8stdint.h"
#include "unicode.h"
#include "globals.h"
#include "allocation.h"
#include "utils.h"
#include "list.h"
+*/
+
#include "scanner-base.h"
#include "preparse-data.h"
#include "preparser.h"
Print("%c", string->Get(i));
}
if (quote) Print("\"");
- } else if (object == Heap::null_value()) {
+ } else if (object->IsNull()) {
Print("null");
- } else if (object == Heap::true_value()) {
+ } else if (object->IsTrue()) {
Print("true");
- } else if (object == Heap::false_value()) {
+ } else if (object->IsFalse()) {
Print("false");
- } else if (object == Heap::undefined_value()) {
+ } else if (object->IsUndefined()) {
Print("undefined");
} else if (object->IsNumber()) {
Print("%g", object->Number());
class IndentedScope BASE_EMBEDDED {
public:
- IndentedScope() {
+ explicit IndentedScope(AstPrinter* printer) : ast_printer_(printer) {
ast_printer_->inc_indent();
}
ast_printer_->dec_indent();
}
- static void SetAstPrinter(AstPrinter* a) { ast_printer_ = a; }
-
private:
- static AstPrinter* ast_printer_;
+ AstPrinter* ast_printer_;
};
-AstPrinter* IndentedScope::ast_printer_ = NULL;
-
-
//-----------------------------------------------------------------------------
-int AstPrinter::indent_ = 0;
-
-AstPrinter::AstPrinter() {
- ASSERT(indent_ == 0);
- IndentedScope::SetAstPrinter(this);
+AstPrinter::AstPrinter() : indent_(0) {
}
AstPrinter::~AstPrinter() {
ASSERT(indent_ == 0);
- IndentedScope::SetAstPrinter(NULL);
}
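A usage sketch of the reworked scope: indentation is now per-printer state instead of a process-wide static, so a scope is constructed against the printer it indents. The visitor below is illustrative; VisitCallRuntime further down shows the same shape in the patch itself:

void AstPrinter::VisitExampleNode(ExampleNode* node) {  // hypothetical node
  PrintLiteralIndented("EXAMPLE ", node->name(), false);
  IndentedScope indent(this);  // inc_indent() now, dec_indent() at scope exit
  PrintArguments(node->arguments());
}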
node->type());
Variable* var = node->var();
if (var != NULL && var->rewrite() != NULL) {
- IndentedScope indent;
+ IndentedScope indent(this);
Visit(var->rewrite());
}
}
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
- IndentedScope indent;
+ IndentedScope indent(this);
PrintArguments(node->arguments());
}
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
- static int indent_;
+ int indent_;
};
#include "global-handles.h"
#include "heap-profiler.h"
#include "scopeinfo.h"
-#include "top.h"
#include "unicode.h"
#include "zone-inl.h"
TokenEnumerator::~TokenEnumerator() {
+ Isolate* isolate = Isolate::Current();
for (int i = 0; i < token_locations_.length(); ++i) {
if (!token_removed_[i]) {
- GlobalHandles::ClearWeakness(token_locations_[i]);
- GlobalHandles::Destroy(token_locations_[i]);
+ isolate->global_handles()->ClearWeakness(token_locations_[i]);
+ isolate->global_handles()->Destroy(token_locations_[i]);
}
}
}
int TokenEnumerator::GetTokenId(Object* token) {
+ Isolate* isolate = Isolate::Current();
if (token == NULL) return TokenEnumerator::kNoSecurityToken;
for (int i = 0; i < token_locations_.length(); ++i) {
if (*token_locations_[i] == token && !token_removed_[i]) return i;
}
- Handle<Object> handle = GlobalHandles::Create(token);
+ Handle<Object> handle = isolate->global_handles()->Create(token);
// handle.location() points to a memory cell holding a pointer
// to a token object in the V8's heap.
- GlobalHandles::MakeWeak(handle.location(), this, TokenRemovedCallback);
+ isolate->global_handles()->MakeWeak(handle.location(), this,
+ TokenRemovedCallback);
token_locations_.Add(handle.location());
token_removed_.Add(false);
return token_locations_.length() - 1;
}
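TokenEnumerator above shows the per-isolate replacement for the old static GlobalHandles API. Condensed, the weak-handle lifecycle it implements (all calls as in this hunk):

GlobalHandles* gh = Isolate::Current()->global_handles();
Handle<Object> handle = gh->Create(token);   // strong root in this isolate
gh->MakeWeak(handle.location(), this, TokenRemovedCallback);
// On teardown, a location whose callback never fired is cleaned up by hand:
gh->ClearWeakness(handle.location());
gh->Destroy(handle.location());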
-const char* CodeEntry::kEmptyNamePrefix = "";
+const char* const CodeEntry::kEmptyNamePrefix = "";
void CodeEntry::CopyData(const CodeEntry& source) {
}
-const char* ProfileGenerator::kAnonymousFunctionName = "(anonymous function)";
-const char* ProfileGenerator::kProgramEntryName = "(program)";
-const char* ProfileGenerator::kGarbageCollectorEntryName =
- "(garbage collector)";
+const char* const ProfileGenerator::kAnonymousFunctionName =
+ "(anonymous function)";
+const char* const ProfileGenerator::kProgramEntryName =
+ "(program)";
+const char* const ProfileGenerator::kGarbageCollectorEntryName =
+ "(garbage collector)";
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
ExtractPropertyReferences(js_obj, entry);
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
- SetPropertyReference(obj, entry,
- Heap::Proto_symbol(), js_obj->GetPrototype());
+ SetPropertyReference(
+ obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
SetInternalReference(
if (!proto_or_map->IsMap()) {
SetPropertyReference(
obj, entry,
- Heap::prototype_symbol(), proto_or_map,
+ HEAP->prototype_symbol(), proto_or_map,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
SetPropertyReference(
obj, entry,
- Heap::prototype_symbol(), js_fun->prototype());
+ HEAP->prototype_symbol(), js_fun->prototype());
}
}
}
}
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
- Heap::IterateRoots(&extractor, VISIT_ALL);
+ HEAP->IterateRoots(&extractor, VISIT_ALL);
filler_ = NULL;
return progress_->ProgressReport(false);
}
void NativeObjectsExplorer::FillRetainedObjects() {
if (embedder_queried_) return;
+ Isolate* isolate = Isolate::Current();
// Record objects that are joined into ObjectGroups.
- Heap::CallGlobalGCPrologueCallback();
- List<ObjectGroup*>* groups = GlobalHandles::ObjectGroups();
+ isolate->heap()->CallGlobalGCPrologueCallback();
+ List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
for (int i = 0; i < groups->length(); ++i) {
ObjectGroup* group = groups->at(i);
if (group->info_ == NULL) continue;
}
group->info_ = NULL; // Acquire info object ownership.
}
- GlobalHandles::RemoveObjectGroups();
- Heap::CallGlobalGCEpilogueCallback();
+ isolate->global_handles()->RemoveObjectGroups();
+ isolate->heap()->CallGlobalGCEpilogueCallback();
// Record objects that are not in ObjectGroups, but have class ID.
GlobalHandlesExtractor extractor(this);
- GlobalHandles::IterateAllRootsWithClassIds(&extractor);
+ isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
embedder_queried_ = true;
}
void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
if (in_groups_.Contains(*p)) return;
+ Isolate* isolate = Isolate::Current();
v8::RetainedObjectInfo* info =
- HeapProfiler::ExecuteWrapperClassCallback(class_id, p);
+ isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
if (info == NULL) return;
GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
}
String* GetConstructorNameForHeapProfile(JSObject* object) {
- if (object->IsJSFunction()) return Heap::closure_symbol();
+ if (object->IsJSFunction()) return HEAP->closure_symbol();
return object->constructor_name();
}
uint32_t GetCallUid() const;
bool IsSameAs(CodeEntry* entry) const;
- static const char* kEmptyNamePrefix;
+ static const char* const kEmptyNamePrefix;
private:
Logger::LogEventsAndTags tag_;
return sample_rate_calc_.ticks_per_ms();
}
- static const char* kAnonymousFunctionName;
- static const char* kProgramEntryName;
- static const char* kGarbageCollectorEntryName;
+ static const char* const kAnonymousFunctionName;
+ static const char* const kProgramEntryName;
+ static const char* const kGarbageCollectorEntryName;
private:
INLINE(CodeEntry* EntryForVMState(StateTag tag));
MUST_USE_RESULT MaybeObject* KeyToSymbol() {
if (!StringShape(key_).IsSymbol()) {
Object* result;
- { MaybeObject* maybe_result = Heap::LookupSymbol(key_);
+ { MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
key_ = String::cast(result);
Object* GetCallbackObject() {
if (lookup_type_ == CONSTANT_TYPE) {
// For now we only have the __proto__ as constant type.
- return Heap::prototype_accessors();
+ return HEAP->prototype_accessors();
}
return GetValue();
}
Handle<Object> RegExpMacroAssemblerIrregexp::GetCode(Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = Factory::NewByteArray(length());
+ Handle<ByteArray> array = FACTORY->NewByteArray(length());
Copy(array->GetDataStartAddress());
return array;
}
Handle<String> subject,
int* offsets_vector,
int offsets_vector_length,
- int previous_index) {
+ int previous_index,
+ Isolate* isolate) {
ASSERT(subject->IsFlat());
ASSERT(previous_index >= 0);
start_offset,
input_start,
input_end,
- offsets_vector);
+ offsets_vector,
+ isolate);
return res;
}
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output) {
+ int* output,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
// Ensure that the minimum stack has been allocated.
- RegExpStack stack;
- Address stack_base = RegExpStack::stack_base();
+ RegExpStackScope stack_scope(isolate);
+ Address stack_base = stack_scope.stack()->stack_base();
int direct_call = 0;
int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
input_end,
output,
stack_base,
- direct_call);
+ direct_call,
+ isolate);
ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);
- if (result == EXCEPTION && !Top::has_pending_exception()) {
+ if (result == EXCEPTION && !isolate->has_pending_exception()) {
// We detected a stack overflow (on the backtrack stack) in RegExp code,
// but haven't created the exception yet.
- Top::StackOverflow();
+ isolate->StackOverflow();
}
return static_cast<Result>(result);
}
-static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
-
-
-byte NativeRegExpMacroAssembler::word_character_map[] = {
+const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address byte_offset1,
Address byte_offset2,
- size_t byte_length) {
+ size_t byte_length,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ isolate->regexp_macro_assembler_canonicalize();
// This function is not allowed to cause a garbage collection.
// A GC might move the calling generated code and invalidate the
// return address on the stack.
unibrow::uchar c2 = substring2[i];
if (c1 != c2) {
unibrow::uchar s1[1] = { c1 };
- canonicalize.get(c1, '\0', s1);
+ canonicalize->get(c1, '\0', s1);
if (s1[0] != c2) {
unibrow::uchar s2[1] = { c2 };
- canonicalize.get(c2, '\0', s2);
+ canonicalize->get(c2, '\0', s2);
if (s1[0] != s2[0]) {
return 0;
}
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
- Address* stack_base) {
- size_t size = RegExpStack::stack_capacity();
- Address old_stack_base = RegExpStack::stack_base();
+ Address* stack_base,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ RegExpStack* regexp_stack = isolate->regexp_stack();
+ size_t size = regexp_stack->stack_capacity();
+ Address old_stack_base = regexp_stack->stack_base();
ASSERT(old_stack_base == *stack_base);
ASSERT(stack_pointer <= old_stack_base);
ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
+ Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
if (new_stack_base == NULL) {
return NULL;
}
Handle<String> subject,
int* offsets_vector,
int offsets_vector_length,
- int previous_index);
+ int previous_index,
+ Isolate* isolate);
// Compares two-byte strings case insensitively.
// Called from generated RegExp code.
static int CaseInsensitiveCompareUC16(Address byte_offset1,
Address byte_offset2,
- size_t byte_length);
+ size_t byte_length,
+ Isolate* isolate);
// Called from RegExp if the backtrack stack limit is hit.
// Tries to expand the stack. Returns the new stack-pointer if
// successful, and updates the stack_top address, or returns 0 if unable
// to grow the stack.
// This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top);
+ static Address GrowStack(Address stack_pointer, Address* stack_top,
+ Isolate* isolate);
static const byte* StringCharacterPosition(String* subject, int start_index);
// Byte map of ASCII characters with a 0xff if the character is a word
// character (digit, letter or underscore) and 0x00 otherwise.
// Used by generated RegExp code.
- static byte word_character_map[128];
+ static const byte word_character_map[128];
static Address word_character_map_address() {
- return &word_character_map[0];
+ return const_cast<Address>(&word_character_map[0]);
}
static Result Execute(Code* code,
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output);
+ int* output,
+ Isolate* isolate);
};
#endif // V8_INTERPRETED_REGEXP
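For reference, word_character_map encodes the \w test that generated code performs by indexing the table with the character code. An illustrative helper (not part of the patch) showing the lookup:

static bool IsRegExpWordCharacter(unsigned c) {
  // Non-ASCII characters are never word characters for this table.
  return c < 128 && NativeRegExpMacroAssembler::word_character_map[c] != 0;
}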
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
-#include "top.h"
#include "regexp-stack.h"
namespace v8 {
namespace internal {
-RegExpStack::RegExpStack() {
+RegExpStackScope::RegExpStackScope(Isolate* isolate)
+ : regexp_stack_(isolate->regexp_stack()) {
// Initialize, if not already initialized.
- RegExpStack::EnsureCapacity(0);
+ regexp_stack_->EnsureCapacity(0);
}
-RegExpStack::~RegExpStack() {
+RegExpStackScope::~RegExpStackScope() {
+ ASSERT(Isolate::Current() == regexp_stack_->isolate_);
// Reset the buffer if it has grown.
- RegExpStack::Reset();
+ regexp_stack_->Reset();
+}
+
+
+RegExpStack::RegExpStack()
+ : isolate_(NULL) {
+}
+
+
+RegExpStack::~RegExpStack() {
}
void RegExpStack::ThreadLocal::Free() {
- if (thread_local_.memory_size_ > 0) {
- DeleteArray(thread_local_.memory_);
- thread_local_ = ThreadLocal();
+ if (memory_size_ > 0) {
+ DeleteArray(memory_);
+ Clear();
}
}
}
-RegExpStack::ThreadLocal RegExpStack::thread_local_;
-
}} // namespace v8::internal
namespace v8 {
namespace internal {
+class RegExpStack;
+
// Maintains a per-v8thread stack area that can be used by irregexp
// implementation for its backtracking stack.
// Since there is only one stack area, the Irregexp implementation is not
// re-entrant. I.e., no regular expressions may be executed in the same thread
// during a preempted Irregexp execution.
+class RegExpStackScope {
+ public:
+ // Create and delete an instance to control the life-time of a growing
+ // stack. The constructor initializes the stack memory area if necessary.
+ explicit RegExpStackScope(Isolate* isolate);
+ ~RegExpStackScope(); // Releases the stack if it has grown.
+
+ RegExpStack* stack() const { return regexp_stack_; }
+
+ private:
+ RegExpStack* regexp_stack_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegExpStackScope);
+};
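A usage sketch mirroring NativeRegExpMacroAssembler::Execute() above: enter a scope to guarantee the backtrack stack exists, reach the stack through the scope, and let the destructor shrink a grown buffer again:

{
  RegExpStackScope stack_scope(isolate);
  Address stack_base = stack_scope.stack()->stack_base();
  // Generated regexp code runs against stack_base here.
}  // ~RegExpStackScope() resets the buffer if it grew during the match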
+
+
class RegExpStack {
public:
// Number of allocated locations on the stack below the limit.
// check.
static const int kStackLimitSlack = 32;
- // Create and delete an instance to control the life-time of a growing stack.
- RegExpStack(); // Initializes the stack memory area if necessary.
- ~RegExpStack(); // Releases the stack if it has grown.
-
// Gives the top of the memory used as stack.
- static Address stack_base() {
+ Address stack_base() {
ASSERT(thread_local_.memory_size_ != 0);
return thread_local_.memory_ + thread_local_.memory_size_;
}
// The total size of the memory allocated for the stack.
- static size_t stack_capacity() { return thread_local_.memory_size_; }
+ size_t stack_capacity() { return thread_local_.memory_size_; }
// If the stack pointer gets below the limit, we should react and
// either grow the stack or report an out-of-stack exception.
// There is only a limited number of locations below the stack limit,
// so users of the stack should check the stack limit during any
// sequence of pushes longer that this.
- static Address* limit_address() { return &(thread_local_.limit_); }
+ Address* limit_address() { return &(thread_local_.limit_); }
// Ensures that there is a memory area with at least the specified size.
// If passing zero, the default/minimum size buffer is allocated.
- static Address EnsureCapacity(size_t size);
+ Address EnsureCapacity(size_t size);
// Thread local archiving.
static int ArchiveSpacePerThread() {
- return static_cast<int>(sizeof(thread_local_));
+ return static_cast<int>(sizeof(ThreadLocal));
}
- static char* ArchiveStack(char* to);
- static char* RestoreStack(char* from);
- static void FreeThreadResources() { thread_local_.Free(); }
-
+ char* ArchiveStack(char* to);
+ char* RestoreStack(char* from);
+ void FreeThreadResources() { thread_local_.Free(); }
private:
+ RegExpStack();
+ ~RegExpStack();
+
// Artificial limit used when no memory has been allocated.
static const uintptr_t kMemoryTop = static_cast<uintptr_t>(-1);
// Structure holding the allocated memory, size and limit.
struct ThreadLocal {
- ThreadLocal()
- : memory_(NULL),
- memory_size_(0),
- limit_(reinterpret_cast<Address>(kMemoryTop)) {}
+ ThreadLocal() { Clear(); }
// If memory_size_ > 0 then memory_ must be non-NULL.
Address memory_;
size_t memory_size_;
Address limit_;
+ void Clear() {
+ memory_ = NULL;
+ memory_size_ = 0;
+ limit_ = reinterpret_cast<Address>(kMemoryTop);
+ }
void Free();
};
// Address of allocated memory.
- static Address memory_address() {
+ Address memory_address() {
return reinterpret_cast<Address>(&thread_local_.memory_);
}
// Address of size of allocated memory.
- static Address memory_size_address() {
+ Address memory_size_address() {
return reinterpret_cast<Address>(&thread_local_.memory_size_);
}
// Resets the buffer if it has grown beyond the default/minimum size.
// After this, the buffer is either the default size, or it is empty, so
// you have to call EnsureCapacity before using it again.
- static void Reset();
+ void Reset();
- static ThreadLocal thread_local_;
+ ThreadLocal thread_local_;
+ Isolate* isolate_;
friend class ExternalReference;
+ friend class Isolate;
+ friend class RegExpStackScope;
+
+ DISALLOW_COPY_AND_ASSIGN(RegExpStack);
};
}} // namespace v8::internal
Result::~Result() {
if (is_register()) {
- CodeGeneratorScope::Current()->allocator()->Unuse(reg());
+ CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
}
}
void Result::Unuse() {
if (is_register()) {
- CodeGeneratorScope::Current()->allocator()->Unuse(reg());
+ CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
}
invalidate();
}
void Result::CopyTo(Result* destination) const {
destination->value_ = value_;
if (is_register()) {
- CodeGeneratorScope::Current()->allocator()->Use(reg());
+ CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg());
}
}
Result::Result(Register reg, TypeInfo info) {
ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
- CodeGeneratorScope::Current()->allocator()->Use(reg);
+ CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg);
value_ = TypeField::encode(REGISTER)
| TypeInfoField::encode(info.ToInt())
| DataField::encode(reg.code_);
}
-Result::ZoneObjectList* Result::ConstantList() {
- static ZoneObjectList list(10);
- return &list;
-}
-
-
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
// Construct a Result whose value is a compile-time constant.
explicit Result(Handle<Object> value) {
+ ZoneObjectList* constant_list = Isolate::Current()->result_constant_list();
TypeInfo info = TypeInfo::TypeFromValue(value);
value_ = TypeField::encode(CONSTANT)
| TypeInfoField::encode(info.ToInt())
| IsUntaggedInt32Field::encode(false)
- | DataField::encode(ConstantList()->length());
- ConstantList()->Add(value);
+ | DataField::encode(constant_list->length());
+ constant_list->Add(value);
}
// The copy constructor and assignment operators could each create a new
inline ~Result();
- // Static indirection table for handles to constants. If a Result
- // represents a constant, the data contains an index into this table
- // of handles to the actual constants.
- typedef ZoneList<Handle<Object> > ZoneObjectList;
-
- static ZoneObjectList* ConstantList();
-
- // Clear the constants indirection table.
- static void ClearConstantList() {
- ConstantList()->Clear();
- }
-
inline void Unuse();
Type type() const { return TypeField::decode(value_); }
Handle<Object> handle() const {
ASSERT(type() == CONSTANT);
- return ConstantList()->at(DataField::decode(value_));
+ return Isolate::Current()->result_constant_list()->
+ at(DataField::decode(value_));
}
// Move this result to an arbitrary register. The register is not
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
- Variable* result = scope->NewTemporary(Factory::result_symbol());
+ Variable* result = scope->NewTemporary(
+ info->isolate()->factory()->result_symbol());
Processor processor(result);
processor.Process(body);
if (processor.HasStackOverflow()) return false;
#include "execution.h"
#include "global-handles.h"
#include "mark-compact.h"
+#include "platform.h"
#include "scopeinfo.h"
-#include "top.h"
namespace v8 {
namespace internal {
};
-enum SamplerState {
- IN_NON_JS_STATE = 0,
- IN_JS_STATE = 1
-};
-
-
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
-static const int kSamplerWindowSize = 16;
static const int kSamplerTicksBetweenThresholdAdjustment = 32;
static const int kSizeLimit = 1500;
-static int sampler_threshold = kSamplerThresholdInit;
-static int sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
-
-static int sampler_ticks_until_threshold_adjustment =
- kSamplerTicksBetweenThresholdAdjustment;
-
-// The ratio of ticks spent in JS code in percent.
-static Atomic32 js_ratio;
-
-static Object* sampler_window[kSamplerWindowSize] = { NULL, };
-static int sampler_window_position = 0;
-static int sampler_window_weight[kSamplerWindowSize] = { 0, };
-
-
-// Support for pending 'optimize soon' requests.
-static PendingListNode* optimize_soon_list = NULL;
-
PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
- function_ = GlobalHandles::Create(function);
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ function_ = global_handles->Create(function);
start_ = OS::Ticks();
- GlobalHandles::MakeWeak(function_.location(), this, &WeakCallback);
+ global_handles->MakeWeak(function_.location(), this, &WeakCallback);
}
void PendingListNode::Destroy() {
if (!IsValid()) return;
- GlobalHandles::Destroy(function_.location());
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ global_handles->Destroy(function_.location());
function_= Handle<Object>::null();
}
}
-static void Optimize(JSFunction* function, bool eager, int delay) {
+Atomic32 RuntimeProfiler::state_ = 0;
+// TODO(isolates): Create the semaphore lazily and clean it up when no
+// longer required.
+#ifdef ENABLE_LOGGING_AND_PROFILING
+Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+#endif
+
+
+RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
+ : isolate_(isolate),
+ sampler_threshold_(kSamplerThresholdInit),
+ sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
+ sampler_ticks_until_threshold_adjustment_(
+ kSamplerTicksBetweenThresholdAdjustment),
+ js_ratio_(0),
+ sampler_window_position_(0),
+ optimize_soon_list_(NULL),
+ state_window_position_(0) {
+ state_counts_[0] = kStateWindowSize;
+ state_counts_[1] = 0;
+ memset(state_window_, 0, sizeof(state_window_));
+ ClearSampleBuffer();
+}
+
+
+bool RuntimeProfiler::IsEnabled() {
+ return V8::UseCrankshaft() && FLAG_opt;
+}
+
+
+void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
ASSERT(IsOptimizable(function));
if (FLAG_trace_opt) {
PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
}
-static void AttemptOnStackReplacement(JSFunction* function) {
+void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
ASSERT(function->IsMarkedForLazyRecompilation());
- if (!FLAG_use_osr || Debug::has_break_points() || function->IsBuiltin()) {
+ if (!FLAG_use_osr ||
+ isolate_->debug()->has_break_points() ||
+ function->IsBuiltin()) {
return;
}
Object* check_code;
MaybeObject* maybe_check_code = check_stub.TryGetCode();
if (maybe_check_code->ToObject(&check_code)) {
- Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
+ Code* replacement_code =
+ isolate_->builtins()->builtin(Builtins::OnStackReplacement);
Code* unoptimized_code = shared->code();
Deoptimizer::PatchStackCheckCode(unoptimized_code,
Code::cast(check_code),
}
-static void ClearSampleBuffer() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- sampler_window[i] = NULL;
- sampler_window_weight[i] = 0;
- }
+void RuntimeProfiler::ClearSampleBuffer() {
+ memset(sampler_window_, 0, sizeof(sampler_window_));
+ memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
}
-static int LookupSample(JSFunction* function) {
+int RuntimeProfiler::LookupSample(JSFunction* function) {
int weight = 0;
for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* sample = sampler_window[i];
+ Object* sample = sampler_window_[i];
if (sample != NULL) {
if (function == sample) {
- weight += sampler_window_weight[i];
+ weight += sampler_window_weight_[i];
}
}
}
}
-static void AddSample(JSFunction* function, int weight) {
+void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
ASSERT(IsPowerOf2(kSamplerWindowSize));
- sampler_window[sampler_window_position] = function;
- sampler_window_weight[sampler_window_position] = weight;
- sampler_window_position = (sampler_window_position + 1) &
+ sampler_window_[sampler_window_position_] = function;
+ sampler_window_weight_[sampler_window_position_] = weight;
+ sampler_window_position_ = (sampler_window_position_ + 1) &
(kSamplerWindowSize - 1);
}
void RuntimeProfiler::OptimizeNow() {
- HandleScope scope;
- PendingListNode* current = optimize_soon_list;
+ HandleScope scope(isolate_);
+ PendingListNode* current = optimize_soon_list_;
while (current != NULL) {
PendingListNode* next = current->next();
if (current->IsValid()) {
delete current;
current = next;
}
- optimize_soon_list = NULL;
+ optimize_soon_list_ = NULL;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
// Adjust threshold each time we have processed
// a certain number of ticks.
- if (sampler_ticks_until_threshold_adjustment > 0) {
- sampler_ticks_until_threshold_adjustment--;
- if (sampler_ticks_until_threshold_adjustment <= 0) {
+ if (sampler_ticks_until_threshold_adjustment_ > 0) {
+ sampler_ticks_until_threshold_adjustment_--;
+ if (sampler_ticks_until_threshold_adjustment_ <= 0) {
// If the threshold is not already at the minimum
// modify and reset the ticks until next adjustment.
- if (sampler_threshold > kSamplerThresholdMin) {
- sampler_threshold -= kSamplerThresholdDelta;
- sampler_ticks_until_threshold_adjustment =
+ if (sampler_threshold_ > kSamplerThresholdMin) {
+ sampler_threshold_ -= kSamplerThresholdDelta;
+ sampler_ticks_until_threshold_adjustment_ =
kSamplerTicksBetweenThresholdAdjustment;
}
}
int function_size = function->shared()->SourceSize();
int threshold_size_factor = (function_size > kSizeLimit)
- ? sampler_threshold_size_factor
+ ? sampler_threshold_size_factor_
: 1;
- int threshold = sampler_threshold * threshold_size_factor;
- int current_js_ratio = NoBarrier_Load(&js_ratio);
+ int threshold = sampler_threshold_ * threshold_size_factor;
+ int current_js_ratio = NoBarrier_Load(&js_ratio_);
// Adjust threshold depending on the ratio of time spent
// in JS code.
if (LookupSample(function) >= threshold) {
Optimize(function, false, 0);
- CompilationCache::MarkForEagerOptimizing(Handle<JSFunction>(function));
+ isolate_->compilation_cache()->MarkForEagerOptimizing(
+ Handle<JSFunction>(function));
}
}
void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
if (!IsOptimizable(function)) return;
PendingListNode* node = new PendingListNode(function);
- node->set_next(optimize_soon_list);
- optimize_soon_list = node;
+ node->set_next(optimize_soon_list_);
+ optimize_soon_list_ = node;
}
#ifdef ENABLE_LOGGING_AND_PROFILING
-static void UpdateStateRatio(SamplerState current_state) {
- static const int kStateWindowSize = 128;
- static SamplerState state_window[kStateWindowSize];
- static int state_window_position = 0;
- static int state_counts[2] = { kStateWindowSize, 0 };
-
- SamplerState old_state = state_window[state_window_position];
- state_counts[old_state]--;
- state_window[state_window_position] = current_state;
- state_counts[current_state]++;
+void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
+ SamplerState old_state = state_window_[state_window_position_];
+ state_counts_[old_state]--;
+ state_window_[state_window_position_] = current_state;
+ state_counts_[current_state]++;
ASSERT(IsPowerOf2(kStateWindowSize));
- state_window_position = (state_window_position + 1) &
+ state_window_position_ = (state_window_position_ + 1) &
(kStateWindowSize - 1);
- NoBarrier_Store(&js_ratio, state_counts[IN_JS_STATE] * 100 /
+ NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
kStateWindowSize);
}
#endif
void RuntimeProfiler::NotifyTick() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// Record state sample.
- SamplerState state = Top::IsInJSState()
+ SamplerState state = IsSomeIsolateInJS()
? IN_JS_STATE
: IN_NON_JS_STATE;
UpdateStateRatio(state);
- StackGuard::RequestRuntimeProfilerTick();
+ isolate_->stack_guard()->RequestRuntimeProfilerTick();
#endif
}
ClearSampleBuffer();
// If the ticker hasn't already started, make sure to do so to get
// the ticks for the runtime profiler.
- if (IsEnabled()) Logger::EnsureTickerStarted();
+ if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}
void RuntimeProfiler::Reset() {
- sampler_threshold = kSamplerThresholdInit;
- sampler_ticks_until_threshold_adjustment =
+ sampler_threshold_ = kSamplerThresholdInit;
+ sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
+ sampler_ticks_until_threshold_adjustment_ =
kSamplerTicksBetweenThresholdAdjustment;
- sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
}
// Update the pointers in the sampler window after a GC.
void RuntimeProfiler::UpdateSamplesAfterScavenge() {
for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window[i];
- if (function != NULL && Heap::InNewSpace(function)) {
+ Object* function = sampler_window_[i];
+ if (function != NULL && isolate_->heap()->InNewSpace(function)) {
MapWord map_word = HeapObject::cast(function)->map_word();
if (map_word.IsForwardingAddress()) {
- sampler_window[i] = map_word.ToForwardingAddress();
+ sampler_window_[i] = map_word.ToForwardingAddress();
} else {
- sampler_window[i] = NULL;
+ sampler_window_[i] = NULL;
}
}
}
}
+void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // The profiler thread must still be waiting.
+ ASSERT(NoBarrier_Load(&state_) >= 0);
+ // In IsolateEnteredJS we have already incremented the counter and
+ // undid the decrement done by the profiler thread. Increment again
+ // to get the right count of active isolates.
+ NoBarrier_AtomicIncrement(&state_, 1);
+ semaphore_->Signal();
+ isolate->ResetEagerOptimizingData();
+#endif
+}
+
+
+bool RuntimeProfiler::IsSomeIsolateInJS() {
+ return NoBarrier_Load(&state_) > 0;
+}
+
+
+bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
+ ASSERT(old_state >= -1);
+ if (old_state != 0) return false;
+ semaphore_->Wait();
+#endif
+ return true;
+}
+
+
+void RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ semaphore_->Signal();
+#endif
+}
+
+
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window[i];
+ Object* function = sampler_window_[i];
if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
- sampler_window[i] = NULL;
+ sampler_window_[i] = NULL;
}
}
}
void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
for (int i = 0; i < kSamplerWindowSize; i++) {
- visitor->VisitPointer(&sampler_window[i]);
+ visitor->VisitPointer(&sampler_window_[i]);
}
}
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
#ifdef ENABLE_LOGGING_AND_PROFILING
static const int kNonJSTicksThreshold = 100;
- // We suspend the runtime profiler thread when not running
- // JavaScript. If the CPU profiler is active we must not do this
- // because it samples both JavaScript and C++ code.
- if (RuntimeProfiler::IsEnabled() &&
- !CpuProfiler::is_profiling() &&
- !(FLAG_prof && FLAG_prof_auto)) {
- if (Top::IsInJSState()) {
- non_js_ticks_ = 0;
+ if (RuntimeProfiler::IsSomeIsolateInJS()) {
+ non_js_ticks_ = 0;
+ } else {
+ if (non_js_ticks_ < kNonJSTicksThreshold) {
+ ++non_js_ticks_;
} else {
- if (non_js_ticks_ < kNonJSTicksThreshold) {
- ++non_js_ticks_;
- } else {
- if (Top::WaitForJSState()) return true;
- }
+ return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
}
}
#endif
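A sketch of the consuming side: the profiler (sampler) thread calls SuspendIfNecessary() once per tick, so it parks on the semaphore once every isolate has been idle for kNonJSTicksThreshold ticks. The loop shell and helpers below are illustrative, not from this patch:

RuntimeProfilerRateLimiter rate_limiter;
while (sampling_enabled) {            // hypothetical shutdown flag
  if (rate_limiter.SuspendIfNecessary()) {
    // We were parked and have just been woken; profiling may have been
    // disabled in the meantime, so re-check before doing any work.
    continue;
  }
  SampleAllActiveSamplers();          // hypothetical per-tick work
  OS::Sleep(rate_in_ms);              // hypothetical tick interval
}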
#ifndef V8_RUNTIME_PROFILER_H_
#define V8_RUNTIME_PROFILER_H_
-#include "v8.h"
#include "allocation.h"
+#include "atomicops.h"
namespace v8 {
namespace internal {
-class RuntimeProfiler : public AllStatic {
+class Isolate;
+class JSFunction;
+class Object;
+class PendingListNode;
+class Semaphore;
+
+
+enum SamplerState {
+ IN_NON_JS_STATE = 0,
+ IN_JS_STATE = 1
+};
+
+
+class RuntimeProfiler {
public:
- static bool IsEnabled() { return V8::UseCrankshaft() && FLAG_opt; }
+ explicit RuntimeProfiler(Isolate* isolate);
+
+ static bool IsEnabled();
+
+ void OptimizeNow();
+ void OptimizeSoon(JSFunction* function);
+
+ void NotifyTick();
+
+ void Setup();
+ void Reset();
+ void TearDown();
+
+ Object** SamplerWindowAddress();
+ int SamplerWindowSize();
+
+ // Rate limiting support.
+
+ // VM thread interface.
+ //
+ // Called by isolates when their states change.
+ static inline void IsolateEnteredJS(Isolate* isolate);
+ static inline void IsolateExitedJS(Isolate* isolate);
+
+ // Profiler thread interface.
+ //
+ // IsSomeIsolateInJS():
+ // The profiler thread can query whether some isolate is currently
+ // running JavaScript code.
+ //
+ // WaitForSomeIsolateToEnterJS():
+ // When no isolates are running JavaScript code for some time the
+ // profiler thread suspends itself by calling the wait function. The
+ // wait function returns true after it has waited, or false
+ // immediately. While the function was waiting the profiler may have
+ // been disabled, so the caller *must check* whether it is still
+ // allowed to continue.
+ static bool IsSomeIsolateInJS();
+ static bool WaitForSomeIsolateToEnterJS();
+
+ // When shutting down we join the profiler thread. Doing so while
+ // it's waiting on a semaphore will cause a deadlock, so we have to
+ // wake it up first.
+ static void WakeUpRuntimeProfilerThreadBeforeShutdown();
+
+ void UpdateSamplesAfterScavenge();
+ void RemoveDeadSamples();
+ void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
- static void OptimizeNow();
- static void OptimizeSoon(JSFunction* function);
+ private:
+ static const int kSamplerWindowSize = 16;
+ static const int kStateWindowSize = 128;
+
+ static void HandleWakeUp(Isolate* isolate);
+
+ void Optimize(JSFunction* function, bool eager, int delay);
+
+ void AttemptOnStackReplacement(JSFunction* function);
+
+ void ClearSampleBuffer();
+
+ void ClearSampleBufferNewSpaceEntries();
+
+ int LookupSample(JSFunction* function);
+
+ void AddSample(JSFunction* function, int weight);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ void UpdateStateRatio(SamplerState current_state);
+#endif
- static void NotifyTick();
+ Isolate* isolate_;
- static void Setup();
- static void Reset();
- static void TearDown();
+ int sampler_threshold_;
+ int sampler_threshold_size_factor_;
+ int sampler_ticks_until_threshold_adjustment_;
- static int SamplerWindowSize();
- static void UpdateSamplesAfterScavenge();
- static void RemoveDeadSamples();
- static void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
+ // The ratio of ticks spent in JS code in percent.
+ Atomic32 js_ratio_;
+
+ Object* sampler_window_[kSamplerWindowSize];
+ int sampler_window_position_;
+ int sampler_window_weight_[kSamplerWindowSize];
+
+ // Support for pending 'optimize soon' requests.
+ PendingListNode* optimize_soon_list_;
+
+ SamplerState state_window_[kStateWindowSize];
+ int state_window_position_;
+ int state_counts_[2];
+
+ // Possible state values:
+ // -1 => the profiler thread is waiting on the semaphore
+ // 0 or positive => the number of isolates running JavaScript code.
+ static Atomic32 state_;
+ static Semaphore* semaphore_;
};
public:
RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
- // Suspends the current thread when not executing JavaScript to
- // minimize CPU usage. Returns whether this thread was suspended
- // (and so might have to check whether profiling is still active.)
+ // Suspends the current thread (which must be the profiler thread)
+ // when not executing JavaScript to minimize CPU usage. Returns
+ // whether the thread was suspended (in which case the caller must
+ // check whether profiling is still active).
//
// Does nothing when runtime profiling is not enabled.
bool SuspendIfNecessary();
DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
};
+
+// Implementation of RuntimeProfiler inline functions.
+
+void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
+ Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
+ if (new_state == 0) {
+ // Just incremented from -1 to 0. -1 can only be set by the
+ // profiler thread before it suspends itself and starts waiting on
+ // the semaphore.
+ HandleWakeUp(isolate);
+ }
+ ASSERT(new_state >= 0);
+}
+
+
+void RuntimeProfiler::IsolateExitedJS(Isolate* isolate) {
+ Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, -1);
+ ASSERT(new_state >= 0);
+ USE(new_state);
+}
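A worked example of the counter protocol; the bracketing call sites are illustrative, the patch wires them into the isolate's JS entry/exit paths:

RuntimeProfiler::IsolateEnteredJS(isolate);
// Common case: state_ goes 0 -> 1 (or n -> n+1 with other isolates in JS).
// If the profiler thread had parked itself (state_ == -1), the increment
// yields 0 and HandleWakeUp() signals the semaphore and increments to 1.
// JavaScript executes on this isolate between the two calls.
RuntimeProfiler::IsolateExitedJS(isolate);   // state_ drops back: 1 -> 0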
+
} } // namespace v8::internal
#endif // V8_RUNTIME_PROFILER_H_
#define RUNTIME_ASSERT(value) \
- if (!(value)) return Top::ThrowIllegalOperation();
+ if (!(value)) return isolate->ThrowIllegalOperation();
// Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the
RUNTIME_ASSERT(obj->IsNumber()); \
type name = NumberTo##Type(obj);
-// Non-reentrant string buffer for efficient general use in this file.
-static StaticResource<StringInputBuffer> runtime_string_input_buffer;
+MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
+ JSObject* boilerplate) {
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) return isolate->StackOverflow();
-MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
- StackLimitCheck check;
- if (check.HasOverflowed()) return Top::StackOverflow();
-
+ Heap* heap = isolate->heap();
Object* result;
- { MaybeObject* maybe_result = Heap::CopyJSObject(boilerplate);
+ { MaybeObject* maybe_result = heap->CopyJSObject(boilerplate);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSObject* copy = JSObject::cast(result);
Object* value = properties->get(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
properties->set(i, result);
Object* value = copy->InObjectPropertyAt(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
copy->InObjectPropertyAtPut(i, result);
}
} else {
{ MaybeObject* maybe_result =
- Heap::AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
+ heap->AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* names = FixedArray::cast(result);
copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
{ MaybeObject* maybe_result =
switch (copy->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == Heap::fixed_cow_array_map()) {
- Counters::cow_arrays_created_runtime.Increment();
+ if (elements->map() == heap->fixed_cow_array_map()) {
+ isolate->counters()->cow_arrays_created_runtime()->Increment();
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
ASSERT(!elements->get(i)->IsJSObject());
Object* value = elements->get(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
+ js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
elements->set(i, result);
Object* value = element_dictionary->ValueAt(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
+ js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
element_dictionary->ValueAtPut(i, result);
}
-static MaybeObject* Runtime_CloneLiteralBoilerplate(Arguments args) {
+static MaybeObject* Runtime_CloneLiteralBoilerplate(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return DeepCopyBoilerplate(boilerplate);
+ return DeepCopyBoilerplate(isolate, boilerplate);
}
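The two macros are defined elsewhere in the patch. Judging from how every converted function immediately uses args and isolate, a plausible expansion is the following; this is an assumption, not confirmed by any hunk shown here:

#define RUNTIME_CALLING_CONVENTION Arguments args, Isolate* isolate
#define RUNTIME_GET_ISOLATE ASSERT(isolate == Isolate::Current())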
-static MaybeObject* Runtime_CloneShallowLiteralBoilerplate(Arguments args) {
+static MaybeObject* Runtime_CloneShallowLiteralBoilerplate(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return Heap::CopyJSObject(boilerplate);
+ return isolate->heap()->CopyJSObject(boilerplate);
}
Handle<Context> context,
Handle<FixedArray> constant_properties,
bool* is_result_from_cache) {
+ Isolate* isolate = context->GetIsolate();
int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
if (FLAG_canonicalize_object_literal_maps) {
if ((number_of_symbol_keys == number_of_properties) &&
(number_of_symbol_keys < kMaxKeys)) {
// Create the fixed array with the key.
- Handle<FixedArray> keys = Factory::NewFixedArray(number_of_symbol_keys);
+ Handle<FixedArray> keys =
+ isolate->factory()->NewFixedArray(number_of_symbol_keys);
if (number_of_symbol_keys > 0) {
int index = 0;
for (int p = 0; p < properties_length; p += 2) {
ASSERT(index == number_of_symbol_keys);
}
*is_result_from_cache = true;
- return Factory::ObjectLiteralMapFromCache(context, keys);
+ return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
}
}
*is_result_from_cache = false;
- return Factory::CopyMap(
+ return isolate->factory()->CopyMap(
Handle<Map>(context->object_function()->initial_map()),
number_of_properties);
}
static Handle<Object> CreateLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> constant_properties);
static Handle<Object> CreateObjectLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> constant_properties,
bool should_have_fast_elements) {
constant_properties,
&is_result_from_cache);
- Handle<JSObject> boilerplate = Factory::NewJSObjectFromMap(map);
+ Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) NormalizeElements(boilerplate);
length / 2,
!is_result_from_cache);
for (int index = 0; index < length; index +=2) {
- Handle<Object> key(constant_properties->get(index+0));
- Handle<Object> value(constant_properties->get(index+1));
+ Handle<Object> key(constant_properties->get(index+0), isolate);
+ Handle<Object> value(constant_properties->get(index+1), isolate);
if (value->IsFixedArray()) {
// The value contains the constant_properties of a
// simple object literal.
Handle<FixedArray> array = Handle<FixedArray>::cast(value);
- value = CreateLiteralBoilerplate(literals, array);
+ value = CreateLiteralBoilerplate(isolate, literals, array);
if (value.is_null()) return value;
}
Handle<Object> result;
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
- Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+ Handle<String> name =
+ isolate->factory()->NewStringFromAscii(CStrVector(str));
result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
value, NONE);
}
static Handle<Object> CreateArrayLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> elements) {
// Create the JSArray.
Handle<JSFunction> constructor(
JSFunction::GlobalContextFromLiterals(*literals)->array_function());
- Handle<Object> object = Factory::NewJSObject(constructor);
+ Handle<Object> object = isolate->factory()->NewJSObject(constructor);
- const bool is_cow = (elements->map() == Heap::fixed_cow_array_map());
+ const bool is_cow =
+ (elements->map() == isolate->heap()->fixed_cow_array_map());
Handle<FixedArray> copied_elements =
- is_cow ? elements : Factory::CopyFixedArray(elements);
+ is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
if (is_cow) {
// simple object literal.
Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
Handle<Object> result =
- CreateLiteralBoilerplate(literals, fa);
+ CreateLiteralBoilerplate(isolate, literals, fa);
if (result.is_null()) return result;
content->set(i, *result);
}
static Handle<Object> CreateLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> array) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
switch (CompileTimeValue::GetType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
- return CreateObjectLiteralBoilerplate(literals, elements, true);
+ return CreateObjectLiteralBoilerplate(isolate, literals, elements, true);
case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
- return CreateObjectLiteralBoilerplate(literals, elements, false);
+ return CreateObjectLiteralBoilerplate(isolate, literals, elements, false);
case CompileTimeValue::ARRAY_LITERAL:
- return CreateArrayLiteralBoilerplate(literals, elements);
+ return CreateArrayLiteralBoilerplate(isolate, literals, elements);
default:
UNREACHABLE();
return Handle<Object>::null();
}
-static MaybeObject* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
+static MaybeObject* Runtime_CreateArrayLiteralBoilerplate(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
// Takes a FixedArray of elements containing the literal elements of
// the array literal and produces JSArray with those elements.
// Additionally takes the literals array of the surrounding function
// which contains the context from which to get the Array function
// to use for creating the array literal.
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
- Handle<Object> object = CreateArrayLiteralBoilerplate(literals, elements);
+ Handle<Object> object =
+ CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (object.is_null()) return Failure::Exception();
  // Update the function's literals and return the boilerplate.
}
-static MaybeObject* Runtime_CreateObjectLiteral(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_CreateObjectLiteral(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
bool should_have_fast_elements = fast_elements == 1;
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(literals,
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateObjectLiteralBoilerplate(isolate,
+ literals,
constant_properties,
should_have_fast_elements);
if (boilerplate.is_null()) return Failure::Exception();
  // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
- return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+ return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateObjectLiteralShallow(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_CreateObjectLiteralShallow(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
bool should_have_fast_elements = fast_elements == 1;
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(literals,
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateObjectLiteralBoilerplate(isolate,
+ literals,
constant_properties,
should_have_fast_elements);
if (boilerplate.is_null()) return Failure::Exception();
  // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
- return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+ return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateArrayLiteral(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_CreateArrayLiteral(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
  // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
- return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+ return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateArrayLiteralShallow(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_CreateArrayLiteralShallow(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
  // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
if (JSObject::cast(*boilerplate)->elements()->map() ==
- Heap::fixed_cow_array_map()) {
- Counters::cow_arrays_created_runtime.Increment();
+ isolate->heap()->fixed_cow_array_map()) {
+ COUNTERS->cow_arrays_created_runtime()->Increment();
}
- return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+ return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateCatchExtensionObject(Arguments args) {
+static MaybeObject* Runtime_CreateCatchExtensionObject(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[0]);
Object* value = args[1];
// Create a catch context extension object.
JSFunction* constructor =
- Top::context()->global_context()->context_extension_function();
+ isolate->context()->global_context()->
+ context_extension_function();
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateJSObject(constructor);
+ { MaybeObject* maybe_object = isolate->heap()->AllocateJSObject(constructor);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
// Assign the exception value to the catch variable and make sure
}
-static MaybeObject* Runtime_ClassOf(Arguments args) {
+static MaybeObject* Runtime_ClassOf(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Object* obj = args[0];
- if (!obj->IsJSObject()) return Heap::null_value();
+ if (!obj->IsJSObject()) return isolate->heap()->null_value();
return JSObject::cast(obj)->class_name();
}
-static MaybeObject* Runtime_IsInPrototypeChain(Arguments args) {
+static MaybeObject* Runtime_IsInPrototypeChain(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
Object* V = args[1];
while (true) {
Object* prototype = V->GetPrototype();
- if (prototype->IsNull()) return Heap::false_value();
- if (O == prototype) return Heap::true_value();
+ if (prototype->IsNull()) return isolate->heap()->false_value();
+ if (O == prototype) return isolate->heap()->true_value();
V = prototype;
}
}
// Inserts an object as the hidden prototype of another object.
-static MaybeObject* Runtime_SetHiddenPrototype(Arguments args) {
+static MaybeObject* Runtime_SetHiddenPrototype(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSObject, jsobject, args[0]);
new_map->set_prototype(proto);
jsobject->set_map(new_map);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_IsConstructCall(Arguments args) {
+static MaybeObject* Runtime_IsConstructCall(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 0);
JavaScriptFrameIterator it;
- return Heap::ToBoolean(it.frame()->IsConstructor());
+ return isolate->heap()->ToBoolean(it.frame()->IsConstructor());
}
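
Heap::ToBoolean, used above and throughout the rest of the patch, is a one-branch mapping onto the heap's canonical oddball values; it is presumably equivalent to the following sketch, now resolved against the isolate's heap rather than the former static one:

    // Assumed shape of Heap::ToBoolean, consistent with its uses here:
    Object* Heap::ToBoolean(bool condition) {
      return condition ? true_value() : false_value();
    }
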
JSObject* holder = result->holder();
JSObject* current = obj;
+ Isolate* isolate = obj->GetIsolate();
while (true) {
if (current->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(current, name, access_type)) {
+ !isolate->MayNamedAccess(current, name, access_type)) {
      // Access check callback denied the access, but some properties
      // can have special permissions which override the callback's
      // decision (see v8::AccessControl).
break;
}
- Top::ReportFailedAccessCheck(current, access_type);
+ isolate->ReportFailedAccessCheck(current, access_type);
return false;
}
uint32_t index,
v8::AccessType access_type) {
if (obj->IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(obj, index, access_type)) {
+ !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) {
return false;
}
// [false, value, Writable, Enumerable, Configurable]
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
-static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
+static MaybeObject* Runtime_GetOwnProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
- Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
- Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
+ Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
LookupResult result;
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(String, name, 1);
if (name->AsArrayIndex(&index)) {
switch (obj->HasLocalElement(index)) {
case JSObject::UNDEFINED_ELEMENT:
- return Heap::undefined_value();
+ return heap->undefined_value();
case JSObject::STRING_CHARACTER_ELEMENT: {
// Special handling of string objects according to ECMAScript 5
Handle<String> str(String::cast(js_value->value()));
Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
elms->set(VALUE_INDEX, *substr);
- elms->set(WRITABLE_INDEX, Heap::false_value());
- elms->set(ENUMERABLE_INDEX, Heap::false_value());
- elms->set(CONFIGURABLE_INDEX, Heap::false_value());
+ elms->set(WRITABLE_INDEX, heap->false_value());
+ elms->set(ENUMERABLE_INDEX, heap->false_value());
+ elms->set(CONFIGURABLE_INDEX, heap->false_value());
return *desc;
}
case JSObject::INTERCEPTED_ELEMENT:
case JSObject::FAST_ELEMENT: {
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
Handle<Object> value = GetElement(obj, index);
- RETURN_IF_EMPTY_HANDLE(value);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, Heap::true_value());
- elms->set(ENUMERABLE_INDEX, Heap::true_value());
- elms->set(CONFIGURABLE_INDEX, Heap::true_value());
+ elms->set(WRITABLE_INDEX, heap->true_value());
+ elms->set(ENUMERABLE_INDEX, heap->true_value());
+ elms->set(CONFIGURABLE_INDEX, heap->true_value());
return *desc;
}
Handle<JSObject> holder = obj;
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return Heap::undefined_value();
+ if (proto->IsNull()) return heap->undefined_value();
ASSERT(proto->IsJSGlobalObject());
holder = Handle<JSObject>(JSObject::cast(proto));
}
// This is an accessor property with getter and/or setter.
FixedArray* callbacks =
FixedArray::cast(dictionary->ValueAt(entry));
- elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->true_value());
if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
elms->set(GETTER_INDEX, callbacks->get(0));
}
}
case NORMAL: {
// This is a data property.
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
Handle<Object> value = GetElement(obj, index);
ASSERT(!value.is_null());
elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+ elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
break;
}
default:
UNREACHABLE();
break;
}
- elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete()));
return *desc;
}
}
GetOwnPropertyImplementation(*obj, *name, &result);
if (!result.IsProperty()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
- return Heap::false_value();
+ return heap->false_value();
}
- elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
bool is_js_accessor = (result.type() == CALLBACKS) &&
(result.GetCallbackObject()->IsFixedArray());
if (is_js_accessor) {
// __defineGetter__/__defineSetter__ callback.
- elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->true_value());
FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
elms->set(SETTER_INDEX, structure->get(1));
}
} else {
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
+ elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly()));
PropertyAttributes attrs;
Object* value;
}
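
For readability of Runtime_GetOwnProperty: the *_INDEX constants index into the DESCRIPTOR_SIZE-element FixedArray backing the returned descriptor. A plausible reconstruction of the enum, inferred from the indices used above (the authoritative definition sits earlier in runtime.cc, outside this excerpt):

    // Assumed indices into the property-descriptor array:
    enum PropertyDescriptorIndices {
      IS_ACCESSOR_INDEX,   // true => accessor property, false => data property
      VALUE_INDEX,         // data properties: the value
      GETTER_INDEX,        // accessor properties: the getter
      SETTER_INDEX,        // accessor properties: the setter
      WRITABLE_INDEX,      // data properties: [[Writable]]
      ENUMERABLE_INDEX,    // [[Enumerable]]
      CONFIGURABLE_INDEX,  // [[Configurable]]
      DESCRIPTOR_SIZE      // element count passed to NewFixedArray
    };
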
-static MaybeObject* Runtime_PreventExtensions(Arguments args) {
+static MaybeObject* Runtime_PreventExtensions(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
return obj->PreventExtensions();
}
-static MaybeObject* Runtime_IsExtensible(Arguments args) {
+static MaybeObject* Runtime_IsExtensible(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return Heap::false_value();
+ if (proto->IsNull()) return isolate->heap()->false_value();
ASSERT(proto->IsJSGlobalObject());
obj = JSObject::cast(proto);
}
- return obj->map()->is_extensible() ? Heap::true_value()
- : Heap::false_value();
+ return obj->map()->is_extensible() ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
}
-static MaybeObject* Runtime_RegExpCompile(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_RegExpCompile(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_CHECKED(String, pattern, 1);
}
-static MaybeObject* Runtime_CreateApiFunction(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_CreateApiFunction(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
- return *Factory::CreateApiFunction(data);
+ return *isolate->factory()->CreateApiFunction(data);
}
-static MaybeObject* Runtime_IsTemplate(Arguments args) {
+static MaybeObject* Runtime_IsTemplate(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
Object* arg = args[0];
bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
- return Heap::ToBoolean(result);
+ return isolate->heap()->ToBoolean(result);
}
-static MaybeObject* Runtime_GetTemplateField(Arguments args) {
+static MaybeObject* Runtime_GetTemplateField(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
CONVERT_CHECKED(HeapObject, templ, args[0]);
CONVERT_CHECKED(Smi, field, args[1]);
}
-static MaybeObject* Runtime_DisableAccessChecks(Arguments args) {
+static MaybeObject* Runtime_DisableAccessChecks(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(HeapObject, object, args[0]);
Map* old_map = object->map();
Map::cast(new_map)->set_is_access_check_needed(false);
object->set_map(Map::cast(new_map));
}
- return needs_access_checks ? Heap::true_value() : Heap::false_value();
+ return needs_access_checks ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
}
-static MaybeObject* Runtime_EnableAccessChecks(Arguments args) {
+static MaybeObject* Runtime_EnableAccessChecks(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(HeapObject, object, args[0]);
Map* old_map = object->map();
Map::cast(new_map)->set_is_access_check_needed(true);
object->set_map(Map::cast(new_map));
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static Failure* ThrowRedeclarationError(const char* type, Handle<String> name) {
- HandleScope scope;
- Handle<Object> type_handle = Factory::NewStringFromAscii(CStrVector(type));
+static Failure* ThrowRedeclarationError(Isolate* isolate,
+ const char* type,
+ Handle<String> name) {
+ HandleScope scope(isolate);
+ Handle<Object> type_handle =
+ isolate->factory()->NewStringFromAscii(CStrVector(type));
Handle<Object> args[2] = { type_handle, name };
Handle<Object> error =
- Factory::NewTypeError("redeclaration", HandleVector(args, 2));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError("redeclaration", HandleVector(args, 2));
+ return isolate->Throw(*error);
}
-static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
+static MaybeObject* Runtime_DeclareGlobals(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 4);
- HandleScope scope;
- Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global());
+ HandleScope scope(isolate);
+ Handle<GlobalObject> global = Handle<GlobalObject>(
+ isolate->context()->global());
Handle<Context> context = args.at<Context>(0);
CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
for (int i = 0; i < length; i += 2) {
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<String> name(String::cast(pairs->get(i)));
- Handle<Object> value(pairs->get(i + 1));
+ Handle<Object> value(pairs->get(i + 1), isolate);
// We have to declare a global const property. To capture we only
// assign to it when evaluating the assignment for "const x =
// Check if the existing property conflicts with regards to const.
if (is_local && (is_read_only || is_const_property)) {
const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
    }
// The property already exists without conflicting: Go to
// the next declaration.
// For const properties, we treat a callback with this name
// even in the prototype as a conflicting declaration.
if (is_const_property && (lookup.type() == CALLBACKS)) {
- return ThrowRedeclarationError("const", name);
+ return ThrowRedeclarationError(isolate, "const", name);
}
// Otherwise, we check for locally conflicting declarations.
if (is_local && (is_read_only || is_const_property)) {
const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
}
// The property already exists without conflicting: Go to
// the next declaration.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(value);
Handle<JSFunction> function =
- Factory::NewFunctionFromSharedFunctionInfo(shared, context, TENURED);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ TENURED);
value = function;
}
(lookup.type() != INTERCEPTOR) &&
(lookup.IsReadOnly() || is_const_property)) {
const char* type = (lookup.IsReadOnly()) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
}
// Safari does not allow the invocation of callback setters for
attributes = static_cast<PropertyAttributes>(
attributes | (lookup.GetAttributes() & DONT_DELETE));
}
- RETURN_IF_EMPTY_HANDLE(SetLocalPropertyIgnoreAttributes(global,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetLocalPropertyIgnoreAttributes(global,
name,
value,
attributes));
} else {
- RETURN_IF_EMPTY_HANDLE(SetProperty(global,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetProperty(global,
name,
value,
attributes,
}
}
- ASSERT(!Top::has_pending_exception());
- return Heap::undefined_value();
+ ASSERT(!isolate->has_pending_exception());
+ return isolate->heap()->undefined_value();
}
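
Runtime_DeclareGlobals consumes the compiler-emitted pairs array two slots at a time. Schematically (a sketch reusing the names above; the claim that the value slot holds a SharedFunctionInfo for function declarations and the hole for const declarations is an inference from the is_const_property handling in this function):

    // Assumed layout of the pairs array:
    //   pairs->get(2 * i)     -> declared name (String)
    //   pairs->get(2 * i + 1) -> value (SharedFunctionInfo, the hole, or undefined)
    for (int i = 0; i < pairs->length(); i += 2) {
      Handle<String> name(String::cast(pairs->get(i)));
      Handle<Object> value(pairs->get(i + 1), isolate);
      // ... declare (name, value) on the isolate's global object ...
    }
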
-static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DeclareContextSlot(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(Context, context, 0);
PropertyAttributes mode =
static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
- Handle<Object> initial_value(args[3]);
+ Handle<Object> initial_value(args[3], isolate);
// Declarations are always done in the function context.
context = Handle<Context>(context->fcontext());
// Functions are not read-only.
ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
}
// Initialize it if necessary.
// Slow case: The property is not in the FixedArray part of the context.
Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetProperty(context_ext, name, initial_value,
mode, kNonStrictMode));
}
} else {
      // The function context's extension context does not exist - allocate
      // it.
- context_ext = Factory::NewJSObject(Top::context_extension_function());
+ context_ext = isolate->factory()->NewJSObject(
+ isolate->context_extension_function());
// And store it in the extension slot.
context->set_extension(*context_ext);
}
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
ASSERT(!context_ext->HasLocalProperty(*name));
- Handle<Object> value(Heap::undefined_value());
+ Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
// there is a callback with that name in a prototype. It is
LookupResult lookup;
context_ext->Lookup(*name, &lookup);
if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
- return ThrowRedeclarationError("const", name);
+ return ThrowRedeclarationError(isolate, "const", name);
}
}
- RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetProperty(context_ext, name, value, mode,
kNonStrictMode));
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
+static MaybeObject* Runtime_InitializeVarGlobal(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation nha;
// args[0] == name
// args[1] == strict_mode
bool assign = args.length() == 3;
CONVERT_ARG_CHECKED(String, name, 0);
- GlobalObject* global = Top::context()->global();
+ GlobalObject* global = isolate->context()->global();
RUNTIME_ASSERT(args[1]->IsSmi());
StrictModeFlag strict_mode =
static_cast<StrictModeFlag>(Smi::cast(args[1])->value());
if (lookup.IsReadOnly()) {
      // If we found a read-only property on one of the hidden prototypes,
      // just shadow it.
- if (real_holder != Top::context()->global()) break;
- return ThrowRedeclarationError("const", name);
+ if (real_holder != isolate->context()->global()) break;
+ return ThrowRedeclarationError(isolate, "const", name);
}
// Determine if this is a redeclaration of an intercepted read-only
bool found = true;
PropertyType type = lookup.type();
if (type == INTERCEPTOR) {
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
Handle<JSObject> holder(real_holder);
PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
real_holder = *holder;
    // overwrite it with a variable declaration we must throw a
    // re-declaration error. However, if we found a read-only property
    // on one of the hidden prototypes, just shadow it.
- if (real_holder != Top::context()->global()) break;
- return ThrowRedeclarationError("const", name);
+ if (real_holder != isolate->context()->global()) break;
+ return ThrowRedeclarationError(isolate, "const", name);
}
}
if (found && !assign) {
// The global property is there and we're not assigning any value
// to it. Just return.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Assign the value (or undefined) to the property.
- Object* value = (assign) ? args[2] : Heap::undefined_value();
+ Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
return real_holder->SetProperty(
&lookup, *name, value, attributes, strict_mode);
}
real_holder = JSObject::cast(proto);
}
- global = Top::context()->global();
+ global = isolate->context()->global();
if (assign) {
return global->SetProperty(*name, args[2], attributes, strict_mode);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_InitializeConstGlobal(Arguments args) {
+static MaybeObject* Runtime_InitializeConstGlobal(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
// is the second.
Handle<Object> value = args.at<Object>(1);
  // Get the current global object from the isolate's current context.
- GlobalObject* global = Top::context()->global();
+ GlobalObject* global = isolate->context()->global();
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable. Since it's a const, it must be READ_ONLY too.
// need to ask it for the property attributes.
if (!lookup.IsReadOnly()) {
if (lookup.type() != INTERCEPTOR) {
- return ThrowRedeclarationError("var", name);
+ return ThrowRedeclarationError(isolate, "var", name);
}
PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
// Throw re-declaration error if the intercepted property is present
// but not read-only.
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- return ThrowRedeclarationError("var", name);
+ return ThrowRedeclarationError(isolate, "var", name);
}
// Restore global object from context (in case of GC) and continue
// with setting the value because the property is either absent or
  // read-only. We also have to redo the lookup.
- HandleScope handle_scope;
- Handle<GlobalObject> global(Top::context()->global());
+ HandleScope handle_scope(isolate);
+ Handle<GlobalObject> global(isolate->context()->global());
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
// uninitialized, e.g. the hole. Nirk...
// Passing non-strict mode because the property is writable.
- RETURN_IF_EMPTY_HANDLE(SetProperty(global,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetProperty(global,
name,
value,
attributes,
}
-static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_InitializeConstContextSlot(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- Handle<Object> value(args[0]);
+ Handle<Object> value(args[0], isolate);
ASSERT(!value->IsTheHole());
CONVERT_ARG_CHECKED(Context, context, 1);
Handle<String> name(String::cast(args[2]));
ASSERT((attributes & READ_ONLY) == 0);
Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetElement(arguments, index, value, kNonStrictMode));
}
return *value;
  // The property could not be found, so we introduce it in the
  // global context.
if (attributes == ABSENT) {
- Handle<JSObject> global = Handle<JSObject>(Top::context()->global());
+ Handle<JSObject> global = Handle<JSObject>(
+ isolate->context()->global());
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetProperty(global, name, value, NONE, kNonStrictMode));
return *value;
}
if ((attributes & READ_ONLY) == 0) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetProperty(context_ext, name, value, attributes, kNonStrictMode));
}
}
static MaybeObject* Runtime_OptimizeObjectForAddingMultipleProperties(
- Arguments args) {
- HandleScope scope;
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_SMI_CHECKED(properties, args[1]);
}
-static MaybeObject* Runtime_RegExpExec(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_RegExpExec(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_CHECKED(String, subject, 1);
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
- Counters::regexp_entry_runtime.Increment();
+ isolate->counters()->regexp_entry_runtime()->Increment();
Handle<Object> result = RegExpImpl::Exec(regexp,
subject,
index,
}
-static MaybeObject* Runtime_RegExpConstructResult(Arguments args) {
+static MaybeObject* Runtime_RegExpConstructResult(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 3);
CONVERT_SMI_CHECKED(elements_count, args[0]);
if (elements_count > JSArray::kMaxFastElementsLength) {
- return Top::ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
Object* new_object;
{ MaybeObject* maybe_new_object =
- Heap::AllocateFixedArrayWithHoles(elements_count);
+ isolate->heap()->AllocateFixedArrayWithHoles(elements_count);
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
FixedArray* elements = FixedArray::cast(new_object);
- { MaybeObject* maybe_new_object = Heap::AllocateRaw(JSRegExpResult::kSize,
- NEW_SPACE,
- OLD_POINTER_SPACE);
+ { MaybeObject* maybe_new_object = isolate->heap()->AllocateRaw(
+ JSRegExpResult::kSize, NEW_SPACE, OLD_POINTER_SPACE);
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
{
AssertNoAllocation no_gc;
- HandleScope scope;
+ HandleScope scope(isolate);
reinterpret_cast<HeapObject*>(new_object)->
- set_map(Top::global_context()->regexp_result_map());
+ set_map(isolate->global_context()->regexp_result_map());
}
JSArray* array = JSArray::cast(new_object);
- array->set_properties(Heap::empty_fixed_array());
+ array->set_properties(isolate->heap()->empty_fixed_array());
array->set_elements(elements);
array->set_length(Smi::FromInt(elements_count));
// Write in-object properties after the length of the array.
}
-static MaybeObject* Runtime_RegExpInitializeObject(Arguments args) {
+static MaybeObject* Runtime_RegExpInitializeObject(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
AssertNoAllocation no_alloc;
ASSERT(args.length() == 5);
CONVERT_CHECKED(JSRegExp, regexp, args[0]);
CONVERT_CHECKED(String, source, args[1]);
Object* global = args[2];
- if (!global->IsTrue()) global = Heap::false_value();
+ if (!global->IsTrue()) global = isolate->heap()->false_value();
Object* ignoreCase = args[3];
- if (!ignoreCase->IsTrue()) ignoreCase = Heap::false_value();
+ if (!ignoreCase->IsTrue()) ignoreCase = isolate->heap()->false_value();
Object* multiline = args[4];
- if (!multiline->IsTrue()) multiline = Heap::false_value();
+ if (!multiline->IsTrue()) multiline = isolate->heap()->false_value();
Map* map = regexp->map();
Object* constructor = map->constructor();
return regexp;
}
- // Map has changed, so use generic, but slower, method. Since these
- // properties were all added as DONT_DELETE they must be present and
- // normal so no failures can be expected.
+ // Map has changed, so use generic, but slower, method.
PropertyAttributes final =
static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ Heap* heap = isolate->heap();
MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(Heap::source_symbol(),
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_symbol(),
source,
final);
ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(Heap::global_symbol(),
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_symbol(),
global,
final);
ASSERT(!result->IsFailure());
result =
- regexp->SetLocalPropertyIgnoreAttributes(Heap::ignore_case_symbol(),
+ regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_symbol(),
ignoreCase,
final);
ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(Heap::multiline_symbol(),
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_symbol(),
multiline,
final);
ASSERT(!result->IsFailure());
result =
- regexp->SetLocalPropertyIgnoreAttributes(Heap::last_index_symbol(),
+ regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_symbol(),
Smi::FromInt(0),
writable);
ASSERT(!result->IsFailure());
}
-static MaybeObject* Runtime_FinishArrayPrototypeSetup(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_FinishArrayPrototypeSetup(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArray, prototype, 0);
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
- prototype->set_elements(Heap::empty_fixed_array());
+ prototype->set_elements(isolate->heap()->empty_fixed_array());
return Smi::FromInt(0);
}
-static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder,
+static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
+ Handle<JSObject> holder,
const char* name,
Builtins::Name builtin_name) {
- Handle<String> key = Factory::LookupAsciiSymbol(name);
- Handle<Code> code(Builtins::builtin(builtin_name));
- Handle<JSFunction> optimized = Factory::NewFunction(key,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- false);
+ Handle<String> key = isolate->factory()->LookupAsciiSymbol(name);
+ Handle<Code> code(isolate->builtins()->builtin(builtin_name));
+ Handle<JSFunction> optimized =
+ isolate->factory()->NewFunction(key,
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize,
+ code,
+ false);
optimized->shared()->DontAdaptArguments();
SetProperty(holder, key, optimized, NONE, kStrictMode);
return optimized;
}
-static MaybeObject* Runtime_SpecialArrayFunctions(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_SpecialArrayFunctions(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, holder, 0);
- InstallBuiltin(holder, "pop", Builtins::ArrayPop);
- InstallBuiltin(holder, "push", Builtins::ArrayPush);
- InstallBuiltin(holder, "shift", Builtins::ArrayShift);
- InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift);
- InstallBuiltin(holder, "slice", Builtins::ArraySlice);
- InstallBuiltin(holder, "splice", Builtins::ArraySplice);
- InstallBuiltin(holder, "concat", Builtins::ArrayConcat);
+ InstallBuiltin(isolate, holder, "pop", Builtins::ArrayPop);
+ InstallBuiltin(isolate, holder, "push", Builtins::ArrayPush);
+ InstallBuiltin(isolate, holder, "shift", Builtins::ArrayShift);
+ InstallBuiltin(isolate, holder, "unshift", Builtins::ArrayUnshift);
+ InstallBuiltin(isolate, holder, "slice", Builtins::ArraySlice);
+ InstallBuiltin(isolate, holder, "splice", Builtins::ArraySplice);
+ InstallBuiltin(isolate, holder, "concat", Builtins::ArrayConcat);
return *holder;
}
-static MaybeObject* Runtime_GetGlobalReceiver(Arguments args) {
+static MaybeObject* Runtime_GetGlobalReceiver(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
  // Returns the real global receiver, not the builtins object.
- Context* global_context = Top::context()->global()->global_context();
+ Context* global_context =
+ isolate->context()->global()->global_context();
return global_context->global()->global_receiver();
}
-static MaybeObject* Runtime_MaterializeRegExpLiteral(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_MaterializeRegExpLiteral(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
int index = Smi::cast(args[1])->value();
RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags,
&has_pending_exception);
if (has_pending_exception) {
- ASSERT(Top::has_pending_exception());
+ ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
literals->set(index, *regexp);
}
-static MaybeObject* Runtime_FunctionGetName(Arguments args) {
+static MaybeObject* Runtime_FunctionGetName(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
}
-static MaybeObject* Runtime_FunctionSetName(Arguments args) {
+static MaybeObject* Runtime_FunctionSetName(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, f, args[0]);
CONVERT_CHECKED(String, name, args[1]);
f->shared()->set_name(name);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionRemovePrototype(Arguments args) {
+static MaybeObject* Runtime_FunctionRemovePrototype(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- Object* obj;
- { MaybeObject* maybe_obj = f->RemovePrototype();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ Object* obj = f->RemovePrototype();
+ if (obj->IsFailure()) return obj;
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionGetScript(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_FunctionGetScript(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, fun, args[0]);
- Handle<Object> script = Handle<Object>(fun->shared()->script());
- if (!script->IsScript()) return Heap::undefined_value();
+ Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
+ if (!script->IsScript()) return isolate->heap()->undefined_value();
return *GetScriptWrapper(Handle<Script>::cast(script));
}
-static MaybeObject* Runtime_FunctionGetSourceCode(Arguments args) {
+static MaybeObject* Runtime_FunctionGetSourceCode(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
}
-static MaybeObject* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
+static MaybeObject* Runtime_FunctionGetScriptSourcePosition(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
}
-static MaybeObject* Runtime_FunctionGetPositionForOffset(Arguments args) {
+static MaybeObject* Runtime_FunctionGetPositionForOffset(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
CONVERT_CHECKED(Code, code, args[0]);
}
-
-static MaybeObject* Runtime_FunctionSetInstanceClassName(Arguments args) {
+static MaybeObject* Runtime_FunctionSetInstanceClassName(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, fun, args[0]);
CONVERT_CHECKED(String, name, args[1]);
fun->SetInstanceClassName(name);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionSetLength(Arguments args) {
+static MaybeObject* Runtime_FunctionSetLength(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
}
-static MaybeObject* Runtime_FunctionSetPrototype(Arguments args) {
+static MaybeObject* Runtime_FunctionSetPrototype(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
}
-static MaybeObject* Runtime_FunctionIsAPIFunction(Arguments args) {
+static MaybeObject* Runtime_FunctionIsAPIFunction(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->shared()->IsApiFunction() ? Heap::true_value()
- : Heap::false_value();
+ return f->shared()->IsApiFunction() ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
}
-static MaybeObject* Runtime_FunctionIsBuiltin(Arguments args) {
+
+static MaybeObject* Runtime_FunctionIsBuiltin(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->IsBuiltin() ? Heap::true_value() : Heap::false_value();
+ return f->IsBuiltin() ? isolate->heap()->true_value() :
+ isolate->heap()->false_value();
}
-static MaybeObject* Runtime_SetCode(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_SetCode(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, target, 0);
// SetCode is only used for built-in constructors like String,
// Array, and Object, and some web code
// doesn't like seeing source code for constructors.
- target->shared()->set_script(Heap::undefined_value());
+ target->shared()->set_script(isolate->heap()->undefined_value());
target->shared()->code()->set_optimizable(false);
// Clear the optimization hints related to the compiled code as these are no
// longer valid when the code is overwritten.
// cross context contamination.
int number_of_literals = fun->NumberOfLiterals();
Handle<FixedArray> literals =
- Factory::NewFixedArray(number_of_literals, TENURED);
+ isolate->factory()->NewFixedArray(number_of_literals, TENURED);
if (number_of_literals > 0) {
// Insert the object, regexp and array functions in the literals
// array prefix. These are the functions that will be used when
// It's okay to skip the write barrier here because the literals
// are guaranteed to be in old space.
target->set_literals(*literals, SKIP_WRITE_BARRIER);
- target->set_next_function_link(Heap::undefined_value());
+ target->set_next_function_link(isolate->heap()->undefined_value());
}
target->set_context(*context);
}
-static MaybeObject* Runtime_SetExpectedNumberOfProperties(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_SetExpectedNumberOfProperties(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_SMI_CHECKED(num, args[1]);
RUNTIME_ASSERT(num >= 0);
SetExpectedNofProperties(function, num);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-MUST_USE_RESULT static MaybeObject* CharFromCode(Object* char_code) {
+MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
+ Object* char_code) {
uint32_t code;
if (char_code->ToArrayIndex(&code)) {
if (code <= 0xffff) {
- return Heap::LookupSingleCharacterStringFromCode(code);
+ return isolate->heap()->LookupSingleCharacterStringFromCode(code);
}
}
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
}
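
CharFromCode implements the single-character contract used by String.fromCharCode and friends: any value that is not an array index within the basic multilingual plane yields the empty string. A standalone model of that contract in plain C++ (hypothetical names, no V8 types):

    #include <cstdint>
    #include <string>

    // Standalone model of CharFromCode: out-of-range codes map to "".
    std::u16string CharFromCodeModel(uint32_t code) {
      if (code > 0xffff) return std::u16string();  // not a BMP code unit
      return std::u16string(1, static_cast<char16_t>(code));
    }
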
-static MaybeObject* Runtime_StringCharCodeAt(Arguments args) {
+static MaybeObject* Runtime_StringCharCodeAt(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
uint32_t i = 0;
if (index->IsSmi()) {
int value = Smi::cast(index)->value();
- if (value < 0) return Heap::nan_value();
+ if (value < 0) return isolate->heap()->nan_value();
i = value;
} else {
ASSERT(index->IsHeapNumber());
subject = String::cast(flat);
if (i >= static_cast<uint32_t>(subject->length())) {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
}
return Smi::FromInt(subject->Get(i));
}
-static MaybeObject* Runtime_CharFromCode(Arguments args) {
+static MaybeObject* Runtime_CharFromCode(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- return CharFromCode(args[0]);
+ return CharFromCode(isolate, args[0]);
}
class FixedArrayBuilder {
public:
- explicit FixedArrayBuilder(int initial_capacity)
- : array_(Factory::NewFixedArrayWithHoles(initial_capacity)),
+ explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
+ : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
length_(0) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
new_length *= 2;
} while (new_length < required_length);
Handle<FixedArray> extended_array =
- Factory::NewFixedArrayWithHoles(new_length);
+ array_->GetIsolate()->factory()->NewFixedArrayWithHoles(new_length);
array_->CopyTo(0, *extended_array, 0, length_);
array_ = extended_array;
}
}
Handle<JSArray> ToJSArray() {
- Handle<JSArray> result_array = Factory::NewJSArrayWithElements(array_);
+ Handle<JSArray> result_array = FACTORY->NewJSArrayWithElements(array_);
result_array->set_length(Smi::FromInt(length_));
return result_array;
}
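
FixedArrayBuilder grows its backing FixedArray by repeated doubling, so a run of Add calls costs amortized O(n); the non-zero initial size demanded by the constructor is what makes the doubling loop terminate. A minimal standalone model of the policy (plain C++, assuming only what the excerpt shows):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Minimal model of EnsureCapacity: double until the requirement fits,
    // then move the contents to the new backing store (the V8 class does
    // this with NewFixedArrayWithHoles + CopyTo).
    template <typename T>
    void EnsureCapacityModel(std::vector<T>* backing, size_t length,
                             size_t elements_to_add) {
      assert(!backing->empty());  // mirrors the builder's non-zero precondition
      size_t required = length + elements_to_add;
      size_t new_length = backing->size();
      if (required <= new_length) return;  // already big enough
      do {
        new_length *= 2;
      } while (new_length < required);
      backing->resize(new_length);
    }
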
class ReplacementStringBuilder {
public:
- ReplacementStringBuilder(Handle<String> subject, int estimated_part_count)
- : array_builder_(estimated_part_count),
+ ReplacementStringBuilder(Heap* heap,
+ Handle<String> subject,
+ int estimated_part_count)
+ : heap_(heap),
+ array_builder_(heap->isolate(), estimated_part_count),
subject_(subject),
character_count_(0),
is_ascii_(subject->IsAsciiRepresentation()) {
Handle<String> ToString() {
if (array_builder_.length() == 0) {
- return Factory::empty_string();
+ return heap_->isolate()->factory()->empty_string();
}
Handle<String> joined_string;
private:
Handle<String> NewRawAsciiString(int size) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(size), String);
+ CALL_HEAP_FUNCTION(heap_->isolate(),
+ heap_->AllocateRawAsciiString(size), String);
}
Handle<String> NewRawTwoByteString(int size) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(size), String);
+ CALL_HEAP_FUNCTION(heap_->isolate(),
+ heap_->AllocateRawTwoByteString(size), String);
}
array_builder_.Add(element);
}
+ Heap* heap_;
FixedArrayBuilder array_builder_;
Handle<String> subject_;
int character_count_;
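
The two NewRaw*String helpers above route CALL_HEAP_FUNCTION through an explicit isolate. The macro is defined elsewhere; as an assumption about its shape, it wraps a raw MaybeObject* allocation so that a successful result comes back handlified and a failure produces an empty handle - the real macro also retries the allocation after triggering a GC:

    // Simplified sketch of CALL_HEAP_FUNCTION (retry-after-GC elided):
    #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)          \
      do {                                                            \
        Object* __object__;                                           \
        MaybeObject* __maybe__ = (FUNCTION_CALL);                     \
        if (!__maybe__->ToObject(&__object__)) return Handle<TYPE>(); \
        return Handle<TYPE>(TYPE::cast(__object__));                  \
      } while (false)
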
capture_count,
subject_length);
}
+ Isolate* isolate = replacement->GetIsolate();
// Find substrings of replacement string and create them as String objects.
int substring_index = 0;
for (int i = 0, n = parts_.length(); i < n; i++) {
if (tag <= 0) { // A replacement string slice.
int from = -tag;
int to = parts_[i].data;
- replacement_substrings_.Add(Factory::NewSubString(replacement, from, to));
+ replacement_substrings_.Add(
+ isolate->factory()->NewSubString(replacement, from, to));
parts_[i].tag = REPLACEMENT_SUBSTRING;
parts_[i].data = substring_index;
substring_index++;
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
+ Isolate* isolate,
String* subject,
JSRegExp* regexp,
String* replacement,
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- HandleScope handles;
+ HandleScope handles(isolate);
int length = subject->length();
Handle<String> subject_handle(subject);
// conservatively.
int expected_parts =
(compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
- ReplacementStringBuilder builder(subject_handle, expected_parts);
+ ReplacementStringBuilder builder(isolate->heap(),
+ subject_handle,
+ expected_parts);
// Index of end of last match.
int prev = 0;
// so its internal buffer can safely allocate a new handle if it grows.
builder.EnsureCapacity(parts_added_per_loop);
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
int start, end;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
template <typename ResultSeqString>
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
+ Isolate* isolate,
String* subject,
JSRegExp* regexp,
JSArray* last_match_info) {
ASSERT(subject->IsFlat());
- HandleScope handles;
+ HandleScope handles(isolate);
Handle<String> subject_handle(subject);
Handle<JSRegExp> regexp_handle(regexp);
ASSERT(last_match_info_handle->HasFastElements());
- HandleScope loop_scope;
int start, end;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
int length = subject->length();
int new_length = length - (end - start);
if (new_length == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
}
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
- answer =
- Handle<ResultSeqString>::cast(Factory::NewRawAsciiString(new_length));
+ answer = Handle<ResultSeqString>::cast(
+ isolate->factory()->NewRawAsciiString(new_length));
} else {
- answer =
- Handle<ResultSeqString>::cast(Factory::NewRawTwoByteString(new_length));
+ answer = Handle<ResultSeqString>::cast(
+ isolate->factory()->NewRawTwoByteString(new_length));
}
// If the regexp isn't global, only match once.
if (match->IsNull()) break;
ASSERT(last_match_info_handle->HasFastElements());
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
FixedArray* match_info_array =
}
if (position == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
}
// Shorten string and fill
if (delta == 0) return *answer;
Address end_of_string = answer->address() + string_size;
- Heap::CreateFillerObjectAt(end_of_string, delta);
+ isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
return *answer;
}
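
The tail of StringReplaceRegExpWithEmptyString is a heap idiom worth noting: the answer string is allocated at the subject's full length up front, and when the removals leave it shorter the object is trimmed in place - the orphaned tail bytes are overwritten with a filler object so that linear heap iteration remains valid. Condensed from the lines above (string_size and delta are computed just before this excerpt resumes):

    // In-place trim instead of reallocation:
    if (delta != 0) {
      Address end_of_string = answer->address() + string_size;
      isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
    }
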
-static MaybeObject* Runtime_StringReplaceRegExpWithString(Arguments args) {
+static MaybeObject* Runtime_StringReplaceRegExpWithString(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 4);
CONVERT_CHECKED(String, subject, args[0]);
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
- subject, regexp, last_match_info);
+ isolate, subject, regexp, last_match_info);
} else {
return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
- subject, regexp, last_match_info);
+ isolate, subject, regexp, last_match_info);
}
}
- return StringReplaceRegExpWithString(subject,
+ return StringReplaceRegExpWithString(isolate,
+ subject,
regexp,
replacement,
last_match_info);
// Perform string match of pattern on subject, starting at start index.
// Caller must ensure that 0 <= start_index <= sub->length(),
// and should check that pat->length() + start_index <= sub->length().
-int Runtime::StringMatch(Handle<String> sub,
+int Runtime::StringMatch(Isolate* isolate,
+ Handle<String> sub,
Handle<String> pat,
int start_index) {
ASSERT(0 <= start_index);
if (seq_pat->IsAsciiRepresentation()) {
Vector<const char> pat_vector = seq_pat->ToAsciiVector();
if (seq_sub->IsAsciiRepresentation()) {
- return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToAsciiVector(),
+ pat_vector,
+ start_index);
}
- return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToUC16Vector(),
+ pat_vector,
+ start_index);
}
Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
if (seq_sub->IsAsciiRepresentation()) {
- return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToAsciiVector(),
+ pat_vector,
+ start_index);
}
- return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToUC16Vector(),
+ pat_vector,
+ start_index);
}
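
Runtime::StringMatch ends in a four-way dispatch: the templated SearchString is instantiated for every combination of ASCII and two-byte subject and pattern, and the flattened representation selects the instantiation at run time. A standalone model of the same shape (plain C++, with std::search standing in for V8's StringSearch):

    #include <algorithm>
    #include <string>

    // One template, four instantiations, chosen by each string's width.
    template <typename SubjectChar, typename PatternChar>
    int SearchStringModel(const std::basic_string<SubjectChar>& subject,
                          const std::basic_string<PatternChar>& pattern,
                          int start_index) {
      auto it = std::search(subject.begin() + start_index, subject.end(),
                            pattern.begin(), pattern.end());
      return it == subject.end() ? -1 : static_cast<int>(it - subject.begin());
    }
    // e.g. SearchStringModel<char16_t, char>(two_byte_subject, ascii_pattern, 0);
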
-static MaybeObject* Runtime_StringIndexOf(Arguments args) {
- HandleScope scope; // create a new handle scope
+static MaybeObject* Runtime_StringIndexOf(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate); // create a new handle scope
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, sub, 0);
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
- int position = Runtime::StringMatch(sub, pat, start_index);
+ int position =
+ Runtime::StringMatch(isolate, sub, pat, start_index);
return Smi::FromInt(position);
}
return -1;
}
-static MaybeObject* Runtime_StringLastIndexOf(Arguments args) {
- HandleScope scope; // create a new handle scope
+static MaybeObject* Runtime_StringLastIndexOf(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate); // create a new handle scope
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, sub, 0);
}
-static MaybeObject* Runtime_StringLocaleCompare(Arguments args) {
+static MaybeObject* Runtime_StringLocaleCompare(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
str1->TryFlatten();
str2->TryFlatten();
- static StringInputBuffer buf1;
- static StringInputBuffer buf2;
+ StringInputBuffer& buf1 =
+ *isolate->runtime_state()->string_locale_compare_buf1();
+ StringInputBuffer& buf2 =
+ *isolate->runtime_state()->string_locale_compare_buf2();
buf1.Reset(str1);
buf2.Reset(str2);
}
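
A detail easy to miss in Runtime_StringLocaleCompare: the two StringInputBuffers used to be function-local statics, i.e. process-wide mutable state, which becomes a data race once several isolates run in parallel threads; the patch therefore moves them into per-isolate runtime state. A minimal sketch of the resulting shape (hypothetical RuntimeState; the real accessors are defined elsewhere in the patch):

    // Hypothetical per-isolate home for the former static buffers:
    struct RuntimeState {
      StringInputBuffer* string_locale_compare_buf1() { return &buf1_; }
      StringInputBuffer* string_locale_compare_buf2() { return &buf2_; }
     private:
      StringInputBuffer buf1_;
      StringInputBuffer buf2_;
    };
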
-static MaybeObject* Runtime_SubString(Arguments args) {
+static MaybeObject* Runtime_SubString(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
- Counters::sub_string_runtime.Increment();
+ isolate->counters()->sub_string_runtime()->Increment();
return value->SubString(start, end);
}
-static MaybeObject* Runtime_StringMatch(Arguments args) {
+static MaybeObject* Runtime_StringMatch(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT_EQ(3, args.length());
CONVERT_ARG_CHECKED(String, subject, 0);
return Failure::Exception();
}
if (match->IsNull()) {
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
int length = subject->length();
}
} while (!match->IsNull());
int matches = offsets.length() / 2;
- Handle<FixedArray> elements = Factory::NewFixedArray(matches);
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
for (int i = 0; i < matches ; i++) {
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- Handle<String> match = Factory::NewSubString(subject, from, to);
+ Handle<String> match = isolate->factory()->NewSubString(subject, from, to);
elements->set(i, *match);
}
- Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(matches));
return *result;
}
template <typename SubjectChar, typename PatternChar>
-static bool SearchStringMultiple(Vector<const SubjectChar> subject,
+static bool SearchStringMultiple(Isolate* isolate,
+ Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
String* pattern_string,
FixedArrayBuilder* builder,
int subject_length = subject.length();
int pattern_length = pattern.length();
int max_search_start = subject_length - pattern_length;
- StringSearch<PatternChar, SubjectChar> search(pattern);
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
while (pos <= max_search_start) {
if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
*match_pos = pos;
}
-static bool SearchStringMultiple(Handle<String> subject,
+static bool SearchStringMultiple(Isolate* isolate,
+ Handle<String> subject,
Handle<String> pattern,
Handle<JSArray> last_match_info,
FixedArrayBuilder* builder) {
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
*pattern,
builder,
&match_pos)) break;
} else {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
*pattern,
builder,
} else {
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (pattern->IsAsciiRepresentation()) {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
*pattern,
builder,
&match_pos)) break;
} else {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
*pattern,
builder,
static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
+ Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_array,
match_start);
}
match_end = register_vector[1];
- HandleScope loop_scope;
- builder->Add(*Factory::NewSubString(subject, match_start, match_end));
+ HandleScope loop_scope(isolate);
+ builder->Add(*isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end));
if (match_start != match_end) {
pos = match_end;
} else {
static RegExpImpl::IrregexpResult SearchRegExpMultiple(
+ Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_array,
{
    // Avoid accumulating new handles inside the loop.
- HandleScope temp_scope;
+ HandleScope temp_scope(isolate);
// Arguments array to replace function is match, captures, index and
// subject, i.e., 3 + capture count in total.
- Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count);
- Handle<String> match = Factory::NewSubString(subject,
- match_start,
- match_end);
+ Handle<FixedArray> elements =
+ isolate->factory()->NewFixedArray(3 + capture_count);
+ Handle<String> match = isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end);
elements->set(0, *match);
for (int i = 1; i <= capture_count; i++) {
int start = register_vector[i * 2];
if (start >= 0) {
int end = register_vector[i * 2 + 1];
ASSERT(start <= end);
- Handle<String> substring = Factory::NewSubString(subject,
- start,
- end);
+ Handle<String> substring = isolate->factory()->NewSubString(subject,
+ start,
+ end);
elements->set(i, *substring);
} else {
ASSERT(register_vector[i * 2 + 1] < 0);
- elements->set(i, Heap::undefined_value());
+ elements->set(i, isolate->heap()->undefined_value());
}
}
elements->set(capture_count + 1, Smi::FromInt(match_start));
elements->set(capture_count + 2, *subject);
- builder->Add(*Factory::NewJSArrayWithElements(elements));
+ builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
}
// Swap register vectors, so the last successful match is in
// prev_register_vector.
}
-static MaybeObject* Runtime_RegExpExecMultiple(Arguments args) {
+static MaybeObject* Runtime_RegExpExecMultiple(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 4);
- HandleScope handles;
+ HandleScope handles(isolate);
CONVERT_ARG_CHECKED(String, subject, 1);
if (!subject->IsFlat()) { FlattenString(subject); }
result_elements =
Handle<FixedArray>(FixedArray::cast(result_array->elements()));
} else {
- result_elements = Factory::NewFixedArrayWithHoles(16);
+ result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
}
FixedArrayBuilder builder(result_elements);
Handle<String> pattern(
String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
ASSERT(pattern->IsFlat());
- if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) {
+ if (SearchStringMultiple(isolate, subject, pattern,
+ last_match_info, &builder)) {
return *builder.ToJSArray(result_array);
}
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
RegExpImpl::IrregexpResult result;
if (regexp->CaptureCount() == 0) {
- result = SearchRegExpNoCaptureMultiple(subject,
+ result = SearchRegExpNoCaptureMultiple(isolate,
+ subject,
regexp,
last_match_info,
&builder);
} else {
- result = SearchRegExpMultiple(subject, regexp, last_match_info, &builder);
+ result = SearchRegExpMultiple(isolate,
+ subject,
+ regexp,
+ last_match_info,
+ &builder);
}
if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
- if (result == RegExpImpl::RE_FAILURE) return Heap::null_value();
+ if (result == RegExpImpl::RE_FAILURE) return isolate->heap()->null_value();
ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
return Failure::Exception();
}
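
Both multiple-match paths above share the classic global-regexp subtlety: after an empty match the scan position must advance by one, otherwise the loop would rediscover the same empty match forever. The advance step, isolated (this mirrors the pos = match_end / pos = match_end + 1 branch in SearchRegExpNoCaptureMultiple):

    // Position advance after each match in a global scan:
    static int NextScanPosition(int match_start, int match_end) {
      // A non-empty match resumes right after itself; an empty match must
      // skip one character to guarantee forward progress.
      return (match_start != match_end) ? match_end : match_end + 1;
    }
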
-static MaybeObject* Runtime_NumberToRadixString(Arguments args) {
+static MaybeObject* Runtime_NumberToRadixString(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
RUNTIME_ASSERT(radix <= 36);
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- return Heap::LookupSingleCharacterStringFromCode(kCharTable[value]);
+ return isolate->heap()->
+ LookupSingleCharacterStringFromCode(kCharTable[value]);
}
}
// Slow case.
CONVERT_DOUBLE_CHECKED(value, args[0]);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
CONVERT_DOUBLE_CHECKED(radix_number, args[1]);
int radix = FastD2I(radix_number);
RUNTIME_ASSERT(2 <= radix && radix <= 36);
char* str = DoubleToRadixCString(value, radix);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* result =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
return result;
}
-static MaybeObject* Runtime_NumberToFixed(Arguments args) {
+static MaybeObject* Runtime_NumberToFixed(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(value, args[0]);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
CONVERT_DOUBLE_CHECKED(f_number, args[1]);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* res =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
- return result;
+ return res;
}
-static MaybeObject* Runtime_NumberToExponential(Arguments args) {
+static MaybeObject* Runtime_NumberToExponential(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(value, args[0]);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
CONVERT_DOUBLE_CHECKED(f_number, args[1]);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* res =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
- return result;
+ return res;
}
-static MaybeObject* Runtime_NumberToPrecision(Arguments args) {
+static MaybeObject* Runtime_NumberToPrecision(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(value, args[0]);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
CONVERT_DOUBLE_CHECKED(f_number, args[1]);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* res =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
- return result;
+ return res;
}
}
-MaybeObject* Runtime::GetElementOrCharAt(Handle<Object> object,
+MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
uint32_t index) {
// Handle [] indexing on Strings
if (object->IsString()) {
}
-MaybeObject* Runtime::GetObjectProperty(Handle<Object> object,
+MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key) {
- HandleScope scope;
+ HandleScope scope(isolate);
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = { key, object };
Handle<Object> error =
- Factory::NewTypeError("non_object_property_load",
- HandleVector(args, 2));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError("non_object_property_load",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
}
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(object, index);
+ return GetElementOrCharAt(isolate, object, index);
}
// Convert the key to a string - possibly by calling back into JavaScript.
// Check if the name is trivially convertible to an index and get
// the element if so.
if (name->AsArrayIndex(&index)) {
- return GetElementOrCharAt(object, index);
+ return GetElementOrCharAt(isolate, object, index);
} else {
PropertyAttributes attr;
return object->GetProperty(*name, &attr);
}
-static MaybeObject* Runtime_GetProperty(Arguments args) {
+static MaybeObject* Runtime_GetProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
- return Runtime::GetObjectProperty(object, key);
+ return Runtime::GetObjectProperty(isolate, object, key);
}
// Runtime_KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
-static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
+static MaybeObject* Runtime_KeyedGetProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
if (receiver->HasFastProperties()) {
// Attempt to use lookup cache.
Map* receiver_map = receiver->map();
- int offset = KeyedLookupCache::Lookup(receiver_map, key);
+ KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+ int offset = keyed_lookup_cache->Lookup(receiver_map, key);
if (offset != -1) {
Object* value = receiver->FastPropertyAt(offset);
- return value->IsTheHole() ? Heap::undefined_value() : value;
+ return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
}
// Lookup cache miss. Perform lookup and update the cache if appropriate.
LookupResult result;
receiver->LocalLookup(key, &result);
if (result.IsProperty() && result.type() == FIELD) {
int offset = result.GetFieldIndex();
- KeyedLookupCache::Update(receiver_map, key, offset);
+ keyed_lookup_cache->Update(receiver_map, key, offset);
return receiver->FastPropertyAt(offset);
}
} else {
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
// Fast case for string indexing using [] with a smi index.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<String> str = args.at<String>(0);
int index = Smi::cast(args[1])->value();
if (index >= 0 && index < str->length()) {
}
// Fall back to GetObjectProperty.
- return Runtime::GetObjectProperty(args.at<Object>(0),
+ return Runtime::GetObjectProperty(isolate,
+ args.at<Object>(0),
args.at<Object>(1));
}
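
The fast case above now consults a per-isolate KeyedLookupCache rather than static class methods. A minimal sketch of the cache the call sites imply; the class name, table size, and hash are illustrative only:

// Sketch (assumption): a small direct-mapped cache from (map, name) pairs
// to fast-property field offsets, owned by the isolate.
class KeyedLookupCacheSketch {
 public:
  KeyedLookupCacheSketch() {
    for (int i = 0; i < kLength; i++) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = -1;
    }
  }
  // Returns the field offset for (map, name), or -1 on a miss.
  int Lookup(Map* map, String* name) {
    int index = Hash(map, name);
    if (keys_[index].map == map && keys_[index].name == name) {
      return field_offsets_[index];
    }
    return -1;
  }
  // Remembers the offset so the next lookup of (map, name) hits.
  void Update(Map* map, String* name, int field_offset) {
    int index = Hash(map, name);
    keys_[index].map = map;
    keys_[index].name = name;
    field_offsets_[index] = field_offset;
  }
 private:
  static const int kLength = 64;
  struct Key { Map* map; String* name; };
  static int Hash(Map* map, String* name) {
    // Illustrative hash only; the real cache mixes the map address with
    // the symbol's hash field.
    size_t h = (reinterpret_cast<size_t>(map) >> 3) ^
               (reinterpret_cast<size_t>(name) >> 3);
    return static_cast<int>(h & (kLength - 1));
  }
  Key keys_[kLength];
  int field_offsets_[kLength];
};
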
// Steps 9c & 12 - replace an existing data property with an accessor property.
// Step 12 - update an existing accessor property with an accessor or generic
// descriptor.
-static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
+static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 5);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_CHECKED(String, name, args[1]);
CONVERT_CHECKED(Smi, flag_setter, args[2]);
// Steps 9b & 12 - replace an existing accessor property with a data property.
// Step 12 - update an existing data property with a data or generic
// descriptor.
-static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
+static MaybeObject* Runtime_DefineOrRedefineDataProperty(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 4);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSObject, js_object, 0);
CONVERT_ARG_CHECKED(String, name, 1);
Handle<Object> obj_value = args.at<Object>(2);
if (result.IsProperty() &&
(result.type() == CALLBACKS) &&
result.GetCallbackObject()->IsAccessorInfo()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Take special care when attributes are different and there is already
attr);
}
- return Runtime::ForceSetObjectProperty(js_object, name, obj_value, attr);
+ return Runtime::ForceSetObjectProperty(isolate,
+ js_object,
+ name,
+ obj_value,
+ attr);
}
-MaybeObject* Runtime::SetObjectProperty(Handle<Object> object,
+MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
StrictModeFlag strict_mode) {
- HandleScope scope;
+ HandleScope scope(isolate);
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = { key, object };
Handle<Object> error =
- Factory::NewTypeError("non_object_property_store",
- HandleVector(args, 2));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError("non_object_property_store",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
}
// If the object isn't a JavaScript object, we ignore the store.
}
-MaybeObject* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
+MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
+ Handle<JSObject> js_object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr) {
- HandleScope scope;
+ HandleScope scope(isolate);
// Check if the given key is an array index.
uint32_t index;
}
-MaybeObject* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
+MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
+ Handle<JSObject> js_object,
Handle<Object> key) {
- HandleScope scope;
+ HandleScope scope(isolate);
// Check if the given key is an array index.
uint32_t index;
// underlying string does nothing with the deletion, we can ignore
// such deletions.
if (js_object->IsStringObjectWithCharacterAt(index)) {
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
}
-static MaybeObject* Runtime_SetProperty(Arguments args) {
+static MaybeObject* Runtime_SetProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
}
- return Runtime::SetObjectProperty(object,
+ return Runtime::SetObjectProperty(isolate,
+ object,
key,
value,
attributes,
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
-static MaybeObject* Runtime_IgnoreAttributesAndSetProperty(Arguments args) {
+static MaybeObject* Runtime_IgnoreAttributesAndSetProperty(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
CONVERT_CHECKED(JSObject, object, args[0]);
}
-static MaybeObject* Runtime_DeleteProperty(Arguments args) {
+static MaybeObject* Runtime_DeleteProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
}
-static Object* HasLocalPropertyImplementation(Handle<JSObject> object,
+static Object* HasLocalPropertyImplementation(Isolate* isolate,
+ Handle<JSObject> object,
Handle<String> key) {
- if (object->HasLocalProperty(*key)) return Heap::true_value();
+ if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
Handle<Object> proto(object->GetPrototype());
if (proto->IsJSObject() &&
Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
- return HasLocalPropertyImplementation(Handle<JSObject>::cast(proto), key);
+ return HasLocalPropertyImplementation(isolate,
+ Handle<JSObject>::cast(proto),
+ key);
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
-static MaybeObject* Runtime_HasLocalProperty(Arguments args) {
+static MaybeObject* Runtime_HasLocalProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[1]);
if (obj->IsJSObject()) {
JSObject* object = JSObject::cast(obj);
// Fast case - no interceptors.
- if (object->HasRealNamedProperty(key)) return Heap::true_value();
+ if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
// Slow case. Either it's not there or we have an interceptor. We should
// have handles for this kind of deal.
- HandleScope scope;
- return HasLocalPropertyImplementation(Handle<JSObject>(object),
+ HandleScope scope(isolate);
+ return HasLocalPropertyImplementation(isolate,
+ Handle<JSObject>(object),
Handle<String>(key));
} else if (obj->IsString()) {
// Well, there is one exception: Handle [] on strings.
if (key->AsArrayIndex(&index)) {
String* string = String::cast(obj);
if (index < static_cast<uint32_t>(string->length()))
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
-static MaybeObject* Runtime_HasProperty(Arguments args) {
+static MaybeObject* Runtime_HasProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 2);
if (args[0]->IsJSObject()) {
JSObject* object = JSObject::cast(args[0]);
CONVERT_CHECKED(String, key, args[1]);
- if (object->HasProperty(key)) return Heap::true_value();
+ if (object->HasProperty(key)) return isolate->heap()->true_value();
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
-static MaybeObject* Runtime_HasElement(Arguments args) {
+static MaybeObject* Runtime_HasElement(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation na;
ASSERT(args.length() == 2);
JSObject* object = JSObject::cast(args[0]);
CONVERT_CHECKED(Smi, index_obj, args[1]);
uint32_t index = index_obj->value();
- if (object->HasElement(index)) return Heap::true_value();
+ if (object->HasElement(index)) return isolate->heap()->true_value();
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
-static MaybeObject* Runtime_IsPropertyEnumerable(Arguments args) {
+static MaybeObject* Runtime_IsPropertyEnumerable(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
uint32_t index;
if (key->AsArrayIndex(&index)) {
- return Heap::ToBoolean(object->HasElement(index));
+ return isolate->heap()->ToBoolean(object->HasElement(index));
}
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
- return Heap::ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
+ return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
}
-static MaybeObject* Runtime_GetPropertyNames(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetPropertyNames(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
return *GetKeysFor(object);
// all enumerable properties of the object and its prototypes
// have none, the map of the object. This is used to speed up
// the check for deletions during a for-in.
-static MaybeObject* Runtime_GetPropertyNamesFast(Arguments args) {
+static MaybeObject* Runtime_GetPropertyNamesFast(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, raw_object, args[0]);
if (raw_object->IsSimpleEnum()) return raw_object->map();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSObject> object(raw_object);
Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
INCLUDE_PROTOS);
// Return the names of the local named properties.
// args[0]: object
-static MaybeObject* Runtime_GetLocalPropertyNames(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetLocalPropertyNames(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
CONVERT_ARG_CHECKED(JSObject, obj, 0);
if (obj->IsJSGlobalProxy()) {
// Only collect names if access is permitted.
if (obj->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*obj, Heap::undefined_value(), v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
- return *Factory::NewJSArray(0);
+ !isolate->MayNamedAccess(*obj,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ return *isolate->factory()->NewJSArray(0);
}
obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
}
for (int i = 0; i < length; i++) {
// Only collect names if access is permitted.
if (jsproto->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*jsproto,
- Heap::undefined_value(),
- v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
- return *Factory::NewJSArray(0);
+ !isolate->MayNamedAccess(*jsproto,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ return *isolate->factory()->NewJSArray(0);
}
int n;
n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
}
// Allocate an array with storage for all the property names.
- Handle<FixedArray> names = Factory::NewFixedArray(total_property_count);
+ Handle<FixedArray> names =
+ isolate->factory()->NewFixedArray(total_property_count);
// Get the property names.
jsproto = obj;
// Filter out the name of the hidden properties object.
if (proto_with_hidden_properties > 0) {
Handle<FixedArray> old_names = names;
- names = Factory::NewFixedArray(
+ names = isolate->factory()->NewFixedArray(
names->length() - proto_with_hidden_properties);
int dest_pos = 0;
for (int i = 0; i < total_property_count; i++) {
Object* name = old_names->get(i);
- if (name == Heap::hidden_symbol()) {
+ if (name == isolate->heap()->hidden_symbol()) {
continue;
}
names->set(dest_pos++, name);
}
}
- return *Factory::NewJSArrayWithElements(names);
+ return *isolate->factory()->NewJSArrayWithElements(names);
}
// Return the names of the local indexed properties.
// args[0]: object
-static MaybeObject* Runtime_GetLocalElementNames(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetLocalElementNames(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
CONVERT_ARG_CHECKED(JSObject, obj, 0);
int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
- Handle<FixedArray> names = Factory::NewFixedArray(n);
+ Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
- return *Factory::NewJSArrayWithElements(names);
+ return *isolate->factory()->NewJSArrayWithElements(names);
}
// Return information on whether an object has a named or indexed interceptor.
// args[0]: object
-static MaybeObject* Runtime_GetInterceptorInfo(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetInterceptorInfo(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
return Smi::FromInt(0);
// Return property names from named interceptor.
// args[0]: object
-static MaybeObject* Runtime_GetNamedInterceptorPropertyNames(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetNamedInterceptorPropertyNames(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Return element names from indexed interceptor.
// args[0]: object
-static MaybeObject* Runtime_GetIndexedInterceptorElementNames(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetIndexedInterceptorElementNames(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_LocalKeys(Arguments args) {
+static MaybeObject* Runtime_LocalKeys(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT_EQ(args.length(), 1);
CONVERT_CHECKED(JSObject, raw_object, args[0]);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSObject> object(raw_object);
if (object->IsJSGlobalProxy()) {
// Do access checks before going to the global object.
if (object->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*object, Heap::undefined_value(),
+ !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
- return *Factory::NewJSArray(0);
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ return *isolate->factory()->NewJSArray(0);
}
Handle<Object> proto(object->GetPrototype());
// If proxy is detached we simply return an empty array.
- if (proto->IsNull()) return *Factory::NewJSArray(0);
+ if (proto->IsNull()) return *isolate->factory()->NewJSArray(0);
object = Handle<JSObject>::cast(proto);
}
// property array and since the result is mutable we have to create
// a fresh clone on each invocation.
int length = contents->length();
- Handle<FixedArray> copy = Factory::NewFixedArray(length);
+ Handle<FixedArray> copy = isolate->factory()->NewFixedArray(length);
for (int i = 0; i < length; i++) {
Object* entry = contents->get(i);
if (entry->IsString()) {
copy->set(i, entry);
} else {
ASSERT(entry->IsNumber());
- HandleScope scope;
- Handle<Object> entry_handle(entry);
- Handle<Object> entry_str = Factory::NumberToString(entry_handle);
+ HandleScope scope(isolate);
+ Handle<Object> entry_handle(entry, isolate);
+ Handle<Object> entry_str =
+ isolate->factory()->NumberToString(entry_handle);
copy->set(i, *entry_str);
}
}
- return *Factory::NewJSArrayWithElements(copy);
+ return *isolate->factory()->NewJSArrayWithElements(copy);
}
-static MaybeObject* Runtime_GetArgumentsProperty(Arguments args) {
+static MaybeObject* Runtime_GetArgumentsProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
}
// Convert the key to a string.
- HandleScope scope;
+ HandleScope scope(isolate);
bool exception = false;
Handle<Object> converted =
Execution::ToString(args.at<Object>(0), &exception);
if (index < n) {
return frame->GetParameter(index);
} else {
- return Top::initial_object_prototype()->GetElement(index);
+ return isolate->initial_object_prototype()->GetElement(index);
}
}
// Handle special arguments properties.
- if (key->Equals(Heap::length_symbol())) return Smi::FromInt(n);
- if (key->Equals(Heap::callee_symbol())) {
+ if (key->Equals(isolate->heap()->length_symbol())) return Smi::FromInt(n);
+ if (key->Equals(isolate->heap()->callee_symbol())) {
Object* function = frame->function();
if (function->IsJSFunction() &&
JSFunction::cast(function)->shared()->strict_mode()) {
- return Top::Throw(*Factory::NewTypeError("strict_arguments_callee",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
return function;
}
// Lookup in the initial Object.prototype object.
- return Top::initial_object_prototype()->GetProperty(*key);
+ return isolate->initial_object_prototype()->GetProperty(*key);
}
-static MaybeObject* Runtime_ToFastProperties(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ToFastProperties(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
}
-static MaybeObject* Runtime_ToSlowProperties(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ToSlowProperties(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
}
-static MaybeObject* Runtime_ToBool(Arguments args) {
+static MaybeObject* Runtime_ToBool(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
// Possible optimizations: put the type string into the oddballs.
-static MaybeObject* Runtime_Typeof(Arguments args) {
+static MaybeObject* Runtime_Typeof(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
Object* obj = args[0];
- if (obj->IsNumber()) return Heap::number_symbol();
+ if (obj->IsNumber()) return isolate->heap()->number_symbol();
HeapObject* heap_obj = HeapObject::cast(obj);
// typeof an undetectable object is 'undefined'
- if (heap_obj->map()->is_undetectable()) return Heap::undefined_symbol();
+ if (heap_obj->map()->is_undetectable()) {
+ return isolate->heap()->undefined_symbol();
+ }
InstanceType instance_type = heap_obj->map()->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
- return Heap::string_symbol();
+ return isolate->heap()->string_symbol();
}
switch (instance_type) {
case ODDBALL_TYPE:
if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
- return Heap::boolean_symbol();
+ return isolate->heap()->boolean_symbol();
}
if (heap_obj->IsNull()) {
- return Heap::object_symbol();
+ return isolate->heap()->object_symbol();
}
ASSERT(heap_obj->IsUndefined());
- return Heap::undefined_symbol();
+ return isolate->heap()->undefined_symbol();
case JS_FUNCTION_TYPE: case JS_REGEXP_TYPE:
- return Heap::function_symbol();
+ return isolate->heap()->function_symbol();
default:
// For any kind of object not handled above, the spec rule for
// host objects gives that it is okay to return "object"
- return Heap::object_symbol();
+ return isolate->heap()->object_symbol();
}
}
}
-static MaybeObject* Runtime_StringToNumber(Arguments args) {
+static MaybeObject* Runtime_StringToNumber(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, subject, args[0]);
int start_pos = (minus ? 1 : 0);
if (start_pos == len) {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
} else if (data[start_pos] > '9') {
// Fast check for a junk value. A valid string may start with whitespace,
// a sign ('+' or '-'), the decimal point, a decimal digit, or the 'I'
// character ('Infinity'). All of those have codes not greater than '9'
// except 'I'.
if (data[start_pos] != 'I') {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
}
} else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
// The maximal/minimal smi has 10 digits. If the string has less digits we
// know it will fit into the smi-data type.
int d = ParseDecimalInteger(data, start_pos, len);
if (minus) {
- if (d == 0) return Heap::minus_zero_value();
+ if (d == 0) return isolate->heap()->minus_zero_value();
d = -d;
} else if (!subject->HasHashCode() &&
len <= String::kMaxArrayIndexSize &&
}
// Slower case.
- return Heap::NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
+ return isolate->heap()->NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
}
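
The sub-ten-digit fast path above depends on a decimal parse helper defined earlier in this file. Its likely shape, with an assumed name (the real helper may differ in signature):

// Sketch (assumption): fast decimal parse for at most 9 digits, so the
// result always fits in an int and needs no overflow checks.
static int ParseDecimalIntegerSketch(const char* data, int start, int end) {
  int value = 0;
  for (int i = start; i < end; i++) {
    value = 10 * value + (data[i] - '0');  // caller guarantees digits only
  }
  return value;
}
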
-static MaybeObject* Runtime_StringFromCharCodeArray(Arguments args) {
+static MaybeObject* Runtime_StringFromCharCodeArray(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
MaybeObject* maybe_object = NULL;
if (i == length) { // The string is ASCII.
- maybe_object = Heap::AllocateRawAsciiString(length);
+ maybe_object = isolate->heap()->AllocateRawAsciiString(length);
} else { // The string is not ASCII.
- maybe_object = Heap::AllocateRawTwoByteString(length);
+ maybe_object = isolate->heap()->AllocateRawTwoByteString(length);
}
Object* object = NULL;
}
-static MaybeObject* Runtime_URIEscape(Arguments args) {
+static MaybeObject* Runtime_URIEscape(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
const char hex_chars[] = "0123456789ABCDEF";
NoHandleAllocation ha;
ASSERT(args.length() == 1);
int escaped_length = 0;
int length = source->length();
{
- Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ isolate->runtime_state()->string_input_buffer());
buffer->Reset(source);
while (buffer->has_more()) {
uint16_t character = buffer->GetNext();
// We don't allow strings that are longer than a maximal length.
ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
}
return source;
}
Object* o;
- { MaybeObject* maybe_o = Heap::AllocateRawAsciiString(escaped_length);
+ { MaybeObject* maybe_o =
+ isolate->heap()->AllocateRawAsciiString(escaped_length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
String* destination = String::cast(o);
int dest_position = 0;
- Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ isolate->runtime_state()->string_input_buffer());
buffer->Rewind();
while (buffer->has_more()) {
uint16_t chr = buffer->GetNext();
}
-static MaybeObject* Runtime_URIUnescape(Arguments args) {
+static MaybeObject* Runtime_URIUnescape(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, source, args[0]);
return source;
Object* o;
- { MaybeObject* maybe_o = ascii ?
- Heap::AllocateRawAsciiString(unescaped_length) :
- Heap::AllocateRawTwoByteString(unescaped_length);
+ { MaybeObject* maybe_o =
+ ascii ?
+ isolate->heap()->AllocateRawAsciiString(unescaped_length) :
+ isolate->heap()->AllocateRawTwoByteString(unescaped_length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
String* destination = String::cast(o);
template <>
MaybeObject* AllocateRawString<SeqTwoByteString>(int length) {
- return Heap::AllocateRawTwoByteString(length);
+ return HEAP->AllocateRawTwoByteString(length);
}
template <>
MaybeObject* AllocateRawString<SeqAsciiString>(int length) {
- return Heap::AllocateRawAsciiString(length);
+ return HEAP->AllocateRawAsciiString(length);
}
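
These specializations have no isolate argument to thread through, so the patch reaches for HEAP (and, just below, COUNTERS). Their definitions are not shown in this excerpt; presumably they bottom out in thread-local storage, along the lines of:

// Sketch (assumption): convenience macros for code without an explicit
// isolate; each use costs a TLS lookup, which is why hot paths in this
// patch prefer an explicitly threaded Isolate*.
#define HEAP (v8::internal::Isolate::Current()->heap())
#define COUNTERS (v8::internal::Isolate::Current()->counters())
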
template <typename Char, typename StringType, bool comma>
static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
int length = characters.length();
- Counters::quote_json_char_count.Increment(length);
+ COUNTERS->quote_json_char_count()->Increment(length);
const int kSpaceForQuotes = 2 + (comma ? 1 : 0);
int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
if (worst_case_length > kMaxGuaranteedNewSpaceString) {
if (!new_alloc->ToObject(&new_object)) {
return new_alloc;
}
- if (!Heap::new_space()->Contains(new_object)) {
+ if (!HEAP->new_space()->Contains(new_object)) {
// Even if our string is small enough to fit in new space we still have to
// handle it being allocated in old space as may happen in the third
// attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
return SlowQuoteJsonString<Char, StringType, comma>(characters);
}
StringType* new_string = StringType::cast(new_object);
- ASSERT(Heap::new_space()->Contains(new_string));
+ ASSERT(HEAP->new_space()->Contains(new_string));
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
Char* write_cursor = reinterpret_cast<Char*>(
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize));
- Heap::new_space()->ShrinkStringAtAllocationBoundary<StringType>(new_string,
+ HEAP->new_space()->ShrinkStringAtAllocationBoundary<StringType>(new_string,
final_length);
return new_string;
}
-static MaybeObject* Runtime_QuoteJSONString(Arguments args) {
+static MaybeObject* Runtime_QuoteJSONString(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
CONVERT_CHECKED(String, str, args[0]);
if (!str->IsFlat()) {
}
-static MaybeObject* Runtime_QuoteJSONStringComma(Arguments args) {
+static MaybeObject* Runtime_QuoteJSONStringComma(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
CONVERT_CHECKED(String, str, args[0]);
if (!str->IsFlat()) {
}
}
-
-static MaybeObject* Runtime_StringParseInt(Arguments args) {
+static MaybeObject* Runtime_StringParseInt(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
double value = StringToInt(s, radix);
- return Heap::NumberFromDouble(value);
+ return isolate->heap()->NumberFromDouble(value);
}
-static MaybeObject* Runtime_StringParseFloat(Arguments args) {
+static MaybeObject* Runtime_StringParseFloat(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
CONVERT_CHECKED(String, str, args[0]);
double value = StringToDouble(str, ALLOW_TRAILING_JUNK, OS::nan_value());
// Create a number object from the value.
- return Heap::NumberFromDouble(value);
+ return isolate->heap()->NumberFromDouble(value);
}
-static unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping;
-static unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping;
-
-
template <class Converter>
MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
+ Isolate* isolate,
String* s,
int length,
int input_string_length,
// dependent upper/lower conversions.
Object* o;
{ MaybeObject* maybe_o = s->IsAsciiRepresentation()
- ? Heap::AllocateRawAsciiString(length)
- : Heap::AllocateRawTwoByteString(length);
+ ? isolate->heap()->AllocateRawAsciiString(length)
+ : isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
String* result = String::cast(o);
// Convert all characters to upper case, assuming that they will fit
// in the buffer
- Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ isolate->runtime_state()->string_input_buffer());
buffer->Reset(s);
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
if (char_length == 0) char_length = 1;
current_length += char_length;
if (current_length > Smi::kMaxValue) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
}
template <typename ConvertTraits>
MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
+ Isolate* isolate,
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
// dependent upper/lower conversions.
if (s->IsSeqAsciiString()) {
Object* o;
- { MaybeObject* maybe_o = Heap::AllocateRawAsciiString(length);
+ { MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
SeqAsciiString* result = SeqAsciiString::cast(o);
}
Object* answer;
- { MaybeObject* maybe_answer = ConvertCaseHelper(s, length, length, mapping);
+ { MaybeObject* maybe_answer =
+ ConvertCaseHelper(isolate, s, length, length, mapping);
if (!maybe_answer->ToObject(&answer)) return maybe_answer;
}
if (answer->IsSmi()) {
// Retry with correct length.
{ MaybeObject* maybe_answer =
- ConvertCaseHelper(s, Smi::cast(answer)->value(), length, mapping);
+ ConvertCaseHelper(isolate,
+ s, Smi::cast(answer)->value(), length, mapping);
if (!maybe_answer->ToObject(&answer)) return maybe_answer;
}
}
}
-static MaybeObject* Runtime_StringToLowerCase(Arguments args) {
- return ConvertCase<ToLowerTraits>(args, &to_lower_mapping);
+static MaybeObject* Runtime_StringToLowerCase(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ return ConvertCase<ToLowerTraits>(
+ args, isolate, isolate->runtime_state()->to_lower_mapping());
}
-static MaybeObject* Runtime_StringToUpperCase(Arguments args) {
- return ConvertCase<ToUpperTraits>(args, &to_upper_mapping);
+static MaybeObject* Runtime_StringToUpperCase(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ return ConvertCase<ToUpperTraits>(
+ args, isolate, isolate->runtime_state()->to_upper_mapping());
}
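
The statics deleted above migrate into a per-isolate RuntimeState. A sketch of what the call sites in this excerpt imply (accessor names mirror the usage; the real class lives in runtime.h and holds more members):

// Sketch (assumption): per-isolate home for previously file-static state.
class RuntimeStateSketch {
 public:
  StringInputBuffer* string_input_buffer() { return &string_input_buffer_; }
  unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
    return &to_upper_mapping_;
  }
  unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
    return &to_lower_mapping_;
  }
  int* smi_lexicographic_compare_x_elms() { return x_elms_; }
  int* smi_lexicographic_compare_y_elms() { return y_elms_; }
 private:
  StringInputBuffer string_input_buffer_;
  unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
  unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
  int x_elms_[10];  // Smis are 31-bit, so 10 decimal digits suffice.
  int y_elms_[10];
};
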
}
-static MaybeObject* Runtime_StringTrim(Arguments args) {
+static MaybeObject* Runtime_StringTrim(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
template <typename SubjectChar, typename PatternChar>
-void FindStringIndices(Vector<const SubjectChar> subject,
+void FindStringIndices(Isolate* isolate,
+ Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
// Collect indices of pattern in subject, and the end-of-string index.
// Stop after finding at most limit values.
- StringSearch<PatternChar, SubjectChar> search(pattern);
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
int pattern_length = pattern.length();
int index = 0;
while (limit > 0) {
}
-static MaybeObject* Runtime_StringSplit(Arguments args) {
+static MaybeObject* Runtime_StringSplit(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 3);
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
CONVERT_ARG_CHECKED(String, subject, 0);
CONVERT_ARG_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
&indices,
limit);
} else {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
&indices,
limit);
} else {
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
&indices,
limit);
} else {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
&indices,
limit);
// Create JSArray of substrings separated by separator.
int part_count = indices.length();
- Handle<JSArray> result = Factory::NewJSArray(part_count);
+ Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastElements());
- HandleScope local_loop_handle;
+ HandleScope local_loop_handle(isolate);
int part_end = indices.at(i);
Handle<String> substring =
- Factory::NewSubString(subject, part_start, part_end);
+ isolate->factory()->NewSubString(subject, part_start, part_end);
elements->set(i, *substring);
part_start = part_end + pattern_length;
}
// one-char strings in the cache. Gives up on the first char that is
// not in the cache and fills the remainder with smi zeros. Returns
// the length of the successfully copied prefix.
-static int CopyCachedAsciiCharsToArray(const char* chars,
+static int CopyCachedAsciiCharsToArray(Heap* heap,
+ const char* chars,
FixedArray* elements,
int length) {
AssertNoAllocation nogc;
- FixedArray* ascii_cache = Heap::single_character_string_cache();
- Object* undefined = Heap::undefined_value();
+ FixedArray* ascii_cache = heap->single_character_string_cache();
+ Object* undefined = heap->undefined_value();
int i;
for (i = 0; i < length; ++i) {
Object* value = ascii_cache->get(chars[i]);
if (value == undefined) break;
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!heap->InNewSpace(value));
elements->set(i, value, SKIP_WRITE_BARRIER);
}
if (i < length) {
// Converts a String to JSArray.
// For example, "foo" => ["f", "o", "o"].
-static MaybeObject* Runtime_StringToArray(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_StringToArray(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, s, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
Handle<FixedArray> elements;
if (s->IsFlat() && s->IsAsciiRepresentation()) {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateUninitializedFixedArray(length);
+ { MaybeObject* maybe_obj =
+ isolate->heap()->AllocateUninitializedFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- elements = Handle<FixedArray>(FixedArray::cast(obj));
+ elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
Vector<const char> chars = s->ToAsciiVector();
// Note, this will initialize all elements (not only the prefix)
// to prevent GC from seeing partially initialized array.
- int num_copied_from_cache = CopyCachedAsciiCharsToArray(chars.start(),
+ int num_copied_from_cache = CopyCachedAsciiCharsToArray(isolate->heap(),
+ chars.start(),
*elements,
length);
elements->set(i, *str);
}
} else {
- elements = Factory::NewFixedArray(length);
+ elements = isolate->factory()->NewFixedArray(length);
for (int i = 0; i < length; ++i) {
Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
elements->set(i, *str);
}
#endif
- return *Factory::NewJSArrayWithElements(elements);
+ return *isolate->factory()->NewJSArrayWithElements(elements);
}
-static MaybeObject* Runtime_NewStringWrapper(Arguments args) {
+static MaybeObject* Runtime_NewStringWrapper(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, value, args[0]);
}
-bool Runtime::IsUpperCaseChar(uint16_t ch) {
+bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
- int char_length = to_upper_mapping.get(ch, 0, chars);
+ int char_length = runtime_state->to_upper_mapping()->get(ch, 0, chars);
return char_length == 0;
}
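
One non-obvious detail in IsUpperCaseChar: unibrow mappings report length 0 when a character maps to itself, so an empty ToUppercase mapping means ch is already upper case. Hence the return value:

// Illustration (not part of the patch):
//   IsUpperCaseChar(state, 'A') -> true  (ToUppercase('A') is empty)
//   IsUpperCaseChar(state, 'a') -> false (ToUppercase('a') yields "A")
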
-static MaybeObject* Runtime_NumberToString(Arguments args) {
+static MaybeObject* Runtime_NumberToString(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return Heap::NumberToString(number);
+ return isolate->heap()->NumberToString(number);
}
-static MaybeObject* Runtime_NumberToStringSkipCache(Arguments args) {
+static MaybeObject* Runtime_NumberToStringSkipCache(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return Heap::NumberToString(number, false);
+ return isolate->heap()->NumberToString(number, false);
}
-static MaybeObject* Runtime_NumberToInteger(Arguments args) {
+static MaybeObject* Runtime_NumberToInteger(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
if (number > 0 && number <= Smi::kMaxValue) {
return Smi::FromInt(static_cast<int>(number));
}
- return Heap::NumberFromDouble(DoubleToInteger(number));
+ return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
}
-static MaybeObject* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
+static MaybeObject* Runtime_NumberToIntegerMapMinusZero(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
// Map both -0 and +0 to +0.
if (double_value == 0) double_value = 0;
- return Heap::NumberFromDouble(double_value);
+ return isolate->heap()->NumberFromDouble(double_value);
}
-static MaybeObject* Runtime_NumberToJSUint32(Arguments args) {
+static MaybeObject* Runtime_NumberToJSUint32(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
- return Heap::NumberFromUint32(number);
+ return isolate->heap()->NumberFromUint32(number);
}
-static MaybeObject* Runtime_NumberToJSInt32(Arguments args) {
+static MaybeObject* Runtime_NumberToJSInt32(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
if (number > 0 && number <= Smi::kMaxValue) {
return Smi::FromInt(static_cast<int>(number));
}
- return Heap::NumberFromInt32(DoubleToInt32(number));
+ return isolate->heap()->NumberFromInt32(DoubleToInt32(number));
}
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
-static MaybeObject* Runtime_NumberToSmi(Arguments args) {
+static MaybeObject* Runtime_NumberToSmi(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
return Smi::FromInt(int_value);
}
}
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
}
-static MaybeObject* Runtime_AllocateHeapNumber(Arguments args) {
+static MaybeObject* Runtime_AllocateHeapNumber(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 0);
- return Heap::AllocateHeapNumber(0);
+ return isolate->heap()->AllocateHeapNumber(0);
}
-static MaybeObject* Runtime_NumberAdd(Arguments args) {
+static MaybeObject* Runtime_NumberAdd(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x + y);
+ return isolate->heap()->NumberFromDouble(x + y);
}
-static MaybeObject* Runtime_NumberSub(Arguments args) {
+static MaybeObject* Runtime_NumberSub(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x - y);
+ return isolate->heap()->NumberFromDouble(x - y);
}
-static MaybeObject* Runtime_NumberMul(Arguments args) {
+static MaybeObject* Runtime_NumberMul(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x * y);
+ return isolate->heap()->NumberFromDouble(x * y);
}
-static MaybeObject* Runtime_NumberUnaryMinus(Arguments args) {
+static MaybeObject* Runtime_NumberUnaryMinus(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(-x);
+ return isolate->heap()->NumberFromDouble(-x);
}
-static MaybeObject* Runtime_NumberAlloc(Arguments args) {
+static MaybeObject* Runtime_NumberAlloc(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 0);
- return Heap::NumberFromDouble(9876543210.0);
+ return isolate->heap()->NumberFromDouble(9876543210.0);
}
-static MaybeObject* Runtime_NumberDiv(Arguments args) {
+static MaybeObject* Runtime_NumberDiv(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x / y);
+ return isolate->heap()->NumberFromDouble(x / y);
}
-static MaybeObject* Runtime_NumberMod(Arguments args) {
+static MaybeObject* Runtime_NumberMod(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
x = modulo(x, y);
// NumberFromDouble may return a Smi instead of a Number object
- return Heap::NumberFromDouble(x);
+ return isolate->heap()->NumberFromDouble(x);
}
-static MaybeObject* Runtime_StringAdd(Arguments args) {
+static MaybeObject* Runtime_StringAdd(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, str1, args[0]);
CONVERT_CHECKED(String, str2, args[1]);
- Counters::string_add_runtime.Increment();
- return Heap::AllocateConsString(str1, str2);
+ isolate->counters()->string_add_runtime()->Increment();
+ return isolate->heap()->AllocateConsString(str1, str2);
}
}
-static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
+static MaybeObject* Runtime_StringBuilderConcat(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, array, args[0]);
if (!args[1]->IsSmi()) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
int array_length = Smi::cast(args[1])->value();
int special_length = special->length();
if (!array->HasFastElements()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
}
if (array_length == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
if (first->IsString()) return first;
// Get the position and check that it is a positive smi.
i++;
if (i >= array_length) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
Object* next_smi = fixed_array->get(i);
if (!next_smi->IsSmi()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
pos = Smi::cast(next_smi)->value();
if (pos < 0) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
}
ASSERT(pos >= 0);
ASSERT(len >= 0);
if (pos > special_length || len > special_length - pos) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
increment = len;
} else if (elt->IsString()) {
ascii = false;
}
} else {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
if (increment > String::kMaxLength - position) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
position += increment;
Object* object;
if (ascii) {
- { MaybeObject* maybe_object = Heap::AllocateRawAsciiString(length);
+ { MaybeObject* maybe_object =
+ isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqAsciiString* answer = SeqAsciiString::cast(object);
array_length);
return answer;
} else {
- { MaybeObject* maybe_object = Heap::AllocateRawTwoByteString(length);
+ { MaybeObject* maybe_object =
+ isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqTwoByteString* answer = SeqTwoByteString::cast(object);
}
-static MaybeObject* Runtime_StringBuilderJoin(Arguments args) {
+static MaybeObject* Runtime_StringBuilderJoin(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, array, args[0]);
if (!args[1]->IsSmi()) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
int array_length = Smi::cast(args[1])->value();
CONVERT_CHECKED(String, separator, args[2]);
if (!array->HasFastElements()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
}
if (array_length == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
if (first->IsString()) return first;
int max_nof_separators =
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
int length = (array_length - 1) * separator_length;
Object* element_obj = fixed_array->get(i);
if (!element_obj->IsString()) {
// TODO(1161): handle this case.
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
length += increment;
}
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateRawTwoByteString(length);
+ { MaybeObject* maybe_object =
+ isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqTwoByteString* answer = SeqTwoByteString::cast(object);
}
-static MaybeObject* Runtime_NumberOr(Arguments args) {
+static MaybeObject* Runtime_NumberOr(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x | y);
+ return isolate->heap()->NumberFromInt32(x | y);
}
-static MaybeObject* Runtime_NumberAnd(Arguments args) {
+static MaybeObject* Runtime_NumberAnd(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x & y);
+ return isolate->heap()->NumberFromInt32(x & y);
}
-static MaybeObject* Runtime_NumberXor(Arguments args) {
+static MaybeObject* Runtime_NumberXor(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x ^ y);
+ return isolate->heap()->NumberFromInt32(x ^ y);
}
-static MaybeObject* Runtime_NumberNot(Arguments args) {
+static MaybeObject* Runtime_NumberNot(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- return Heap::NumberFromInt32(~x);
+ return isolate->heap()->NumberFromInt32(~x);
}
-static MaybeObject* Runtime_NumberShl(Arguments args) {
+static MaybeObject* Runtime_NumberShl(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x << (y & 0x1f));
+ return isolate->heap()->NumberFromInt32(x << (y & 0x1f));
}
-static MaybeObject* Runtime_NumberShr(Arguments args) {
+static MaybeObject* Runtime_NumberShr(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromUint32(x >> (y & 0x1f));
+ return isolate->heap()->NumberFromUint32(x >> (y & 0x1f));
}
-static MaybeObject* Runtime_NumberSar(Arguments args) {
+static MaybeObject* Runtime_NumberSar(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
+ return isolate->heap()->NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
}
-static MaybeObject* Runtime_NumberEquals(Arguments args) {
+static MaybeObject* Runtime_NumberEquals(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
}
-static MaybeObject* Runtime_StringEquals(Arguments args) {
+static MaybeObject* Runtime_StringEquals(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
}
-static MaybeObject* Runtime_NumberCompare(Arguments args) {
+static MaybeObject* Runtime_NumberCompare(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
-static MaybeObject* Runtime_SmiLexicographicCompare(Arguments args) {
+static MaybeObject* Runtime_SmiLexicographicCompare(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- // Arrays for the individual characters of the two Smis. Smis are
- // 31 bit integers and 10 decimal digits are therefore enough.
- static int x_elms[10];
- static int y_elms[10];
-
// Extract the integer values from the Smis.
CONVERT_CHECKED(Smi, x, args[0]);
CONVERT_CHECKED(Smi, y, args[1]);
y_value = -y_value;
}
+ // Arrays for the individual characters of the two Smis. Smis are
+ // 31 bit integers and 10 decimal digits are therefore enough.
+ // TODO(isolates): maybe we should simply allocate 20 bytes on the stack.
+ int* x_elms = isolate->runtime_state()->smi_lexicographic_compare_x_elms();
+ int* y_elms = isolate->runtime_state()->smi_lexicographic_compare_y_elms();
+
// Convert the integers to arrays of their decimal digits.
int x_index = 0;
int y_index = 0;
}
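
The hoisted digit arrays feed a purely lexicographic ordering, which can disagree with numeric order. A self-contained illustration for non-negative values (the real code extracts signs first); the helper name is invented:

#include <cstdio>
#include <cstring>

// Illustration: 9 sorts after 80 lexicographically because '9' > '8',
// even though 9 < 80 numerically -- the ordering the digit arrays above
// implement without allocating strings.
static int SmiLexCompareSketch(int x, int y) {
  char bx[12], by[12];
  snprintf(bx, sizeof(bx), "%d", x);
  snprintf(by, sizeof(by), "%d", y);
  int r = strcmp(bx, by);
  return r < 0 ? -1 : (r > 0 ? 1 : 0);
}
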
-static Object* StringInputBufferCompare(String* x, String* y) {
- static StringInputBuffer bufx;
- static StringInputBuffer bufy;
+static Object* StringInputBufferCompare(RuntimeState* state,
+ String* x,
+ String* y) {
+ StringInputBuffer& bufx = *state->string_input_buffer_compare_bufx();
+ StringInputBuffer& bufy = *state->string_input_buffer_compare_bufy();
bufx.Reset(x);
bufy.Reset(y);
while (bufx.has_more() && bufy.has_more()) {
} else {
result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
}
- ASSERT(result == StringInputBufferCompare(x, y));
+ ASSERT(result ==
+ StringInputBufferCompare(Isolate::Current()->runtime_state(), x, y));
return result;
}
-static MaybeObject* Runtime_StringCompare(Arguments args) {
+static MaybeObject* Runtime_StringCompare(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, x, args[0]);
CONVERT_CHECKED(String, y, args[1]);
- Counters::string_compare_runtime.Increment();
+ isolate->counters()->string_compare_runtime()->Increment();
// A few fast case tests before we flatten.
if (x == y) return Smi::FromInt(EQUAL);
else if (d > 0) return Smi::FromInt(GREATER);
Object* obj;
- { MaybeObject* maybe_obj = Heap::PrepareForCompare(x);
+ { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(x);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- { MaybeObject* maybe_obj = Heap::PrepareForCompare(y);
+ { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(y);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
- : StringInputBufferCompare(x, y);
+ : StringInputBufferCompare(isolate->runtime_state(), x, y);
}
-static MaybeObject* Runtime_Math_acos(Arguments args) {
+static MaybeObject* Runtime_Math_acos(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_acos.Increment();
+ isolate->counters()->math_acos()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::ACOS, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
}
-static MaybeObject* Runtime_Math_asin(Arguments args) {
+static MaybeObject* Runtime_Math_asin(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_asin.Increment();
+ isolate->counters()->math_asin()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::ASIN, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
}
-static MaybeObject* Runtime_Math_atan(Arguments args) {
+static MaybeObject* Runtime_Math_atan(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_atan.Increment();
+ isolate->counters()->math_atan()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::ATAN, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
}
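
The one-argument Math entries all now share a shape: bump a per-isolate
counter, then consult the isolate's TranscendentalCache rather than the former
class-static cache. A hedged sketch of the common body (MathUnaryViaCache is
illustrative, not part of the patch):

// Illustrative only: the pattern shared by acos/asin/atan/cos/exp/log/...
static MaybeObject* MathUnaryViaCache(Isolate* isolate,
                                      TranscendentalCache::Type type,
                                      double x) {
  // The cache memoizes results per (type, argument), so repeated calls with
  // the same argument avoid recomputing the libm function.
  return isolate->transcendental_cache()->Get(type, x);
}
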
-static MaybeObject* Runtime_Math_atan2(Arguments args) {
+static const double kPiDividedBy4 = 0.78539816339744830962;
+
+
+static MaybeObject* Runtime_Math_atan2(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- Counters::math_atan2.Increment();
+ isolate->counters()->math_atan2()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
// is a multiple of Pi / 4. The sign of the result is determined
// by the first argument (x) and the sign of the second argument
// determines the multiplier: one or three.
- static double kPiDividedBy4 = 0.78539816339744830962;
int multiplier = (x < 0) ? -1 : 1;
if (y < 0) multiplier *= 3;
result = multiplier * kPiDividedBy4;
} else {
result = atan2(x, y);
}
- return Heap::AllocateHeapNumber(result);
+ return isolate->heap()->AllocateHeapNumber(result);
}
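
Spelled out, these are the doubly-infinite cases the multiplier covers; the
values below are the standard IEEE/libm results (the table is editorial, not
part of the patch):

//   atan2(+inf, +inf) == +pi/4    (multiplier  1: x >= 0, y >= 0)
//   atan2(+inf, -inf) == +3*pi/4  (multiplier  3: x >= 0, y <  0)
//   atan2(-inf, +inf) == -pi/4    (multiplier -1: x <  0, y >= 0)
//   atan2(-inf, -inf) == -3*pi/4  (multiplier -3: x <  0, y <  0)
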
-static MaybeObject* Runtime_Math_ceil(Arguments args) {
+static MaybeObject* Runtime_Math_ceil(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_ceil.Increment();
+ isolate->counters()->math_ceil()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(ceiling(x));
+ return isolate->heap()->NumberFromDouble(ceiling(x));
}
-static MaybeObject* Runtime_Math_cos(Arguments args) {
+static MaybeObject* Runtime_Math_cos(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_cos.Increment();
+ isolate->counters()->math_cos()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::COS, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
}
-static MaybeObject* Runtime_Math_exp(Arguments args) {
+static MaybeObject* Runtime_Math_exp(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_exp.Increment();
+ isolate->counters()->math_exp()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::EXP, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
}
-static MaybeObject* Runtime_Math_floor(Arguments args) {
+static MaybeObject* Runtime_Math_floor(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_floor.Increment();
+ isolate->counters()->math_floor()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(floor(x));
+ return isolate->heap()->NumberFromDouble(floor(x));
}
-static MaybeObject* Runtime_Math_log(Arguments args) {
+static MaybeObject* Runtime_Math_log(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_log.Increment();
+ isolate->counters()->math_log()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::LOG, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
-static MaybeObject* Runtime_Math_pow(Arguments args) {
+static MaybeObject* Runtime_Math_pow(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- Counters::math_pow.Increment();
+ isolate->counters()->math_pow()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
int y = Smi::cast(args[1])->value();
- return Heap::NumberFromDouble(power_double_int(x, y));
+ return isolate->heap()->NumberFromDouble(power_double_int(x, y));
}
CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::AllocateHeapNumber(power_double_double(x, y));
+ return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
}
// Fast version of Math.pow if we know that y is not an integer and
// y is not -0.5 or 0.5. Used as slowcase from codegen.
-static MaybeObject* Runtime_Math_pow_cfunction(Arguments args) {
+static MaybeObject* Runtime_Math_pow_cfunction(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]);
if (y == 0) {
return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
} else {
- return Heap::AllocateHeapNumber(pow(x, y));
+ return isolate->heap()->AllocateHeapNumber(pow(x, y));
}
}
-static MaybeObject* Runtime_RoundNumber(Arguments args) {
+static MaybeObject* Runtime_RoundNumber(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_round.Increment();
+ isolate->counters()->math_round()->Increment();
if (!args[0]->IsHeapNumber()) {
// Must be smi. Return the argument unchanged for all the other types
return number;
}
- if (sign && value >= -0.5) return Heap::minus_zero_value();
+ if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
// Do not call NumberFromDouble() to avoid extra checks.
- return Heap::AllocateHeapNumber(floor(value + 0.5));
+ return isolate->heap()->AllocateHeapNumber(floor(value + 0.5));
}
-static MaybeObject* Runtime_Math_sin(Arguments args) {
+static MaybeObject* Runtime_Math_sin(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_sin.Increment();
+ isolate->counters()->math_sin()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::SIN, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
}
-static MaybeObject* Runtime_Math_sqrt(Arguments args) {
+static MaybeObject* Runtime_Math_sqrt(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_sqrt.Increment();
+ isolate->counters()->math_sqrt()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(sqrt(x));
+ return isolate->heap()->AllocateHeapNumber(sqrt(x));
}
-static MaybeObject* Runtime_Math_tan(Arguments args) {
+static MaybeObject* Runtime_Math_tan(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_tan.Increment();
+ isolate->counters()->math_tan()->Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::TAN, x);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
}
}
-static MaybeObject* Runtime_DateMakeDay(Arguments args) {
+static MaybeObject* Runtime_DateMakeDay(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
}
-static MaybeObject* Runtime_DateYMDFromTime(Arguments args) {
+static MaybeObject* Runtime_DateYMDFromTime(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
int year, month, day;
DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
- RUNTIME_ASSERT(res_array->elements()->map() == Heap::fixed_array_map());
+ RUNTIME_ASSERT(res_array->elements()->map() ==
+ isolate->heap()->fixed_array_map());
FixedArray* elms = FixedArray::cast(res_array->elements());
RUNTIME_ASSERT(elms->length() == 3);
elms->set(1, Smi::FromInt(month));
elms->set(2, Smi::FromInt(day));
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_NewArgumentsFast(Arguments args) {
+static MaybeObject* Runtime_NewArgumentsFast(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 3);
const int length = Smi::cast(args[2])->value();
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateArgumentsObject(callee, length);
+ { MaybeObject* maybe_result =
+ isolate->heap()->AllocateArgumentsObject(callee, length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Allocate the elements if needed.
if (length > 0) {
// Allocate the fixed array.
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateRawFixedArray(length);
+ { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
AssertNoAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map(Heap::fixed_array_map());
+ array->set_map(isolate->heap()->fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
}
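
The AssertNoAllocation/GetWriteBarrierMode pairing above is a common fast-path
idiom: when no allocation (and therefore no GC) can happen, the write-barrier
decision is computed once and reused for every store. A generic sketch of the
idiom (FillWithSmis is illustrative, not the elided copy loop):

// Hedged sketch of the no-GC bulk-store idiom.
static void FillWithSmis(FixedArray* array, int length) {
  AssertNoAllocation no_gc;  // no GC between here and the last store
  WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < length; i++) {
    // Smi stores never need a barrier; the precomputed mode encodes that.
    array->set(i, Smi::FromInt(0), mode);
  }
}
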
-static MaybeObject* Runtime_NewClosure(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_NewClosure(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(Context, context, 0);
CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
pretenure = pretenure || (context->global_context() == *context);
PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
Handle<JSFunction> result =
- Factory::NewFunctionFromSharedFunctionInfo(shared,
- context,
- pretenure_flag);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ pretenure_flag);
return *result;
}
-
-static MaybeObject* Runtime_NewObjectFromBound(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_NewObjectFromBound(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// First argument is a function to use as a constructor.
CONVERT_ARG_CHECKED(JSFunction, function, 0);
}
-static void TrySettingInlineConstructStub(Handle<JSFunction> function) {
- Handle<Object> prototype = Factory::null_value();
+static void TrySettingInlineConstructStub(Isolate* isolate,
+ Handle<JSFunction> function) {
+ Handle<Object> prototype = isolate->factory()->null_value();
if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype());
+ prototype = Handle<Object>(function->instance_prototype(), isolate);
}
if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
ConstructStubCompiler compiler;
}
-static MaybeObject* Runtime_NewObject(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_NewObject(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> constructor = args.at<Object>(0);
if (!constructor->IsJSFunction()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
Handle<Object> type_error =
- Factory::NewTypeError("not_constructor", arguments);
- return Top::Throw(*type_error);
+ isolate->factory()->NewTypeError("not_constructor", arguments);
+ return isolate->Throw(*type_error);
}
Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
if (!function->should_have_prototype()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
Handle<Object> type_error =
- Factory::NewTypeError("not_constructor", arguments);
- return Top::Throw(*type_error);
+ isolate->factory()->NewTypeError("not_constructor", arguments);
+ return isolate->Throw(*type_error);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = isolate->debug();
// Handle stepping into constructors if step into is active.
- if (Debug::StepInActive()) {
- Debug::HandleStepIn(function, Handle<Object>::null(), 0, true);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
}
#endif
// called using 'new' and creates a new JSFunction object that
// is returned. The receiver object is only used for error
// reporting if an error occurs when constructing the new
- // JSFunction. Factory::NewJSObject() should not be used to
+ // JSFunction. FACTORY->NewJSObject() should not be used to
// allocate JSFunctions since it does not properly initialize
// the shared part of the function. Since the receiver is
// ignored anyway, we use the global object as the receiver
// instead of a new JSFunction object. This way, errors are
// reported the same way whether or not 'Function' is called
// using 'new'.
- return Top::context()->global();
+ return isolate->context()->global();
}
}
// available. We cannot use EnsureCompiled because that forces a
// compilation through the shared function info which makes it
// impossible for us to optimize.
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
if (!function->has_initial_map() &&
}
bool first_allocation = !shared->live_objects_may_exist();
- Handle<JSObject> result = Factory::NewJSObject(function);
- RETURN_IF_EMPTY_HANDLE(result);
+ Handle<JSObject> result = isolate->factory()->NewJSObject(function);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// Delay setting the stub if inobject slack tracking is in progress.
if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(function);
+ TrySettingInlineConstructStub(isolate, function);
}
- Counters::constructed_objects.Increment();
- Counters::constructed_objects_runtime.Increment();
+ isolate->counters()->constructed_objects()->Increment();
+ isolate->counters()->constructed_objects_runtime()->Increment();
return *result;
}
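
Counter updates follow the same migration as the heap and factory calls: the
former static Counters::foo.Increment() becomes an accessor chain rooted at
the isolate. A hedged sketch of what such a per-isolate counter table would
look like (only the two accessor names used above come from the patch):

// Hedged sketch: per-isolate counters replacing a process-wide static table.
class Counters {
 public:
  StatsCounter* constructed_objects() { return &constructed_objects_; }
  StatsCounter* constructed_objects_runtime() {
    return &constructed_objects_runtime_;
  }

 private:
  StatsCounter constructed_objects_;
  StatsCounter constructed_objects_runtime_;
};
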
-static MaybeObject* Runtime_FinalizeInstanceSize(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_FinalizeInstanceSize(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(function);
+ TrySettingInlineConstructStub(isolate, function);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_LazyCompile(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_LazyCompile(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
}
-static MaybeObject* Runtime_LazyRecompile(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_LazyRecompile(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
// If the function is not optimizable or the debugger is active, continue
// using the code from the full compiler.
if (!function->shared()->code()->optimizable() ||
- Debug::has_break_points()) {
+ isolate->debug()->has_break_points()) {
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
function->shared()->code()->optimizable() ? "T" : "F",
- Debug::has_break_points() ? "T" : "F");
+ isolate->debug()->has_break_points() ? "T" : "F");
}
function->ReplaceCode(function->shared()->code());
return function->code();
}
-static MaybeObject* Runtime_NotifyDeoptimized(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_NotifyDeoptimized(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(args[0]->IsSmi());
Deoptimizer::BailoutType type =
static_cast<Deoptimizer::BailoutType>(Smi::cast(args[0])->value());
- Deoptimizer* deoptimizer = Deoptimizer::Grab();
- ASSERT(Heap::IsAllocationAllowed());
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ ASSERT(isolate->heap()->IsAllocationAllowed());
int frames = deoptimizer->output_count();
JavaScriptFrameIterator it;
delete deoptimizer;
RUNTIME_ASSERT(frame->function()->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
Handle<Object> arguments;
for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == Heap::arguments_marker()) {
+ if (frame->GetExpression(i) == isolate->heap()->arguments_marker()) {
if (arguments.is_null()) {
// FunctionGetArguments can't throw an exception, so cast away the
// doubt with an assert.
arguments = Handle<Object>(
Accessors::FunctionGetArguments(*function,
NULL)->ToObjectUnchecked());
- ASSERT(*arguments != Heap::null_value());
- ASSERT(*arguments != Heap::undefined_value());
+ ASSERT(*arguments != isolate->heap()->null_value());
+ ASSERT(*arguments != isolate->heap()->undefined_value());
}
frame->SetExpression(i, *arguments);
}
}
- CompilationCache::MarkForLazyOptimizing(function);
+ isolate->compilation_cache()->MarkForLazyOptimizing(function);
if (type == Deoptimizer::EAGER) {
RUNTIME_ASSERT(function->IsOptimized());
} else {
// Avoid doing too much work when running with --always-opt and keep
// the optimized code around.
if (FLAG_always_opt || type == Deoptimizer::LAZY) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Count the number of optimized activations of the function.
}
function->ReplaceCode(function->shared()->code());
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_NotifyOSR(Arguments args) {
- Deoptimizer* deoptimizer = Deoptimizer::Grab();
+static MaybeObject* Runtime_NotifyOSR(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
delete deoptimizer;
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DeoptimizeFunction(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DeoptimizeFunction(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- if (!function->IsOptimized()) return Heap::undefined_value();
+ if (!function->IsOptimized()) return isolate->heap()->undefined_value();
Deoptimizer::DeoptimizeFunction(*function);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_CompileForOnStackReplacement(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
// We have hit a back edge in an unoptimized frame for a function that was
// selected for on-stack replacement. Find the unoptimized code object.
- Handle<Code> unoptimized(function->shared()->code());
+ Handle<Code> unoptimized(function->shared()->code(), isolate);
// Keep track of whether we've succeeded in optimizing.
bool succeeded = unoptimized->optimizable();
if (succeeded) {
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
ASSERT(frame->function() == *function);
- ASSERT(frame->code() == *unoptimized);
+ ASSERT(frame->LookupCode(isolate) == *unoptimized);
ASSERT(unoptimized->contains(frame->pc()));
// Use linear search of the unoptimized code's stack check table to find
StackCheckStub check_stub;
Handle<Code> check_code = check_stub.GetCode();
Handle<Code> replacement_code(
- Builtins::builtin(Builtins::OnStackReplacement));
+ isolate->builtins()->builtin(Builtins::OnStackReplacement));
Deoptimizer::RevertStackCheckCode(*unoptimized,
*check_code,
*replacement_code);
}
-static MaybeObject* Runtime_GetFunctionDelegate(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetFunctionDelegate(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
return *Execution::GetFunctionDelegate(args.at<Object>(0));
}
-static MaybeObject* Runtime_GetConstructorDelegate(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetConstructorDelegate(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
return *Execution::GetConstructorDelegate(args.at<Object>(0));
}
-static MaybeObject* Runtime_NewContext(Arguments args) {
+static MaybeObject* Runtime_NewContext(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, function, args[0]);
int length = function->shared()->scope_info()->NumberOfContextSlots();
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateFunctionContext(length, function);
+ { MaybeObject* maybe_result =
+ isolate->heap()->AllocateFunctionContext(length, function);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Top::set_context(Context::cast(result));
+ isolate->set_context(Context::cast(result));
return result; // non-failure
}
-MUST_USE_RESULT static MaybeObject* PushContextHelper(Object* object,
+MUST_USE_RESULT static MaybeObject* PushContextHelper(Isolate* isolate,
+ Object* object,
bool is_catch_context) {
// Convert the object to a proper JavaScript object.
Object* js_object = object;
if (!Failure::cast(maybe_js_object)->IsInternalError()) {
return maybe_js_object;
}
- HandleScope scope;
- Handle<Object> handle(object);
+ HandleScope scope(isolate);
+ Handle<Object> handle(object, isolate);
Handle<Object> result =
- Factory::NewTypeError("with_expression", HandleVector(&handle, 1));
- return Top::Throw(*result);
+ isolate->factory()->NewTypeError("with_expression",
+ HandleVector(&handle, 1));
+ return isolate->Throw(*result);
}
}
Object* result;
- { MaybeObject* maybe_result =
- Heap::AllocateWithContext(Top::context(),
- JSObject::cast(js_object),
- is_catch_context);
+ { MaybeObject* maybe_result = isolate->heap()->AllocateWithContext(
+ isolate->context(), JSObject::cast(js_object), is_catch_context);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = Context::cast(result);
- Top::set_context(context);
+ isolate->set_context(context);
return result;
}
-static MaybeObject* Runtime_PushContext(Arguments args) {
+static MaybeObject* Runtime_PushContext(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- return PushContextHelper(args[0], false);
+ return PushContextHelper(isolate, args[0], false);
}
-static MaybeObject* Runtime_PushCatchContext(Arguments args) {
+static MaybeObject* Runtime_PushCatchContext(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- return PushContextHelper(args[0], true);
+ return PushContextHelper(isolate, args[0], true);
}
-static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DeleteContextSlot(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(Context, context, 0);
// If the slot was not found the result is true.
if (holder.is_null()) {
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
// If the slot was found in a context, it should be DONT_DELETE.
if (holder->IsContext()) {
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
// The slot was found in a JSObject, either a context extension object,
#endif
-static inline MaybeObject* Unhole(MaybeObject* x,
+static inline MaybeObject* Unhole(Heap* heap,
+ MaybeObject* x,
PropertyAttributes attributes) {
ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
USE(attributes);
- return x->IsTheHole() ? Heap::undefined_value() : x;
+ return x->IsTheHole() ? heap->undefined_value() : x;
}
-static JSObject* ComputeReceiverForNonGlobal(JSObject* holder) {
+static JSObject* ComputeReceiverForNonGlobal(Isolate* isolate,
+ JSObject* holder) {
ASSERT(!holder->IsGlobalObject());
- Context* top = Top::context();
+ Context* top = isolate->context();
// Get the context extension function.
JSFunction* context_extension_function =
top->global_context()->context_extension_function();
}
-static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
- HandleScope scope;
+static ObjectPair LoadContextSlotHelper(Arguments args,
+ Isolate* isolate,
+ bool throw_error) {
+ HandleScope scope(isolate);
ASSERT_EQ(2, args.length());
if (!args[0]->IsContext() || !args[1]->IsString()) {
- return MakePair(Top::ThrowIllegalOperation(), NULL);
+ return MakePair(isolate->ThrowIllegalOperation(), NULL);
}
Handle<Context> context = args.at<Context>(0);
Handle<String> name = args.at<String>(1);
// If the "property" we were looking for is a local variable or an
// argument in a context, the receiver is the global object; see
// ECMA-262, 3rd., 10.1.6 and 10.2.3.
- JSObject* receiver = Top::context()->global()->global_receiver();
+ JSObject* receiver =
+ isolate->context()->global()->global_receiver();
MaybeObject* value = (holder->IsContext())
? Context::cast(*holder)->get(index)
: JSObject::cast(*holder)->GetElement(index);
- return MakePair(Unhole(value, attributes), receiver);
+ return MakePair(Unhole(isolate->heap(), value, attributes), receiver);
}
// If the holder is found, we read the property from it.
if (object->IsGlobalObject()) {
receiver = GlobalObject::cast(object)->global_receiver();
} else if (context->is_exception_holder(*holder)) {
- receiver = Top::context()->global()->global_receiver();
+ receiver = isolate->context()->global()->global_receiver();
} else {
- receiver = ComputeReceiverForNonGlobal(object);
+ receiver = ComputeReceiverForNonGlobal(isolate, object);
}
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
if (throw_error) {
// The property doesn't exist - throw exception.
Handle<Object> reference_error =
- Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return MakePair(Top::Throw(*reference_error), NULL);
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return MakePair(isolate->Throw(*reference_error), NULL);
} else {
// The property doesn't exist - return undefined.
- return MakePair(Heap::undefined_value(), Heap::undefined_value());
+ return MakePair(isolate->heap()->undefined_value(),
+ isolate->heap()->undefined_value());
}
}
-static ObjectPair Runtime_LoadContextSlot(Arguments args) {
- return LoadContextSlotHelper(args, true);
+static ObjectPair Runtime_LoadContextSlot(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ return LoadContextSlotHelper(args, isolate, true);
}
-static ObjectPair Runtime_LoadContextSlotNoReferenceError(Arguments args) {
- return LoadContextSlotHelper(args, false);
+static ObjectPair Runtime_LoadContextSlotNoReferenceError(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ return LoadContextSlotHelper(args, isolate, false);
}
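
LoadContextSlotHelper returns two values, the slot value and the receiver,
through an ObjectPair. The definition lies outside this hunk; a plausible
32-bit sketch, assuming the pair is packed into a 64-bit integer so both
values come back in a register pair (the packing shown is an assumption):

// Hedged sketch: pack two 32-bit pointers into one 64-bit return value so
// the pair is returned in edx:eax on ia32; widths differ on 64-bit targets.
typedef uint64_t ObjectPair;

static inline ObjectPair MakePair(MaybeObject* x, Object* y) {
  return reinterpret_cast<uint32_t>(x) |
         (static_cast<uint64_t>(reinterpret_cast<uint32_t>(y)) << 32);
}
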
-static MaybeObject* Runtime_StoreContextSlot(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_StoreContextSlot(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
- Handle<Object> value(args[0]);
+ Handle<Object> value(args[0], isolate);
CONVERT_ARG_CHECKED(Context, context, 1);
CONVERT_ARG_CHECKED(String, name, 2);
CONVERT_SMI_CHECKED(strict_unchecked, args[3]);
} else if (strict_mode == kStrictMode) {
// Setting read only property in strict mode.
Handle<Object> error =
- Factory::NewTypeError("strict_cannot_assign",
- HandleVector(&name, 1));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError("strict_cannot_assign",
+ HandleVector(&name, 1));
+ return isolate->Throw(*error);
}
} else {
ASSERT((attributes & READ_ONLY) == 0);
Handle<Object> result =
SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
if (result.is_null()) {
- ASSERT(Top::has_pending_exception());
+ ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
}
// The property was not found. It needs to be stored in the global context.
ASSERT(attributes == ABSENT);
attributes = NONE;
- context_ext = Handle<JSObject>(Top::context()->global());
+ context_ext = Handle<JSObject>(isolate->context()->global());
}
// Set the property, but ignore if read_only variable on the context
if ((attributes & READ_ONLY) == 0 ||
(context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetProperty(context_ext, name, value, NONE, strict_mode));
} else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
- Factory::NewTypeError("strict_cannot_assign", HandleVector(&name, 1));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError(
+ "strict_cannot_assign", HandleVector(&name, 1));
+ return isolate->Throw(*error);
}
return *value;
}
-static MaybeObject* Runtime_Throw(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_Throw(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- return Top::Throw(args[0]);
+ return isolate->Throw(args[0]);
}
-static MaybeObject* Runtime_ReThrow(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ReThrow(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- return Top::ReThrow(args[0]);
+ return isolate->ReThrow(args[0]);
}
-static MaybeObject* Runtime_PromoteScheduledException(Arguments args) {
+static MaybeObject* Runtime_PromoteScheduledException(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT_EQ(0, args.length());
- return Top::PromoteScheduledException();
+ return isolate->PromoteScheduledException();
}
-static MaybeObject* Runtime_ThrowReferenceError(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ThrowReferenceError(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Handle<Object> name(args[0]);
+ Handle<Object> name(args[0], isolate);
Handle<Object> reference_error =
- Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return Top::Throw(*reference_error);
-}
-
-
-static MaybeObject* Runtime_StackOverflow(Arguments args) {
- NoHandleAllocation na;
- return Top::StackOverflow();
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return isolate->Throw(*reference_error);
}
-static MaybeObject* Runtime_StackGuard(Arguments args) {
+static MaybeObject* Runtime_StackGuard(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 0);
// First check if this is a real stack overflow.
- if (StackGuard::IsStackOverflow()) {
- return Runtime_StackOverflow(args);
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ NoHandleAllocation na;
+ return isolate->StackOverflow();
}
return Execution::HandleStackGuardInterrupt();
}
-static MaybeObject* Runtime_TraceEnter(Arguments args) {
+static MaybeObject* Runtime_TraceEnter(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 0);
NoHandleAllocation ha;
PrintTransition(NULL);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_TraceExit(Arguments args) {
+static MaybeObject* Runtime_TraceExit(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
PrintTransition(args[0]);
return args[0]; // return TOS
}
-static MaybeObject* Runtime_DebugPrint(Arguments args) {
+static MaybeObject* Runtime_DebugPrint(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
}
-static MaybeObject* Runtime_DebugTrace(Arguments args) {
+static MaybeObject* Runtime_DebugTrace(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 0);
NoHandleAllocation ha;
- Top::PrintStack();
- return Heap::undefined_value();
+ isolate->PrintStack();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DateCurrentTime(Arguments args) {
+static MaybeObject* Runtime_DateCurrentTime(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 0);
// time is milliseconds. Therefore, we floor the result of getting
// the OS time.
double millis = floor(OS::TimeCurrentMillis());
- return Heap::NumberFromDouble(millis);
+ return isolate->heap()->NumberFromDouble(millis);
}
-static MaybeObject* Runtime_DateParseString(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DateParseString(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str, 0);
if (result) {
return *output;
} else {
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
}
-static MaybeObject* Runtime_DateLocalTimezone(Arguments args) {
+static MaybeObject* Runtime_DateLocalTimezone(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
const char* zone = OS::LocalTimezone(x);
- return Heap::AllocateStringFromUtf8(CStrVector(zone));
+ return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
}
-static MaybeObject* Runtime_DateLocalTimeOffset(Arguments args) {
+static MaybeObject* Runtime_DateLocalTimeOffset(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 0);
- return Heap::NumberFromDouble(OS::LocalTimeOffset());
+ return isolate->heap()->NumberFromDouble(OS::LocalTimeOffset());
}
-static MaybeObject* Runtime_DateDaylightSavingsOffset(Arguments args) {
+static MaybeObject* Runtime_DateDaylightSavingsOffset(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(OS::DaylightSavingsOffset(x));
+ return isolate->heap()->NumberFromDouble(OS::DaylightSavingsOffset(x));
}
-static MaybeObject* Runtime_GlobalReceiver(Arguments args) {
+static MaybeObject* Runtime_GlobalReceiver(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
Object* global = args[0];
- if (!global->IsJSGlobalObject()) return Heap::null_value();
+ if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
return JSGlobalObject::cast(global)->global_receiver();
}
-static MaybeObject* Runtime_ParseJson(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ParseJson(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT_EQ(1, args.length());
CONVERT_ARG_CHECKED(String, source, 0);
Handle<Object> result = JsonParser::Parse(source);
if (result.is_null()) {
// Syntax error or stack overflow in scanner.
- ASSERT(Top::has_pending_exception());
+ ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
return *result;
}
-static MaybeObject* Runtime_CompileString(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_CompileString(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT_EQ(1, args.length());
CONVERT_ARG_CHECKED(String, source, 0);
// Compile source string in the global context.
- Handle<Context> context(Top::context()->global_context());
+ Handle<Context> context(isolate->context()->global_context());
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
context,
true,
kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
- Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ NOT_TENURED);
return *fun;
}
-static ObjectPair CompileGlobalEval(Handle<String> source,
+static ObjectPair CompileGlobalEval(Isolate* isolate,
+ Handle<String> source,
Handle<Object> receiver,
StrictModeFlag strict_mode) {
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
- Handle<Context>(Top::context()),
- Top::context()->IsGlobalContext(),
+ Handle<Context>(isolate->context()),
+ isolate->context()->IsGlobalContext(),
strict_mode);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
- Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
- shared,
- Handle<Context>(Top::context()),
- NOT_TENURED);
+ Handle<JSFunction> compiled =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, Handle<Context>(isolate->context()), NOT_TENURED);
return MakePair(*compiled, *receiver);
}
-static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
+static ObjectPair Runtime_ResolvePossiblyDirectEval(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 4);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> callee = args.at<Object>(0);
Handle<Object> receiver; // Will be overwritten.
// Compute the calling context.
- Handle<Context> context = Handle<Context>(Top::context());
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
#ifdef DEBUG
- // Make sure Top::context() agrees with the old code that traversed
+ // Make sure Isolate::context() agrees with the old code that traversed
// the stack frames to compute the context.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = -1;
PropertyAttributes attributes = ABSENT;
while (true) {
- receiver = context->Lookup(Factory::eval_symbol(), FOLLOW_PROTOTYPE_CHAIN,
+ receiver = context->Lookup(isolate->factory()->eval_symbol(),
+ FOLLOW_PROTOTYPE_CHAIN,
&index, &attributes);
// Stop search when eval is found or when the global context is
// reached.
if (attributes != ABSENT || context->IsGlobalContext()) break;
if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()));
+ context = Handle<Context>(Context::cast(context->closure()->context()),
+ isolate);
} else {
- context = Handle<Context>(context->previous());
+ context = Handle<Context>(context->previous(), isolate);
}
}
// If eval could not be resolved, it has been deleted and we need to
// throw a reference error.
if (attributes == ABSENT) {
- Handle<Object> name = Factory::eval_symbol();
+ Handle<Object> name = isolate->factory()->eval_symbol();
Handle<Object> reference_error =
- Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return MakePair(Top::Throw(*reference_error), NULL);
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return MakePair(isolate->Throw(*reference_error), NULL);
}
if (!context->IsGlobalContext()) {
// with the given arguments. This is not necessarily the global eval.
if (receiver->IsContext()) {
context = Handle<Context>::cast(receiver);
- receiver = Handle<Object>(context->get(index));
+ receiver = Handle<Object>(context->get(index), isolate);
} else if (receiver->IsJSContextExtensionObject()) {
- receiver = Handle<JSObject>(Top::context()->global()->global_receiver());
+ receiver = Handle<JSObject>(
+ isolate->context()->global()->global_receiver(), isolate);
}
return MakePair(*callee, *receiver);
}
// 'eval' is bound in the global context, but it may have been overwritten.
// Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != Top::global_context()->global_eval_fun() ||
+ if (*callee != isolate->global_context()->global_eval_fun() ||
!args[1]->IsString()) {
- return MakePair(*callee, Top::context()->global()->global_receiver());
+ return MakePair(*callee,
+ isolate->context()->global()->global_receiver());
}
ASSERT(args[3]->IsSmi());
- return CompileGlobalEval(args.at<String>(1),
+ return CompileGlobalEval(isolate,
+ args.at<String>(1),
args.at<Object>(2),
static_cast<StrictModeFlag>(
Smi::cast(args[3])->value()));
}
-static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
+static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 4);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> callee = args.at<Object>(0);
// 'eval' is bound in the global context, but it may have been overwritten.
// Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != Top::global_context()->global_eval_fun() ||
+ if (*callee != isolate->global_context()->global_eval_fun() ||
!args[1]->IsString()) {
- return MakePair(*callee, Top::context()->global()->global_receiver());
+ return MakePair(*callee,
+ isolate->context()->global()->global_receiver());
}
ASSERT(args[3]->IsSmi());
- return CompileGlobalEval(args.at<String>(1),
+ return CompileGlobalEval(isolate,
+ args.at<String>(1),
args.at<Object>(2),
static_cast<StrictModeFlag>(
Smi::cast(args[3])->value()));
}
-static MaybeObject* Runtime_SetNewFunctionAttributes(Arguments args) {
+static MaybeObject* Runtime_SetNewFunctionAttributes(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
// This utility adjusts the property attributes for a newly created Function
// object ("new Function(...)") by changing the map.
// All it does is change the prototype property to enumerable
// as specified in ECMA-262, 15.3.5.2.
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, func, 0);
Handle<Map> map = func->shared()->strict_mode()
- ? Top::strict_mode_function_instance_map()
- : Top::function_instance_map();
+ ? isolate->strict_mode_function_instance_map()
+ : isolate->function_instance_map();
ASSERT(func->map()->instance_type() == map->instance_type());
ASSERT(func->map()->instance_size() == map->instance_size());
}
-static MaybeObject* Runtime_AllocateInNewSpace(Arguments args) {
+static MaybeObject* Runtime_AllocateInNewSpace(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
// Allocate a block of memory in NewSpace (filled with a filler).
// Use as fallback for allocation in generated code when NewSpace
// is full.
int size = size_smi->value();
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
- static const int kMinFreeNewSpaceAfterGC =
- Heap::InitialSemiSpaceSize() * 3/4;
+ Heap* heap = isolate->heap();
+ const int kMinFreeNewSpaceAfterGC = heap->InitialSemiSpaceSize() * 3/4;
RUNTIME_ASSERT(size <= kMinFreeNewSpaceAfterGC);
Object* allocation;
- { MaybeObject* maybe_allocation = Heap::new_space()->AllocateRaw(size);
+ { MaybeObject* maybe_allocation = heap->new_space()->AllocateRaw(size);
if (maybe_allocation->ToObject(&allocation)) {
- Heap::CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
+ heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
}
return maybe_allocation;
}
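
The CreateFillerObjectAt call above matters: if the raw allocation succeeds,
the block must immediately look like a valid heap object, so a GC triggered
before generated code initializes it never walks uninitialized memory. A
hedged sketch of what such a filler routine amounts to (simplified; the real
heap also distinguishes one- and two-pointer fillers):

// Hedged sketch: stamp a block of heap memory as a harmless dummy object.
void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map(one_pointer_filler_map());
  } else {
    // Larger blocks become byte arrays whose length encodes the size.
    filler->set_map(byte_array_map());
    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
  }
}
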
// Push an object onto an array of objects if it is not already in the
// array. Returns true if the element was pushed on the stack and
// false otherwise.
-static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
+static MaybeObject* Runtime_PushIfAbsent(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(JSObject, element, args[1]);
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
- if (elements->get(i) == element) return Heap::false_value();
+ if (elements->get(i) == element) return isolate->heap()->false_value();
}
Object* obj;
// Strict not needed. Used for cycle detection in Array join implementation.
kNonStrictMode);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
*/
class ArrayConcatVisitor {
public:
- ArrayConcatVisitor(Handle<FixedArray> storage,
+ ArrayConcatVisitor(Isolate* isolate,
+ Handle<FixedArray> storage,
bool fast_elements) :
- storage_(Handle<FixedArray>::cast(GlobalHandles::Create(*storage))),
+ isolate_(isolate),
+ storage_(Handle<FixedArray>::cast(
+ isolate->global_handles()->Create(*storage))),
index_offset_(0u),
fast_elements_(fast_elements) { }
ASSERT(!fast_elements_);
Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
Handle<NumberDictionary> result =
- Factory::DictionaryAtNumberPut(dict, index, elm);
+ isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
}
Handle<JSArray> ToArray() {
- Handle<JSArray> array = Factory::NewJSArray(0);
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
Handle<Object> length =
- Factory::NewNumber(static_cast<double>(index_offset_));
+ isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map;
if (fast_elements_) {
- map = Factory::GetFastElementsMap(Handle<Map>(array->map()));
+ map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
} else {
- map = Factory::GetSlowElementsMap(Handle<Map>(array->map()));
+ map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
}
array->set_map(*map);
array->set_length(*length);
ASSERT(fast_elements_);
Handle<FixedArray> current_storage(*storage_);
Handle<NumberDictionary> slow_storage(
- Factory::NewNumberDictionary(current_storage->length()));
+ isolate_->factory()->NewNumberDictionary(current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
for (uint32_t i = 0; i < current_length; i++) {
HandleScope loop_scope;
Handle<Object> element(current_storage->get(i));
if (!element->IsTheHole()) {
Handle<NumberDictionary> new_storage =
- Factory::DictionaryAtNumberPut(slow_storage, i, element);
+ isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
}
inline void clear_storage() {
- GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
+ isolate_->global_handles()->Destroy(
+ Handle<Object>::cast(storage_).location());
}
inline void set_storage(FixedArray* storage) {
- storage_ = Handle<FixedArray>::cast(GlobalHandles::Create(storage));
+ storage_ = Handle<FixedArray>::cast(
+ isolate_->global_handles()->Create(storage));
}
+ Isolate* isolate_;
Handle<FixedArray> storage_; // Always a global handle.
// Index after last seen index. Always less than or equal to
// JSObject::kMaxElementCount.
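
The visitor keeps storage_ in a global handle because it outlives the local
HandleScopes opened while visiting individual elements, so Create and Destroy
must be paired by hand. A minimal usage sketch of that lifecycle under the
per-isolate GlobalHandles API used above (the helper names are hypothetical):

// Hedged sketch: manual lifetime management for a global handle.
static Handle<FixedArray> MakeGlobal(Isolate* isolate, FixedArray* array) {
  // Unlike scope-bound local handles, this handle survives until Destroy.
  return Handle<FixedArray>::cast(isolate->global_handles()->Create(array));
}

static void ReleaseGlobal(Isolate* isolate, Handle<FixedArray> storage) {
  isolate->global_handles()->Destroy(
      Handle<Object>::cast(storage).location());
}
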
template<class ExternalArrayClass, class ElementType>
-static void IterateExternalArrayElements(Handle<JSObject> receiver,
+static void IterateExternalArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
bool elements_are_ints,
bool elements_are_guaranteed_smis,
ArrayConcatVisitor* visitor) {
visitor->visit(j, e);
} else {
Handle<Object> e =
- Factory::NewNumber(static_cast<ElementType>(val));
+ isolate->factory()->NewNumber(static_cast<ElementType>(val));
visitor->visit(j, e);
}
}
}
} else {
for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
- Handle<Object> e = Factory::NewNumber(array->get(j));
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewNumber(array->get(j));
visitor->visit(j, e);
}
}
* length.
* Returns false if any access threw an exception, otherwise true.
*/
-static bool IterateElements(Handle<JSArray> receiver,
+static bool IterateElements(Isolate* isolate,
+ Handle<JSArray> receiver,
ArrayConcatVisitor* visitor) {
uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
int fast_length = static_cast<int>(length);
ASSERT(fast_length <= elements->length());
for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope;
- Handle<Object> element_value(elements->get(j));
+ HandleScope loop_scope(isolate);
+ Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
} else if (receiver->HasElement(j)) {
}
case JSObject::EXTERNAL_BYTE_ELEMENTS: {
IterateExternalArrayElements<ExternalByteArray, int8_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_SHORT_ELEMENTS: {
IterateExternalArrayElements<ExternalShortArray, int16_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_INT_ELEMENTS: {
IterateExternalArrayElements<ExternalIntArray, int32_t>(
- receiver, true, false, visitor);
+ isolate, receiver, true, false, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
- receiver, true, false, visitor);
+ isolate, receiver, true, false, visitor);
break;
}
case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
IterateExternalArrayElements<ExternalFloatArray, float>(
- receiver, false, false, visitor);
+ isolate, receiver, false, false, visitor);
break;
}
default:
* TODO(581): Fix non-compliance for very large concatenations and update to
* following the ECMAScript 5 specification.
*/
-static MaybeObject* Runtime_ArrayConcat(Arguments args) {
+static MaybeObject* Runtime_ArrayConcat(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
CONVERT_ARG_CHECKED(JSArray, arguments, 0);
int argument_count = static_cast<int>(arguments->length()->Number());
if (fast_case) {
// The backing storage array must have non-existing elements to
// preserve holes across concat operations.
- storage = Factory::NewFixedArrayWithHoles(estimate_result_length);
+ storage = isolate->factory()->NewFixedArrayWithHoles(
+ estimate_result_length);
} else {
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
- Factory::NewNumberDictionary(at_least_space_for));
+ isolate->factory()->NewNumberDictionary(at_least_space_for));
}
- ArrayConcatVisitor visitor(storage, fast_case);
+ ArrayConcatVisitor visitor(isolate, storage, fast_case);
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj(elements->get(i));
if (obj->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(obj);
- if (!IterateElements(array, &visitor)) {
+ if (!IterateElements(isolate, array, &visitor)) {
return Failure::Exception();
}
} else {
// This will not allocate (flatten the string), but it may run
// very slowly for very deeply nested ConsStrings. For debugging use only.
-static MaybeObject* Runtime_GlobalPrint(Arguments args) {
+static MaybeObject* Runtime_GlobalPrint(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
// and are followed by non-existing element. Does not change the length
// property.
// Returns the number of non-undefined elements collected.
-static MaybeObject* Runtime_RemoveArrayHoles(Arguments args) {
+static MaybeObject* Runtime_RemoveArrayHoles(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSObject, object, args[0]);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
// Move contents of argument 0 (an array) to argument 1 (an array)
-static MaybeObject* Runtime_MoveArrayContents(Arguments args) {
+static MaybeObject* Runtime_MoveArrayContents(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, from, args[0]);
CONVERT_CHECKED(JSArray, to, args[1]);
HeapObject* new_elements = from->elements();
MaybeObject* maybe_new_map;
- if (new_elements->map() == Heap::fixed_array_map() ||
- new_elements->map() == Heap::fixed_cow_array_map()) {
+ if (new_elements->map() == isolate->heap()->fixed_array_map() ||
+ new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
maybe_new_map = to->map()->GetFastElementsMap();
} else {
maybe_new_map = to->map()->GetSlowElementsMap();
// How many elements does this object/array have?
-static MaybeObject* Runtime_EstimateNumberOfElements(Arguments args) {
+static MaybeObject* Runtime_EstimateNumberOfElements(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, object, args[0]);
HeapObject* elements = object->elements();
}
-static MaybeObject* Runtime_SwapElements(Arguments args) {
- HandleScope handle_scope;
+static MaybeObject* Runtime_SwapElements(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope handle_scope(isolate);
ASSERT_EQ(3, args.length());
uint32_t index1, index2;
if (!key1->ToArrayIndex(&index1)
|| !key2->ToArrayIndex(&index2)) {
- return Top::ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
Handle<Object> tmp1 = GetElement(jsobject, index1);
- RETURN_IF_EMPTY_HANDLE(tmp1);
+ RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
Handle<Object> tmp2 = GetElement(jsobject, index2);
- RETURN_IF_EMPTY_HANDLE(tmp2);
+ RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
- RETURN_IF_EMPTY_HANDLE(SetElement(jsobject, index1, tmp2, kStrictMode));
- RETURN_IF_EMPTY_HANDLE(SetElement(jsobject, index2, tmp1, kStrictMode));
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetElement(jsobject, index1, tmp2, kStrictMode));
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetElement(jsobject, index2, tmp1, kStrictMode));
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// intervals (pair of a negative integer (-start-1) followed by a
// positive (length)) or undefined values.
// Intervals can span over some keys that are not in the object.
-static MaybeObject* Runtime_GetArrayKeys(Arguments args) {
+static MaybeObject* Runtime_GetArrayKeys(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSObject, array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
if (array->elements()->IsDictionary()) {
keys->set_undefined(i);
}
}
- return *Factory::NewJSArrayWithElements(keys);
+ return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
ASSERT(array->HasFastElements());
- Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
+ Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
uint32_t actual_length =
static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
- Factory::NewNumber(static_cast<double>(min_length));
+ isolate->factory()->NewNumber(static_cast<double>(min_length));
single_interval->set(1, *length_object);
- return *Factory::NewJSArrayWithElements(single_interval);
+ return *isolate->factory()->NewJSArrayWithElements(single_interval);
}
}
// to the way accessors are implemented, it is set for both the getter
// and setter on the first call to DefineAccessor and ignored on
// subsequent calls.
-static MaybeObject* Runtime_DefineAccessor(Arguments args) {
+static MaybeObject* Runtime_DefineAccessor(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
// Compute attributes.
PropertyAttributes attributes = NONE;
}
-static MaybeObject* Runtime_LookupAccessor(Arguments args) {
+static MaybeObject* Runtime_LookupAccessor(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSObject, obj, args[0]);
CONVERT_CHECKED(String, name, args[1]);
#ifdef ENABLE_DEBUGGER_SUPPORT
-static MaybeObject* Runtime_DebugBreak(Arguments args) {
+static MaybeObject* Runtime_DebugBreak(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 0);
return Execution::DebugBreakHelper();
}
// args[0]: debug event listener function to set or null or undefined for
// clearing the event listener function
// args[1]: object supplied during callback
-static MaybeObject* Runtime_SetDebugEventListener(Arguments args) {
+static MaybeObject* Runtime_SetDebugEventListener(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsJSFunction() ||
args[0]->IsUndefined() ||
args[0]->IsNull());
Handle<Object> callback = args.at<Object>(0);
Handle<Object> data = args.at<Object>(1);
- Debugger::SetEventListener(callback, data);
+ isolate->debugger()->SetEventListener(callback, data);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_Break(Arguments args) {
+static MaybeObject* Runtime_Break(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 0);
- StackGuard::DebugBreak();
- return Heap::undefined_value();
+ isolate->stack_guard()->DebugBreak();
+ return isolate->heap()->undefined_value();
}
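// The break request is now flagged on this isolate's own StackGuard rather
// than a process-wide one, so a break scheduled in one isolate cannot
// interrupt JavaScript running in another.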
-static MaybeObject* DebugLookupResultValue(Object* receiver, String* name,
+static MaybeObject* DebugLookupResultValue(Heap* heap,
+ Object* receiver,
+ String* name,
LookupResult* result,
bool* caught_exception) {
Object* value;
case NORMAL:
value = result->holder()->GetNormalizedProperty(result);
if (value->IsTheHole()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
return value;
case FIELD:
JSObject::cast(
result->holder())->FastPropertyAt(result->GetFieldIndex());
if (value->IsTheHole()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
return value;
case CONSTANT_FUNCTION:
if (!maybe_value->ToObject(&value)) {
if (maybe_value->IsRetryAfterGC()) return maybe_value;
ASSERT(maybe_value->IsException());
- maybe_value = Top::pending_exception();
- Top::clear_pending_exception();
+ maybe_value = heap->isolate()->pending_exception();
+ heap->isolate()->clear_pending_exception();
if (caught_exception != NULL) {
*caught_exception = true;
}
}
return value;
} else {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
}
case INTERCEPTOR:
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
- return Heap::undefined_value();
+ return heap->undefined_value();
default:
UNREACHABLE();
}
UNREACHABLE();
- return Heap::undefined_value();
+ return heap->undefined_value();
}
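// Threading a Heap* instead of calling Heap:: statics still leaves a route
// back to the rest of the per-isolate state: each Heap keeps a pointer to
// its owning Isolate, which is what the exception handling above relies on:
//
//   Isolate* isolate = heap->isolate();
//   maybe_value = isolate->pending_exception();
//   isolate->clear_pending_exception();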
// 4: Setter function if defined
// Items 2-4 are only filled if the property has either a getter or a setter
// defined through __defineGetter__ and/or __defineSetter__.
-static MaybeObject* Runtime_DebugGetPropertyDetails(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DebugGetPropertyDetails(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// into the embedding application can occur, and the embedding application
// could have the assumption that its own global context is the current
// context and not some internal debugger context.
- SaveContext save;
- if (Debug::InDebugger()) {
- Top::set_context(*Debug::debugger_entry()->GetContext());
+ SaveContext save(isolate);
+ if (isolate->debug()->InDebugger()) {
+ isolate->set_context(*isolate->debug()->debugger_entry()->GetContext());
}
// Skip the global proxy as it has no properties and always delegates to the
// if so.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<FixedArray> details = Factory::NewFixedArray(2);
+ Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
Object* element_or_char;
{ MaybeObject* maybe_element_or_char =
- Runtime::GetElementOrCharAt(obj, index);
+ Runtime::GetElementOrCharAt(isolate, obj, index);
if (!maybe_element_or_char->ToObject(&element_or_char)) {
return maybe_element_or_char;
}
}
details->set(0, element_or_char);
details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
// Find the number of objects making up this.
PropertyType result_type = result.type();
Handle<Object> result_callback_obj;
if (result_type == CALLBACKS) {
- result_callback_obj = Handle<Object>(result.GetCallbackObject());
+ result_callback_obj = Handle<Object>(result.GetCallbackObject(),
+ isolate);
}
Smi* property_details = result.GetPropertyDetails().AsSmi();
// DebugLookupResultValue can cause GC so details from LookupResult needs
bool caught_exception = false;
Object* raw_value;
{ MaybeObject* maybe_raw_value =
- DebugLookupResultValue(*obj, *name, &result, &caught_exception);
+ DebugLookupResultValue(isolate->heap(), *obj, *name,
+ &result, &caught_exception);
if (!maybe_raw_value->ToObject(&raw_value)) return maybe_raw_value;
}
- Handle<Object> value(raw_value);
+ Handle<Object> value(raw_value, isolate);
// If the callback object is a fixed array then it contains JavaScript
// getter and/or setter.
bool hasJavaScriptAccessors = result_type == CALLBACKS &&
result_callback_obj->IsFixedArray();
Handle<FixedArray> details =
- Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
+ isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
details->set(0, *value);
details->set(1, property_details);
if (hasJavaScriptAccessors) {
details->set(2,
- caught_exception ? Heap::true_value()
- : Heap::false_value());
+ caught_exception ? isolate->heap()->true_value()
+ : isolate->heap()->false_value());
details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
}
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
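// For reference, the details array built above packs [value,
// property_details] and, only when JavaScript accessors are involved,
// appends [caught_exception, getter, setter], matching the 2- versus
// 5-element FixedArray allocation.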
-static MaybeObject* Runtime_DebugGetProperty(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DebugGetProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
LookupResult result;
obj->Lookup(*name, &result);
if (result.IsProperty()) {
- return DebugLookupResultValue(*obj, *name, &result, NULL);
+ return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Return the property type calculated from the property details.
// args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyTypeFromDetails(Arguments args) {
+static MaybeObject* Runtime_DebugPropertyTypeFromDetails(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(Smi, details, args[0]);
PropertyType type = PropertyDetails(details).type();
// Return the property attribute calculated from the property details.
// args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyAttributesFromDetails(Arguments args) {
+static MaybeObject* Runtime_DebugPropertyAttributesFromDetails(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(Smi, details, args[0]);
PropertyAttributes attributes = PropertyDetails(details).attributes();
// Return the property insertion index calculated from the property details.
// args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyIndexFromDetails(Arguments args) {
+static MaybeObject* Runtime_DebugPropertyIndexFromDetails(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(Smi, details, args[0]);
int index = PropertyDetails(details).index();
// Return property value from named interceptor.
// args[0]: object
// args[1]: property name
-static MaybeObject* Runtime_DebugNamedInterceptorPropertyValue(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DebugNamedInterceptorPropertyValue(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasNamedInterceptor());
// args[0]: object
// args[1]: index
static MaybeObject* Runtime_DebugIndexedInterceptorElementValue(
- Arguments args) {
- HandleScope scope;
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasIndexedInterceptor());
}
-static MaybeObject* Runtime_CheckExecutionState(Arguments args) {
+static MaybeObject* Runtime_CheckExecutionState(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() >= 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
// Check that the break id is valid.
- if (Debug::break_id() == 0 || break_id != Debug::break_id()) {
- return Top::Throw(Heap::illegal_execution_state_symbol());
+ if (isolate->debug()->break_id() == 0 ||
+ break_id != isolate->debug()->break_id()) {
+ return isolate->Throw(
+ isolate->heap()->illegal_execution_state_symbol());
}
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
-static MaybeObject* Runtime_GetFrameCount(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetFrameCount(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Check arguments.
Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_result = Runtime_CheckExecutionState(args, isolate);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Count all frames which are relevant to debugging stack trace.
int n = 0;
- StackFrame::Id id = Debug::break_frame_id();
+ StackFrame::Id id = isolate->debug()->break_frame_id();
if (id == StackFrame::NO_ID) {
// If there is no JavaScript stack frame, the count is 0.
return Smi::FromInt(0);
// Arguments name, value
// Locals name, value
// Return value if any
-static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetFrameDetails(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+ Heap* heap = isolate->heap();
// Find the relevant frame with the requested index.
- StackFrame::Id id = Debug::break_frame_id();
+ StackFrame::Id id = isolate->debug()->break_frame_id();
if (id == StackFrame::NO_ID) {
// If there are no JavaScript stack frames, return undefined.
- return Heap::undefined_value();
+ return heap->undefined_value();
}
int count = 0;
JavaScriptFrameIterator it(id);
if (count == index) break;
count++;
}
- if (it.done()) return Heap::undefined_value();
+ if (it.done()) return heap->undefined_value();
bool is_optimized_frame =
- it.frame()->code()->kind() == Code::OPTIMIZED_FUNCTION;
+ it.frame()->LookupCode(isolate)->kind() == Code::OPTIMIZED_FUNCTION;
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = Top::save_context();
+ SaveContext* save = isolate->save_context();
while (save != NULL && !save->below(it.frame())) {
save = save->prev();
}
ASSERT(save != NULL);
// Get the frame id.
- Handle<Object> frame_id(WrapFrameId(it.frame()->id()));
+ Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
// Find source position.
- int position = it.frame()->code()->SourcePosition(it.frame()->pc());
+ int position =
+ it.frame()->LookupCode(isolate)->SourcePosition(it.frame()->pc());
// Check for constructor frame.
bool constructor = it.frame()->IsConstructor();
// TODO(1240907): Hide compiler-introduced stack variables
// (e.g. .result)? For users of the debugger, they will probably be
// confusing.
- Handle<FixedArray> locals = Factory::NewFixedArray(info.NumberOfLocals() * 2);
+ Handle<FixedArray> locals =
+ isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
// Fill in the names of the locals.
for (int i = 0; i < info.NumberOfLocals(); i++) {
//
// TODO(1140): We should be able to get the correct values
// for locals in optimized frames.
- locals->set(i * 2 + 1, Heap::undefined_value());
+ locals->set(i * 2 + 1, isolate->heap()->undefined_value());
} else if (i < info.number_of_stack_slots()) {
// Get the value from the stack.
locals->set(i * 2 + 1, it.frame()->GetExpression(i));
// frame or if the frame is optimized it cannot be at a return.
bool at_return = false;
if (!is_optimized_frame && index == 0) {
- at_return = Debug::IsBreakAtReturn(it.frame());
+ at_return = isolate->debug()->IsBreakAtReturn(it.frame());
}
// If positioned just before return, find the value to be returned and add it
// to the frame information.
- Handle<Object> return_value = Factory::undefined_value();
+ Handle<Object> return_value = isolate->factory()->undefined_value();
if (at_return) {
StackFrameIterator it2;
Address internal_frame_sp = NULL;
// entering the debug break exit frame.
if (internal_frame_sp != NULL) {
return_value =
- Handle<Object>(Memory::Object_at(internal_frame_sp));
+ Handle<Object>(Memory::Object_at(internal_frame_sp),
+ isolate);
break;
}
}
int details_size = kFrameDetailsFirstDynamicIndex +
2 * (argument_count + info.NumberOfLocals()) +
(at_return ? 1 : 0);
- Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+ Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
// Add the frame id.
details->set(kFrameDetailsFrameIdIndex, *frame_id);
if (position != RelocInfo::kNoPosition) {
details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
} else {
- details->set(kFrameDetailsSourcePositionIndex, Heap::undefined_value());
+ details->set(kFrameDetailsSourcePositionIndex, heap->undefined_value());
}
// Add the constructor information.
- details->set(kFrameDetailsConstructCallIndex, Heap::ToBoolean(constructor));
+ details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(constructor));
// Add the at return information.
- details->set(kFrameDetailsAtReturnIndex, Heap::ToBoolean(at_return));
+ details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
// Add information on whether this frame is invoked in the debugger context.
details->set(kFrameDetailsDebuggerFrameIndex,
- Heap::ToBoolean(*save->context() == *Debug::debug_context()));
+ heap->ToBoolean(*save->context() ==
+ *isolate->debug()->debug_context()));
// Fill the dynamic part.
int details_index = kFrameDetailsFirstDynamicIndex;
if (i < info.number_of_parameters()) {
details->set(details_index++, *info.parameter_name(i));
} else {
- details->set(details_index++, Heap::undefined_value());
+ details->set(details_index++, heap->undefined_value());
}
// Parameter value. If we are inspecting an optimized frame, use
(i < it.frame()->ComputeParametersCount())) {
details->set(details_index++, it.frame()->GetParameter(i));
} else {
- details->set(details_index++, Heap::undefined_value());
+ details->set(details_index++, heap->undefined_value());
}
}
// Add the receiver (same as in function frame).
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
- Handle<Object> receiver(it.frame()->receiver());
+ Handle<Object> receiver(it.frame()->receiver(), isolate);
if (!receiver->IsJSObject()) {
// If the receiver is NOT a JSObject we have hit an optimization
// where a value object is not converted into a wrapped JS objects.
it.Advance();
Handle<Context> calling_frames_global_context(
Context::cast(Context::cast(it.frame()->context())->global_context()));
- receiver = Factory::ToObject(receiver, calling_frames_global_context);
+ receiver =
+ isolate->factory()->ToObject(receiver, calling_frames_global_context);
}
details->set(kFrameDetailsReceiverIndex, *receiver);
ASSERT_EQ(details_size, details_index);
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
// Copy all the context locals into an object used to materialize a scope.
static bool CopyContextLocalsToScopeObject(
+ Isolate* isolate,
Handle<SerializedScopeInfo> serialized_scope_info,
ScopeInfo<>& scope_info,
Handle<Context> context,
*scope_info.context_slot_name(i), NULL);
// Don't include the arguments shadow (.arguments) context variable.
- if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
+ if (*scope_info.context_slot_name(i) !=
+ isolate->heap()->arguments_shadow_symbol()) {
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(scope_object,
scope_info.context_slot_name(i),
- Handle<Object>(context->get(context_index)),
+ Handle<Object>(context->get(context_index), isolate),
NONE,
kNonStrictMode),
false);
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
+static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
+ JavaScriptFrame* frame) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
// Allocate and initialize a JSObject with all the arguments, stack locals,
// heap locals and extension properties of the debugged function.
- Handle<JSObject> local_scope = Factory::NewJSObject(Top::object_function());
+ Handle<JSObject> local_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
// First fill all parameters.
for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(local_scope,
scope_info.parameter_name(i),
- Handle<Object>(frame->GetParameter(i)),
+ Handle<Object>(frame->GetParameter(i), isolate),
NONE,
kNonStrictMode),
Handle<JSObject>());
// Second fill all stack locals.
for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(local_scope,
scope_info.stack_slot_name(i),
- Handle<Object>(frame->GetExpression(i)),
+ Handle<Object>(frame->GetExpression(i), isolate),
NONE,
kNonStrictMode),
Handle<JSObject>());
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->fcontext());
- if (!CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ if (!CopyContextLocalsToScopeObject(isolate,
+ serialized_scope_info, scope_info,
function_context, local_scope)) {
return Handle<JSObject>();
}
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(local_scope,
key,
GetProperty(ext, key),
// Create a plain JSObject which materializes the closure content for the
// context.
-static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
+static Handle<JSObject> MaterializeClosure(Isolate* isolate,
+ Handle<Context> context) {
ASSERT(context->is_function_context());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
// Allocate and initialize a JSObject with all the content of this function
// closure.
- Handle<JSObject> closure_scope = Factory::NewJSObject(Top::object_function());
+ Handle<JSObject> closure_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
// Check whether the arguments shadow object exists.
int arguments_shadow_index =
- shared->scope_info()->ContextSlotIndex(Heap::arguments_shadow_symbol(),
- NULL);
+ shared->scope_info()->ContextSlotIndex(
+ isolate->heap()->arguments_shadow_symbol(), NULL);
if (arguments_shadow_index >= 0) {
// In this case all the arguments are available in the arguments shadow
// object.
// We don't expect exception-throwing getters on the arguments shadow.
Object* element = arguments_shadow->GetElement(i)->ToObjectUnchecked();
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(closure_scope,
scope_info.parameter_name(i),
- Handle<Object>(element),
+ Handle<Object>(element, isolate),
NONE,
kNonStrictMode),
Handle<JSObject>());
}
// Fill all context locals to the context extension.
- if (!CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ if (!CopyContextLocalsToScopeObject(isolate,
+ serialized_scope_info, scope_info,
context, closure_scope)) {
return Handle<JSObject>();
}
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(closure_scope,
key,
GetProperty(ext, key),
ScopeTypeCatch
};
- explicit ScopeIterator(JavaScriptFrame* frame)
- : frame_(frame),
+ ScopeIterator(Isolate* isolate, JavaScriptFrame* frame)
+ : isolate_(isolate),
+ frame_(frame),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
local_done_(false),
// Checking for the existence of .result seems fragile, but the scope info
// saved with the code object does not otherwise have that information.
int index = function_->shared()->scope_info()->
- StackSlotIndex(Heap::result_symbol());
+ StackSlotIndex(isolate_->heap()->result_symbol());
at_local_ = index < 0;
} else if (context_->is_function_context()) {
at_local_ = true;
break;
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
- return MaterializeLocalScope(frame_);
+ return MaterializeLocalScope(isolate_, frame_);
break;
case ScopeIterator::ScopeTypeWith:
case ScopeIterator::ScopeTypeCatch:
break;
case ScopeIterator::ScopeTypeClosure:
// Materialize the content of the closure scope into a JSObject.
- return MaterializeClosure(CurrentContext());
+ return MaterializeClosure(isolate_, CurrentContext());
break;
}
UNREACHABLE();
#endif
private:
+ Isolate* isolate_;
JavaScriptFrame* frame_;
Handle<JSFunction> function_;
Handle<Context> context_;
};
-static MaybeObject* Runtime_GetScopeCount(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetScopeCount(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
// Count the visible scopes.
int n = 0;
- for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
n++;
}
// The array returned contains the following information:
// 0: Scope type
// 1: Scope object
-static MaybeObject* Runtime_GetScopeDetails(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetScopeDetails(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
// Find the requested scope.
int n = 0;
- ScopeIterator it(frame);
+ ScopeIterator it(isolate, frame);
for (; !it.Done() && n < index; it.Next()) {
n++;
}
if (it.Done()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Calculate the size of the result.
int details_size = kScopeDetailsSize;
- Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+ Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
Handle<JSObject> scope_object = it.ScopeObject();
- RETURN_IF_EMPTY_HANDLE(scope_object);
+ RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
details->set(kScopeDetailsObjectIndex, *scope_object);
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
-static MaybeObject* Runtime_DebugPrintScopes(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DebugPrintScopes(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
#ifdef DEBUG
// Print the scopes for the top frame.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
it.DebugPrint();
}
#endif
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_GetThreadCount(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetThreadCount(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Check arguments.
Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_result = Runtime_CheckExecutionState(args, isolate);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Count all archived V8 threads.
int n = 0;
- for (ThreadState* thread = ThreadState::FirstInUse();
+ for (ThreadState* thread =
+ isolate->thread_manager()->FirstThreadStateInUse();
thread != NULL;
thread = thread->Next()) {
n++;
// The array returned contains the following information:
// 0: Is current thread?
// 1: Thread id
-static MaybeObject* Runtime_GetThreadDetails(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetThreadDetails(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
// Allocate array for result.
- Handle<FixedArray> details = Factory::NewFixedArray(kThreadDetailsSize);
+ Handle<FixedArray> details =
+ isolate->factory()->NewFixedArray(kThreadDetailsSize);
// Thread index 0 is current thread.
if (index == 0) {
// Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex, Heap::true_value());
+ details->set(kThreadDetailsCurrentThreadIndex,
+ isolate->heap()->true_value());
details->set(kThreadDetailsThreadIdIndex,
- Smi::FromInt(ThreadManager::CurrentId()));
+ Smi::FromInt(
+ isolate->thread_manager()->CurrentId()));
} else {
// Find the thread with the requested index.
int n = 1;
- ThreadState* thread = ThreadState::FirstInUse();
+ ThreadState* thread =
+ isolate->thread_manager()->FirstThreadStateInUse();
while (index != n && thread != NULL) {
thread = thread->Next();
n++;
}
if (thread == NULL) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex, Heap::false_value());
+ details->set(kThreadDetailsCurrentThreadIndex,
+ isolate->heap()->false_value());
details->set(kThreadDetailsThreadIdIndex, Smi::FromInt(thread->id()));
}
// Convert to JS array and return.
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
// Sets the disable break state
// args[0]: disable break state
-static MaybeObject* Runtime_SetDisableBreak(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_SetDisableBreak(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
- Debug::set_disable_break(disable_break);
- return Heap::undefined_value();
+ isolate->debug()->set_disable_break(disable_break);
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_GetBreakLocations(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetBreakLocations(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points
Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
- if (break_locations->IsUndefined()) return Heap::undefined_value();
+ if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
// Return array as JS array
- return *Factory::NewJSArrayWithElements(
+ return *isolate->factory()->NewJSArrayWithElements(
Handle<FixedArray>::cast(break_locations));
}
// args[0]: function
// args[1]: number: break source position (within the function source)
// args[2]: number: break point object
-static MaybeObject* Runtime_SetFunctionBreakPoint(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_SetFunctionBreakPoint(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
Handle<SharedFunctionInfo> shared(fun->shared());
Handle<Object> break_point_object_arg = args.at<Object>(2);
// Set break point.
- Debug::SetBreakPoint(shared, break_point_object_arg, &source_position);
+ isolate->debug()->SetBreakPoint(shared, break_point_object_arg,
+ &source_position);
return Smi::FromInt(source_position);
}
-Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
+Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
+ Handle<Script> script,
int position) {
// Iterate the heap looking for SharedFunctionInfo generated from the
// script. The inner most SharedFunctionInfo containing the source position
}
if (target.is_null()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// If the candidate found is compiled we are done. NOTE: when lazy
// args[0]: script to set break point in
// args[1]: number: break source position (within the script source)
// args[2]: number: break point object
-static MaybeObject* Runtime_SetScriptBreakPoint(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_SetScriptBreakPoint(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
Handle<Script> script(Script::cast(wrapper->value()));
Object* result = Runtime::FindSharedFunctionInfoInScript(
- script, source_position);
+ isolate, script, source_position);
if (!result->IsUndefined()) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
// Find position within function. The script position might be before the
} else {
position = source_position - shared->start_position();
}
- Debug::SetBreakPoint(shared, break_point_object_arg, &position);
+ isolate->debug()->SetBreakPoint(shared, break_point_object_arg, &position);
position += shared->start_position();
return Smi::FromInt(position);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Clear a break point
// args[0]: number: break point object
-static MaybeObject* Runtime_ClearBreakPoint(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ClearBreakPoint(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> break_point_object_arg = args.at<Object>(0);
// Clear break point.
- Debug::ClearBreakPoint(break_point_object_arg);
+ isolate->debug()->ClearBreakPoint(break_point_object_arg);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Change the state of break on exceptions.
// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
// args[1]: Boolean indicating on/off.
-static MaybeObject* Runtime_ChangeBreakOnException(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ChangeBreakOnException(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsNumber());
CONVERT_BOOLEAN_CHECKED(enable, args[1]);
ExceptionBreakType type =
static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
// Update break point state.
- Debug::ChangeBreakOnException(type, enable);
- return Heap::undefined_value();
+ isolate->debug()->ChangeBreakOnException(type, enable);
+ return isolate->heap()->undefined_value();
}
// Returns the state of break on exceptions
// args[0]: boolean indicating uncaught exceptions
-static MaybeObject* Runtime_IsBreakOnException(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_IsBreakOnException(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(args[0]->IsNumber());
ExceptionBreakType type =
static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
- bool result = Debug::IsBreakOnException(type);
+ bool result = isolate->debug()->IsBreakOnException(type);
return Smi::FromInt(result);
}
// args[1]: step action from the enumeration StepAction
// args[2]: number of times to perform the step, for step out it is the number
// of frames to step down.
-static MaybeObject* Runtime_PrepareStep(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_PrepareStep(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
if (!maybe_check->ToObject(&check)) return maybe_check;
}
if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
// Get the step action and check validity.
step_action != StepOut &&
step_action != StepInMin &&
step_action != StepMin) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
// Get the number of steps.
int step_count = NumberToInt32(args[2]);
if (step_count < 1) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
// Clear all current stepping setup.
- Debug::ClearStepping();
+ isolate->debug()->ClearStepping();
// Prepare step.
- Debug::PrepareStep(static_cast<StepAction>(step_action), step_count);
- return Heap::undefined_value();
+ isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
+ step_count);
+ return isolate->heap()->undefined_value();
}
// Clear all stepping set by PrepareStep.
-static MaybeObject* Runtime_ClearStepping(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_ClearStepping(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
- Debug::ClearStepping();
- return Heap::undefined_value();
+ isolate->debug()->ClearStepping();
+ return isolate->heap()->undefined_value();
}
Handle<Context> previous(context_chain->previous());
Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
Handle<Context> context = CopyWithContextChain(function_context, previous);
- return Factory::NewWithContext(context,
- extension,
- context_chain->IsCatchContext());
+ return context->GetIsolate()->factory()->NewWithContext(
+ context, extension, context_chain->IsCatchContext());
}
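// CopyWithContextChain has no Isolate* parameter, so it recovers the isolate
// from a heap object it already holds: any heap object can report the
// isolate that owns it, e.g.
//
//   Factory* factory = context->GetIsolate()->factory();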
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
-static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
+static Handle<Object> GetArgumentsObject(Isolate* isolate,
+ JavaScriptFrame* frame,
Handle<JSFunction> function,
Handle<SerializedScopeInfo> scope_info,
const ScopeInfo<>* sinfo,
// does not support eval) then create an 'arguments' object.
int index;
if (sinfo->number_of_stack_slots() > 0) {
- index = scope_info->StackSlotIndex(Heap::arguments_symbol());
+ index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
if (index != -1) {
- return Handle<Object>(frame->GetExpression(index));
+ return Handle<Object>(frame->GetExpression(index), isolate);
}
}
if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
- index = scope_info->ContextSlotIndex(Heap::arguments_symbol(), NULL);
+ index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
+ NULL);
if (index != -1) {
- return Handle<Object>(function_context->get(index));
+ return Handle<Object>(function_context->get(index), isolate);
}
}
const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
- Handle<FixedArray> array = Factory::NewFixedArray(length);
+ Handle<JSObject> arguments =
+ isolate->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
AssertNoAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
}
+static const char kSourceStr[] =
+ "(function(arguments,__source__){return eval(__source__);})";
+
+
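// Hoisting the eval-wrapper source to file scope also lets its length be
// computed at compile time as sizeof(kSourceStr) - 1 (sizeof counts the
// trailing '\0'), replacing the StrLength() call on the old function-local
// literal further below.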
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. This is accomplished by creating a new context which in its
// extension part has all the parameters and locals of the function on the
// stack frame presenting the same view of the values of parameters and
// local variables as if the piece of JavaScript was evaluated at the point
// where the function on the stack frame is currently stopped.
-static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DebugEvaluate(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 5);
Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args,
+ isolate);
if (!maybe_check_result->ToObject(&check_result)) {
return maybe_check_result;
}
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = Top::save_context();
+ SaveContext* save = isolate->save_context();
while (save != NULL && !save->below(frame)) {
save = save->prev();
}
ASSERT(save != NULL);
- SaveContext savex;
- Top::set_context(*(save->context()));
+ SaveContext savex(isolate);
+ isolate->set_context(*(save->context()));
// Create the (empty) function replacing the function on the stack frame for
// the purpose of evaluating in the context created below. It is important
// in Context::Lookup, where context slots for parameters and local variables
// are looked at before the extension object.
Handle<JSFunction> go_between =
- Factory::NewFunction(Factory::empty_string(), Factory::undefined_value());
+ isolate->factory()->NewFunction(isolate->factory()->empty_string(),
+ isolate->factory()->undefined_value());
go_between->set_context(function->context());
#ifdef DEBUG
ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
#endif
// Materialize the content of the local scope into a JSObject.
- Handle<JSObject> local_scope = MaterializeLocalScope(frame);
- RETURN_IF_EMPTY_HANDLE(local_scope);
+ Handle<JSObject> local_scope = MaterializeLocalScope(isolate, frame);
+ RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
// Allocate a new context for the debug evaluation and set the extension
// object built above.
Handle<Context> context =
- Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+ isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
+ go_between);
context->set_extension(*local_scope);
// Copy any with contexts present and chain them in front of this context.
Handle<Context> frame_context(Context::cast(frame->context()));
context = CopyWithContextChain(frame_context, context);
if (additional_context->IsJSObject()) {
- context = Factory::NewWithContext(context,
+ context = isolate->factory()->NewWithContext(context,
Handle<JSObject>::cast(additional_context), false);
}
// 'arguments'. This is to have access to what would have been 'arguments' in
// the function being debugged.
// function(arguments,__source__) {return eval(__source__);}
- static const char* source_str =
- "(function(arguments,__source__){return eval(__source__);})";
- static const int source_str_length = StrLength(source_str);
+
Handle<String> function_source =
- Factory::NewStringFromAscii(Vector<const char>(source_str,
- source_str_length));
+ isolate->factory()->NewStringFromAscii(
+ Vector<const char>(kSourceStr, sizeof(kSourceStr) - 1));
// Currently, the eval code will be executed in non-strict mode,
// even in the strict code context.
kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
- Factory::NewFunctionFromSharedFunctionInfo(shared, context);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
// Invoke the result of the compilation to get the evaluation function.
bool has_pending_exception;
- Handle<Object> receiver(frame->receiver());
+ Handle<Object> receiver(frame->receiver(), isolate);
Handle<Object> evaluation_function =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- Handle<Object> arguments = GetArgumentsObject(frame, function, scope_info,
+ Handle<Object> arguments = GetArgumentsObject(isolate, frame,
+ function, scope_info,
&sinfo, function_context);
// Invoke the evaluation function and return the result.
}
-static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DebugEvaluateGlobal(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 4);
Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args,
+ isolate);
if (!maybe_check_result->ToObject(&check_result)) {
return maybe_check_result;
}
DisableBreak disable_break_save(disable_break);
// Enter the top context from before the debugger was invoked.
- SaveContext save;
+ SaveContext save(isolate);
SaveContext* top = &save;
- while (top != NULL && *top->context() == *Debug::debug_context()) {
+ while (top != NULL && *top->context() == *isolate->debug()->debug_context()) {
top = top->prev();
}
if (top != NULL) {
- Top::set_context(*top->context());
+ isolate->set_context(*top->context());
}
// Get the global context now set to the top context from before the
// debugger was invoked.
- Handle<Context> context = Top::global_context();
+ Handle<Context> context = isolate->global_context();
bool is_global = true;
if (additional_context->IsJSObject()) {
// Create a function context first, then put a 'with' context on top of it.
- Handle<JSFunction> go_between = Factory::NewFunction(
- Factory::empty_string(), Factory::undefined_value());
+ Handle<JSFunction> go_between = isolate->factory()->NewFunction(
+ isolate->factory()->empty_string(),
+ isolate->factory()->undefined_value());
go_between->set_context(*context);
context =
- Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+ isolate->factory()->NewFunctionContext(
+ Context::MIN_CONTEXT_SLOTS, go_between);
context->set_extension(JSObject::cast(*additional_context));
is_global = false;
}
Compiler::CompileEval(source, context, is_global, kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
- Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
- context));
+ Handle<JSFunction>(
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context));
// Invoke the result of the compilation to get the evaluation function.
bool has_pending_exception;
- Handle<Object> receiver = Top::global();
+ Handle<Object> receiver = isolate->global();
Handle<Object> result =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
}
-static MaybeObject* Runtime_DebugGetLoadedScripts(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_DebugGetLoadedScripts(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
// Fill the script objects.
- Handle<FixedArray> instances = Debug::GetLoadedScripts();
+ Handle<FixedArray> instances = isolate->debug()->GetLoadedScripts();
// Convert the script objects to proper JS objects.
for (int i = 0; i < instances->length(); i++) {
}
// Return result as a JS array.
- Handle<JSObject> result = Factory::NewJSObject(Top::array_function());
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->array_function());
Handle<JSArray>::cast(result)->SetContent(*instances);
return *result;
}
// args[0]: the object to find references to
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugReferencedBy(Arguments args) {
+static MaybeObject* Runtime_DebugReferencedBy(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- Heap::CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSObject, target, args[0]);
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
+ isolate->context()->global_context()->arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateFixedArray(count);
+ { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
// Return result as JS array.
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(
- Top::context()->global_context()->array_function());
+ { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ isolate->context()->global_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray::cast(result)->SetContent(instances);
// Scan the heap for objects constructed by a specific function.
// args[0]: the constructor to find instances of
// args[1]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugConstructedBy(Arguments args) {
+static MaybeObject* Runtime_DebugConstructedBy(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- Heap::CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateFixedArray(count);
+ { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
// Return result as JS array.
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(
- Top::context()->global_context()->array_function());
+ { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ isolate->context()->global_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray::cast(result)->SetContent(instances);
// Find the effective prototype object as returned by __proto__.
// args[0]: the object to find the prototype for.
-static MaybeObject* Runtime_DebugGetPrototype(Arguments args) {
+static MaybeObject* Runtime_DebugGetPrototype(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
}
-static MaybeObject* Runtime_SystemBreak(Arguments args) {
+static MaybeObject* Runtime_SystemBreak(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 0);
CPU::DebugBreak();
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
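// Note that CPU::DebugBreak() remains a static call: it emits a hardware
// break and carries no per-isolate state, so only the undefined_value()
// result needed to move onto the isolate's heap.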
-static MaybeObject* Runtime_DebugDisassembleFunction(Arguments args) {
+static MaybeObject* Runtime_DebugDisassembleFunction(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef DEBUG
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
}
func->code()->PrintLn();
#endif // DEBUG
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DebugDisassembleConstructor(Arguments args) {
+static MaybeObject* Runtime_DebugDisassembleConstructor(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef DEBUG
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
}
shared->construct_stub()->PrintLn();
#endif // DEBUG
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionGetInferredName(Arguments args) {
+static MaybeObject* Runtime_FunctionGetInferredName(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 1);
static int FindSharedFunctionInfosForScript(Script* script,
- FixedArray* buffer) {
+ FixedArray* buffer) {
AssertNoAllocation no_allocations;
int counter = 0;
// to this script. Returns JSArray of SharedFunctionInfo wrapped
// in OpaqueReferences.
static MaybeObject* Runtime_LiveEditFindSharedFunctionInfosForScript(
- Arguments args) {
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, script_value, args[0]);
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
const int kBufferSize = 32;
Handle<FixedArray> array;
- array = Factory::NewFixedArray(kBufferSize);
+ array = isolate->factory()->NewFixedArray(kBufferSize);
int number = FindSharedFunctionInfosForScript(*script, *array);
if (number > kBufferSize) {
- array = Factory::NewFixedArray(number);
+ array = isolate->factory()->NewFixedArray(number);
FindSharedFunctionInfosForScript(*script, *array);
}
- Handle<JSArray> result = Factory::NewJSArrayWithElements(array);
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
result->set_length(Smi::FromInt(number));
LiveEdit::WrapSharedFunctionInfos(result);
// Returns a JSArray of compilation infos. The array is ordered so that
// each function with all its descendants is always stored in a contiguous range
// with the function itself going first. The root function is a script function.
-static MaybeObject* Runtime_LiveEditGatherCompileInfo(Arguments args) {
+static MaybeObject* Runtime_LiveEditGatherCompileInfo(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, script, args[0]);
CONVERT_ARG_CHECKED(String, source, 1);
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
- if (Top::has_pending_exception()) {
+ if (isolate->has_pending_exception()) {
return Failure::Exception();
}
// Changes the source of the script to a new_source.
// If old_script_name is provided (i.e. is a String), also creates a copy of
// the script with its original source and sends notification to debugger.
-static MaybeObject* Runtime_LiveEditReplaceScript(Arguments args) {
+static MaybeObject* Runtime_LiveEditReplaceScript(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 3);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, original_script_value, args[0]);
CONVERT_ARG_CHECKED(String, new_source, 1);
- Handle<Object> old_script_name(args[2]);
+ Handle<Object> old_script_name(args[2], isolate);
CONVERT_CHECKED(Script, original_script_pointer,
original_script_value->value());
Handle<Script> script_handle(Script::cast(old_script));
return *(GetScriptWrapper(script_handle));
} else {
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
}
-static MaybeObject* Runtime_LiveEditFunctionSourceUpdated(Arguments args) {
+static MaybeObject* Runtime_LiveEditFunctionSourceUpdated(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 1);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
return LiveEdit::FunctionSourceUpdated(shared_info);
}
// Replaces code of SharedFunctionInfo with a new one.
-static MaybeObject* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
+static MaybeObject* Runtime_LiveEditReplaceFunctionCode(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
}
// Connects SharedFunctionInfo to another script.
-static MaybeObject* Runtime_LiveEditFunctionSetScript(Arguments args) {
+static MaybeObject* Runtime_LiveEditFunctionSetScript(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
- Handle<Object> function_object(args[0]);
- Handle<Object> script_object(args[1]);
+ HandleScope scope(isolate);
+ Handle<Object> function_object(args[0], isolate);
+ Handle<Object> script_object(args[1], isolate);
if (function_object->IsJSValue()) {
Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
if (script_object->IsJSValue()) {
CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value());
- script_object = Handle<Object>(script);
+ script_object = Handle<Object>(script, isolate);
}
LiveEdit::SetFunctionScript(function_wrapper, script_object);
// and we check it in this function.
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// In the code of a parent function, replaces the original function (as an
// embedded object) with a substitute one.
-static MaybeObject* Runtime_LiveEditReplaceRefToNestedFunction(Arguments args) {
+static MaybeObject* Runtime_LiveEditReplaceRefToNestedFunction(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 3);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0);
CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1);
LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
subst_wrapper);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// array of groups of 3 numbers:
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
-static MaybeObject* Runtime_LiveEditPatchFunctionPositions(Arguments args) {
+static MaybeObject* Runtime_LiveEditPatchFunctionPositions(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
// checks that none of them have activations on stacks (of any thread).
// Returns an array of the same length whose elements are values of
// LiveEdit::FunctionPatchabilityStatus type.
-static MaybeObject* Runtime_LiveEditCheckAndDropActivations(Arguments args) {
+static MaybeObject* Runtime_LiveEditCheckAndDropActivations(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_CHECKED(do_drop, args[1]);
// Compares two strings line-by-line, then token-wise, and returns the diff
// in the form of a JSArray of triplets (pos1, pos1_end, pos2_end)
// describing the list of diff chunks.
-static MaybeObject* Runtime_LiveEditCompareStrings(Arguments args) {
+static MaybeObject* Runtime_LiveEditCompareStrings(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(String, s1, 0);
CONVERT_ARG_CHECKED(String, s2, 1);
}
-
// A testing entry. Returns the statement position closest to the given
// source_position.
-static MaybeObject* Runtime_GetFunctionCodePositionFromSource(Arguments args) {
+static MaybeObject* Runtime_GetFunctionCodePositionFromSource(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- Handle<Code> code(function->code());
+ Handle<Code> code(function->code(), isolate);
if (code->kind() != Code::FUNCTION &&
code->kind() != Code::OPTIMIZED_FUNCTION) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
// Calls specified function with or without entering the debugger.
// This is used in unit tests to run code as if the debugger were entered, or
// simply to have a stack with a C++ frame in the middle.
-static MaybeObject* Runtime_ExecuteInDebugContext(Arguments args) {
+static MaybeObject* Runtime_ExecuteInDebugContext(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_BOOLEAN_CHECKED(without_debugger, args[1]);
bool pending_exception;
{
if (without_debugger) {
- result = Execution::Call(function, Top::global(), 0, NULL,
+ result = Execution::Call(function, isolate->global(), 0, NULL,
&pending_exception);
} else {
EnterDebugger enter_debugger;
- result = Execution::Call(function, Top::global(), 0, NULL,
+ result = Execution::Call(function, isolate->global(), 0, NULL,
&pending_exception);
}
}
// Sets a v8 flag.
-static MaybeObject* Runtime_SetFlags(Arguments args) {
+static MaybeObject* Runtime_SetFlags(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
CONVERT_CHECKED(String, arg, args[0]);
SmartPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
FlagList::SetFlagsFromString(*flags, StrLength(*flags));
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Performs a GC.
// Presently, it only does a full GC.
-static MaybeObject* Runtime_CollectGarbage(Arguments args) {
- Heap::CollectAllGarbage(true);
- return Heap::undefined_value();
+static MaybeObject* Runtime_CollectGarbage(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ isolate->heap()->CollectAllGarbage(true);
+ return isolate->heap()->undefined_value();
}
// Gets the current heap usage.
-static MaybeObject* Runtime_GetHeapUsage(Arguments args) {
- int usage = static_cast<int>(Heap::SizeOfObjects());
+static MaybeObject* Runtime_GetHeapUsage(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
if (!Smi::IsValid(usage)) {
- return *Factory::NewNumberFromInt(usage);
+ return *isolate->factory()->NewNumberFromInt(usage);
}
return Smi::FromInt(usage);
}
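
The Smi::IsValid guard above covers heap sizes too large for a small integer, which get boxed as heap numbers instead. A rough equivalent of the check, assuming the usual Smi limits (31-bit payload on 32-bit targets):

// Equivalent range test (sketch; Smi::kMinValue/kMaxValue are the
// platform-dependent small-integer bounds):
bool fits_in_smi = usage >= Smi::kMinValue && usage <= Smi::kMaxValue;
// When it does not fit, factory->NewNumberFromInt allocates a HeapNumber.
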
// Tells whether the live object list (LOL) feature is enabled.
-static MaybeObject* Runtime_HasLOLEnabled(Arguments args) {
+static MaybeObject* Runtime_HasLOLEnabled(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
- return Heap::true_value();
+ return isolate->heap()->true_value();
#else
- return Heap::false_value();
+ return isolate->heap()->false_value();
#endif
}
// Captures a live object list from the present heap.
-static MaybeObject* Runtime_CaptureLOL(Arguments args) {
+static MaybeObject* Runtime_CaptureLOL(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
return LiveObjectList::Capture();
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Deletes the specified live object list.
-static MaybeObject* Runtime_DeleteLOL(Arguments args) {
+static MaybeObject* Runtime_DeleteLOL(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
CONVERT_SMI_CHECKED(id, args[0]);
bool success = LiveObjectList::Delete(id);
- return success ? Heap::true_value() : Heap::false_value();
+ return success ? isolate->heap()->true_value() :
+ isolate->heap()->false_value();
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// specified by id1 and id2.
// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
// dumped.
-static MaybeObject* Runtime_DumpLOL(Arguments args) {
+static MaybeObject* Runtime_DumpLOL(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
CONVERT_SMI_CHECKED(id1, args[0]);
EnterDebugger enter_debugger;
return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the specified object as requested by the debugger.
// This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObj(Arguments args) {
+static MaybeObject* Runtime_GetLOLObj(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
CONVERT_SMI_CHECKED(obj_id, args[0]);
Object* result = LiveObjectList::GetObj(obj_id);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the obj id for the specified address if valid.
// This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObjId(Arguments args) {
+static MaybeObject* Runtime_GetLOLObjId(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
CONVERT_ARG_CHECKED(String, address, 0);
Object* result = LiveObjectList::GetObjId(address);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the retainers that keep the specified object alive.
-static MaybeObject* Runtime_GetLOLObjRetainers(Arguments args) {
+static MaybeObject* Runtime_GetLOLObjRetainers(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
CONVERT_SMI_CHECKED(obj_id, args[0]);
limit,
filter_obj);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the reference path between 2 objects.
-static MaybeObject* Runtime_GetLOLPath(Arguments args) {
+static MaybeObject* Runtime_GetLOLPath(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
CONVERT_SMI_CHECKED(obj_id1, args[0]);
LiveObjectList::GetPath(obj_id1, obj_id2, instance_filter);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Generates the response to a debugger request for a list of all
// previously captured live object lists.
-static MaybeObject* Runtime_InfoLOL(Arguments args) {
+static MaybeObject* Runtime_InfoLOL(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
CONVERT_SMI_CHECKED(start, args[0]);
CONVERT_SMI_CHECKED(count, args[1]);
return LiveObjectList::Info(start, count);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets a dump of the specified object as requested by the debugger.
// This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_PrintLOLObj(Arguments args) {
+static MaybeObject* Runtime_PrintLOLObj(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
CONVERT_SMI_CHECKED(obj_id, args[0]);
Object* result = LiveObjectList::PrintObj(obj_id);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Resets and releases all previously captured live object lists.
-static MaybeObject* Runtime_ResetLOL(Arguments args) {
+static MaybeObject* Runtime_ResetLOL(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
LiveObjectList::Reset();
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
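
Both arms of the #ifdef above return undefined, so the duplication could be folded without changing behavior; a possible simplification:

static MaybeObject* Runtime_ResetLOL(RUNTIME_CALLING_CONVENTION) {
  RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
  LiveObjectList::Reset();
#endif
  return isolate->heap()->undefined_value();
}
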
// specified by id1 and id2.
// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
// summarized.
-static MaybeObject* Runtime_SummarizeLOL(Arguments args) {
+static MaybeObject* Runtime_SummarizeLOL(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
CONVERT_SMI_CHECKED(id1, args[0]);
EnterDebugger enter_debugger;
return LiveObjectList::Summarize(id1, id2, filter_obj);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
-static MaybeObject* Runtime_ProfilerResume(Arguments args) {
+static MaybeObject* Runtime_ProfilerResume(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
CONVERT_CHECKED(Smi, smi_tag, args[1]);
v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_ProfilerPause(Arguments args) {
+static MaybeObject* Runtime_ProfilerPause(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
CONVERT_CHECKED(Smi, smi_tag, args[1]);
v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
#endif // ENABLE_LOGGING_AND_PROFILING
}
  // If no script with the requested script data is found, return undefined.
- if (script.is_null()) return Factory::undefined_value();
+ if (script.is_null()) return FACTORY->undefined_value();
// Return the script found.
return GetScriptWrapper(script);
// Get the script object from script data. NOTE: Regarding performance
// see the NOTE for GetScriptFromScriptData.
// args[0]: script data for the script to find the source for
-static MaybeObject* Runtime_GetScript(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_GetScript(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Collect the raw data for a stack trace. Returns an array of 4-element
// segments, each containing a receiver, function, code, and native code
// offset.
-static MaybeObject* Runtime_CollectStackTrace(Arguments args) {
+static MaybeObject* Runtime_CollectStackTrace(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT_EQ(args.length(), 2);
Handle<Object> caller = args.at<Object>(0);
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
- HandleScope scope;
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
limit = Max(limit, 0); // Ensure that limit is not negative.
int initial_size = Min(limit, 10);
Handle<FixedArray> elements =
- Factory::NewFixedArrayWithHoles(initial_size * 4);
+ factory->NewFixedArrayWithHoles(initial_size * 4);
StackFrameIterator iter;
// If the caller parameter is a function we skip frames until we're
if (cursor + 4 > elements->length()) {
int new_capacity = JSObject::NewElementsCapacity(elements->length());
Handle<FixedArray> new_elements =
- Factory::NewFixedArrayWithHoles(new_capacity);
+ factory->NewFixedArrayWithHoles(new_capacity);
for (int i = 0; i < cursor; i++) {
new_elements->set(i, elements->get(i));
}
}
iter.Advance();
}
- Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
+ Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(cursor));
return *result;
}
// Returns V8 version as a string.
-static MaybeObject* Runtime_GetV8Version(Arguments args) {
+static MaybeObject* Runtime_GetV8Version(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT_EQ(args.length(), 0);
NoHandleAllocation ha;
const char* version_string = v8::V8::GetVersion();
- return Heap::AllocateStringFromAscii(CStrVector(version_string), NOT_TENURED);
+ return isolate->heap()->AllocateStringFromAscii(CStrVector(version_string),
+ NOT_TENURED);
}
-static MaybeObject* Runtime_Abort(Arguments args) {
+static MaybeObject* Runtime_Abort(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
Smi::cast(args[1])->value());
- Top::PrintStack();
+ isolate->PrintStack();
OS::Abort();
UNREACHABLE();
return NULL;
}
-static MaybeObject* Runtime_GetFromCache(Arguments args) {
+static MaybeObject* Runtime_GetFromCache(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
// This is only called from codegen, so checks might be more lax.
CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
Object* key = args[1];
}
  // There is no value in the cache. Invoke the function and cache the result.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSFunctionResultCache> cache_handle(cache);
Handle<Object> key_handle(key);
Handle<JSFunction> factory(JSFunction::cast(
cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(Top::global_context()->global());
+ Handle<Object> receiver(isolate->global_context()->global());
  // This handle is neither shared nor used later, so it's safe.
Object** argv[] = { key_handle.location() };
bool pending_exception = false;
}
-static MaybeObject* Runtime_NewMessageObject(Arguments args) {
- HandleScope scope;
+static MaybeObject* Runtime_NewMessageObject(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(String, type, 0);
CONVERT_ARG_CHECKED(JSArray, arguments, 1);
- return *Factory::NewJSMessageObject(type,
- arguments,
- 0,
- 0,
- Factory::undefined_value(),
- Factory::undefined_value(),
- Factory::undefined_value());
+ return *isolate->factory()->NewJSMessageObject(
+ type,
+ arguments,
+ 0,
+ 0,
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value());
}
-static MaybeObject* Runtime_MessageGetType(Arguments args) {
+static MaybeObject* Runtime_MessageGetType(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return message->type();
}
-static MaybeObject* Runtime_MessageGetArguments(Arguments args) {
+static MaybeObject* Runtime_MessageGetArguments(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return message->arguments();
}
-static MaybeObject* Runtime_MessageGetStartPosition(Arguments args) {
+static MaybeObject* Runtime_MessageGetStartPosition(
+ RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return Smi::FromInt(message->start_position());
}
-static MaybeObject* Runtime_MessageGetScript(Arguments args) {
+static MaybeObject* Runtime_MessageGetScript(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return message->script();
}
#ifdef DEBUG
// ListNatives is ONLY used by fuzz-natives.js in debug mode.
// Exclude the code in release mode.
-static MaybeObject* Runtime_ListNatives(Arguments args) {
+static MaybeObject* Runtime_ListNatives(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 0);
HandleScope scope;
#define COUNT_ENTRY(Name, argc, ressize) + 1
INLINE_FUNCTION_LIST(COUNT_ENTRY)
INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY
- Handle<FixedArray> elements = Factory::NewFixedArray(entry_count);
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
int index = 0;
bool inline_runtime_functions = false;
#define ADD_ENTRY(Name, argc, ressize) \
Handle<String> name; \
/* Inline runtime functions have an underscore in front of the name. */ \
if (inline_runtime_functions) { \
- name = Factory::NewStringFromAscii( \
+ name = factory->NewStringFromAscii( \
Vector<const char>("_" #Name, StrLength("_" #Name))); \
} else { \
- name = Factory::NewStringFromAscii( \
+ name = factory->NewStringFromAscii( \
Vector<const char>(#Name, StrLength(#Name))); \
} \
- Handle<FixedArray> pair_elements = Factory::NewFixedArray(2); \
+ Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \
pair_elements->set(0, *name); \
pair_elements->set(1, Smi::FromInt(argc)); \
- Handle<JSArray> pair = Factory::NewJSArrayWithElements(pair_elements); \
+ Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements); \
elements->set(index++, *pair); \
}
inline_runtime_functions = false;
INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
ASSERT_EQ(index, entry_count);
- Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
+ Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
return *result;
}
#endif
-static MaybeObject* Runtime_Log(Arguments args) {
+static MaybeObject* Runtime_Log(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, format, args[0]);
CONVERT_CHECKED(JSArray, elms, args[1]);
Vector<const char> chars = format->ToAsciiVector();
- Logger::LogRuntime(chars, elms);
- return Heap::undefined_value();
+ LOGGER->LogRuntime(chars, elms);
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_IS_VAR(Arguments args) {
+static MaybeObject* Runtime_IS_VAR(RUNTIME_CALLING_CONVENTION) {
UNREACHABLE(); // implemented as a macro in the parser
return NULL;
}
{ Runtime::kInline##name, Runtime::INLINE, \
"_" #name, NULL, number_of_args, result_size },
-Runtime::Function kIntrinsicFunctions[] = {
+static const Runtime::Function kIntrinsicFunctions[] = {
RUNTIME_FUNCTION_LIST(F)
INLINE_FUNCTION_LIST(I)
INLINE_RUNTIME_FUNCTION_LIST(I)
};
-MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Object* dictionary) {
+MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
+ Object* dictionary) {
+ ASSERT(Isolate::Current()->heap() == heap);
ASSERT(dictionary != NULL);
ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
Object* name_symbol;
{ MaybeObject* maybe_name_symbol =
- Heap::LookupAsciiSymbol(kIntrinsicFunctions[i].name);
+ heap->LookupAsciiSymbol(kIntrinsicFunctions[i].name);
if (!maybe_name_symbol->ToObject(&name_symbol)) return maybe_name_symbol;
}
StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
}
-Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
- int entry = Heap::intrinsic_function_names()->FindEntry(*name);
+const Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
+ Heap* heap = name->GetHeap();
+ int entry = heap->intrinsic_function_names()->FindEntry(*name);
if (entry != kNotFound) {
- Object* smi_index = Heap::intrinsic_function_names()->ValueAt(entry);
+ Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry);
int function_index = Smi::cast(smi_index)->value();
return &(kIntrinsicFunctions[function_index]);
}
}
-Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
+const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
return &(kIntrinsicFunctions[static_cast<int>(id)]);
}
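
With kIntrinsicFunctions now static const, both lookups hand out pointers into a read-only table, and the by-name path goes through the heap that owns the symbol, since each isolate keeps its own symbol table and intrinsic_function_names dictionary. A small usage sketch (name_symbol is a hypothetical interned Handle<String>; enum values follow the Runtime::k##Name convention):

const Runtime::Function* by_id = Runtime::FunctionForId(Runtime::kAbort);
const Runtime::Function* by_name = Runtime::FunctionForSymbol(name_symbol);
// The not-found return of FunctionForSymbol is elided in the hunk above;
// presumably it is NULL when the dictionary lookup yields kNotFound.
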
if (failure->IsRetryAfterGC()) {
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
- Heap::CollectGarbage(failure->allocation_space());
+ HEAP->CollectGarbage(failure->allocation_space());
} else {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
- Counters::gc_last_resort_from_js.Increment();
- Heap::CollectAllGarbage(false);
+ COUNTERS->gc_last_resort_from_js()->Increment();
+ HEAP->CollectAllGarbage(false);
}
}
#ifndef V8_RUNTIME_H_
#define V8_RUNTIME_H_
+#include "zone.h"
+
namespace v8 {
namespace internal {
#define RUNTIME_FUNCTION_LIST_DEBUG(F)
#endif
-
// ----------------------------------------------------------------------------
// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
// either directly by id (via the code generator), or indirectly
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
+class RuntimeState {
+ public:
+
+ StaticResource<StringInputBuffer>* string_input_buffer() {
+ return &string_input_buffer_;
+ }
+ unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
+ return &to_upper_mapping_;
+ }
+ unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
+ return &to_lower_mapping_;
+ }
+ StringInputBuffer* string_input_buffer_compare_bufx() {
+ return &string_input_buffer_compare_bufx_;
+ }
+ StringInputBuffer* string_input_buffer_compare_bufy() {
+ return &string_input_buffer_compare_bufy_;
+ }
+ StringInputBuffer* string_locale_compare_buf1() {
+ return &string_locale_compare_buf1_;
+ }
+ StringInputBuffer* string_locale_compare_buf2() {
+ return &string_locale_compare_buf2_;
+ }
+ int* smi_lexicographic_compare_x_elms() {
+ return smi_lexicographic_compare_x_elms_;
+ }
+ int* smi_lexicographic_compare_y_elms() {
+ return smi_lexicographic_compare_y_elms_;
+ }
+
+ private:
+ RuntimeState() {}
+ // Non-reentrant string buffer for efficient general use in the runtime.
+ StaticResource<StringInputBuffer> string_input_buffer_;
+ unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
+ unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
+ StringInputBuffer string_input_buffer_compare_bufx_;
+ StringInputBuffer string_input_buffer_compare_bufy_;
+ StringInputBuffer string_locale_compare_buf1_;
+ StringInputBuffer string_locale_compare_buf2_;
+ int smi_lexicographic_compare_x_elms_[10];
+ int smi_lexicographic_compare_y_elms_[10];
+
+ friend class Isolate;
+ friend class Runtime;
+
+ DISALLOW_COPY_AND_ASSIGN(RuntimeState);
+};
+
+
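
RuntimeState collects what were previously file-level statics in runtime.cc into one per-isolate object; the friend declaration suggests Isolate constructs and owns it. A hedged sketch of the assumed wiring (the real Isolate definition lives in isolate.h, outside this hunk):

class Isolate {
 public:
  RuntimeState* runtime_state() { return &runtime_state_; }
 private:
  RuntimeState runtime_state_;  // one copy of the scratch buffers per isolate
};

// A former static such as string_input_buffer is then reached as
//   isolate->runtime_state()->string_input_buffer()
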
class Runtime : public AllStatic {
public:
enum FunctionId {
// retried with a new, empty StringDictionary, not with the same one.
// Alternatively, heap initialization can be completely restarted.
MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames(
- Object* dictionary);
+ Heap* heap, Object* dictionary);
// Get the intrinsic function with the given name, which must be a symbol.
- static Function* FunctionForSymbol(Handle<String> name);
+ static const Function* FunctionForSymbol(Handle<String> name);
// Get the intrinsic function with the given FunctionId.
- static Function* FunctionForId(FunctionId id);
+ static const Function* FunctionForId(FunctionId id);
// General-purpose helper functions for runtime system.
- static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
+ static int StringMatch(Isolate* isolate,
+ Handle<String> sub,
+ Handle<String> pat,
+ int index);
- static bool IsUpperCaseChar(uint16_t ch);
+ static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
// TODO(1240886): The following three methods are *not* handle safe,
// but accept handle arguments. This seems fragile.
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Handle<Object> object,
+ MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
uint32_t index);
MUST_USE_RESULT static MaybeObject* GetElement(Handle<Object> object,
uint32_t index);
MUST_USE_RESULT static MaybeObject* SetObjectProperty(
+ Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
StrictModeFlag strict_mode);
MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
+ Isolate* isolate,
Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr);
MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
+ Isolate* isolate,
Handle<JSObject> object,
Handle<Object> key);
- MUST_USE_RESULT static MaybeObject* GetObjectProperty(Handle<Object> object,
- Handle<Object> key);
+ MUST_USE_RESULT static MaybeObject* GetObjectProperty(
+ Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key);
// This function is used in FunctionNameUsing* tests.
- static Object* FindSharedFunctionInfoInScript(Handle<Script> script,
+ static Object* FindSharedFunctionInfoInScript(Isolate* isolate,
+ Handle<Script> script,
int position);
// Helper functions used by stubs.
static void PerformGC(Object* result);
};
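
Callers of these helpers now pass the Isolate explicitly as the first argument. A minimal call-site sketch under the new signatures (object and key are hypothetical handles; error handling is the usual MaybeObject propagation):

Isolate* isolate = Isolate::Current();
MaybeObject* raw = Runtime::GetObjectProperty(isolate, object, key);
Object* value;
if (!raw->ToObject(&value)) return raw;  // propagate exceptions/failures
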
-
} } // namespace v8::internal
#endif // V8_RUNTIME_H_
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "safepoint-table.h"
#include "deoptimizer.h"
#include "disasm.h"
#include "macro-assembler.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
#ifndef V8_SAFEPOINT_TABLE_H_
#define V8_SAFEPOINT_TABLE_H_
-#include "v8.h"
-
#include "heap.h"
#include "zone.h"
-#include "zone-inl.h"
namespace v8 {
namespace internal {
// Features shared by parsing and pre-parsing scanners.
+#include "v8.h"
+
+/*
+TODO(isolates): I include v8.h instead of these because we need Isolate and
+some classes (NativeAllocationChecker) have moved into isolate.h.
#include "../include/v8stdint.h"
+*/
#include "scanner-base.h"
#include "char-predicates-inl.h"
namespace internal {
// ----------------------------------------------------------------------------
-// Character predicates
-
-unibrow::Predicate<IdentifierStart, 128> ScannerConstants::kIsIdentifierStart;
-unibrow::Predicate<IdentifierPart, 128> ScannerConstants::kIsIdentifierPart;
-unibrow::Predicate<unibrow::WhiteSpace, 128> ScannerConstants::kIsWhiteSpace;
-unibrow::Predicate<unibrow::LineTerminator, 128>
- ScannerConstants::kIsLineTerminator;
-
-StaticResource<ScannerConstants::Utf8Decoder> ScannerConstants::utf8_decoder_;
-
// Compound predicates.
bool ScannerConstants::IsIdentifier(unibrow::CharacterStream* buffer) {
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner()
- : octal_pos_(kNoOctalLocation) { }
+Scanner::Scanner(Isolate* isolate)
+ : scanner_constants_(isolate->scanner_constants()),
+ octal_pos_(kNoOctalLocation) {
+}
uc32 Scanner::ScanHexEscape(uc32 c, int length) {
// ----------------------------------------------------------------------------
// JavaScriptScanner
-JavaScriptScanner::JavaScriptScanner() : Scanner() {}
+JavaScriptScanner::JavaScriptScanner(Isolate* isolate) : Scanner(isolate) {}
Token::Value JavaScriptScanner::Next() {
while (true) {
// We treat byte-order marks (BOMs) as whitespace for better
// compatibility with Spidermonkey and other JavaScript engines.
- while (ScannerConstants::kIsWhiteSpace.get(c0_) || IsByteOrderMark(c0_)) {
+ while (scanner_constants_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
// IsWhiteSpace() includes line terminators!
- if (ScannerConstants::kIsLineTerminator.get(c0_)) {
+ if (scanner_constants_->IsLineTerminator(c0_)) {
// Ignore line terminators, but remember them. This is necessary
// for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
// ECMA-262, section 7.4, page 12).
- while (c0_ >= 0 && !ScannerConstants::kIsLineTerminator.get(c0_)) {
+ while (c0_ >= 0 && !scanner_constants_->IsLineTerminator(c0_)) {
Advance();
}
break;
default:
- if (ScannerConstants::kIsIdentifierStart.get(c0_)) {
+ if (scanner_constants_->IsIdentifierStart(c0_)) {
token = ScanIdentifierOrKeyword();
} else if (IsDecimalDigit(c0_)) {
token = ScanNumber(false);
Advance();
// Skip escaped newlines.
- if (ScannerConstants::kIsLineTerminator.get(c)) {
+ if (scanner_constants_->IsLineTerminator(c)) {
// Allow CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
// Allow LF+CR newlines in multiline string literals.
LiteralScope literal(this);
while (c0_ != quote && c0_ >= 0
- && !ScannerConstants::kIsLineTerminator.get(c0_)) {
+ && !scanner_constants_->IsLineTerminator(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
// not be an identifier start or a decimal digit; see ECMA-262
// section 7.8.3, page 17 (note that we read only one decimal digit
// if the value is 0).
- if (IsDecimalDigit(c0_) || ScannerConstants::kIsIdentifierStart.get(c0_))
+ if (IsDecimalDigit(c0_) || scanner_constants_->IsIdentifierStart(c0_))
return Token::ILLEGAL;
literal.Complete();
Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
- ASSERT(ScannerConstants::kIsIdentifierStart.get(c0_));
+ ASSERT(scanner_constants_->IsIdentifierStart(c0_));
LiteralScope literal(this);
KeywordMatcher keyword_match;
// Scan identifier start character.
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier start characters.
- if (!ScannerConstants::kIsIdentifierStart.get(c)) return Token::ILLEGAL;
+ if (!scanner_constants_->IsIdentifierStart(c)) return Token::ILLEGAL;
AddLiteralChar(c);
return ScanIdentifierSuffix(&literal);
}
}
// Scan the rest of the identifier characters.
- while (ScannerConstants::kIsIdentifierPart.get(c0_)) {
+ while (scanner_constants_->IsIdentifierPart(c0_)) {
if (c0_ != '\\') {
uc32 next_char = c0_;
Advance();
Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
// Scan the rest of the identifier characters.
- while (ScannerConstants::kIsIdentifierPart.get(c0_)) {
+ while (scanner_constants_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier part characters.
- if (!ScannerConstants::kIsIdentifierPart.get(c)) return Token::ILLEGAL;
+ if (!scanner_constants_->IsIdentifierPart(c)) return Token::ILLEGAL;
AddLiteralChar(c);
} else {
AddLiteralChar(c0_);
AddLiteralChar('=');
while (c0_ != '/' || in_character_class) {
- if (ScannerConstants::kIsLineTerminator.get(c0_) || c0_ < 0) return false;
+ if (scanner_constants_->IsLineTerminator(c0_) || c0_ < 0) return false;
if (c0_ == '\\') { // Escape sequence.
AddLiteralCharAdvance();
- if (ScannerConstants::kIsLineTerminator.get(c0_) || c0_ < 0) return false;
+ if (scanner_constants_->IsLineTerminator(c0_) || c0_ < 0) return false;
AddLiteralCharAdvance();
// If the escape allows more characters, i.e., \x??, \u????, or \c?,
// only "safe" characters are allowed (letters, digits, underscore),
bool JavaScriptScanner::ScanRegExpFlags() {
// Scan regular expression flags.
LiteralScope literal(this);
- while (ScannerConstants::kIsIdentifierPart.get(c0_)) {
+ while (scanner_constants_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
if (c != static_cast<uc32>(unibrow::Utf8::kBadChar)) {
};
+class ScannerConstants {
// ---------------------------------------------------------------------
// Constants used by scanners.
-
-class ScannerConstants : AllStatic {
public:
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
- static StaticResource<Utf8Decoder>* utf8_decoder() {
+ StaticResource<Utf8Decoder>* utf8_decoder() {
return &utf8_decoder_;
}
- static unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
- static unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
- static unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
+ bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
+ bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
+ bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
- static bool IsIdentifier(unibrow::CharacterStream* buffer);
+ bool IsIdentifier(unibrow::CharacterStream* buffer);
private:
- static StaticResource<Utf8Decoder> utf8_decoder_;
+ ScannerConstants() {}
+
+ unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
+ unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
+ unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
+ unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ StaticResource<Utf8Decoder> utf8_decoder_;
+
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(ScannerConstants);
};
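
The character predicates move from class statics to instance members, so scanning code first fetches the per-isolate ScannerConstants, as the Scanner constructor above does. Usage sketch (c0_ is the scanner's current character, as in the hunks above):

ScannerConstants* sc = isolate->scanner_constants();
if (sc->IsWhiteSpace(c0_)) Advance();              // was kIsWhiteSpace.get(c0_)
if (sc->IsIdentifierStart(c0_)) token = ScanIdentifierOrKeyword();
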
// ----------------------------------------------------------------------------
bool is_ascii_;
int position_;
Vector<byte> backing_store_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
};
bool complete_;
};
- Scanner();
+ explicit Scanner(Isolate* isolate);
// Returns the current token again.
Token::Value current_token() { return current_.token; }
return source_->pos() - kCharacterLookaheadBufferSize;
}
+ ScannerConstants* scanner_constants_;
+
// Buffers collecting literal strings, numbers, etc.
LiteralBuffer literal_buffer1_;
LiteralBuffer literal_buffer2_;
bool complete_;
};
- JavaScriptScanner();
+ explicit JavaScriptScanner(Isolate* isolate);
// Returns the next token.
Token::Value Next();
// ----------------------------------------------------------------------------
// V8JavaScriptScanner
-V8JavaScriptScanner::V8JavaScriptScanner() : JavaScriptScanner() { }
-
void V8JavaScriptScanner::Initialize(UC16CharacterStream* source) {
source_ = source;
// ----------------------------------------------------------------------------
// JsonScanner
-JsonScanner::JsonScanner() : Scanner() { }
+JsonScanner::JsonScanner(Isolate* isolate) : Scanner(isolate) { }
void JsonScanner::Initialize(UC16CharacterStream* source) {
Advance();
text++;
}
- if (ScannerConstants::kIsIdentifierPart.get(c0_)) return Token::ILLEGAL;
+ if (scanner_constants_->IsIdentifierPart(c0_)) return Token::ILLEGAL;
literal.Complete();
return token;
}
-
} } // namespace v8::internal
class V8JavaScriptScanner : public JavaScriptScanner {
public:
- V8JavaScriptScanner();
+ explicit V8JavaScriptScanner(Isolate* isolate)
+ : JavaScriptScanner(isolate) {}
+
void Initialize(UC16CharacterStream* source);
};
class JsonScanner : public Scanner {
public:
- JsonScanner();
+ explicit JsonScanner(Isolate* isolate);
void Initialize(UC16CharacterStream* source);
template<class Allocator>
ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
- : function_name_(Factory::empty_symbol()),
+ : function_name_(FACTORY->empty_symbol()),
calls_eval_(scope->calls_eval()),
parameters_(scope->num_parameters()),
stack_slots_(scope->num_stack_slots()),
context_slots_.length());
ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
context_modes_.length());
- context_slots_.Add(Factory::empty_symbol());
+ context_slots_.Add(FACTORY->empty_symbol());
context_modes_.Add(Variable::INTERNAL);
}
}
template<class Allocator>
ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
- : function_name_(Factory::empty_symbol()),
+ : function_name_(FACTORY->empty_symbol()),
parameters_(4),
stack_slots_(8),
context_slots_(8),
stack_slots_.length();
Handle<SerializedScopeInfo> data(
- SerializedScopeInfo::cast(*Factory::NewFixedArray(length, TENURED)));
+ SerializedScopeInfo::cast(*FACTORY->NewFixedArray(length, TENURED)));
AssertNoAllocation nogc;
Object** p0 = data->data_start();
SerializedScopeInfo* SerializedScopeInfo::Empty() {
- return reinterpret_cast<SerializedScopeInfo*>(Heap::empty_fixed_array());
+ return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
}
int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
ASSERT(name->IsSymbol());
- int result = ContextSlotCache::Lookup(this, name, mode);
+ Isolate* isolate = GetIsolate();
+ int result = isolate->context_slot_cache()->Lookup(this, name, mode);
if (result != ContextSlotCache::kNotFound) return result;
if (length() > 0) {
// Slots start after length entry.
Variable::Mode mode_value = static_cast<Variable::Mode>(v);
if (mode != NULL) *mode = mode_value;
result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
- ContextSlotCache::Update(this, name, mode_value, result);
+ isolate->context_slot_cache()->Update(this, name, mode_value, result);
return result;
}
p += 2;
}
}
- ContextSlotCache::Update(this, name, Variable::INTERNAL, -1);
+ isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
return -1;
}
int slot_index) {
String* symbol;
ASSERT(slot_index > kNotFound);
- if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ if (HEAP->LookupSymbolIfExists(name, &symbol)) {
int index = Hash(data, symbol);
Key& key = keys_[index];
key.data = data;
}
-ContextSlotCache::Key ContextSlotCache::keys_[ContextSlotCache::kLength];
-
-
-uint32_t ContextSlotCache::values_[ContextSlotCache::kLength];
-
-
#ifdef DEBUG
void ContextSlotCache::ValidateEntry(Object* data,
Variable::Mode mode,
int slot_index) {
String* symbol;
- if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ if (HEAP->LookupSymbolIfExists(name, &symbol)) {
int index = Hash(data, name);
Key& key = keys_[index];
ASSERT(key.data == data);
// Does this scope have an arguments shadow?
bool HasArgumentsShadow() {
- return StackSlotIndex(Heap::arguments_shadow_symbol()) >= 0;
+ return StackSlotIndex(GetHeap()->arguments_shadow_symbol()) >= 0;
}
// Return the number of stack slots for code.
public:
// Lookup context slot index for (data, name).
// If absent, kNotFound is returned.
- static int Lookup(Object* data,
- String* name,
- Variable::Mode* mode);
+ int Lookup(Object* data,
+ String* name,
+ Variable::Mode* mode);
// Update an element in the cache.
- static void Update(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index);
+ void Update(Object* data,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
// Clear the cache.
- static void Clear();
+ void Clear();
static const int kNotFound = -2;
private:
+ ContextSlotCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].data = NULL;
+ keys_[i].name = NULL;
+ values_[i] = kNotFound;
+ }
+ }
+
inline static int Hash(Object* data, String* name);
#ifdef DEBUG
- static void ValidateEntry(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index);
+ void ValidateEntry(Object* data,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
#endif
static const int kLength = 256;
uint32_t value_;
};
- static Key keys_[kLength];
- static uint32_t values_[kLength];
+ Key keys_[kLength];
+ uint32_t values_[kLength];
+
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(ContextSlotCache);
};
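
ContextSlotCache likewise becomes per-isolate state; note that the new constructor seeds keys_ and values_ explicitly, since per-isolate member objects no longer get the static zero-initialization the old file-level arrays relied on. Usage as established earlier in this patch:

Variable::Mode mode;
int index = isolate->context_slot_cache()->Lookup(data, name, &mode);
if (index == ContextSlotCache::kNotFound) {
  // slow path: scan the serialized scope info, then Update() the cache
}
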
// ----------------------------------------------------------------------------
// A Zone allocator for use with LocalsMap.
+// TODO(isolates): It is probably worth it to change the Allocator class to
+// take a pointer to an isolate.
class ZoneAllocator: public Allocator {
public:
/* nothing to do */
virtual ~ZoneAllocator() {}
- virtual void* New(size_t size) { return Zone::New(static_cast<int>(size)); }
+ virtual void* New(size_t size) { return ZONE->New(static_cast<int>(size)); }
/* ignored - Zone is freed in one fell swoop */
virtual void Delete(void* p) {}
// This scope's arguments shadow (if present) is context-allocated if an inner
// scope accesses this one's parameters. Allocate the arguments_shadow_
// variable if necessary.
+ Isolate* isolate = Isolate::Current();
Variable::Mode mode;
int arguments_shadow_index =
- scope_info_->ContextSlotIndex(Heap::arguments_shadow_symbol(), &mode);
+ scope_info_->ContextSlotIndex(
+ isolate->heap()->arguments_shadow_symbol(), &mode);
if (arguments_shadow_index >= 0) {
ASSERT(mode == Variable::INTERNAL);
- arguments_shadow_ = new Variable(this,
- Factory::arguments_shadow_symbol(),
- Variable::INTERNAL,
- true,
- Variable::ARGUMENTS);
+ arguments_shadow_ = new Variable(
+ this,
+ isolate->factory()->arguments_shadow_symbol(),
+ Variable::INTERNAL,
+ true,
+ Variable::ARGUMENTS);
arguments_shadow_->set_rewrite(
new Slot(arguments_shadow_, Slot::CONTEXT, arguments_shadow_index));
arguments_shadow_->set_is_used(true);
top->AllocateVariables(info->calling_context());
#ifdef DEBUG
- if (Bootstrapper::IsActive()
+ if (info->isolate()->bootstrapper()->IsActive()
? FLAG_print_builtin_scopes
: FLAG_print_scopes) {
info->function()->scope()->Print();
// such parameter is 'this' which is passed on the stack when
// invoking scripts
Variable* var =
- variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
+ variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
false, Variable::THIS);
var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
receiver_ = var;
// Declare 'arguments' variable which exists in all functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
- variables_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
+ variables_.Declare(this, FACTORY->arguments_symbol(), Variable::VAR,
true, Variable::ARGUMENTS);
}
}
  // We should never look up 'arguments' in this scope
// as it is implicitly present in any scope.
- ASSERT(*name != *Factory::arguments_symbol());
+ ASSERT(*name != *FACTORY->arguments_symbol());
// Assert that there is no local slot with the given name.
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
bool Scope::HasArgumentsParameter() {
for (int i = 0; i < params_.length(); i++) {
- if (params_[i]->name().is_identical_to(Factory::arguments_symbol()))
+ if (params_[i]->name().is_identical_to(FACTORY->arguments_symbol()))
return true;
}
return false;
void Scope::AllocateParameterLocals() {
ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(Factory::arguments_symbol());
+ Variable* arguments = LocalLookup(FACTORY->arguments_symbol());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
// Parameters are rewritten to arguments[i] if 'arguments' is used in
// variable may be allocated in the heap-allocated context (temporaries
// are never allocated in the context).
arguments_shadow_ = new Variable(this,
- Factory::arguments_shadow_symbol(),
+ FACTORY->arguments_shadow_symbol(),
Variable::INTERNAL,
true,
Variable::ARGUMENTS);
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
ASSERT(var->rewrite() == NULL ||
- (!var->IsVariable(Factory::result_symbol())) ||
+ (!var->IsVariable(FACTORY->result_symbol())) ||
(var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
if (var->rewrite() == NULL && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
// A new variable proxy corresponding to the (function) receiver.
VariableProxy* receiver() const {
VariableProxy* proxy =
- new VariableProxy(Factory::this_symbol(), true, false);
+ new VariableProxy(FACTORY->this_symbol(), true, false);
proxy->BindTo(receiver_);
return proxy;
}
SerializedScopeInfo* scope_info) {
outer_scope_ = outer_scope;
type_ = type;
- scope_name_ = Factory::empty_symbol();
+ scope_name_ = FACTORY->empty_symbol();
dynamics_ = NULL;
receiver_ = NULL;
function_ = NULL;
#include "serialize.h"
#include "stub-cache.h"
#include "v8threads.h"
-#include "top.h"
#include "bootstrapper.h"
namespace v8 {
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
public:
- static ExternalReferenceTable* instance() {
- if (!instance_) instance_ = new ExternalReferenceTable();
- return instance_;
+ static ExternalReferenceTable* instance(Isolate* isolate) {
+ ExternalReferenceTable* external_reference_table =
+ isolate->external_reference_table();
+ if (external_reference_table == NULL) {
+ external_reference_table = new ExternalReferenceTable(isolate);
+ isolate->set_external_reference_table(external_reference_table);
+ }
+ return external_reference_table;
}
int size() const { return refs_.length(); }
int max_id(int code) { return max_id_[code]; }
private:
- static ExternalReferenceTable* instance_;
-
- ExternalReferenceTable() : refs_(64) { PopulateTable(); }
+ explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
+ PopulateTable(isolate);
+ }
~ExternalReferenceTable() { }
struct ExternalReferenceEntry {
const char* name;
};
- void PopulateTable();
+ void PopulateTable(Isolate* isolate);
// For a few types of references, we can get their address from their id.
void AddFromId(TypeCode type, uint16_t id, const char* name);
};
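
The instance(isolate) rewrite above is the recurring shape this patch uses for retiring process-wide singletons: keep a pointer slot on the Isolate and create the object lazily on first use. The generic idiom, with placeholder names (T, t_slot, and set_t_slot are illustrative, not real accessors):

static T* instance(Isolate* isolate) {
  T* t = isolate->t_slot();        // cached pointer held by the isolate
  if (t == NULL) {
    t = new T(isolate);
    isolate->set_t_slot(t);        // memoize for subsequent calls
  }
  return t;
}
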
-ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;
-
-
void ExternalReferenceTable::AddFromId(TypeCode type,
uint16_t id,
const char* name) {
}
-void ExternalReferenceTable::PopulateTable() {
+void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
max_id_[type_code] = 0;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Debug addresses
- Add(Debug_Address(Debug::k_after_break_target_address).address(),
+ Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
DEBUG_ADDRESS,
Debug::k_after_break_target_address << kDebugIdShift,
"Debug::after_break_target_address()");
- Add(Debug_Address(Debug::k_debug_break_slot_address).address(),
+ Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
DEBUG_ADDRESS,
Debug::k_debug_break_slot_address << kDebugIdShift,
"Debug::debug_break_slot_address()");
- Add(Debug_Address(Debug::k_debug_break_return_address).address(),
+ Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
DEBUG_ADDRESS,
Debug::k_debug_break_return_address << kDebugIdShift,
"Debug::debug_break_return_address()");
- Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(),
+ Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
DEBUG_ADDRESS,
Debug::k_restarter_frame_function_pointer << kDebugIdShift,
"Debug::restarter_frame_function_pointer_address()");
// Stat counters
struct StatsRefTableEntry {
- StatsCounter* counter;
+ StatsCounter* (Counters::*counter)();
uint16_t id;
const char* name;
};
- static const StatsRefTableEntry stats_ref_table[] = {
+ const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
- { &Counters::name, \
+ { &Counters::name, \
Counters::k_##name, \
"Counters::" #name },
#undef COUNTER_ENTRY
}; // end of stats_ref_table[].
+ Counters* counters = isolate->counters();
for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
- Add(reinterpret_cast<Address>(
- GetInternalPointer(stats_ref_table[i].counter)),
+ Add(reinterpret_cast<Address>(GetInternalPointer(
+ (counters->*(stats_ref_table[i].counter))())),
STATS_COUNTER,
stats_ref_table[i].id,
stats_ref_table[i].name);
}
// Top addresses
- const char* top_address_format = "Top::%s";
const char* AddressNames[] = {
-#define C(name) #name,
- TOP_ADDRESS_LIST(C)
- TOP_ADDRESS_LIST_PROF(C)
+#define C(name) "Isolate::" #name,
+ ISOLATE_ADDRESS_LIST(C)
+ ISOLATE_ADDRESS_LIST_PROF(C)
NULL
#undef C
};
- int top_format_length = StrLength(top_address_format) - 2;
- for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
- const char* address_name = AddressNames[i];
- Vector<char> name =
- Vector<char>::New(top_format_length + StrLength(address_name) + 1);
- const char* chars = name.start();
- OS::SNPrintF(name, top_address_format, address_name);
- Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
+ for (uint16_t i = 0; i < Isolate::k_isolate_address_count; ++i) {
+ Add(isolate->get_address_from_id((Isolate::AddressId)i),
+ TOP_ADDRESS, i, AddressNames[i]);
}
// Accessors
ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION
+ StubCache* stub_cache = isolate->stub_cache();
+
// Stub cache tables
- Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
+ Add(stub_cache->key_reference(StubCache::kPrimary).address(),
STUB_CACHE_TABLE,
1,
"StubCache::primary_->key");
- Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
+ Add(stub_cache->value_reference(StubCache::kPrimary).address(),
STUB_CACHE_TABLE,
2,
"StubCache::primary_->value");
- Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
+ Add(stub_cache->key_reference(StubCache::kSecondary).address(),
STUB_CACHE_TABLE,
3,
"StubCache::secondary_->key");
- Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
+ Add(stub_cache->value_reference(StubCache::kSecondary).address(),
STUB_CACHE_TABLE,
4,
"StubCache::secondary_->value");
RUNTIME_ENTRY,
2,
"V8::FillHeapNumberWithRandom");
-
Add(ExternalReference::random_uint32_function().address(),
RUNTIME_ENTRY,
3,
"V8::Random");
-
Add(ExternalReference::delete_handle_scope_extensions().address(),
RUNTIME_ENTRY,
4,
UNCLASSIFIED,
36,
"LDoubleConstant::one_half");
- Add(ExternalReference::address_of_minus_zero().address(),
+ Add(ExternalReference::isolate_address().address(),
UNCLASSIFIED,
37,
+ "isolate");
+ Add(ExternalReference::address_of_minus_zero().address(),
+ UNCLASSIFIED,
+ 38,
"LDoubleConstant::minus_zero");
Add(ExternalReference::address_of_negative_infinity().address(),
UNCLASSIFIED,
- 38,
+ 39,
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function().address(),
UNCLASSIFIED,
- 39,
+ 40,
"power_double_double_function");
Add(ExternalReference::power_double_int_function().address(),
UNCLASSIFIED,
- 40,
+ 41,
"power_double_int_function");
Add(ExternalReference::arguments_marker_location().address(),
UNCLASSIFIED,
- 41,
+ 42,
"Factory::arguments_marker().location()");
}
ExternalReferenceEncoder::ExternalReferenceEncoder()
- : encodings_(Match) {
+ : encodings_(Match),
+ isolate_(Isolate::Current()) {
ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance();
+ ExternalReferenceTable::instance(isolate_);
for (int i = 0; i < external_references->size(); ++i) {
Put(external_references->address(i), i);
}
uint32_t ExternalReferenceEncoder::Encode(Address key) const {
int index = IndexOf(key);
ASSERT(key == NULL || index >= 0);
- return index >=0 ? ExternalReferenceTable::instance()->code(index) : 0;
+  return index >= 0 ?
+ ExternalReferenceTable::instance(isolate_)->code(index) : 0;
}
const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
int index = IndexOf(key);
- return index >=0 ? ExternalReferenceTable::instance()->name(index) : NULL;
+ return index >= 0 ?
+ ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
}
int ExternalReferenceEncoder::IndexOf(Address key) const {
if (key == NULL) return -1;
HashMap::Entry* entry =
- const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false);
+ const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
return entry == NULL
? -1
: static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
ExternalReferenceDecoder::ExternalReferenceDecoder()
- : encodings_(NewArray<Address*>(kTypeCodeCount)) {
+ : encodings_(NewArray<Address*>(kTypeCodeCount)),
+ isolate_(Isolate::Current()) {
ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance();
+ ExternalReferenceTable::instance(isolate_);
for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
int max = external_references->max_id(type) + 1;
encodings_[type] = NewArray<Address>(max + 1);
bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;
-ExternalReferenceDecoder* Deserializer::external_reference_decoder_ = NULL;
-Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) {
+Deserializer::Deserializer(SnapshotByteSource* source)
+ : isolate_(NULL),
+ source_(source),
+ external_reference_decoder_(NULL) {
}
void Deserializer::Deserialize() {
+ isolate_ = Isolate::Current();
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
LinearAllocationScope allocate_linearly;
// No active threads.
- ASSERT_EQ(NULL, ThreadState::FirstInUse());
+ ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
- ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+ ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
// Make sure the entire partial snapshot cache is traversed, filling it with
// valid object pointers.
- partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+ isolate_->set_serialize_partial_snapshot_cache_length(
+ Isolate::kPartialSnapshotCacheCapacity);
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
- Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
- Heap::IterateWeakRoots(this, VISIT_ALL);
+ isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
- Heap::set_global_contexts_list(Heap::undefined_value());
+ isolate_->heap()->set_global_contexts_list(
+ isolate_->heap()->undefined_value());
}
void Deserializer::DeserializePartial(Object** root) {
+ isolate_ = Isolate::Current();
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
Deserializer::~Deserializer() {
ASSERT(source_->AtEOF());
- if (external_reference_decoder_ != NULL) {
+ if (external_reference_decoder_) {
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
}
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
if (FLAG_log_snapshot_positions) {
- LOG(SnapshotPositionEvent(address, source_->position()));
+ LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
}
ReadChunk(current, limit, space_number, address);
#ifdef DEBUG
- bool is_codespace = (space == Heap::code_space()) ||
- ((space == Heap::lo_space()) && (space_number == kLargeCode));
+ bool is_codespace = (space == HEAP->code_space()) ||
+ ((space == HEAP->lo_space()) && (space_number == kLargeCode));
ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
#endif
}
#define ASSIGN_DEST_SPACE(space_number) \
Space* dest_space; \
if (space_number == NEW_SPACE) { \
- dest_space = Heap::new_space(); \
+ dest_space = isolate->heap()->new_space(); \
} else if (space_number == OLD_POINTER_SPACE) { \
- dest_space = Heap::old_pointer_space(); \
+ dest_space = isolate->heap()->old_pointer_space(); \
} else if (space_number == OLD_DATA_SPACE) { \
- dest_space = Heap::old_data_space(); \
+ dest_space = isolate->heap()->old_data_space(); \
} else if (space_number == CODE_SPACE) { \
- dest_space = Heap::code_space(); \
+ dest_space = isolate->heap()->code_space(); \
} else if (space_number == MAP_SPACE) { \
- dest_space = Heap::map_space(); \
+ dest_space = isolate->heap()->map_space(); \
} else if (space_number == CELL_SPACE) { \
- dest_space = Heap::cell_space(); \
+ dest_space = isolate->heap()->cell_space(); \
} else { \
ASSERT(space_number >= LO_SPACE); \
- dest_space = Heap::lo_space(); \
+ dest_space = isolate->heap()->lo_space(); \
}
Object** limit,
int source_space,
Address address) {
+ Isolate* const isolate = isolate_;
while (current < limit) {
int data = source_->Get();
switch (data) {
ReadObject(space_number, dest_space, &new_object); \
} else if (where == kRootArray) { \
int root_id = source_->GetInt(); \
- new_object = Heap::roots_address()[root_id]; \
+ new_object = isolate->heap()->roots_address()[root_id]; \
} else if (where == kPartialSnapshotCache) { \
int cache_index = source_->GetInt(); \
- new_object = partial_snapshot_cache_[cache_index]; \
+ new_object = isolate->serialize_partial_snapshot_cache() \
+ [cache_index]; \
} else if (where == kExternalReference) { \
int reference_id = source_->GetInt(); \
- Address address = \
- external_reference_decoder_->Decode(reference_id); \
+ Address address = external_reference_decoder_-> \
+ Decode(reference_id); \
new_object = reinterpret_cast<Object*>(address); \
} else if (where == kBackref) { \
emit_write_barrier = \
} \
} \
if (emit_write_barrier) { \
- Heap::RecordWrite(address, static_cast<int>( \
+ isolate->heap()->RecordWrite(address, static_cast<int>( \
reinterpret_cast<Address>(current) - address)); \
} \
if (!current_was_incremented) { \
int index = source_->Get();
Vector<const char> source_vector = Natives::GetScriptSource(index);
NativesExternalStringResource* resource =
- new NativesExternalStringResource(source_vector.start());
+ new NativesExternalStringResource(
+ isolate->bootstrapper(), source_vector.start());
*current++ = reinterpret_cast<Object*>(resource);
break;
}
current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder),
large_object_total_(0) {
+ // The serializer is meant to be used only to generate initial heap images
+ // from a context in which there is only one isolate.
+ ASSERT(Isolate::Current()->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
void StartupSerializer::SerializeStrongReferences() {
+ Isolate* isolate = Isolate::Current();
// No active threads.
- CHECK_EQ(NULL, ThreadState::FirstInUse());
+ CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
- CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
- CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
+ CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
+ CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
// We don't support serializing installed extensions.
- for (RegisteredExtension* ext = RegisteredExtension::first_extension();
+ for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
ext != NULL;
ext = ext->next()) {
CHECK_NE(v8::INSTALLED, ext->state());
}
- Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
void PartialSerializer::Serialize(Object** object) {
this->VisitPointer(object);
+ Isolate* isolate = Isolate::Current();
// After we have done the partial serialization the partial snapshot cache
// will contain some references needed to decode the partial snapshot. We
// fill it up with undefineds so it has a predictable length so the
// deserialization code doesn't need to know the length.
- for (int index = partial_snapshot_cache_length_;
- index < kPartialSnapshotCacheCapacity;
+ for (int index = isolate->serialize_partial_snapshot_cache_length();
+ index < Isolate::kPartialSnapshotCacheCapacity;
index++) {
- partial_snapshot_cache_[index] = Heap::undefined_value();
- startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
+ isolate->serialize_partial_snapshot_cache()[index] =
+ isolate->heap()->undefined_value();
+ startup_serializer_->VisitPointer(
+ &isolate->serialize_partial_snapshot_cache()[index]);
}
- partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+ isolate->set_serialize_partial_snapshot_cache_length(
+ Isolate::kPartialSnapshotCacheCapacity);
}
}
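
// A minimal standalone sketch of the padding idea above (names here are
// illustrative stand-ins, not V8 API): topping the cache up to a fixed
// capacity with a sentinel means the deserializer can hard-code the length
// instead of reading it from the snapshot.

#include <cstddef>

namespace sketch {

const int kCapacity = 8;              // stands in for kPartialSnapshotCacheCapacity
const void* const kUndefined = NULL;  // stands in for the heap's undefined value

// Pad the tail of |cache| so it always holds exactly kCapacity entries.
void PadCache(const void* cache[kCapacity], int used) {
  for (int i = used; i < kCapacity; i++) cache[i] = kUndefined;
}

}  // namespace sketch
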
-Object* SerializerDeserializer::partial_snapshot_cache_[
- kPartialSnapshotCacheCapacity];
-int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
-
-
// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement. When it is called during serialization of the startup
// snapshot, the partial snapshot cache is empty, so nothing happens. During
// partial deserialization we therefore need to visit the cache array. This
// fills it up with pointers to deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+ Isolate* isolate = Isolate::Current();
visitor->VisitPointers(
- &partial_snapshot_cache_[0],
- &partial_snapshot_cache_[partial_snapshot_cache_length_]);
+ isolate->serialize_partial_snapshot_cache(),
+ &isolate->serialize_partial_snapshot_cache()[
+ isolate->serialize_partial_snapshot_cache_length()]);
}
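
// The function above in miniature (illustrative names only): handing the
// collector the [begin, end) slice of a pointer array both keeps the
// referents alive and lets a moving collector rewrite the slots in place.

namespace sketch {

class PointerVisitor {
 public:
  virtual ~PointerVisitor() {}
  virtual void VisitPointers(void** begin, void** end) = 0;
};

// Only the populated prefix of the cache is handed to the GC.
void IterateCache(PointerVisitor* v, void** cache, int length) {
  v->VisitPointers(cache, cache + length);
}

}  // namespace sketch
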
// the root iteration code (above) will iterate over array elements, writing the
// references to deserialized objects in them.
void SerializerDeserializer::SetSnapshotCacheSize(int size) {
- partial_snapshot_cache_length_ = size;
+ Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
}
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- for (int i = 0; i < partial_snapshot_cache_length_; i++) {
- Object* entry = partial_snapshot_cache_[i];
+ Isolate* isolate = Isolate::Current();
+
+ for (int i = 0;
+ i < isolate->serialize_partial_snapshot_cache_length();
+ i++) {
+ Object* entry = isolate->serialize_partial_snapshot_cache()[i];
if (entry == heap_object) return i;
}
// We didn't find the object in the cache. So we add it to the cache and
// then visit the pointer so that it becomes part of the startup snapshot
// and we can refer to it from the partial snapshot.
- int length = partial_snapshot_cache_length_;
- CHECK(length < kPartialSnapshotCacheCapacity);
- partial_snapshot_cache_[length] = heap_object;
- startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
+ int length = isolate->serialize_partial_snapshot_cache_length();
+ CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
+ isolate->serialize_partial_snapshot_cache()[length] = heap_object;
+ startup_serializer_->VisitPointer(
+ &isolate->serialize_partial_snapshot_cache()[length]);
// We don't recurse from the startup snapshot generator into the partial
// snapshot generator.
- ASSERT(length == partial_snapshot_cache_length_);
- return partial_snapshot_cache_length_++;
+ ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
+ isolate->set_serialize_partial_snapshot_cache_length(length + 1);
+ return length;
}
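
// PartialSnapshotCacheIndex above boils down to a find-or-append over a small
// array. A standalone sketch (std::vector and a callback stand in for the
// fixed-capacity cache and the startup serializer's VisitPointer):

#include <cstddef>
#include <vector>

namespace sketch {

template <typename T, typename OnNewEntry>
int FindOrAppend(std::vector<T*>* cache, T* obj, OnNewEntry on_new_entry) {
  for (std::size_t i = 0; i < cache->size(); i++) {
    if ((*cache)[i] == obj) return static_cast<int>(i);  // already cached
  }
  cache->push_back(obj);
  on_new_entry(&cache->back());  // the real code serializes the new entry
  return static_cast<int>(cache->size() - 1);
}

}  // namespace sketch
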
int PartialSerializer::RootIndex(HeapObject* heap_object) {
for (int i = 0; i < Heap::kRootListLength; i++) {
- Object* root = Heap::roots_address()[i];
+ Object* root = HEAP->roots_address()[i];
if (root == heap_object) return i;
}
return kInvalidRootIndex;
void StartupSerializer::SerializeWeakReferences() {
- for (int i = partial_snapshot_cache_length_;
- i < kPartialSnapshotCacheCapacity;
+ for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
+ i < Isolate::kPartialSnapshotCacheCapacity;
i++) {
sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
}
- Heap::IterateWeakRoots(this, VISIT_ALL);
+ HEAP->IterateWeakRoots(this, VISIT_ALL);
}
"ObjectSerialization");
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
- LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));
+ LOG(i::Isolate::Current(),
+ SnapshotPositionEvent(object_->address(), sink_->Position()));
// Mark this object as already serialized.
bool start_new_page;
Address references_start = reinterpret_cast<Address>(resource_pointer);
OutputRawData(references_start);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = Heap::natives_source_cache()->get(i);
+ Object* source = HEAP->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
ExternalAsciiString* string = ExternalAsciiString::cast(source);
typedef v8::String::ExternalAsciiStringResource Resource;
int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
- if (Heap::InSpace(object, s)) {
+ if (HEAP->InSpace(object, s)) {
if (i == LO_SPACE) {
if (object->IsCode()) {
return kLargeCode;
int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
- if (Heap::InSpace(object, s)) {
+ if (HEAP->InSpace(object, s)) {
return i;
}
}
static bool Match(void* key1, void* key2) { return key1 == key2; }
void Put(Address key, int index);
+
+ Isolate* isolate_;
};
void Put(uint32_t key, Address value) {
*Lookup(key) = value;
}
+
+ Isolate* isolate_;
};
// This only works for objects in the first page of a space. Don't use this for
// things in newspace since it bypasses the write barrier.
-static const int k64 = (sizeof(uintptr_t) - 4) / 4;
+RLYSTC const int k64 = (sizeof(uintptr_t) - 4) / 4;
#define COMMON_REFERENCE_PATTERNS(f) \
f(kNumberOfSpaces, 2, (11 - k64)) \
// both.
class SerializerDeserializer: public ObjectVisitor {
public:
- static void Iterate(ObjectVisitor* visitor);
- static void SetSnapshotCacheSize(int size);
+ RLYSTC void Iterate(ObjectVisitor* visitor);
+ RLYSTC void SetSnapshotCacheSize(int size);
protected:
// Where the pointed-to object can be found:
// Misc.
// Raw data to be copied from the snapshot.
- static const int kRawData = 0x30;
+ RLYSTC const int kRawData = 0x30;
// Some common raw lengths: 0x31-0x3f
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches. (A minimal standalone model of this check follows the class.)
- static const int kSynchronize = 0x70;
+ RLYSTC const int kSynchronize = 0x70;
// Used for the source code of the natives, which is in the executable, but
// is referred to from external strings in the snapshot.
- static const int kNativesStringResource = 0x71;
- static const int kNewPage = 0x72;
+ RLYSTC const int kNativesStringResource = 0x71;
+ RLYSTC const int kNewPage = 0x72;
// 0x73-0x7f Free.
// 0xb0-0xbf Free.
// 0xf0-0xff Free.
- static const int kLargeData = LAST_SPACE;
- static const int kLargeCode = kLargeData + 1;
- static const int kLargeFixedArray = kLargeCode + 1;
- static const int kNumberOfSpaces = kLargeFixedArray + 1;
- static const int kAnyOldSpace = -1;
+ RLYSTC const int kLargeData = LAST_SPACE;
+ RLYSTC const int kLargeCode = kLargeData + 1;
+ RLYSTC const int kLargeFixedArray = kLargeCode + 1;
+ RLYSTC const int kNumberOfSpaces = kLargeFixedArray + 1;
+ RLYSTC const int kAnyOldSpace = -1;
// A bitmask for getting the space out of an instruction.
- static const int kSpaceMask = 15;
+ RLYSTC const int kSpaceMask = 15;
- static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
- static inline bool SpaceIsPaged(int space) {
+ RLYSTC inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+ RLYSTC inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
-
- static int partial_snapshot_cache_length_;
- static const int kPartialSnapshotCacheCapacity = 1400;
- static Object* partial_snapshot_cache_[];
};
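
// The kSynchronize tag above, modeled standalone (names illustrative): the
// writer drops a known byte between sections, and the reader refuses to
// continue if it is missing, turning a snapshot/VM layout mismatch into an
// immediate, diagnosable failure instead of silent corruption.

#include <stdint.h>
#include <vector>

namespace sketch {

const uint8_t kSyncTag = 0x70;

void PutSection(std::vector<uint8_t>* out, const std::vector<uint8_t>& body) {
  out->insert(out->end(), body.begin(), body.end());
  out->push_back(kSyncTag);  // delineate the section boundary
}

// Returns false on mismatch; the real deserializer treats this as fatal.
bool ExpectSync(const uint8_t** cursor, const uint8_t* end) {
  if (*cursor == end || **cursor != kSyncTag) return false;
  ++*cursor;
  return true;
}

}  // namespace sketch
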
Address Allocate(int space_number, Space* space, int size);
void ReadObject(int space_number, Space* space, Object** write_back);
+ // Cached current isolate.
+ Isolate* isolate_;
+
// Keep track of the pages in the paged spaces.
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
SnapshotByteSource* source_;
- static ExternalReferenceDecoder* external_reference_decoder_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
// START_NEW_PAGE_SERIALIZATION tag.
Address last_object_address_;
+ ExternalReferenceDecoder* external_reference_decoder_;
+
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
}
private:
- static bool SerializationMatchFun(void* key1, void* key2) {
+ RLYSTC bool SerializationMatchFun(void* key1, void* key2) {
return key1 == key2;
}
- static uint32_t Hash(HeapObject* obj) {
+ RLYSTC uint32_t Hash(HeapObject* obj) {
return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
}
- static void* Key(HeapObject* obj) {
+ RLYSTC void* Key(HeapObject* obj) {
return reinterpret_cast<void*>(obj->address());
}
- static void* Value(int v) {
+ RLYSTC void* Value(int v) {
return reinterpret_cast<void*>(v);
}
};
-class Serializer : public SerializerDeserializer {
+// There can be only one serializer per V8 process.
+STATIC_CLASS Serializer : public SerializerDeserializer {
public:
explicit Serializer(SnapshotByteSink* sink);
~Serializer();
return fullness_[space];
}
- static void Enable() {
+ RLYSTC void Enable() {
if (!serialization_enabled_) {
ASSERT(!too_late_to_enable_now_);
}
serialization_enabled_ = true;
}
- static void Disable() { serialization_enabled_ = false; }
+ RLYSTC void Disable() { serialization_enabled_ = false; }
// Call this when you have made use of the fact that there is no serialization
// going on.
- static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
- static bool enabled() { return serialization_enabled_; }
+ RLYSTC void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
+ RLYSTC bool enabled() { return serialization_enabled_; }
SerializationAddressMapper* address_mapper() { return &address_mapper_; }
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
protected:
- static const int kInvalidRootIndex = -1;
+ RLYSTC const int kInvalidRootIndex = -1;
virtual int RootIndex(HeapObject* heap_object) = 0;
virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
// object space it may return kLargeCode or kLargeFixedArray in order
// to indicate to the deserializer what kind of large object allocation
// to make.
- static int SpaceOfObject(HeapObject* object);
+ RLYSTC int SpaceOfObject(HeapObject* object);
// This just returns the space of the object. It will return LO_SPACE
// for all large objects since you can't check the type of the object
// once the map has been used for the serialization address.
- static int SpaceOfAlreadySerializedObject(HeapObject* object);
+ RLYSTC int SpaceOfAlreadySerializedObject(HeapObject* object);
int Allocate(int space, int size, bool* new_page_started);
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
- static bool serialization_enabled_;
+ RLYSTC bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
- static bool too_late_to_enable_now_;
+ RLYSTC bool too_late_to_enable_now_;
int large_object_total_;
SerializationAddressMapper address_mapper_;
ASSERT(!o->IsScript());
return o->IsString() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode() ||
- o->map() == Heap::fixed_cow_array_map();
+ o->map() == HEAP->fixed_cow_array_map();
}
private:
// strong roots have been serialized we can create a partial snapshot
// which will repopulate the cache with objects needed by that partial
// snapshot.
- partial_snapshot_cache_length_ = 0;
+ Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
}
// Serialize the current state of the heap. The order is:
// 1) Strong references.
if (context_size_ == 0) {
return Handle<Context>();
}
- Heap::ReserveSpace(new_space_used_,
+ HEAP->ReserveSpace(new_space_used_,
pointer_space_used_,
data_space_used_,
code_space_used_,
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "isolate.h"
+
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
namespace v8 {
namespace internal {
-class Snapshot {
+STATIC_CLASS Snapshot {
public:
// Initialize the VM from the given snapshot file. If snapshot_file is
// NULL, use the internal snapshot instead. Returns false if no snapshot
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
+#include "isolate.h"
#include "memory.h"
#include "spaces.h"
// Page
Page* Page::next_page() {
- return MemoryAllocator::GetNextPage(this);
+ return heap_->isolate()->memory_allocator()->GetNextPage(this);
}
Address Page::AllocationTop() {
- PagedSpace* owner = MemoryAllocator::PageOwner(this);
+ PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
return owner->PageAllocationTop(this);
}
Address Page::AllocationWatermark() {
- PagedSpace* owner = MemoryAllocator::PageOwner(this);
+ PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
if (this == owner->AllocationTopPage()) {
return owner->top();
}
void Page::SetAllocationWatermark(Address allocation_watermark) {
- if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
+ if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
// When iterating intergenerational references during scavenge
// we might decide to promote an encountered young object.
// We will allocate a space for such an object and put it
}
-void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
- watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
+void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
+ heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}
bool Page::IsWatermarkValid() {
- return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
+ return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
+ heap_->page_watermark_invalidated_mark_;
}
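
// The watermark trick above, standalone: each page stores one flag bit, and
// the heap stores which polarity of that bit currently means "invalidated".
// Flipping the heap-wide polarity invalidates every page's watermark in O(1)
// instead of walking all pages. (MiniHeap/MiniPage are illustrative.)

#include <stdint.h>

namespace sketch {

const int kWatermarkBit = 3;  // arbitrary bit position for the sketch

struct MiniHeap { uintptr_t watermark_invalidated_mark; };
struct MiniPage { uintptr_t flags; };

bool IsWatermarkValid(const MiniHeap& h, const MiniPage& p) {
  return (p.flags & (uintptr_t(1) << kWatermarkBit)) !=
         h.watermark_invalidated_mark;
}

// One XOR flips the meaning of the bit for every page at once.
void InvalidateAllWatermarks(MiniHeap* h) {
  h->watermark_invalidated_mark ^= uintptr_t(1) << kWatermarkBit;
}

}  // namespace sketch
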
void Page::InvalidateWatermark(bool value) {
if (value) {
flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- watermark_invalidated_mark_;
+ heap_->page_watermark_invalidated_mark_;
} else {
- flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
+ flags_ =
+ (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ (heap_->page_watermark_invalidated_mark_ ^
+ (1 << WATERMARK_INVALIDATED));
}
ASSERT(IsWatermarkValid() == !value);
void Page::ClearGCFields() {
InvalidateWatermark(true);
SetAllocationWatermark(ObjectAreaStart());
- if (Heap::gc_state() == Heap::SCAVENGE) {
+ if (heap_->gc_state() == Heap::SCAVENGE) {
SetCachedAllocationWatermark(ObjectAreaStart());
}
SetRegionMarks(kAllRegionsCleanMarks);
size_ = s;
owner_ = o;
executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
+ owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
}
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
if (!p->is_valid()) return false;
- return MemoryAllocator::IsPageInSpace(p, this);
-}
-
-
-bool PagedSpace::SafeContains(Address addr) {
- if (!MemoryAllocator::SafeIsInAPageChunk(addr)) return false;
- Page* p = Page::FromAddress(addr);
- if (!p->is_valid()) return false;
- return MemoryAllocator::IsPageInSpace(p, this);
+ return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
}
void LargeObjectChunk::Free(Executability executable) {
- MemoryAllocator::FreeRawMemory(address(), size(), executable);
+ Isolate* isolate =
+ Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();
+ isolate->memory_allocator()->FreeRawMemory(address(), size(), executable);
}
// -----------------------------------------------------------------------------
}
+intptr_t LargeObjectSpace::Available() {
+ return LargeObjectChunk::ObjectSizeFor(
+ heap()->isolate()->memory_allocator()->Available());
+}
+
+
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
ASSERT(length <= string->length());
bool FreeListNode::IsFreeListNode(HeapObject* object) {
- return object->map() == Heap::raw_unchecked_byte_array_map()
- || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
- || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
+ return object->map() == HEAP->raw_unchecked_byte_array_map()
+ || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
+ || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
}
} } // namespace v8::internal
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
-intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
-
// ----------------------------------------------------------------------------
// HeapObjectIterator
// -----------------------------------------------------------------------------
// CodeRange
-List<CodeRange::FreeBlock> CodeRange::free_list_(0);
-List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
-int CodeRange::current_allocation_block_index_ = 0;
-VirtualMemory* CodeRange::code_range_ = NULL;
+
+CodeRange::CodeRange()
+ : code_range_(NULL),
+ free_list_(0),
+ allocation_list_(0),
+ current_allocation_block_index_(0),
+ isolate_(NULL) {
+}
bool CodeRange::Setup(const size_t requested) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(code_range_->size() == requested);
- LOG(NewEvent("CodeRange", code_range_->address(), requested));
+ LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
current_allocation_block_index_ = 0;
return true;
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-intptr_t MemoryAllocator::capacity_ = 0;
-intptr_t MemoryAllocator::capacity_executable_ = 0;
-intptr_t MemoryAllocator::size_ = 0;
-intptr_t MemoryAllocator::size_executable_ = 0;
-
-List<MemoryAllocator::MemoryAllocationCallbackRegistration>
- MemoryAllocator::memory_allocation_callbacks_;
-
-VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
-List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
- kEstimatedNumberOfChunks);
-List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
-int MemoryAllocator::max_nof_chunks_ = 0;
-int MemoryAllocator::top_ = 0;
+
+
+MemoryAllocator::MemoryAllocator()
+ : capacity_(0),
+ capacity_executable_(0),
+ size_(0),
+ size_executable_(0),
+ initial_chunk_(NULL),
+ chunks_(kEstimatedNumberOfChunks),
+ free_chunk_ids_(kEstimatedNumberOfChunks),
+ max_nof_chunks_(0),
+ top_(0),
+ isolate_(NULL) {
+}
void MemoryAllocator::Push(int free_chunk_id) {
}
-bool MemoryAllocator::SafeIsInAPageChunk(Address addr) {
- return InInitialChunk(addr) || InAllocatedChunks(addr);
-}
-
-
void MemoryAllocator::TearDown() {
for (int i = 0; i < max_nof_chunks_; i++) {
if (chunks_[i].address() != NULL) DeleteChunk(i);
free_chunk_ids_.Clear();
if (initial_chunk_ != NULL) {
- LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
+ LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
delete initial_chunk_;
initial_chunk_ = NULL;
}
- FreeChunkTables(&chunk_table_[0],
- kChunkTableTopLevelEntries,
- kChunkTableLevels);
-
ASSERT(top_ == max_nof_chunks_); // all chunks are free
top_ = 0;
capacity_ = 0;
}
-void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) {
- for (int i = 0; i < len; i++) {
- if (array[i] != kUnusedChunkTableEntry) {
- uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]);
- if (level > 1) {
- array[i] = kUnusedChunkTableEntry;
- FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
- } else {
- array[i] = kUnusedChunkTableEntry;
- }
- delete[] subarray;
- }
- }
-}
-
-
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable) {
// Check executable memory limit.
if (size_executable_ + requested >
static_cast<size_t>(capacity_executable_)) {
- LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
+ LOG(isolate_,
+ StringEvent("MemoryAllocator::AllocateRawMemory",
"V8 Executable Allocation capacity exceeded"));
return NULL;
}
// Allocate executable memory either from code range or from the
// OS.
- if (CodeRange::exists()) {
- mem = CodeRange::AllocateRawMemory(requested, allocated);
+ if (isolate_->code_range()->exists()) {
+ mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
} else {
mem = OS::Allocate(requested, allocated, true);
}
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
- Counters::memory_allocated.Increment(alloced);
+ COUNTERS->memory_allocated()->Increment(alloced);
return mem;
}
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
- if (CodeRange::contains(static_cast<Address>(mem))) {
- CodeRange::FreeRawMemory(mem, length);
+ if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
+ isolate_->code_range()->FreeRawMemory(mem, length);
} else {
OS::Free(mem, length);
}
- Counters::memory_allocated.Decrement(static_cast<int>(length));
+ COUNTERS->memory_allocated()->Decrement(static_cast<int>(length));
size_ -= static_cast<int>(length);
if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
// We are sure that we have mapped a block of requested addresses.
ASSERT(initial_chunk_->size() == requested);
- LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
+ LOG(isolate_,
+ NewEvent("InitialChunk", initial_chunk_->address(), requested));
size_ += static_cast<int>(requested);
return initial_chunk_->address();
}
void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
if (chunk == NULL) return Page::FromAddress(NULL);
- LOG(NewEvent("PagedChunk", chunk, chunk_size));
+ LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
// We may 'lose' a page due to alignment.
ASSERT(*allocated_pages >= kPagesPerChunk - 1);
if (*allocated_pages == 0) {
FreeRawMemory(chunk, chunk_size, owner->executable());
- LOG(DeleteEvent("PagedChunk", chunk));
+ LOG(isolate_, DeleteEvent("PagedChunk", chunk));
return Page::FromAddress(NULL);
}
PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
- AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size);
-
return new_pages;
}
#ifdef DEBUG
ZapBlock(start, size);
#endif
- Counters::memory_allocated.Increment(static_cast<int>(size));
+ COUNTERS->memory_allocated()->Increment(static_cast<int>(size));
// So long as we correctly overestimated the number of chunks we should not
// run out of chunk ids.
#ifdef DEBUG
ZapBlock(start, size);
#endif
- Counters::memory_allocated.Increment(static_cast<int>(size));
+ COUNTERS->memory_allocated()->Increment(static_cast<int>(size));
return true;
}
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Uncommit(start, size)) return false;
- Counters::memory_allocated.Decrement(static_cast<int>(size));
+ COUNTERS->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
Address page_addr = low;
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
+ p->heap_ = owner->heap();
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
p->InvalidateWatermark(true);
p->SetIsLargeObjectPage(false);
// TODO(1240712): VirtualMemory::Uncommit has a return value which
// is ignored here.
initial_chunk_->Uncommit(c.address(), c.size());
- Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
+ COUNTERS->memory_allocated()->Decrement(static_cast<int>(c.size()));
} else {
- RemoveFromAllocatedChunks(c.address(), c.size());
- LOG(DeleteEvent("PagedChunk", c.address()));
- ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
+ LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
+ ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
size_t size = c.size();
FreeRawMemory(c.address(), size, c.executable());
PerformAllocationCallback(space, kAllocationActionFree, size);
}
-void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {
- ASSERT(size == kChunkSize);
- uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
- AddChunkUsingAddress(int_address, int_address);
- AddChunkUsingAddress(int_address, int_address + size - 1);
-}
-
-
-void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
- uintptr_t chunk_index_base) {
- uintptr_t* fine_grained = AllocatedChunksFinder(
- chunk_table_,
- chunk_index_base,
- kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
- kCreateTablesAsNeeded);
- int index = FineGrainedIndexForAddress(chunk_index_base);
- if (fine_grained[index] != kUnusedChunkTableEntry) index++;
- ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
- fine_grained[index] = chunk_start;
-}
-
-
-void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
- ASSERT(size == kChunkSize);
- uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
- RemoveChunkFoundUsingAddress(int_address, int_address);
- RemoveChunkFoundUsingAddress(int_address, int_address + size - 1);
-}
-
-
-void MemoryAllocator::RemoveChunkFoundUsingAddress(
- uintptr_t chunk_start,
- uintptr_t chunk_index_base) {
- uintptr_t* fine_grained = AllocatedChunksFinder(
- chunk_table_,
- chunk_index_base,
- kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
- kDontCreateTables);
- // Can't remove an entry that's not there.
- ASSERT(fine_grained != kUnusedChunkTableEntry);
- int index = FineGrainedIndexForAddress(chunk_index_base);
- ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
- if (fine_grained[index] != chunk_start) {
- index++;
- ASSERT(fine_grained[index] == chunk_start);
- fine_grained[index] = kUnusedChunkTableEntry;
- } else {
- // If only one of the entries is used it must be the first, since
- // InAllocatedChunks relies on that. Move things around so that this is
- // the case.
- fine_grained[index] = fine_grained[index + 1];
- fine_grained[index + 1] = kUnusedChunkTableEntry;
- }
-}
-
-
-bool MemoryAllocator::InAllocatedChunks(Address addr) {
- uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
- uintptr_t* fine_grained = AllocatedChunksFinder(
- chunk_table_,
- int_address,
- kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
- kDontCreateTables);
- if (fine_grained == NULL) return false;
- int index = FineGrainedIndexForAddress(int_address);
- if (fine_grained[index] == kUnusedChunkTableEntry) return false;
- uintptr_t entry = fine_grained[index];
- if (entry <= int_address && entry + kChunkSize > int_address) return true;
- index++;
- if (fine_grained[index] == kUnusedChunkTableEntry) return false;
- entry = fine_grained[index];
- if (entry <= int_address && entry + kChunkSize > int_address) return true;
- return false;
-}
-
-
-uintptr_t* MemoryAllocator::AllocatedChunksFinder(
- uintptr_t* table,
- uintptr_t address,
- int bit_position,
- CreateTables create_as_needed) {
- if (bit_position == kChunkSizeLog2) {
- return table;
- }
- ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel);
- int index =
- ((address >> bit_position) &
- ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1));
- uintptr_t more_fine_grained_address =
- address & ((V8_INTPTR_C(1) << bit_position) - 1);
- ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
- (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
- uintptr_t* more_fine_grained_table =
- reinterpret_cast<uintptr_t*>(table[index]);
- if (more_fine_grained_table == kUnusedChunkTableEntry) {
- if (create_as_needed == kDontCreateTables) return NULL;
- int words_needed = 1 << kChunkTableBitsPerLevel;
- if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) {
- words_needed =
- (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
- }
- more_fine_grained_table = new uintptr_t[words_needed];
- for (int i = 0; i < words_needed; i++) {
- more_fine_grained_table[i] = kUnusedChunkTableEntry;
- }
- table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table);
- }
- return AllocatedChunksFinder(
- more_fine_grained_table,
- more_fine_grained_address,
- bit_position - kChunkTableBitsPerLevel,
- create_as_needed);
-}
-
-
-uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];
-
-
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(intptr_t max_capacity,
+PagedSpace::PagedSpace(Heap* heap,
+ intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : Space(id, executable) {
+ : Space(heap, id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
// contain at least one page, ignore it and allocate instead.
int pages_in_chunk = PagesInChunk(start, size);
if (pages_in_chunk > 0) {
- first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
- Page::kPageSize * pages_in_chunk,
- this, &num_pages);
+ first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
+ RoundUp(start, Page::kPageSize),
+ Page::kPageSize * pages_in_chunk,
+ this, &num_pages);
} else {
int requested_pages =
Min(MemoryAllocator::kPagesPerChunk,
static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
first_page_ =
- MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
+ Isolate::Current()->memory_allocator()->AllocatePages(
+ requested_pages, &num_pages, this);
if (!first_page_->is_valid()) return false;
}
void PagedSpace::TearDown() {
- MemoryAllocator::FreeAllPages(this);
+ Isolate::Current()->memory_allocator()->FreeAllPages(this);
first_page_ = NULL;
accounting_stats_.Clear();
}
void PagedSpace::Protect() {
Page* page = first_page_;
while (page->is_valid()) {
- MemoryAllocator::ProtectChunkFromPage(page);
- page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+ Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
+ page = Isolate::Current()->memory_allocator()->
+ FindLastPageInSameChunk(page)->next_page();
}
}
void PagedSpace::Unprotect() {
Page* page = first_page_;
while (page->is_valid()) {
- MemoryAllocator::UnprotectChunkFromPage(page);
- page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+ Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
+ page = Isolate::Current()->memory_allocator()->
+ FindLastPageInSameChunk(page)->next_page();
}
}
MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called before or after mark-compact GC
// because it accesses map pointers.
- ASSERT(!MarkCompactCollector::in_use());
+ ASSERT(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Failure::Exception();
if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
- Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
+ desired_pages, &desired_pages, this);
if (!p->is_valid()) return false;
accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- MemoryAllocator::SetNextPage(last_page, p);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
// Sequentially clear region marks of new pages and cache the
// new last page in the space.
}
// Free pages after top_page.
- Page* p = MemoryAllocator::FreePages(top_page->next_page());
- MemoryAllocator::SetNextPage(top_page, p);
+ Page* p = heap()->isolate()->memory_allocator()->
+ FreePages(top_page->next_page());
+ heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
// Find out how many pages we failed to free and update last_page_.
// Please note pages can only be freed in whole chunks.
Page* last_page = AllocationTopPage();
Page* next_page = last_page->next_page();
while (next_page->is_valid()) {
- last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
+ last_page = heap()->isolate()->memory_allocator()->
+ FindLastPageInSameChunk(next_page);
next_page = last_page->next_page();
}
if (!Expand(last_page)) return false;
ASSERT(last_page->next_page()->is_valid());
last_page =
- MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
+ heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
+ last_page->next_page());
} while (Capacity() < capacity);
return true;
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
- ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
+ ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
// Loop over all the pages.
bool above_allocation_top = false;
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
- int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
- int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
+ int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
+ int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
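
// Why the power-of-two/alignment requirement above matters: containment in
// the new space becomes a single mask-and-compare. Standalone sketch:

#include <assert.h>
#include <stdint.h>

namespace sketch {

// |base| must be aligned to |size|, and |size| must be a power of two.
bool ChunkContains(uintptr_t base, uintptr_t size, uintptr_t addr) {
  assert((size & (size - 1)) == 0);
  assert((base & (size - 1)) == 0);
  return (addr & ~(size - 1)) == base;  // no range comparison needed
}

}  // namespace sketch
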
#undef SET_NAME
#endif
- ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
+ ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(start, size, 0));
if (!to_space_.Setup(start,
#ifdef ENABLE_HEAP_PROTECTION
void NewSpace::Protect() {
- MemoryAllocator::Protect(ToSpaceLow(), Capacity());
- MemoryAllocator::Protect(FromSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
}
void NewSpace::Unprotect() {
- MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
- to_space_.executable());
- MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
- from_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
+ to_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
+ from_space_.executable());
}
#endif
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// The object should not be code or a map.
ASSERT(!object->IsMap());
bool SemiSpace::Commit() {
ASSERT(!is_committed());
- if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ start_, capacity_, executable())) {
return false;
}
committed_ = true;
bool SemiSpace::Uncommit() {
ASSERT(is_committed());
- if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ start_, capacity_)) {
return false;
}
committed_ = false;
int maximum_extra = maximum_capacity_ - capacity_;
int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
maximum_extra);
- if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ high(), extra, executable())) {
return false;
}
capacity_ += extra;
ASSERT(new_capacity > capacity_);
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ high(), delta, executable())) {
return false;
}
capacity_ = new_capacity;
ASSERT(new_capacity < capacity_);
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ high() - delta, delta)) {
return false;
}
capacity_ = new_capacity;
#ifdef DEBUG
-// A static array of histogram info for each type.
-static HistogramInfo heap_histograms[LAST_TYPE+1];
-static JSObject::SpillInformation js_spill_information;
-
// heap_histograms is shared; always clear it before using it.
static void ClearHistograms() {
+ Isolate* isolate = Isolate::Current();
// We reset the name each time, though it hasn't changed.
-#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
+#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME
-#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
+#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM
- js_spill_information.Clear();
+ isolate->js_spill_information()->Clear();
}
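
// The transformation in this hunk recurs throughout the patch: a file-scope
// static becomes a field of Isolate, reached via the current-isolate pointer,
// so parallel VMs stop sharing mutable state. In miniature (MiniIsolate is
// illustrative; the real lookup is thread-local):

namespace sketch {

class MiniIsolate {
 public:
  MiniIsolate() {
    for (int i = 0; i < 16; i++) histograms_[i] = 0;
  }
  static MiniIsolate* Current() { return current_; }
  static void Enter(MiniIsolate* isolate) { current_ = isolate; }
  int* heap_histograms() { return histograms_; }  // was a file-scope static

 private:
  int histograms_[16];
  static MiniIsolate* current_;
};

MiniIsolate* MiniIsolate::current_ = 0;

void ClearHistogramsSketch() {
  MiniIsolate* isolate = MiniIsolate::Current();
  for (int i = 0; i < 16; i++) isolate->heap_histograms()[i] = 0;
}

}  // namespace sketch
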
-static int code_kind_statistics[Code::NUMBER_OF_KINDS];
-
-
static void ClearCodeKindStatistics() {
+ Isolate* isolate = Isolate::Current();
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- code_kind_statistics[i] = 0;
+ isolate->code_kind_statistics()[i] = 0;
}
}
static void ReportCodeKindStatistics() {
+ Isolate* isolate = Isolate::Current();
const char* table[Code::NUMBER_OF_KINDS] = { NULL };
#define CASE(name) \
PrintF("\n Code kind histograms: \n");
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- if (code_kind_statistics[i] > 0) {
- PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
+ if (isolate->code_kind_statistics()[i] > 0) {
+ PrintF(" %-20s: %10d bytes\n", table[i],
+ isolate->code_kind_statistics()[i]);
}
}
PrintF("\n");
static int CollectHistogramInfo(HeapObject* obj) {
+ Isolate* isolate = Isolate::Current();
InstanceType type = obj->map()->instance_type();
ASSERT(0 <= type && type <= LAST_TYPE);
- ASSERT(heap_histograms[type].name() != NULL);
- heap_histograms[type].increment_number(1);
- heap_histograms[type].increment_bytes(obj->Size());
+ ASSERT(isolate->heap_histograms()[type].name() != NULL);
+ isolate->heap_histograms()[type].increment_number(1);
+ isolate->heap_histograms()[type].increment_bytes(obj->Size());
if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
- JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
+ JSObject::cast(obj)->IncrementSpillStatistics(
+ isolate->js_spill_information());
}
return obj->Size();
static void ReportHistogram(bool print_spill) {
+ Isolate* isolate = Isolate::Current();
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
- if (heap_histograms[i].number() > 0) {
+ if (isolate->heap_histograms()[i].number() > 0) {
PrintF(" %-34s%10d (%10d bytes)\n",
- heap_histograms[i].name(),
- heap_histograms[i].number(),
- heap_histograms[i].bytes());
+ isolate->heap_histograms()[i].name(),
+ isolate->heap_histograms()[i].number(),
+ isolate->heap_histograms()[i].bytes());
}
}
PrintF("\n");
int string_number = 0;
int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
- string_number += heap_histograms[type].number(); \
- string_bytes += heap_histograms[type].bytes();
+ string_number += isolate->heap_histograms()[type].number(); \
+ string_bytes += isolate->heap_histograms()[type].bytes();
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
}
if (FLAG_collect_heap_spill_statistics && print_spill) {
- js_spill_information.Print();
+ isolate->js_spill_information()->Print();
}
}
#endif // DEBUG
#ifdef ENABLE_LOGGING_AND_PROFILING
-static void DoReportStatistics(HistogramInfo* info, const char* description) {
- LOG(HeapSampleBeginEvent("NewSpace", description));
+static void DoReportStatistics(Isolate* isolate,
+ HistogramInfo* info, const char* description) {
+ LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
// Lump all the string types together.
int string_number = 0;
int string_bytes = 0;
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
- LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+ LOG(isolate,
+ HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
}
// Then do the other types.
for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
if (info[i].number() > 0) {
- LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+ LOG(isolate,
+ HeapSampleItemEvent(info[i].name(), info[i].number(),
info[i].bytes()));
}
}
- LOG(HeapSampleEndEvent("NewSpace", description));
+ LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
#endif // ENABLE_LOGGING_AND_PROFILING
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_gc) {
- DoReportStatistics(allocated_histogram_, "allocated");
- DoReportStatistics(promoted_histogram_, "promoted");
+ Isolate* isolate = ISOLATE;
+ DoReportStatistics(isolate, allocated_histogram_, "allocated");
+ DoReportStatistics(isolate, promoted_histogram_, "promoted");
}
#endif // ENABLE_LOGGING_AND_PROFILING
}
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > ByteArray::kHeaderSize) {
- set_map(Heap::raw_unchecked_byte_array_map());
+ set_map(HEAP->raw_unchecked_byte_array_map());
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
- set_map(Heap::raw_unchecked_one_pointer_filler_map());
+ set_map(HEAP->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
- set_map(Heap::raw_unchecked_two_pointer_filler_map());
+ set_map(HEAP->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
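
// The encoding above keeps the heap iterable: every free block still looks
// like a real object. Blocks large enough become byte arrays that record
// their own length; one- and two-word blocks get dedicated filler maps whose
// map alone implies the size. A standalone model of the size dispatch
// (header_words stands in for the byte array header size, in words):

namespace sketch {

enum FreeBlockKind { kByteArrayFiller, kOneWordFiller, kTwoWordFiller };

FreeBlockKind KindForFreeBlock(int size_in_words, int header_words) {
  if (size_in_words > header_words) return kByteArrayFiller;
  if (size_in_words == 2) return kTwoWordFiller;
  return kOneWordFiller;  // size_in_words == 1
}

}  // namespace sketch
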
Address FreeListNode::next() {
ASSERT(IsFreeListNode(this));
- if (map() == Heap::raw_unchecked_byte_array_map()) {
+ if (map() == HEAP->raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
} else {
void FreeListNode::set_next(Address next) {
ASSERT(IsFreeListNode(this));
- if (map() == Heap::raw_unchecked_byte_array_map()) {
+ if (map() == HEAP->raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
} else {
int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
- MemoryAllocator::ZapBlock(start, size_in_bytes);
+ Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
#endif
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(size_in_bytes);
void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
- MemoryAllocator::ZapBlock(start, object_size_);
+ Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
#endif
// We only use the freelists with mark-sweep.
- ASSERT(!MarkCompactCollector::IsCompacting());
+ ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(object_size_);
node->set_next(NULL);
first_page_ = last->next_page();
} else {
first = prev->next_page();
- MemoryAllocator::SetNextPage(prev, last->next_page());
+ heap()->isolate()->memory_allocator()->SetNextPage(
+ prev, last->next_page());
}
// Attach it after the last page.
- MemoryAllocator::SetNextPage(last_page_, first);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
last_page_ = last;
- MemoryAllocator::SetNextPage(last, NULL);
+ heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
// Clean them up.
do {
if (page_list_is_chunk_ordered_) return;
Page* new_last_in_use = Page::FromAddress(NULL);
- MemoryAllocator::RelinkPageListInChunkOrder(this,
- &first_page_,
- &last_page_,
- &new_last_in_use);
+ heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
+ this, &first_page_, &last_page_, &new_last_in_use);
ASSERT(new_last_in_use->is_valid());
if (new_last_in_use != last_in_use) {
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- Heap::CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- Heap::CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
}
int bytes_left_to_reserve = bytes;
while (bytes_left_to_reserve > 0) {
if (!reserved_page->next_page()->is_valid()) {
- if (Heap::OldGenerationAllocationLimitReached()) return false;
+ if (heap()->OldGenerationAllocationLimitReached()) return false;
Expand(reserved_page);
}
bytes_left_to_reserve -= Page::kPageSize;
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
- return Heap::OldGenerationSpaceAvailable() >= bytes;
+ return heap()->OldGenerationSpaceAvailable() >= bytes;
}
// There is no next page in this space. Try free list allocation unless that
// is currently forbidden.
- if (!Heap::linear_allocation()) {
+ if (!heap()->linear_allocation()) {
int wasted_bytes;
Object* result;
MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
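
// The allocation tail above follows a pattern the paged spaces share: if the
// old-generation limit has been reached and allocation is not being forced,
// fail so the caller can trigger a collection and retry. Standalone sketch
// (names illustrative):

namespace sketch {

enum AllocResult { kSuccess, kRetryAfterGC };

AllocResult TryExpandAndAllocate(bool always_allocate, bool limit_reached) {
  if (!always_allocate && limit_reached) return kRetryAfterGC;
  return kSuccess;  // otherwise expand the space or use the free list
}

}  // namespace sketch
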
#ifdef DEBUG
-struct CommentStatistic {
- const char* comment;
- int size;
- int count;
- void Clear() {
- comment = NULL;
- size = 0;
- count = 0;
- }
-};
-
-
-// must be small, since an iteration is used for lookup
-const int kMaxComments = 64;
-static CommentStatistic comments_statistics[kMaxComments+1];
-
-
void PagedSpace::ReportCodeStatistics() {
+ Isolate* isolate = Isolate::Current();
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
ReportCodeKindStatistics();
PrintF("Code comment statistics (\" [ comment-txt : size/ "
"count (average)\"):\n");
- for (int i = 0; i <= kMaxComments; i++) {
+ for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
const CommentStatistic& cs = comments_statistics[i];
if (cs.size > 0) {
PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
void PagedSpace::ResetCodeStatistics() {
+ Isolate* isolate = Isolate::Current();
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
ClearCodeKindStatistics();
- for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
- comments_statistics[kMaxComments].comment = "Unknown";
- comments_statistics[kMaxComments].size = 0;
- comments_statistics[kMaxComments].count = 0;
+ for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+ comments_statistics[i].Clear();
+ }
+ comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
+ comments_statistics[CommentStatistic::kMaxComments].size = 0;
+ comments_statistics[CommentStatistic::kMaxComments].count = 0;
}
-// Adds comment to 'comment_statistics' table. Performance OK sa long as
+// Adds comment to 'comments_statistics' table. Performance OK as long as
// 'kMaxComments' is small.
-static void EnterComment(const char* comment, int delta) {
+static void EnterComment(Isolate* isolate, const char* comment, int delta) {
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
// Do not count empty comments
if (delta <= 0) return;
- CommentStatistic* cs = &comments_statistics[kMaxComments];
+ CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
// Search for a free or matching entry in 'comments_statistics': 'cs'
// points to result.
- for (int i = 0; i < kMaxComments; i++) {
+ for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
if (comments_statistics[i].comment == NULL) {
cs = &comments_statistics[i];
cs->comment = comment;
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
-static void CollectCommentStatistics(RelocIterator* it) {
+static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
ASSERT(!it->done());
ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
if (txt[0] == ']') break; // End of nested comment
// A new comment
- CollectCommentStatistics(it);
+ CollectCommentStatistics(isolate, it);
// Skip code that was covered with previous comment
prev_pc = it->rinfo()->pc();
}
it->next();
}
- EnterComment(comment_txt, flat_delta);
+ EnterComment(isolate, comment_txt, flat_delta);
}
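
// CollectCommentStatistics above, modeled on a flat token stream: code bytes
// are charged to the innermost enclosing comment only, so nested comments do
// not double-count their parents' bytes. (Tok and Stats are illustrative.)

#include <cstddef>
#include <map>
#include <string>
#include <vector>

namespace sketch {

struct Tok {
  enum Kind { OPEN, CLOSE, CODE } kind;
  std::string name;  // for OPEN
  int bytes;         // for CODE
};

typedef std::map<std::string, int> Stats;

// Consume tokens for one comment starting at |i| (just past its OPEN);
// returns the index just past the matching CLOSE.
std::size_t Collect(const std::vector<Tok>& toks, std::size_t i,
                    const std::string& name, Stats* stats) {
  int flat = 0;  // bytes not covered by a nested comment
  while (i < toks.size()) {
    if (toks[i].kind == Tok::CODE) {
      flat += toks[i].bytes;
      i++;
    } else if (toks[i].kind == Tok::OPEN) {
      i = Collect(toks, i + 1, toks[i].name, stats);  // recurse into nesting
    } else {  // CLOSE ends this comment
      i++;
      break;
    }
  }
  (*stats)[name] += flat;
  return i;
}

}  // namespace sketch
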
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
+ Isolate* isolate = heap()->isolate();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
- code_kind_statistics[code->kind()] += code->Size();
+ isolate->code_kind_statistics()[code->kind()] += code->Size();
RelocIterator it(code);
int delta = 0;
const byte* prev_pc = code->instruction_start();
while (!it.done()) {
if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
- CollectCommentStatistics(&it);
+ CollectCommentStatistics(isolate, &it);
prev_pc = it.rinfo()->pc();
}
it.next();
ASSERT(code->instruction_start() <= prev_pc &&
prev_pc <= code->instruction_end());
delta += static_cast<int>(code->instruction_end() - prev_pc);
- EnterComment("NoComment", delta);
+ EnterComment(isolate, "NoComment", delta);
}
}
}
// There is no next page in this space. Try free list allocation unless
// that is currently forbidden. The fixed space free list implicitly assumes
// that all free blocks are of the fixed size.
- if (!Heap::linear_allocation()) {
+ if (!heap()->linear_allocation()) {
Object* result;
MaybeObject* maybe = free_list_.Allocate();
if (maybe->ToObject(&result)) {
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
ASSERT(object->IsJSGlobalPropertyCell() ||
- object->map() == Heap::two_pointer_filler_map());
+ object->map() == heap()->two_pointer_filler_map());
}
#endif
Executability executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
size_t size;
- void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable);
+ Isolate* isolate = Isolate::Current();
+ void* mem = isolate->memory_allocator()->AllocateRawMemory(
+ requested, &size, executable);
if (mem == NULL) return NULL;
// The start of the chunk may be overlayed with a page so we have to
// make sure that the page flags fit in the size field.
ASSERT((size & Page::kPageFlagMask) == 0);
- LOG(NewEvent("LargeObjectChunk", mem, size));
+ LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
if (size < requested) {
- MemoryAllocator::FreeRawMemory(mem, size, executable);
- LOG(DeleteEvent("LargeObjectChunk", mem));
+ isolate->memory_allocator()->FreeRawMemory(
+ mem, size, executable);
+ LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
return NULL;
}
ObjectSpace space = (executable == EXECUTABLE)
? kObjectSpaceCodeSpace
: kObjectSpaceLoSpace;
- MemoryAllocator::PerformAllocationCallback(
+ isolate->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionAllocate, size);
LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
chunk->size_ = size;
+ Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+ page->heap_ = Isolate::Current()->heap();
return chunk;
}
// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
- : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+ : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
first_chunk_(NULL),
size_(0),
page_count_(0),
while (first_chunk_ != NULL) {
LargeObjectChunk* chunk = first_chunk_;
first_chunk_ = first_chunk_->next();
- LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
+ LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
Executability executable =
page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
size_t size = chunk->size();
- MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
- MemoryAllocator::PerformAllocationCallback(
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
+ size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size);
}
void LargeObjectSpace::Protect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
- MemoryAllocator::Protect(chunk->address(), chunk->size());
+ heap()->isolate()->memory_allocator()->Protect(chunk->address(),
+ chunk->size());
chunk = chunk->next();
}
}
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
bool is_code = chunk->GetObject()->IsCode();
- MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
- is_code ? EXECUTABLE : NOT_EXECUTABLE);
+ heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
+ chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
chunk = chunk->next();
}
}
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return Failure::RetryAfterGC(identity());
}
// Iterate regions of the first normal page covering object.
uint32_t first_region_number = page->GetRegionNumberForAddress(start);
newmarks |=
- Heap::IterateDirtyRegions(marks >> first_region_number,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object) << first_region_number;
+ heap()->IterateDirtyRegions(marks >> first_region_number,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object) << first_region_number;
start = end;
end = start + Page::kPageSize;
while (end <= object_end) {
// Iterate next 32 regions.
newmarks |=
- Heap::IterateDirtyRegions(marks,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
start = end;
end = start + Page::kPageSize;
}
// Iterate the last piece of an object which is less than
// Page::kPageSize.
newmarks |=
- Heap::IterateDirtyRegions(marks,
- start,
- object_end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ object_end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
}
page->SetRegionMarks(newmarks);
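A self-contained sketch of the dirty-region walk being rewritten here, assuming one 32-bit mark word where each bit guards a fixed-size region; the visitor signature is an illustrative stand-in for Heap::IteratePointersInDirtyRegion, not the real one.

#include <cstdint>

typedef bool (*RegionVisitor)(uintptr_t start, uintptr_t end);

// Visits only marked regions; keeps a bit set in the result if the visitor
// found pointers that must keep the region marked.
uint32_t IterateDirtyRegions(uint32_t marks, uintptr_t area_start,
                             uintptr_t region_size, RegionVisitor visit) {
  uint32_t newmarks = 0;
  for (int bit = 0; bit < 32; ++bit) {
    if ((marks & (1u << bit)) == 0) continue;  // region is clean, skip it
    uintptr_t start = area_start + bit * region_size;
    if (visit(start, start + region_size)) {
      newmarks |= 1u << bit;  // region still holds interesting pointers
    }
  }
  return newmarks;
}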
HeapObject* object = current->GetObject();
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap()->mark_compact_collector()->tracer()->decrement_marked_count();
previous = current;
current = current->next();
} else {
}
// Free the chunk.
- MarkCompactCollector::ReportDeleteIfNeeded(object);
+ heap()->mark_compact_collector()->ReportDeleteIfNeeded(object);
LiveObjectList::ProcessNonLive(object);
size_ -= static_cast<int>(chunk_size);
page_count_--;
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
- MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
- size_);
- LOG(DeleteEvent("LargeObjectChunk", chunk_address));
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
+ chunk_size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+ space, kAllocationActionFree, size_);
+ LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
}
}
}
bool LargeObjectSpace::Contains(HeapObject* object) {
Address address = object->address();
- if (Heap::new_space()->Contains(address)) {
+ if (heap()->new_space()->Contains(address)) {
return false;
}
Page* page = Page::FromAddress(address);
// in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
Object* element = array->get(j);
if (element->IsHeapObject()) {
HeapObject* element_object = HeapObject::cast(element);
- ASSERT(Heap::Contains(element_object));
+ ASSERT(heap()->Contains(element_object));
ASSERT(element_object->map()->IsMap());
- if (Heap::InNewSpace(element_object)) {
+ if (heap()->InNewSpace(element_object)) {
Address array_addr = object->address();
Address element_addr = array_addr + FixedArray::kHeaderSize +
j * kPointerSize;
void LargeObjectSpace::CollectCodeStatistics() {
+ Isolate* isolate = heap()->isolate();
LargeObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
- code_kind_statistics[code->kind()] += code->Size();
+ isolate->code_kind_statistics()[code->kind()] += code->Size();
}
}
}
namespace v8 {
namespace internal {
+class Isolate;
+
// -----------------------------------------------------------------------------
// Heap structures:
//
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
- kIntSize + kPointerSize;
+ kIntSize + kPointerSize + kPointerSize;
// The start offset of the object area in a page. Aligned to both maps and
// code alignment to be suitable for both.
// This invariant guarantees that after flipping flag meaning at the
// beginning of scavenge all pages in use will be marked as having valid
// watermark.
- static inline void FlipMeaningOfInvalidatedWatermarkFlag();
+ static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
// Returns true if the page allocation watermark was not altered during
// scavenge.
STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
kAllocationWatermarkOffsetBits);
- // This field contains the meaning of the WATERMARK_INVALIDATED flag.
- // Instead of clearing this flag from all pages we just flip
- // its meaning at the beginning of a scavenge.
- static intptr_t watermark_invalidated_mark_;
-
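The removed comment above describes a trick worth spelling out: instead of clearing WATERMARK_INVALIDATED on every page, one global bit records which raw flag value currently *means* "invalidated", and flipping that bit revalidates all pages at once. A minimal sketch with invented names (the patch moves the real flag into Heap):

#include <cstdint>

struct PageFlags {
  static const uintptr_t kWatermarkInvalidatedBit = 1;
  static uintptr_t invalidated_meaning;  // either 0 or the bit itself

  uintptr_t flags;

  bool IsWatermarkValid() const {
    return (flags & kWatermarkInvalidatedBit) != invalidated_meaning;
  }
  void InvalidateWatermark() {
    flags = (flags & ~kWatermarkInvalidatedBit) | invalidated_meaning;
  }
  // O(1) for the whole heap: every page whose bit carried the old meaning
  // becomes valid again without being touched.
  static void FlipMeaningOfInvalidatedFlag() {
    invalidated_meaning ^= kWatermarkInvalidatedBit;
  }
};

uintptr_t PageFlags::invalidated_meaning = PageFlags::kWatermarkInvalidatedBit;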
//---------------------------------------------------------------------------
// Page header description.
//
// During scavenge collection this field is used to store allocation watermark
// if it is altered during scavenge.
Address mc_first_forwarded;
+
+ Heap* heap_;
};
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
- Space(AllocationSpace id, Executability executable)
- : id_(id), executable_(executable) {}
+ Space(Heap* heap, AllocationSpace id, Executability executable)
+ : heap_(heap), id_(id), executable_(executable) {}
virtual ~Space() {}
+ Heap* heap() const { return heap_; }
+
// Does the space need executable memory?
Executability executable() { return executable_; }
virtual bool ReserveSpace(int bytes) = 0;
private:
+ Heap* heap_;
AllocationSpace id_;
Executability executable_;
};
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
-class CodeRange : public AllStatic {
+class CodeRange {
public:
// Reserves a range of virtual memory, but does not commit any of it.
// Can only be called once, at heap initialization time.
// Returns false on failure.
- static bool Setup(const size_t requested_size);
+ bool Setup(const size_t requested_size);
// Frees the range of virtual memory, and frees the data structures used to
// manage it.
- static void TearDown();
+ void TearDown();
- static bool exists() { return code_range_ != NULL; }
- static bool contains(Address address) {
+ bool exists() { return code_range_ != NULL; }
+ bool contains(Address address) {
if (code_range_ == NULL) return false;
Address start = static_cast<Address>(code_range_->address());
return start <= address && address < start + code_range_->size();
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
- size_t* allocated);
- static void FreeRawMemory(void* buf, size_t length);
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated);
+ void FreeRawMemory(void* buf, size_t length);
private:
+ CodeRange();
+
// The reserved range of virtual memory that all code objects are put in.
- static VirtualMemory* code_range_;
+ VirtualMemory* code_range_;
// Plain old data class, just a struct plus a constructor.
class FreeBlock {
public:
// Freed blocks of memory are added to the free list. When the allocation
// list is exhausted, the free list is sorted and merged to make the new
// allocation list.
- static List<FreeBlock> free_list_;
+ List<FreeBlock> free_list_;
// Memory is allocated from the free blocks on the allocation list.
// The block at current_allocation_block_index_ is the current block.
- static List<FreeBlock> allocation_list_;
- static int current_allocation_block_index_;
+ List<FreeBlock> allocation_list_;
+ int current_allocation_block_index_;
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
// If none can be found, terminates V8 with FatalProcessOutOfMemory.
- static void GetNextAllocationBlock(size_t requested);
+ void GetNextAllocationBlock(size_t requested);
// Compares the start addresses of two free blocks.
static int CompareFreeBlockAddress(const FreeBlock* left,
const FreeBlock* right);
+
+ friend class Isolate;
+
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
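GetNextAllocationBlock's documented strategy, sorting and merging the free list once the allocation list is exhausted, can be sketched independently of V8 as follows; FreeBlock here is a simplified stand-in for the class above.

#include <algorithm>
#include <cstddef>
#include <vector>

struct FreeBlock {
  char* start;
  std::size_t size;
};

// Sorts blocks by start address and coalesces adjacent ones; the result
// becomes the new allocation list.
std::vector<FreeBlock> SortAndMerge(std::vector<FreeBlock> free_list) {
  std::sort(free_list.begin(), free_list.end(),
            [](const FreeBlock& a, const FreeBlock& b) {
              return a.start < b.start;
            });
  std::vector<FreeBlock> merged;
  for (const FreeBlock& block : free_list) {
    if (!merged.empty() &&
        merged.back().start + merged.back().size == block.start) {
      merged.back().size += block.size;  // adjacent: extend previous block
    } else {
      merged.push_back(block);
    }
  }
  return merged;
}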
//
-class MemoryAllocator : public AllStatic {
+class MemoryAllocator {
public:
// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
- static bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
+ bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
// Deletes valid chunks.
- static void TearDown();
+ void TearDown();
// Reserves an initial address range of virtual memory to be split between
// the two new space semispaces, the old space, and the map space. Returns
// the address of the initial chunk if successful, with the side effect of
// setting the initial chunk, or else NULL if unsuccessful and leaves the
// initial chunk NULL.
- static void* ReserveInitialChunk(const size_t requested);
+ void* ReserveInitialChunk(const size_t requested);
// Commits pages from an as-yet-unmanaged block of virtual memory into a
// paged space. The block should be part of the initial chunk reserved via
// ReserveInitialChunk. The function assumes that the start
// address is non-null and that it is big enough to hold at least one
// page-aligned page. The call always succeeds, and num_pages is always
// greater than zero.
- static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
- int* num_pages);
+ Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+ int* num_pages);
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
- static bool CommitBlock(Address start, size_t size, Executability executable);
+ bool CommitBlock(Address start, size_t size, Executability executable);
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not NULL, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
- static bool UncommitBlock(Address start, size_t size);
+ bool UncommitBlock(Address start, size_t size);
// Zaps a contiguous block of memory [start..(start+size)[ thus
// filling it up with a recognizable non-NULL bit pattern.
- static void ZapBlock(Address start, size_t size);
+ void ZapBlock(Address start, size_t size);
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. The actual
// number of allocated pages is returned in the output parameter
// allocated_pages. If the PagedSpace owner is executable and there is
// a code range, the pages are allocated from the code range.
- static Page* AllocatePages(int requested_pages, int* allocated_pages,
- PagedSpace* owner);
+ Page* AllocatePages(int requested_pages, int* allocated_pages,
+ PagedSpace* owner);
// Frees pages from a given page and after. Requires pages to be
// linked in chunk-order (see comment for class).
// Otherwise, the function searches a page after 'p' that is
// the first page of a chunk. Pages after the found page
// are freed and the function returns 'p'.
- static Page* FreePages(Page* p);
+ Page* FreePages(Page* p);
// Frees all pages owned by given space.
- static void FreeAllPages(PagedSpace* space);
+ void FreeAllPages(PagedSpace* space);
// Allocates and frees raw memory of a certain size.
// These are just thin wrappers around OS::Allocate and OS::Free.
// If the flag is EXECUTABLE and a code range exists, the requested
// memory is allocated from the code range. If a code range exists
// and the freed memory is in it, the code range manages the freed memory.
- MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
- size_t* allocated,
- Executability executable);
- static void FreeRawMemory(void* buf,
- size_t length,
- Executability executable);
- static void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size);
-
- static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
- static void RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback);
- static bool MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback);
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated,
+ Executability executable);
+ void FreeRawMemory(void* buf,
+ size_t length,
+ Executability executable);
+ void PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size);
+
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action);
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+ bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
// Returns the maximum available bytes of heaps.
- static intptr_t Available() {
- return capacity_ < size_ ? 0 : capacity_ - size_;
- }
+ intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
// Returns allocated spaces in bytes.
- static intptr_t Size() { return size_; }
+ intptr_t Size() { return size_; }
// Returns the maximum available executable bytes of heaps.
- static intptr_t AvailableExecutable() {
+ intptr_t AvailableExecutable() {
if (capacity_executable_ < size_executable_) return 0;
return capacity_executable_ - size_executable_;
}
// Returns allocated executable spaces in bytes.
- static intptr_t SizeExecutable() { return size_executable_; }
+ intptr_t SizeExecutable() { return size_executable_; }
// Returns maximum available bytes that the old space can have.
- static intptr_t MaxAvailable() {
+ intptr_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}
- // Sanity check on a pointer.
- static bool SafeIsInAPageChunk(Address addr);
-
// Links two pages.
- static inline void SetNextPage(Page* prev, Page* next);
+ inline void SetNextPage(Page* prev, Page* next);
// Returns the next page of a given page.
- static inline Page* GetNextPage(Page* p);
+ inline Page* GetNextPage(Page* p);
// Checks whether a page belongs to a space.
- static inline bool IsPageInSpace(Page* p, PagedSpace* space);
+ inline bool IsPageInSpace(Page* p, PagedSpace* space);
// Returns the space that owns the given page.
- static inline PagedSpace* PageOwner(Page* page);
+ inline PagedSpace* PageOwner(Page* page);
// Finds the first/last page in the same chunk as a given page.
- static Page* FindFirstPageInSameChunk(Page* p);
- static Page* FindLastPageInSameChunk(Page* p);
+ Page* FindFirstPageInSameChunk(Page* p);
+ Page* FindLastPageInSameChunk(Page* p);
// Relinks list of pages owned by space to make it chunk-ordered.
// Returns new first and last pages of space.
// Also returns last page in relinked list which has WasInUsedBeforeMC
// flag set.
- static void RelinkPageListInChunkOrder(PagedSpace* space,
- Page** first_page,
- Page** last_page,
- Page** last_page_in_use);
+ void RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use);
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
- static inline void Protect(Address start, size_t size);
- static inline void Unprotect(Address start, size_t size,
- Executability executable);
+ inline void Protect(Address start, size_t size);
+ inline void Unprotect(Address start, size_t size,
+ Executability executable);
// Protect/unprotect a chunk given a page in the chunk.
- static inline void ProtectChunkFromPage(Page* page);
- static inline void UnprotectChunkFromPage(Page* page);
+ inline void ProtectChunkFromPage(Page* page);
+ inline void UnprotectChunkFromPage(Page* page);
#endif
#ifdef DEBUG
// Reports statistic info of the space.
- static void ReportStatistics();
+ void ReportStatistics();
#endif
- static void AddToAllocatedChunks(Address addr, intptr_t size);
- static void RemoveFromAllocatedChunks(Address addr, intptr_t size);
- // Note: This only checks the regular chunks, not the odd-sized initial
- // chunk.
- static bool InAllocatedChunks(Address addr);
-
// Due to an encoding limitation, we can only have 8K chunks.
static const int kMaxNofChunks = 1 << kPageSizeBits;
// If a chunk has at least 16 pages, the maximum heap size is about
#endif
private:
+ MemoryAllocator();
+
static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
- static const int kChunkTableTopLevelEntries =
- 1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 -
- (kChunkTableLevels - 1) * kChunkTableBitsPerLevel);
-
- // The chunks are not chunk-size aligned so for a given chunk-sized area of
- // memory there can be two chunks that cover it.
- static const int kChunkTableFineGrainedWordsPerEntry = 2;
- static const uintptr_t kUnusedChunkTableEntry = 0;
// Maximum space size in bytes.
- static intptr_t capacity_;
+ intptr_t capacity_;
// Maximum subset of capacity_ that can be executable
- static intptr_t capacity_executable_;
-
- // Top level table to track whether memory is part of a chunk or not.
- static uintptr_t chunk_table_[kChunkTableTopLevelEntries];
+ intptr_t capacity_executable_;
// Allocated space size in bytes.
- static intptr_t size_;
+ intptr_t size_;
+
// Allocated executable space size in bytes.
- static intptr_t size_executable_;
+ intptr_t size_executable_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
AllocationAction action;
};
// A List of callbacks that are triggered when memory is allocated or freed
- static List<MemoryAllocationCallbackRegistration>
+ List<MemoryAllocationCallbackRegistration>
memory_allocation_callbacks_;
// The initial chunk of virtual memory.
- static VirtualMemory* initial_chunk_;
+ VirtualMemory* initial_chunk_;
// Allocated chunk info: chunk start address, chunk size, and owning space.
class ChunkInfo BASE_EMBEDDED {
ChunkInfo() : address_(NULL),
size_(0),
owner_(NULL),
- executable_(NOT_EXECUTABLE) {}
+ executable_(NOT_EXECUTABLE),
+ owner_identity_(FIRST_SPACE) {}
inline void init(Address a, size_t s, PagedSpace* o);
Address address() { return address_; }
size_t size() { return size_; }
// We save executability of the owner to allow using it
// when collecting stats after the owner has been destroyed.
Executability executable() const { return executable_; }
+ AllocationSpace owner_identity() const { return owner_identity_; }
private:
Address address_;
size_t size_;
PagedSpace* owner_;
Executability executable_;
+ AllocationSpace owner_identity_;
};
// Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
- static List<ChunkInfo> chunks_;
- static List<int> free_chunk_ids_;
- static int max_nof_chunks_;
- static int top_;
+ List<ChunkInfo> chunks_;
+ List<int> free_chunk_ids_;
+ int max_nof_chunks_;
+ int top_;
// Push/pop a free chunk id onto/from the stack.
- static void Push(int free_chunk_id);
- static int Pop();
- static bool OutOfChunkIds() { return top_ == 0; }
+ void Push(int free_chunk_id);
+ int Pop();
+ bool OutOfChunkIds() { return top_ == 0; }
// Frees a chunk.
- static void DeleteChunk(int chunk_id);
-
- // Helpers to maintain and query the chunk tables.
- static void AddChunkUsingAddress(
- uintptr_t chunk_start, // Where the chunk starts.
- uintptr_t chunk_index_base); // Used to place the chunk in the tables.
- static void RemoveChunkFoundUsingAddress(
- uintptr_t chunk_start, // Where the chunk starts.
- uintptr_t chunk_index_base); // Used to locate the entry in the tables.
- // Controls whether the lookup creates intermediate levels of tables as
- // needed.
- enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
- static uintptr_t* AllocatedChunksFinder(uintptr_t* table,
- uintptr_t address,
- int bit_position,
- CreateTables create_as_needed);
- static void FreeChunkTables(uintptr_t* array, int length, int level);
- static int FineGrainedIndexForAddress(uintptr_t address) {
- int index = ((address >> kChunkSizeLog2) &
- ((1 << kChunkTableBitsPerLevel) - 1));
- return index * kChunkTableFineGrainedWordsPerEntry;
- }
-
+ void DeleteChunk(int chunk_id);
// Basic check whether a chunk id is in the valid range.
- static inline bool IsValidChunkId(int chunk_id);
+ inline bool IsValidChunkId(int chunk_id);
// Checks whether a chunk id identifies an allocated chunk.
- static inline bool IsValidChunk(int chunk_id);
+ inline bool IsValidChunk(int chunk_id);
// Returns the chunk id that a page belongs to.
- static inline int GetChunkId(Page* p);
+ inline int GetChunkId(Page* p);
// True if the address lies in the initial chunk.
- static inline bool InInitialChunk(Address address);
+ inline bool InInitialChunk(Address address);
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
// collector to rebuild page headers in the from space, which is
// used as a marking stack and its page headers are destroyed.
- static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner);
+ Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+ PagedSpace* owner);
- static Page* RelinkPagesInChunk(int chunk_id,
- Address chunk_start,
- size_t chunk_size,
- Page* prev,
- Page** last_page_in_use);
+ Page* RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ size_t chunk_size,
+ Page* prev,
+ Page** last_page_in_use);
+
+ friend class Isolate;
+
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
};
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(intptr_t max_capacity,
+ PagedSpace(Heap* heap,
+ intptr_t max_capacity,
AllocationSpace id,
Executability executable);
class SemiSpace : public Space {
public:
// Constructor.
- SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
+ explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
start_ = NULL;
age_mark_ = NULL;
}
class NewSpace : public Space {
public:
// Constructor.
- NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
+ explicit NewSpace(Heap* heap)
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ to_space_(heap),
+ from_space_(heap) {}
// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- explicit OldSpace(intptr_t max_capacity,
- AllocationSpace id,
- Executability executable)
- : PagedSpace(max_capacity, id, executable), free_list_(id) {
+ OldSpace(Heap* heap,
+ intptr_t max_capacity,
+ AllocationSpace id,
+ Executability executable)
+ : PagedSpace(heap, max_capacity, id, executable), free_list_(id) {
page_extra_ = 0;
}
class FixedSpace : public PagedSpace {
public:
- FixedSpace(intptr_t max_capacity,
+ FixedSpace(Heap* heap,
+ intptr_t max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
- : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name),
free_list_(id, object_size_in_bytes) {
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
- : FixedSpace(max_capacity, id, Map::kSize, "map"),
+ MapSpace(Heap* heap,
+ intptr_t max_capacity,
+ int max_map_space_pages,
+ AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
ASSERT(max_map_space_pages < kMaxMapPageIndex);
}
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
- CellSpace(intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
+ CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
+ {}
protected:
#ifdef DEBUG
class LargeObjectSpace : public Space {
public:
- explicit LargeObjectSpace(AllocationSpace id);
+ LargeObjectSpace(Heap* heap, AllocationSpace id);
virtual ~LargeObjectSpace() {}
// Initializes internal data structures.
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space.
- intptr_t Available() {
- return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
- }
+ inline intptr_t Available();
virtual intptr_t Size() {
return size_;
};
+#ifdef DEBUG
+struct CommentStatistic {
+ const char* comment;
+ int size;
+ int count;
+ void Clear() {
+ comment = NULL;
+ size = 0;
+ count = 0;
+ }
+ // Must be small, since an iteration is used for lookup.
+ static const int kMaxComments = 64;
+};
+#endif
+
+
} } // namespace v8::internal
#endif // V8_SPACES_H_
// Storage for constants used by string-search.
-int StringSearchBase::kBadCharShiftTable[kUC16AlphabetSize];
-int StringSearchBase::kGoodSuffixShiftTable[kBMMaxShift + 1];
-int StringSearchBase::kSuffixTable[kBMMaxShift + 1];
+// Now in Isolate:
+// bad_char_shift_table()
+// good_suffix_shift_table()
+// suffix_table()
}} // namespace v8::internal
// limit, we can fix the size of tables. For a needle longer than this limit,
// search will not be optimal, since we only build tables for a suffix
// of the string, but it is a safe approximation.
- static const int kBMMaxShift = 250;
+ static const int kBMMaxShift = Isolate::kBMMaxShift;
// Reduce alphabet to this size.
// One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
// For needles using only characters in the same Unicode 256-code point page,
// there is no search speed degradation.
static const int kAsciiAlphabetSize = 128;
- static const int kUC16AlphabetSize = 256;
+ static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
// Bad-char shift table stored in the state. Its length is the alphabet size.
// For patterns below this length, the skip length of Boyer-Moore is too short
return String::IsAscii(string.start(), string.length());
}
- // The following tables are shared by all searches.
- // TODO(lrn): Introduce a way for a pattern to keep its tables
- // between searches (e.g., for an Atom RegExp).
-
- // Store for the BoyerMoore(Horspool) bad char shift table.
- static int kBadCharShiftTable[kUC16AlphabetSize];
- // Store for the BoyerMoore good suffix shift table.
- static int kGoodSuffixShiftTable[kBMMaxShift + 1];
- // Table used temporarily while building the BoyerMoore good suffix
- // shift table.
- static int kSuffixTable[kBMMaxShift + 1];
+ friend class Isolate;
};
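The bad-char shift table these members refer to is the classic Boyer-Moore-Horspool table. A generic sketch over a 256-entry reduced alphabet (matching kUC16AlphabetSize), though this is not V8's implementation:

#include <cstddef>
#include <string>
#include <vector>

// Builds the Boyer-Moore-Horspool bad-char table: on a mismatch, the text
// character aligned with the last pattern position tells us how far to shift.
std::vector<int> BuildBadCharTable(const std::string& pattern) {
  const int kAlphabetSize = 256;  // reduced alphabet
  // Default shift: the whole pattern length (character absent from pattern).
  std::vector<int> shift(kAlphabetSize, static_cast<int>(pattern.size()));
  // For every pattern character except the last, shift so its rightmost
  // occurrence lines up with the mismatching text character.
  for (std::size_t i = 0; i + 1 < pattern.size(); ++i) {
    shift[static_cast<unsigned char>(pattern[i])] =
        static_cast<int>(pattern.size() - 1 - i);
  }
  return shift;
}

Hoisting these scratch tables out of file-level statics and into the Isolate, as this patch does, is what makes concurrent isolates safe: two threads searching in different isolates no longer share one mutable table.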
template <typename PatternChar, typename SubjectChar>
class StringSearch : private StringSearchBase {
public:
- explicit StringSearch(Vector<const PatternChar> pattern)
- : pattern_(pattern),
+ StringSearch(Isolate* isolate, Vector<const PatternChar> pattern)
+ : isolate_(isolate),
+ pattern_(pattern),
start_(Max(0, pattern.length() - kBMMaxShift)) {
if (sizeof(PatternChar) > sizeof(SubjectChar)) {
if (!IsAsciiString(pattern_)) {
return bad_char_occurrence[equiv_class];
}
+ // The following tables are shared by all searches.
+ // TODO(lrn): Introduce a way for a pattern to keep its tables
+ // between searches (e.g., for an Atom RegExp).
+
+ // Store for the BoyerMoore(Horspool) bad char shift table.
// Return a table covering the last kBMMaxShift+1 positions of
// pattern.
int* bad_char_table() {
- return kBadCharShiftTable;
+ return isolate_->bad_char_shift_table();
}
+ // Store for the BoyerMoore good suffix shift table.
int* good_suffix_shift_table() {
// Return a biased pointer that maps the range [start_..pattern_.length())
// to the kGoodSuffixShiftTable array.
- return kGoodSuffixShiftTable - start_;
+ return isolate_->good_suffix_shift_table() - start_;
}
+ // Table used temporarily while building the BoyerMoore good suffix
+ // shift table.
int* suffix_table() {
// Return a biased pointer that maps the range [start_..pattern_.length())
// to the kSuffixTable array.
- return kSuffixTable - start_;
+ return isolate_->suffix_table() - start_;
}
+ Isolate* isolate_;
// The pattern to search for.
Vector<const PatternChar> pattern_;
// Pointer to implementation of the search.
// object should be constructed once and the Search function then called
// for each search.
template <typename SubjectChar, typename PatternChar>
-static int SearchString(Vector<const SubjectChar> subject,
+static int SearchString(Isolate* isolate,
+ Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
int start_index) {
- StringSearch<PatternChar, SubjectChar> search(pattern);
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
return search.Search(subject, start_index);
}
namespace internal {
static const int kMentionedObjectCacheMaxSize = 256;
-static List<HeapObject*, PreallocatedStorage>* debug_object_cache = NULL;
-static Object* current_security_token = NULL;
-
char* HeapStringAllocator::allocate(unsigned bytes) {
space_ = NewArray<char>(bytes);
return;
}
if (o->IsHeapObject()) {
+ DebugObjectCache* debug_object_cache = Isolate::Current()->
+ string_stream_debug_object_cache();
for (int i = 0; i < debug_object_cache->length(); i++) {
if ((*debug_object_cache)[i] == o) {
Add("#%d#", i);
void StringStream::Log() {
- LOG(StringEvent("StackDump", buffer_));
+ LOG(ISOLATE, StringEvent("StackDump", buffer_));
}
Handle<String> StringStream::ToString() {
- return Factory::NewStringFromUtf8(Vector<const char>(buffer_, length_));
+ return FACTORY->NewStringFromUtf8(Vector<const char>(buffer_, length_));
}
void StringStream::ClearMentionedObjectCache() {
- current_security_token = NULL;
- if (debug_object_cache == NULL) {
- debug_object_cache = new List<HeapObject*, PreallocatedStorage>(0);
+ Isolate* isolate = Isolate::Current();
+ isolate->set_string_stream_current_security_token(NULL);
+ if (isolate->string_stream_debug_object_cache() == NULL) {
+ isolate->set_string_stream_debug_object_cache(
+ new List<HeapObject*, PreallocatedStorage>(0));
}
- debug_object_cache->Clear();
+ isolate->string_stream_debug_object_cache()->Clear();
}
#ifdef DEBUG
bool StringStream::IsMentionedObjectCacheClear() {
- return (debug_object_cache->length() == 0);
+ return (
+ Isolate::Current()->string_stream_debug_object_cache()->length() == 0);
}
#endif
void StringStream::PrintUsingMap(JSObject* js_object) {
Map* map = js_object->map();
- if (!Heap::Contains(map) ||
+ if (!HEAP->Contains(map) ||
!map->IsHeapObject() ||
!map->IsMap()) {
Add("<Invalid map>\n");
void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
+ Heap* heap = HEAP;
for (unsigned int i = 0; i < 10 && i < limit; i++) {
Object* element = array->get(i);
- if (element != Heap::the_hole_value()) {
+ if (element != heap->the_hole_value()) {
for (int len = 1; len < 18; len++)
Put(' ');
Add("%d: %o\n", i, array->get(i));
void StringStream::PrintMentionedObjectCache() {
+ DebugObjectCache* debug_object_cache =
+ Isolate::Current()->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
for (int i = 0; i < debug_object_cache->length(); i++) {
HeapObject* printee = (*debug_object_cache)[i];
void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- if (!f->IsHeapObject() || !Heap::Contains(HeapObject::cast(f))) {
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ if (!f->IsHeapObject() || !heap->Contains(HeapObject::cast(f))) {
return;
}
Map* map = HeapObject::cast(f)->map();
if (!map->IsHeapObject() ||
- !Heap::Contains(map) ||
+ !heap->Contains(map) ||
!map->IsMap() ||
!f->IsJSFunction()) {
return;
JSFunction* fun = JSFunction::cast(f);
Object* perhaps_context = fun->unchecked_context();
if (perhaps_context->IsHeapObject() &&
- Heap::Contains(HeapObject::cast(perhaps_context)) &&
+ heap->Contains(HeapObject::cast(perhaps_context)) &&
perhaps_context->IsContext()) {
Context* context = fun->context();
- if (!Heap::Contains(context)) {
+ if (!heap->Contains(context)) {
Add("(Function context is outside heap)\n");
return;
}
Object* token = context->global_context()->security_token();
- if (token != current_security_token) {
+ if (token != isolate->string_stream_current_security_token()) {
Add("Security context: %o\n", token);
- current_security_token = token;
+ isolate->set_string_stream_current_security_token(token);
}
} else {
Add("(Function context is corrupt)\n");
void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
if (f->IsHeapObject() &&
- Heap::Contains(HeapObject::cast(f)) &&
- Heap::Contains(HeapObject::cast(f)->map()) &&
+ HEAP->Contains(HeapObject::cast(f)) &&
+ HEAP->Contains(HeapObject::cast(f)->map()) &&
HeapObject::cast(f)->map()->IsMap()) {
if (f->IsJSFunction()) {
JSFunction* fun = JSFunction::cast(f);
Add("/* warning: 'function' was not a heap object */ ");
return;
}
- if (!Heap::Contains(HeapObject::cast(f))) {
+ if (!HEAP->Contains(HeapObject::cast(f))) {
Add("/* warning: 'function' was not on the heap */ ");
return;
}
- if (!Heap::Contains(HeapObject::cast(f)->map())) {
+ if (!HEAP->Contains(HeapObject::cast(f)->map())) {
Add("/* warning: function's map was not on the heap */ ");
return;
}
void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
Object* name = fun->shared()->name();
bool print_name = false;
- for (Object* p = receiver; p != Heap::null_value(); p = p->GetPrototype()) {
+ Heap* heap = HEAP;
+ for (Object* p = receiver; p != heap->null_value(); p = p->GetPrototype()) {
if (p->IsJSObject()) {
Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
- if (key != Heap::undefined_value()) {
+ if (key != heap->undefined_value()) {
if (!name->IsString() ||
!key->IsString() ||
!String::cast(name)->Equals(String::cast(key))) {
// StubCache implementation.
-StubCache::Entry StubCache::primary_[StubCache::kPrimaryTableSize];
-StubCache::Entry StubCache::secondary_[StubCache::kSecondaryTableSize];
+StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
+ ASSERT(isolate == Isolate::Current());
+ memset(primary_, 0, sizeof(primary_[0]) * StubCache::kPrimaryTableSize);
+ memset(secondary_, 0, sizeof(secondary_[0]) * StubCache::kSecondaryTableSize);
+}
+
void StubCache::Initialize(bool create_heap_objects) {
ASSERT(IsPowerOf2(kPrimaryTableSize));
// Validate that the name does not move on scavenge, and that we
// can use identity checks instead of string equality checks.
- ASSERT(!Heap::InNewSpace(name));
+ ASSERT(!isolate_->heap()->InNewSpace(name));
ASSERT(name->IsSymbol());
// The state bits are not important to the hash function because
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
- if (hit != Builtins::builtin(Builtins::Illegal)) {
+ if (hit != isolate_->builtins()->builtin(Builtins::Illegal)) {
Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
int secondary_offset =
SecondaryOffset(primary->key, primary_flags, primary_offset);
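The retirement step above is the heart of the stub cache design: a direct-mapped primary table whose collision victims get a second chance in a secondary table. A generic, V8-independent sketch of the scheme (sizes and types are illustrative):

#include <cstddef>
#include <functional>

template <typename K, typename V>
class TwoLevelCache {
 public:
  void Put(const K& key, const V& value) {
    Entry& primary = primary_[Hash(key) % kPrimarySize];
    if (primary.valid) {
      // Retire the victim into the secondary table before overwriting it,
      // so a recently useful entry keeps a chance of hitting.
      secondary_[Hash(primary.key) % kSecondarySize] = primary;
    }
    primary.key = key;
    primary.value = value;
    primary.valid = true;
  }

  // Probes the primary table first, then the secondary.
  bool Get(const K& key, V* out) const {
    const Entry& p = primary_[Hash(key) % kPrimarySize];
    if (p.valid && p.key == key) { *out = p.value; return true; }
    const Entry& s = secondary_[Hash(key) % kSecondarySize];
    if (s.valid && s.key == key) { *out = s.value; return true; }
    return false;
  }

 private:
  struct Entry {
    K key;
    V value;
    bool valid = false;
  };
  static const std::size_t kPrimarySize = 512;
  static const std::size_t kSecondarySize = 128;
  static std::size_t Hash(const K& key) { return std::hash<K>()(key); }
  Entry primary_[kPrimarySize];
  Entry secondary_[kSecondarySize];
};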
// there are global objects involved, we need to check global
// property cells in the stub and therefore the stub will be
// specific to the name.
- String* cache_name = Heap::empty_string();
+ String* cache_name = isolate_->heap()->empty_string();
if (receiver->IsGlobalObject()) cache_name = name;
JSObject* last = receiver;
- while (last->GetPrototype() != Heap::null_value()) {
+ while (last->GetPrototype() != isolate_->heap()->null_value()) {
last = JSObject::cast(last->GetPrototype());
if (last->IsGlobalObject()) cache_name = name;
}
compiler.CompileLoadNonexistent(cache_name, receiver, last);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadField(receiver, holder, field_index, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadCallback(name, receiver, holder, callback);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadConstant(receiver, holder, value, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadInterceptor(receiver, holder, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
MaybeObject* StubCache::ComputeLoadNormal() {
- return Builtins::builtin(Builtins::LoadIC_Normal);
+ return isolate_->builtins()->builtin(Builtins::LoadIC_Normal);
}
is_dont_delete);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadField(name, receiver, holder, field_index);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadConstant(name, receiver, holder, value);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadInterceptor(receiver, holder, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileLoadCallback(name, receiver, holder, callback);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
{ MaybeObject* maybe_code = compiler.CompileLoadArrayLength(name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
{ MaybeObject* maybe_code = compiler.CompileLoadStringLength(name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
{ MaybeObject* maybe_code = compiler.CompileLoadFunctionPrototype(name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
// keyed loads that are not array elements go through a generic builtin stub.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
- String* name = Heap::KeyedLoadSpecialized_symbol();
+ String* name = isolate_->heap()->KeyedLoadSpecialized_symbol();
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
{ MaybeObject* maybe_code = compiler.CompileLoadSpecialized(receiver);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
compiler.CompileStoreField(receiver, field_index, transition, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
StrictModeFlag strict_mode) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
- String* name = Heap::KeyedStoreSpecialized_symbol();
+ String* name = isolate_->heap()->KeyedStoreSpecialized_symbol();
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedStoreStubCompiler compiler(strict_mode);
{ MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
if (is_store) {
switch (array_type) {
case kExternalByteArray:
- return Heap::KeyedStoreExternalByteArray_symbol();
+ return HEAP->KeyedStoreExternalByteArray_symbol();
case kExternalUnsignedByteArray:
- return Heap::KeyedStoreExternalUnsignedByteArray_symbol();
+ return HEAP->KeyedStoreExternalUnsignedByteArray_symbol();
case kExternalShortArray:
- return Heap::KeyedStoreExternalShortArray_symbol();
+ return HEAP->KeyedStoreExternalShortArray_symbol();
case kExternalUnsignedShortArray:
- return Heap::KeyedStoreExternalUnsignedShortArray_symbol();
+ return HEAP->KeyedStoreExternalUnsignedShortArray_symbol();
case kExternalIntArray:
- return Heap::KeyedStoreExternalIntArray_symbol();
+ return HEAP->KeyedStoreExternalIntArray_symbol();
case kExternalUnsignedIntArray:
- return Heap::KeyedStoreExternalUnsignedIntArray_symbol();
+ return HEAP->KeyedStoreExternalUnsignedIntArray_symbol();
case kExternalFloatArray:
- return Heap::KeyedStoreExternalFloatArray_symbol();
+ return HEAP->KeyedStoreExternalFloatArray_symbol();
case kExternalPixelArray:
- return Heap::KeyedStoreExternalPixelArray_symbol();
+ return HEAP->KeyedStoreExternalPixelArray_symbol();
default:
UNREACHABLE();
return NULL;
} else {
switch (array_type) {
case kExternalByteArray:
- return Heap::KeyedLoadExternalByteArray_symbol();
+ return HEAP->KeyedLoadExternalByteArray_symbol();
case kExternalUnsignedByteArray:
- return Heap::KeyedLoadExternalUnsignedByteArray_symbol();
+ return HEAP->KeyedLoadExternalUnsignedByteArray_symbol();
case kExternalShortArray:
- return Heap::KeyedLoadExternalShortArray_symbol();
+ return HEAP->KeyedLoadExternalShortArray_symbol();
case kExternalUnsignedShortArray:
- return Heap::KeyedLoadExternalUnsignedShortArray_symbol();
+ return HEAP->KeyedLoadExternalUnsignedShortArray_symbol();
case kExternalIntArray:
- return Heap::KeyedLoadExternalIntArray_symbol();
+ return HEAP->KeyedLoadExternalIntArray_symbol();
case kExternalUnsignedIntArray:
- return Heap::KeyedLoadExternalUnsignedIntArray_symbol();
+ return HEAP->KeyedLoadExternalUnsignedIntArray_symbol();
case kExternalFloatArray:
- return Heap::KeyedLoadExternalFloatArray_symbol();
+ return HEAP->KeyedLoadExternalFloatArray_symbol();
case kExternalPixelArray:
- return Heap::KeyedLoadExternalPixelArray_symbol();
+ return HEAP->KeyedLoadExternalPixelArray_symbol();
default:
UNREACHABLE();
return NULL;
}
Code::cast(code)->set_external_array_type(array_type);
if (is_store) {
- PROFILE(
+ PROFILE(isolate_,
CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG,
Code::cast(code), 0));
} else {
- PROFILE(
+ PROFILE(isolate_,
CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG,
Code::cast(code), 0));
}
MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return Builtins::builtin((strict_mode == kStrictMode)
+ return isolate_->builtins()->builtin((strict_mode == kStrictMode)
? Builtins::StoreIC_Normal_Strict
: Builtins::StoreIC_Normal);
}
compiler.CompileStoreGlobal(receiver, cell, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileStoreCallback(receiver, callback, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileStoreInterceptor(receiver, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
compiler.CompileStoreField(receiver, field_index, transition, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(
- Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+ Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
}
Code::cast(code)->set_check_type(check);
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
}
-static Object* GetProbeValue(Code::Flags flags) {
+static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
// Use raw_unchecked... so we don't get assert failures during GC.
- NumberDictionary* dictionary = Heap::raw_unchecked_non_monomorphic_cache();
- int entry = dictionary->FindEntry(flags);
+ NumberDictionary* dictionary =
+ isolate->heap()->raw_unchecked_non_monomorphic_cache();
+ int entry = dictionary->FindEntry(isolate, flags);
if (entry != -1) return dictionary->ValueAt(entry);
- return Heap::raw_unchecked_undefined_value();
+ return isolate->heap()->raw_unchecked_undefined_value();
}
-MUST_USE_RESULT static MaybeObject* ProbeCache(Code::Flags flags) {
- Object* probe = GetProbeValue(flags);
- if (probe != Heap::undefined_value()) return probe;
+MUST_USE_RESULT static MaybeObject* ProbeCache(Isolate* isolate,
+ Code::Flags flags) {
+ Heap* heap = isolate->heap();
+ Object* probe = GetProbeValue(isolate, flags);
+ if (probe != heap->undefined_value()) return probe;
// Seed the cache with an undefined value to make sure that any
// generated code object can always be inserted into the cache
// without causing allocation failures.
Object* result;
{ MaybeObject* maybe_result =
- Heap::non_monomorphic_cache()->AtNumberPut(flags,
- Heap::undefined_value());
+ heap->non_monomorphic_cache()->AtNumberPut(flags,
+ heap->undefined_value());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Heap::public_set_non_monomorphic_cache(NumberDictionary::cast(result));
+ heap->public_set_non_monomorphic_cache(NumberDictionary::cast(result));
return probe;
}
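ProbeCache's seeding comment describes a pattern worth a standalone sketch: do the only fallible allocation up front by reserving the slot with a sentinel, so that storing the compiled code later is an overwrite that cannot fail. A hedged illustration in standard C++; V8 uses a NumberDictionary keyed by code flags, and the map and names below are stand-ins.

#include <unordered_map>

enum class Slot { kEmpty, kReserved, kFilled };

struct CacheValue {
  Slot state = Slot::kEmpty;
  int code = 0;  // stand-in for the compiled stub
};

class NonMonomorphicCache {
 public:
  // Returns true if a value is already present; otherwise reserves the
  // slot. This is the only step that may allocate.
  bool ProbeOrReserve(int flags, int* out) {
    CacheValue& v = cache_[flags];  // may allocate a new bucket
    if (v.state == Slot::kFilled) { *out = v.code; return true; }
    v.state = Slot::kReserved;
    return false;
  }

  // Never allocates: the slot was already reserved by ProbeOrReserve.
  void Fill(int flags, int code) {
    CacheValue& v = cache_.at(flags);
    v.state = Slot::kFilled;
    v.code = code;
  }

 private:
  std::unordered_map<int, CacheValue> cache_;
};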
-static MaybeObject* FillCache(MaybeObject* maybe_code) {
+static MaybeObject* FillCache(Isolate* isolate, MaybeObject* maybe_code) {
Object* code;
if (maybe_code->ToObject(&code)) {
if (code->IsCode()) {
- int entry =
- Heap::non_monomorphic_cache()->FindEntry(
- Code::cast(code)->flags());
+ Heap* heap = isolate->heap();
+ int entry = heap->non_monomorphic_cache()->FindEntry(
+ Code::cast(code)->flags());
// The entry must be present; see the comment in ProbeCache.
ASSERT(entry != -1);
- ASSERT(Heap::non_monomorphic_cache()->ValueAt(entry) ==
- Heap::undefined_value());
- Heap::non_monomorphic_cache()->ValueAtPut(entry, code);
- CHECK(GetProbeValue(Code::cast(code)->flags()) == code);
+ ASSERT(heap->non_monomorphic_cache()->ValueAt(entry) ==
+ heap->undefined_value());
+ heap->non_monomorphic_cache()->ValueAtPut(entry, code);
+ CHECK(GetProbeValue(isolate, Code::cast(code)->flags()) == code);
}
}
return maybe_code;
Code::kNoExtraICState,
NORMAL,
argc);
- Object* result = ProbeCache(flags)->ToObjectUnchecked();
- ASSERT(!result->IsUndefined());
+ Object* result = ProbeCache(isolate_, flags)->ToObjectUnchecked();
+ ASSERT(result != isolate_->heap()->undefined_value());
// This might be called during the marking phase of the collector
// hence the unchecked cast.
return reinterpret_cast<Code*>(result);
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallInitialize(flags));
+ return FillCache(isolate_, compiler.CompileCallInitialize(flags));
}
// that it needs so we need to ensure it is generated already.
ComputeCallInitialize(argc, NOT_IN_LOOP);
}
- CALL_HEAP_FUNCTION(ComputeCallInitialize(argc, in_loop, Code::CALL_IC), Code);
+ CALL_HEAP_FUNCTION(isolate_,
+ ComputeCallInitialize(argc, in_loop, Code::CALL_IC), Code);
}
ComputeKeyedCallInitialize(argc, NOT_IN_LOOP);
}
CALL_HEAP_FUNCTION(
+ isolate_,
ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC), Code);
}
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallPreMonomorphic(flags));
+ return FillCache(isolate_, compiler.CompileCallPreMonomorphic(flags));
}
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallNormal(flags));
+ return FillCache(isolate_, compiler.CompileCallNormal(flags));
}
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallMegamorphic(flags));
+ return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
}
argc,
OWN_MAP);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallMiss(flags));
+ return FillCache(isolate_, compiler.CompileCallMiss(flags));
}
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallDebugBreak(flags));
+ return FillCache(isolate_, compiler.CompileCallDebugBreak(flags));
}
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallDebugPrepareStepIn(flags));
+ return FillCache(isolate_, compiler.CompileCallDebugPrepareStepIn(flags));
}
#endif
void StubCache::Clear() {
for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = Heap::empty_string();
- primary_[i].value = Builtins::builtin(Builtins::Illegal);
+ primary_[i].key = isolate_->heap()->empty_string();
+ primary_[i].value = isolate_->builtins()->builtin(
+ Builtins::Illegal);
}
for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = Heap::empty_string();
- secondary_[j].value = Builtins::builtin(Builtins::Illegal);
+ secondary_[j].key = isolate_->heap()->empty_string();
+ secondary_[j].value = isolate_->builtins()->builtin(
+ Builtins::Illegal);
}
}
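// Note (sketch, not part of the patch): Clear() rewrites every entry to the
// pair (empty_string, Illegal builtin), so any subsequent probe misses and
// falls through to the runtime. Because the tables are now instance fields,
// this clears only the owning isolate's cache, e.g. (accessor assumed):
//
//   isolate->stub_cache()->Clear();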
// StubCompiler implementation.
-MaybeObject* LoadCallbackProperty(Arguments args) {
+MaybeObject* LoadCallbackProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args[0]->IsJSObject());
ASSERT(args[1]->IsJSObject());
AccessorInfo* callback = AccessorInfo::cast(args[3]);
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
v8::AccessorInfo info(&args[0]);
- HandleScope scope;
+ HandleScope scope(isolate);
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(getter_address);
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate, getter_address);
result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
- if (result.IsEmpty()) return Heap::undefined_value();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.IsEmpty()) return HEAP->undefined_value();
return *v8::Utils::OpenHandle(*result);
}
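// The RUNTIME_CALLING_CONVENTION / RUNTIME_GET_ISOLATE pair replaces the
// bare Arguments parameter so each runtime entry point receives its isolate
// explicitly. A plausible expansion, assuming the definitions in
// arguments.h (not shown in this excerpt):
//
//   #define RUNTIME_CALLING_CONVENTION Arguments args, Isolate* isolate
//   #define RUNTIME_GET_ISOLATE ASSERT(isolate == Isolate::Current())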
-MaybeObject* StoreCallbackProperty(Arguments args) {
+MaybeObject* StoreCallbackProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
JSObject* recv = JSObject::cast(args[0]);
AccessorInfo* callback = AccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
ASSERT(fun != NULL);
Handle<String> name = args.at<String>(2);
Handle<Object> value = args.at<Object>(3);
- HandleScope scope;
- LOG(ApiNamedPropertyAccess("store", recv, *name));
- CustomArguments custom_args(callback->data(), recv, recv);
+ HandleScope scope(isolate);
+ LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
+ CustomArguments custom_args(isolate, callback->data(), recv, recv);
v8::AccessorInfo info(custom_args.end());
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(setter_address);
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate, setter_address);
fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
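// RETURN_IF_SCHEDULED_EXCEPTION likewise now names the isolate whose
// scheduled exception should be promoted. A minimal sketch, assuming the
// definition in isolate.h (not shown here):
//
//   #define RETURN_IF_SCHEDULED_EXCEPTION(isolate)    \
//     if ((isolate)->has_scheduled_exception())       \
//       return (isolate)->PromoteScheduledException()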
* Returns |Heap::no_interceptor_result_sentinel()| if the interceptor doesn't
* provide any value for the given name.
*/
-MaybeObject* LoadPropertyWithInterceptorOnly(Arguments args) {
+MaybeObject* LoadPropertyWithInterceptorOnly(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
Handle<String> name_handle = args.at<String>(0);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
// Use the interceptor getter.
v8::AccessorInfo info(args.arguments() -
kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope;
+ HandleScope scope(isolate);
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
r = getter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
return *v8::Utils::OpenHandle(*r);
}
}
- return Heap::no_interceptor_result_sentinel();
+ return isolate->heap()->no_interceptor_result_sentinel();
}
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
- IC ic(IC::NO_EXTRA_FRAME);
+ IC ic(IC::NO_EXTRA_FRAME, Isolate::Current());
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsContextual()) return Heap::undefined_value();
+ if (!ic.SlowIsContextual()) return HEAP->undefined_value();
// Throw a reference error.
HandleScope scope;
Handle<String> name_handle(name);
Handle<Object> error =
- Factory::NewReferenceError("not_defined",
+ FACTORY->NewReferenceError("not_defined",
HandleVector(&name_handle, 1));
- return Top::Throw(*error);
+ return Isolate::Current()->Throw(*error);
}
Handle<JSObject> holder_handle = args->at<JSObject>(3);
ASSERT(args->length() == 5); // Last arg is data object.
+ Isolate* isolate = receiver_handle->GetIsolate();
+
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
// Use the interceptor getter.
v8::AccessorInfo info(args->arguments() -
kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope;
+ HandleScope scope(isolate);
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
r = getter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
*attrs = NONE;
return *v8::Utils::OpenHandle(*r);
*receiver_handle,
*name_handle,
attrs);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
* Loads a property with an interceptor performing post interceptor
* lookup if interceptor failed.
*/
-MaybeObject* LoadPropertyWithInterceptorForLoad(Arguments args) {
+MaybeObject* LoadPropertyWithInterceptorForLoad(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
PropertyAttributes attr = NONE;
Object* result;
{ MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
}
-MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args) {
+MaybeObject* LoadPropertyWithInterceptorForCall(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
PropertyAttributes attr;
MaybeObject* result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
// This is a call IC. In this case, we simply return the undefined result,
// which will lead to an exception when trying to invoke the result as a
// function.
}
-MaybeObject* StoreInterceptorProperty(Arguments args) {
+MaybeObject* StoreInterceptorProperty(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
ASSERT(args.length() == 4);
JSObject* recv = JSObject::cast(args[0]);
String* name = String::cast(args[1]);
}
-MaybeObject* KeyedLoadPropertyWithInterceptor(Arguments args) {
+MaybeObject* KeyedLoadPropertyWithInterceptor(RUNTIME_CALLING_CONVENTION) {
+ RUNTIME_GET_ISOLATE;
JSObject* receiver = JSObject::cast(args[0]);
ASSERT(Smi::cast(args[1])->value() >= 0);
uint32_t index = Smi::cast(args[1])->value();
MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
GetCodeWithFlags(flags, "CompileCallInitialize");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_initialize_stubs.Increment();
+ COUNTERS->call_initialize_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
return result;
MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
// The code of the PreMonomorphic stub is the same as the code
// of the Initialized stub. They just differ on the code object flags.
GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_premonomorphic_stubs.Increment();
+ COUNTERS->call_premonomorphic_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
return result;
MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
{ MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallNormal");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_normal_stubs.Increment();
+ COUNTERS->call_normal_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
return result;
MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
} else {
KeyedCallIC::GenerateMegamorphic(masm(), argc);
}
-
Object* result;
{ MaybeObject* maybe_result =
GetCodeWithFlags(flags, "CompileCallMegamorphic");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_megamorphic_stubs.Increment();
+ COUNTERS->call_megamorphic_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
return result;
MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
{ MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallMiss");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_megamorphic_stubs.Increment();
+ COUNTERS->call_megamorphic_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
return result;
#ifdef ENABLE_DEBUGGER_SUPPORT
MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
Debug::GenerateCallICDebugBreak(masm());
Object* result;
{ MaybeObject* maybe_result =
USE(code);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
USE(kind);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
code, code->arguments_count()));
return result;
}
MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
// Use the same code for the step-in preparations as we do for
// the miss case.
int argc = Code::ExtractArgumentsCountFromFlags(flags);
}
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(
- CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
- code,
- code->arguments_count()));
+ PROFILE(isolate(),
+ CodeCreateEvent(
+ CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
+ code,
+ code->arguments_count()));
return result;
}
#endif
// Create code object in the heap.
CodeDesc desc;
masm_.GetCode(&desc);
- MaybeObject* result = Heap::CreateCode(desc, flags, masm_.CodeObject());
+ MaybeObject* result = HEAP->CreateCode(desc, flags, masm_.CodeObject());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs && !result->IsFailure()) {
Code::cast(result->ToObjectUnchecked())->Disassemble(name);
if (!lookup->IsProperty()) {
lookup->NotFound();
Object* proto = holder->GetPrototype();
- if (proto != Heap::null_value()) {
+ if (!proto->IsNull()) {
proto->Lookup(name, lookup);
}
}
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
Code::STORE_IC, type, strict_mode_);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC,
Code::KEYED_STORE_IC, type, strict_mode_);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
}
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+ PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
return result;
}
}
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
return result;
}
#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_
+#include "arguments.h"
#include "macro-assembler.h"
#include "zone-inl.h"
// invalidate the cache whenever a prototype map is changed. The stub
// validates the map chain as in the monomorphic case.
-class SCTableReference;
+class StubCache;
+class SCTableReference {
+ public:
+ Address address() const { return address_; }
+
+ private:
+ explicit SCTableReference(Address address) : address_(address) {}
+
+ Address address_;
+
+ friend class StubCache;
+};
-class StubCache : public AllStatic {
+
+class StubCache {
public:
struct Entry {
String* key;
Code* value;
};
+ void Initialize(bool create_heap_objects);
- static void Initialize(bool create_heap_objects);
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
- MUST_USE_RESULT static MaybeObject* ComputeLoadNonexistent(
+ MUST_USE_RESULT MaybeObject* ComputeLoadNonexistent(
String* name,
JSObject* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT MaybeObject* ComputeLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- MUST_USE_RESULT static MaybeObject* ComputeLoadCallback(
+ MUST_USE_RESULT MaybeObject* ComputeLoadCallback(
String* name,
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback);
- MUST_USE_RESULT static MaybeObject* ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
+ MUST_USE_RESULT MaybeObject* ComputeLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value);
- MUST_USE_RESULT static MaybeObject* ComputeLoadInterceptor(
+ MUST_USE_RESULT MaybeObject* ComputeLoadInterceptor(
String* name,
JSObject* receiver,
JSObject* holder);
- MUST_USE_RESULT static MaybeObject* ComputeLoadNormal();
+ MUST_USE_RESULT MaybeObject* ComputeLoadNormal();
- MUST_USE_RESULT static MaybeObject* ComputeLoadGlobal(
+ MUST_USE_RESULT MaybeObject* ComputeLoadGlobal(
String* name,
JSObject* receiver,
GlobalObject* holder,
// ---
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadCallback(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadCallback(
String* name,
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadConstant(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadConstant(
String* name,
JSObject* receiver,
JSObject* holder,
Object* value);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadInterceptor(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadInterceptor(
String* name,
JSObject* receiver,
JSObject* holder);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadArrayLength(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadArrayLength(
String* name,
JSArray* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadStringLength(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadStringLength(
String* name,
String* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadFunctionPrototype(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadFunctionPrototype(
String* name,
JSFunction* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadSpecialized(
JSObject* receiver);
// ---
- MUST_USE_RESULT static MaybeObject* ComputeStoreField(
+ MUST_USE_RESULT MaybeObject* ComputeStoreField(
String* name,
JSObject* receiver,
int field_index,
Map* transition,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreNormal(
+ MUST_USE_RESULT MaybeObject* ComputeStoreNormal(
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreGlobal(
+ MUST_USE_RESULT MaybeObject* ComputeStoreGlobal(
String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreCallback(
+ MUST_USE_RESULT MaybeObject* ComputeStoreCallback(
String* name,
JSObject* receiver,
AccessorInfo* callback,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreInterceptor(
+ MUST_USE_RESULT MaybeObject* ComputeStoreInterceptor(
String* name,
JSObject* receiver,
StrictModeFlag strict_mode);
// ---
- MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreField(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedStoreField(
String* name,
JSObject* receiver,
int field_index,
Map* transition,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedStoreSpecialized(
JSObject* receiver,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
+
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode);
// ---
- MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- int index);
+ MUST_USE_RESULT MaybeObject* ComputeCallField(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ int index);
- MUST_USE_RESULT static MaybeObject* ComputeCallConstant(
+ MUST_USE_RESULT MaybeObject* ComputeCallConstant(
int argc,
InLoopFlag in_loop,
Code::Kind,
JSObject* holder,
JSFunction* function);
- MUST_USE_RESULT static MaybeObject* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ JSObject* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeCallInterceptor(int argc,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder);
+ MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(int argc,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder);
- MUST_USE_RESULT static MaybeObject* ComputeCallGlobal(
+ MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
int argc,
InLoopFlag in_loop,
Code::Kind,
// ---
- MUST_USE_RESULT static MaybeObject* ComputeCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
- static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
+ Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
- MUST_USE_RESULT static MaybeObject* ComputeCallPreMonomorphic(
+ MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
int argc,
InLoopFlag in_loop,
Code::Kind kind);
- MUST_USE_RESULT static MaybeObject* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
-
- MUST_USE_RESULT static MaybeObject* ComputeCallMegamorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- MUST_USE_RESULT static MaybeObject* ComputeCallMiss(int argc,
+ MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
+ InLoopFlag in_loop,
Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc, Code::Kind kind);
+
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- MUST_USE_RESULT static Code* FindCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT Code* FindCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
#ifdef ENABLE_DEBUGGER_SUPPORT
- MUST_USE_RESULT static MaybeObject* ComputeCallDebugBreak(int argc,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallDebugBreak(int argc, Code::Kind kind);
- MUST_USE_RESULT static MaybeObject* ComputeCallDebugPrepareStepIn(
- int argc,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallDebugPrepareStepIn(int argc,
+ Code::Kind kind);
#endif
// Update cache for entry hash(name, map).
- static Code* Set(String* name, Map* map, Code* code);
+ Code* Set(String* name, Map* map, Code* code);
// Clear the lookup table (@ mark compact collection).
- static void Clear();
+ void Clear();
// Collect all maps that match the name and flags.
- static void CollectMatchingMaps(ZoneMapList* types,
- String* name,
- Code::Flags flags);
+ void CollectMatchingMaps(ZoneMapList* types,
+ String* name,
+ Code::Flags flags);
// Generate code for probing the stub cache table.
// Arguments extra and extra2 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
- static void GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2 = no_reg);
+ void GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2 = no_reg);
enum Table {
kPrimary,
kSecondary
};
+
+ SCTableReference key_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->key));
+ }
+
+
+ SCTableReference value_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->value));
+ }
+
+
+ StubCache::Entry* first_entry(StubCache::Table table) {
+ switch (table) {
+ case StubCache::kPrimary: return StubCache::primary_;
+ case StubCache::kSecondary: return StubCache::secondary_;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
+
private:
+ explicit StubCache(Isolate* isolate);
+
+ friend class Isolate;
friend class SCTableReference;
static const int kPrimaryTableSize = 2048;
static const int kSecondaryTableSize = 512;
- static Entry primary_[];
- static Entry secondary_[];
+ Entry primary_[kPrimaryTableSize];
+ Entry secondary_[kSecondaryTableSize];
// Computes the hashed offsets for primary and secondary caches.
- static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+ RLYSTC int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
// This works well because the heap object tag size and the hash
// shift are equal. Shifting down the length field to get the
// hash code would effectively throw away two bits of the hash
return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}
- static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
+ RLYSTC int SecondaryOffset(String* name, Code::Flags flags, int seed) {
// Use the seed from the primary cache in the secondary cache.
uint32_t string_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
// ends in String::kHashShift 0s. Then we shift it so it is a multiple
// of sizeof(Entry). This makes it easier to avoid making mistakes
// in the hashed offset computations.
- static Entry* entry(Entry* table, int offset) {
+ RLYSTC Entry* entry(Entry* table, int offset) {
const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
return reinterpret_cast<Entry*>(
reinterpret_cast<Address>(table) + (offset << shift_amount));
}
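// Worked sketch of a probe (an assumption, mirroring how GenerateProbe
// uses these helpers): compute the primary slot from (name, flags, map);
// on a key mismatch, reuse the primary offset as the seed for the
// secondary table.
//
//   Entry* e = entry(primary_, PrimaryOffset(name, flags, map));
//   if (e->key != name) {
//     int seed = PrimaryOffset(name, flags, map);
//     e = entry(secondary_, SecondaryOffset(name, flags, seed));
//   }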
-};
+ Isolate* isolate_;
-class SCTableReference {
- public:
- static SCTableReference keyReference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->key));
- }
-
-
- static SCTableReference valueReference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->value));
- }
-
- Address address() const { return address_; }
-
- private:
- explicit SCTableReference(Address address) : address_(address) {}
-
- static StubCache::Entry* first_entry(StubCache::Table table) {
- switch (table) {
- case StubCache::kPrimary: return StubCache::primary_;
- case StubCache::kSecondary: return StubCache::secondary_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Address address_;
+ DISALLOW_COPY_AND_ASSIGN(StubCache);
};
+
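// Usage sketch (assumed wiring; the assembler side is not in this hunk):
// probing code no longer reaches the tables through static state, but asks
// the isolate's stub cache for references to the first key/value slots:
//
//   StubCache* stub_cache = isolate->stub_cache();
//   Address keys =
//       stub_cache->key_reference(StubCache::kPrimary).address();
//   Address values =
//       stub_cache->value_reference(StubCache::kPrimary).address();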
// ------------------------------------------------------------------------
// Support functions for IC stubs for callbacks.
-MaybeObject* LoadCallbackProperty(Arguments args);
-MaybeObject* StoreCallbackProperty(Arguments args);
+MaybeObject* LoadCallbackProperty(RUNTIME_CALLING_CONVENTION);
+MaybeObject* StoreCallbackProperty(RUNTIME_CALLING_CONVENTION);
// Support functions for IC stubs for interceptors.
-MaybeObject* LoadPropertyWithInterceptorOnly(Arguments args);
-MaybeObject* LoadPropertyWithInterceptorForLoad(Arguments args);
-MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args);
-MaybeObject* StoreInterceptorProperty(Arguments args);
-MaybeObject* CallInterceptorProperty(Arguments args);
-MaybeObject* KeyedLoadPropertyWithInterceptor(Arguments args);
+MaybeObject* LoadPropertyWithInterceptorOnly(RUNTIME_CALLING_CONVENTION);
+MaybeObject* LoadPropertyWithInterceptorForLoad(RUNTIME_CALLING_CONVENTION);
+MaybeObject* LoadPropertyWithInterceptorForCall(RUNTIME_CALLING_CONVENTION);
+MaybeObject* StoreInterceptorProperty(RUNTIME_CALLING_CONVENTION);
+MaybeObject* CallInterceptorProperty(RUNTIME_CALLING_CONVENTION);
+MaybeObject* KeyedLoadPropertyWithInterceptor(RUNTIME_CALLING_CONVENTION);
// The stub compiler compiles stubs for the stub cache.
String* name,
LookupResult* lookup);
+ Isolate* isolate() { return scope_.isolate(); }
+
private:
HandleScope scope_;
MacroAssembler masm_;
namespace internal {
#define T(name, string, precedence) #name,
-const char* Token::name_[NUM_TOKENS] = {
+const char* const Token::name_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
#define T(name, string, precedence) string,
-const char* Token::string_[NUM_TOKENS] = {
+const char* const Token::string_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
#define T(name, string, precedence) precedence,
-int8_t Token::precedence_[NUM_TOKENS] = {
+const int8_t Token::precedence_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
}
private:
- static const char* name_[NUM_TOKENS];
- static const char* string_[NUM_TOKENS];
- static int8_t precedence_[NUM_TOKENS];
+ static const char* const name_[NUM_TOKENS];
+ static const char* const string_[NUM_TOKENS];
+ static const int8_t precedence_[NUM_TOKENS];
static const char token_type[NUM_TOKENS];
};
#include "string-stream.h"
#include "vm-state-inl.h"
+// TODO(isolates): move to isolate.cc. This stuff is kept here to
+// simplify merging.
+
namespace v8 {
namespace internal {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-Semaphore* Top::runtime_profiler_semaphore_ = NULL;
-#endif
-ThreadLocalTop Top::thread_local_;
-Mutex* Top::break_access_ = OS::CreateMutex();
-
-NoAllocationStringAllocator* preallocated_message_space = NULL;
-
-bool capture_stack_trace_for_uncaught_exceptions = false;
-int stack_trace_for_uncaught_exceptions_frame_limit = 0;
-StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options =
- StackTrace::kOverview;
-
-Address top_addresses[] = {
-#define C(name) reinterpret_cast<Address>(Top::name()),
- TOP_ADDRESS_LIST(C)
- TOP_ADDRESS_LIST_PROF(C)
-#undef C
- NULL
-};
-
-
v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
}
handler_ = 0;
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
- simulator_ = Simulator::current();
+ simulator_ = Simulator::current(Isolate::Current());
#elif V8_TARGET_ARCH_MIPS
- simulator_ = assembler::mips::Simulator::current();
+ simulator_ = Simulator::current(Isolate::Current());
#endif
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
#endif
#ifdef ENABLE_VMSTATE_TRACKING
current_vm_state_ = EXTERNAL;
- runtime_profiler_state_ = Top::PROF_NOT_IN_JS;
#endif
try_catch_handler_address_ = NULL;
context_ = NULL;
- int id = ThreadManager::CurrentId();
+ int id = Isolate::Current()->thread_manager()->CurrentId();
thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
external_caught_exception_ = false;
failed_access_check_callback_ = NULL;
}
-Address Top::get_address_from_id(Top::AddressId id) {
- return top_addresses[id];
+Address Isolate::get_address_from_id(Isolate::AddressId id) {
+ return isolate_addresses_[id];
}
-char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
+char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
Iterate(v, thread);
return thread_storage + sizeof(ThreadLocalTop);
}
-void Top::IterateThread(ThreadVisitor* v) {
- v->VisitThread(&thread_local_);
+void Isolate::IterateThread(ThreadVisitor* v) {
+ v->VisitThread(thread_local_top());
}
-void Top::IterateThread(ThreadVisitor* v, char* t) {
+void Isolate::IterateThread(ThreadVisitor* v, char* t) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
v->VisitThread(thread);
}
-void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
- Object *pending;
+ Object* pending;
// The pending exception can sometimes be a failure. We can't show
// that to the GC, which only understands objects.
if (thread->pending_exception_->ToObject(&pending)) {
}
-void Top::Iterate(ObjectVisitor* v) {
- ThreadLocalTop* current_t = &thread_local_;
+void Isolate::Iterate(ObjectVisitor* v) {
+ ThreadLocalTop* current_t = thread_local_top();
Iterate(v, current_t);
}
-void Top::InitializeThreadLocal() {
- thread_local_.Initialize();
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
-}
-
-
-// Create a dummy thread that will wait forever on a semaphore. The only
-// purpose for this thread is to have some stack area to save essential data
-// into for use by a stacks only core dump (aka minidump).
-class PreallocatedMemoryThread: public Thread {
- public:
- PreallocatedMemoryThread()
- : Thread("v8:PreallocMem"),
- keep_running_(true) {
- wait_for_ever_semaphore_ = OS::CreateSemaphore(0);
- data_ready_semaphore_ = OS::CreateSemaphore(0);
- }
-
- // When the thread starts running it will allocate a fixed number of bytes
- // on the stack and publish the location of this memory for others to use.
- void Run() {
- EmbeddedVector<char, 15 * 1024> local_buffer;
-
- // Initialize the buffer with a known good value.
- OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
- local_buffer.length());
-
- // Publish the local buffer and signal its availability.
- data_ = local_buffer.start();
- length_ = local_buffer.length();
- data_ready_semaphore_->Signal();
-
- while (keep_running_) {
- // This thread will wait here until the end of time.
- wait_for_ever_semaphore_->Wait();
- }
-
- // Make sure we access the buffer after the wait to remove all possibility
- // of it being optimized away.
- OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
- local_buffer.length());
- }
-
- static char* data() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return data_;
- }
-
- static unsigned length() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return length_;
- }
-
- static void StartThread() {
- if (the_thread_ != NULL) return;
-
- the_thread_ = new PreallocatedMemoryThread();
- the_thread_->Start();
- }
-
- // Stop the PreallocatedMemoryThread and release its resources.
- static void StopThread() {
- if (the_thread_ == NULL) return;
-
- the_thread_->keep_running_ = false;
- wait_for_ever_semaphore_->Signal();
-
- // Wait for the thread to terminate.
- the_thread_->Join();
-
- if (data_ready_semaphore_ != NULL) {
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
-
- delete wait_for_ever_semaphore_;
- wait_for_ever_semaphore_ = NULL;
-
- // Done with the thread entirely.
- delete the_thread_;
- the_thread_ = NULL;
- }
-
- private:
- // Used to make sure that the thread keeps looping even for spurious wakeups.
- bool keep_running_;
-
- // The preallocated memory thread singleton.
- static PreallocatedMemoryThread* the_thread_;
- // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
- static Semaphore* wait_for_ever_semaphore_;
- // Semaphore to signal that the data has been initialized.
- static Semaphore* data_ready_semaphore_;
-
- // Location and size of the preallocated memory block.
- static char* data_;
- static unsigned length_;
-
- DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
-};
-
-PreallocatedMemoryThread* PreallocatedMemoryThread::the_thread_ = NULL;
-Semaphore* PreallocatedMemoryThread::wait_for_ever_semaphore_ = NULL;
-Semaphore* PreallocatedMemoryThread::data_ready_semaphore_ = NULL;
-char* PreallocatedMemoryThread::data_ = NULL;
-unsigned PreallocatedMemoryThread::length_ = 0;
-
-static bool initialized = false;
-
-void Top::Initialize() {
- CHECK(!initialized);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ASSERT(runtime_profiler_semaphore_ == NULL);
- runtime_profiler_semaphore_ = OS::CreateSemaphore(0);
-#endif
-
- InitializeThreadLocal();
-
- // Only preallocate on the first initialization.
- if (FLAG_preallocate_message_memory && (preallocated_message_space == NULL)) {
- // Start the thread which will set aside some memory.
- PreallocatedMemoryThread::StartThread();
- preallocated_message_space =
- new NoAllocationStringAllocator(PreallocatedMemoryThread::data(),
- PreallocatedMemoryThread::length());
- PreallocatedStorage::Init(PreallocatedMemoryThread::length() / 4);
- }
- initialized = true;
-}
-
-
-void Top::TearDown() {
- if (initialized) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- delete runtime_profiler_semaphore_;
- runtime_profiler_semaphore_ = NULL;
-#endif
-
- // Remove the external reference to the preallocated stack memory.
- if (preallocated_message_space != NULL) {
- delete preallocated_message_space;
- preallocated_message_space = NULL;
- }
-
- PreallocatedMemoryThread::StopThread();
- initialized = false;
- }
-}
-
-
-void Top::RegisterTryCatchHandler(v8::TryCatch* that) {
+void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
// The ARM simulator has a separate JS stack. We therefore register
// the C++ try catch handler with the simulator and get back an
// address that can be used for comparisons with addresses into the
// returned will be the address of the C++ try catch handler itself.
Address address = reinterpret_cast<Address>(
SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
- thread_local_.set_try_catch_handler_address(address);
+ thread_local_top()->set_try_catch_handler_address(address);
}
-void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(try_catch_handler() == that);
- thread_local_.set_try_catch_handler_address(
+void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
+ ASSERT(thread_local_top()->TryCatchHandler() == that);
+ thread_local_top()->set_try_catch_handler_address(
reinterpret_cast<Address>(that->next_));
- thread_local_.catcher_ = NULL;
+ thread_local_top()->catcher_ = NULL;
SimulatorStack::UnregisterCTryCatch();
}
-
-static int stack_trace_nesting_level = 0;
-static StringStream* incomplete_message = NULL;
-
-
-Handle<String> Top::StackTraceString() {
- if (stack_trace_nesting_level == 0) {
- stack_trace_nesting_level++;
+Handle<String> Isolate::StackTraceString() {
+ if (stack_trace_nesting_level_ == 0) {
+ stack_trace_nesting_level_++;
HeapStringAllocator allocator;
StringStream::ClearMentionedObjectCache();
StringStream accumulator(&allocator);
- incomplete_message = &accumulator;
+ incomplete_message_ = &accumulator;
PrintStack(&accumulator);
Handle<String> stack_trace = accumulator.ToString();
- incomplete_message = NULL;
- stack_trace_nesting_level = 0;
+ incomplete_message_ = NULL;
+ stack_trace_nesting_level_ = 0;
return stack_trace;
- } else if (stack_trace_nesting_level == 1) {
- stack_trace_nesting_level++;
+ } else if (stack_trace_nesting_level_ == 1) {
+ stack_trace_nesting_level_++;
OS::PrintError(
"\n\nAttempt to print stack while printing stack (double fault)\n");
OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message->OutputToStdOut();
- return Factory::empty_symbol();
+ incomplete_message_->OutputToStdOut();
+ return factory()->empty_symbol();
} else {
OS::Abort();
// Unreachable
- return Factory::empty_symbol();
+ return factory()->empty_symbol();
}
}
-Handle<JSArray> Top::CaptureCurrentStackTrace(
+Handle<JSArray> Isolate::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
// Ensure no negative values.
int limit = Max(frame_limit, 0);
- Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit);
+ Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
- Handle<String> column_key = Factory::LookupAsciiSymbol("column");
- Handle<String> line_key = Factory::LookupAsciiSymbol("lineNumber");
- Handle<String> script_key = Factory::LookupAsciiSymbol("scriptName");
+ Handle<String> column_key = factory()->LookupAsciiSymbol("column");
+ Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
+ Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
Handle<String> name_or_source_url_key =
- Factory::LookupAsciiSymbol("nameOrSourceURL");
+ factory()->LookupAsciiSymbol("nameOrSourceURL");
Handle<String> script_name_or_source_url_key =
- Factory::LookupAsciiSymbol("scriptNameOrSourceURL");
- Handle<String> function_key = Factory::LookupAsciiSymbol("functionName");
- Handle<String> eval_key = Factory::LookupAsciiSymbol("isEval");
- Handle<String> constructor_key = Factory::LookupAsciiSymbol("isConstructor");
+ factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
+ Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
+ Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+ Handle<String> constructor_key =
+ factory()->LookupAsciiSymbol("isConstructor");
StackTraceFrameIterator it;
int frames_seen = 0;
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
// Create a JSObject to hold the information for the StackFrame.
- Handle<JSObject> stackFrame = Factory::NewJSObject(object_function());
+ Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
Handle<JSFunction> fun = frames[i].function();
Handle<Script> script(Script::cast(fun->shared()->script()));
}
if (options & StackTrace::kScriptName) {
- Handle<Object> script_name(script->name());
+ Handle<Object> script_name(script->name(), this);
SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
}
if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> script_name(script->name());
+ Handle<Object> script_name(script->name(), this);
Handle<JSValue> script_wrapper = GetScriptWrapper(script);
Handle<Object> property = GetProperty(script_wrapper,
name_or_source_url_key);
Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
NULL, &caught_exception);
if (caught_exception) {
- result = Factory::undefined_value();
+ result = factory()->undefined_value();
}
SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
result);
}
if (options & StackTrace::kFunctionName) {
- Handle<Object> fun_name(fun->shared()->name());
+ Handle<Object> fun_name(fun->shared()->name(), this);
if (fun_name->ToBoolean()->IsFalse()) {
- fun_name = Handle<Object>(fun->shared()->inferred_name());
+ fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
}
SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
}
if (options & StackTrace::kIsEval) {
int type = Smi::cast(script->compilation_type())->value();
Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
- Factory::true_value() : Factory::false_value();
+ factory()->true_value() : factory()->false_value();
SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
}
if (options & StackTrace::kIsConstructor) {
Handle<Object> is_constructor = (frames[i].is_constructor()) ?
- Factory::true_value() : Factory::false_value();
+ factory()->true_value() : factory()->false_value();
SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
}
}
-void Top::PrintStack() {
- if (stack_trace_nesting_level == 0) {
- stack_trace_nesting_level++;
+void Isolate::PrintStack() {
+ if (stack_trace_nesting_level_ == 0) {
+ stack_trace_nesting_level_++;
StringAllocator* allocator;
- if (preallocated_message_space == NULL) {
+ if (preallocated_message_space_ == NULL) {
allocator = new HeapStringAllocator();
} else {
- allocator = preallocated_message_space;
+ allocator = preallocated_message_space_;
}
NativeAllocationChecker allocation_checker(
StringStream::ClearMentionedObjectCache();
StringStream accumulator(allocator);
- incomplete_message = &accumulator;
+ incomplete_message_ = &accumulator;
PrintStack(&accumulator);
accumulator.OutputToStdOut();
accumulator.Log();
- incomplete_message = NULL;
- stack_trace_nesting_level = 0;
- if (preallocated_message_space == NULL) {
+ incomplete_message_ = NULL;
+ stack_trace_nesting_level_ = 0;
+ if (preallocated_message_space_ == NULL) {
// Remove the HeapStringAllocator created above.
delete allocator;
}
- } else if (stack_trace_nesting_level == 1) {
- stack_trace_nesting_level++;
+ } else if (stack_trace_nesting_level_ == 1) {
+ stack_trace_nesting_level_++;
OS::PrintError(
"\n\nAttempt to print stack while printing stack (double fault)\n");
OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message->OutputToStdOut();
+ incomplete_message_->OutputToStdOut();
}
}
}
-void Top::PrintStack(StringStream* accumulator) {
+void Isolate::PrintStack(StringStream* accumulator) {
+ if (!IsInitialized()) {
+ accumulator->Add(
+ "\n==== Stack trace is not available ==========================\n\n");
+ accumulator->Add(
+ "\n==== Isolate for the thread is not initialized =============\n\n");
+ return;
+ }
// The MentionedObjectCache is not GC-proof at the moment.
AssertNoAllocation nogc;
ASSERT(StringStream::IsMentionedObjectCacheClear());
// Avoid printing anything if there are no frames.
- if (c_entry_fp(GetCurrentThread()) == 0) return;
+ if (c_entry_fp(thread_local_top()) == 0) return;
accumulator->Add(
"\n==== Stack trace ============================================\n\n");
}
-void Top::SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback) {
- thread_local_.failed_access_check_callback_ = callback;
+void Isolate::SetFailedAccessCheckCallback(
+ v8::FailedAccessCheckCallback callback) {
+ thread_local_top()->failed_access_check_callback_ = callback;
}
-void Top::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
- if (!thread_local_.failed_access_check_callback_) return;
+void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+ if (!thread_local_top()->failed_access_check_callback_) return;
ASSERT(receiver->IsAccessCheckNeeded());
- ASSERT(Top::context());
+ ASSERT(context());
// Get the data object from access check info.
JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
if (!constructor->shared()->IsApiFunction()) return;
Object* data_obj =
constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == Heap::undefined_value()) return;
+ if (data_obj == heap_.undefined_value()) return;
HandleScope scope;
Handle<JSObject> receiver_handle(receiver);
Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- thread_local_.failed_access_check_callback_(
+ thread_local_top()->failed_access_check_callback_(
v8::Utils::ToLocal(receiver_handle),
type,
v8::Utils::ToLocal(data));
};
-static MayAccessDecision MayAccessPreCheck(JSObject* receiver,
+static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
+ JSObject* receiver,
v8::AccessType type) {
// During bootstrapping, callback functions are not enabled yet.
- if (Bootstrapper::IsActive()) return YES;
+ if (isolate->bootstrapper()->IsActive()) return YES;
if (receiver->IsJSGlobalProxy()) {
Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
if (!receiver_context->IsContext()) return NO;
// Get the global context of current top context.
- // avoid using Top::global_context() because it uses Handle.
- Context* global_context = Top::context()->global()->global_context();
+ // Avoid using Isolate::global_context() because it uses Handle.
+ Context* global_context = isolate->context()->global()->global_context();
if (receiver_context == global_context) return YES;
if (Context::cast(receiver_context)->security_token() ==
}
-bool Top::MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type) {
+bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
+ v8::AccessType type) {
ASSERT(receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
// Skip checks for hidden properties access. Note, we do not
// require existence of a context in this case.
- if (key == Heap::hidden_symbol()) return true;
+ if (key == heap_.hidden_symbol()) return true;
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
- ASSERT(Top::context());
+ ASSERT(context());
- MayAccessDecision decision = MayAccessPreCheck(receiver, type);
+ MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
if (decision != UNKNOWN) return decision == YES;
// Get named access check callback
Object* data_obj =
constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == Heap::undefined_value()) return false;
+ if (data_obj == heap_.undefined_value()) return false;
Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
v8::NamedSecurityCallback callback =
if (!callback) return false;
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> key_handle(key);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- LOG(ApiNamedSecurityCheck(key));
+ HandleScope scope(this);
+ Handle<JSObject> receiver_handle(receiver, this);
+ Handle<Object> key_handle(key, this);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+ LOG(this, ApiNamedSecurityCheck(key));
bool result = false;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(this, EXTERNAL);
result = callback(v8::Utils::ToLocal(receiver_handle),
v8::Utils::ToLocal(key_handle),
type,
}
-bool Top::MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type) {
+bool Isolate::MayIndexedAccess(JSObject* receiver,
+ uint32_t index,
+ v8::AccessType type) {
ASSERT(receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
- ASSERT(Top::context());
- // The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
+ ASSERT(context());
- MayAccessDecision decision = MayAccessPreCheck(receiver, type);
+ MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
if (decision != UNKNOWN) return decision == YES;
// Get indexed access check callback
Object* data_obj =
constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == Heap::undefined_value()) return false;
+ if (data_obj == heap_.undefined_value()) return false;
Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
v8::IndexedSecurityCallback callback =
if (!callback) return false;
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- LOG(ApiIndexedSecurityCheck(index));
+ HandleScope scope(this);
+ Handle<JSObject> receiver_handle(receiver, this);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+ LOG(this, ApiIndexedSecurityCheck(index));
bool result = false;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(this, EXTERNAL);
result = callback(v8::Utils::ToLocal(receiver_handle),
index,
type,
}
-const char* Top::kStackOverflowMessage =
+const char* const Isolate::kStackOverflowMessage =
"Uncaught RangeError: Maximum call stack size exceeded";
-Failure* Top::StackOverflow() {
+Failure* Isolate::StackOverflow() {
HandleScope scope;
- Handle<String> key = Factory::stack_overflow_symbol();
+ Handle<String> key = factory()->stack_overflow_symbol();
Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(Top::builtins(), key));
+ Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
Handle<Object> exception = Copy(boilerplate);
// TODO(1240995): To avoid having to call JavaScript code to compute
// the message for stack overflow exceptions which is very likely to
}
-Failure* Top::TerminateExecution() {
- DoThrow(Heap::termination_exception(), NULL, NULL);
+Failure* Isolate::TerminateExecution() {
+ DoThrow(heap_.termination_exception(), NULL, NULL);
return Failure::Exception();
}
-Failure* Top::Throw(Object* exception, MessageLocation* location) {
+Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
DoThrow(exception, location, NULL);
return Failure::Exception();
}
-Failure* Top::ReThrow(MaybeObject* exception, MessageLocation* location) {
+Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
bool can_be_caught_externally = false;
ShouldReportException(&can_be_caught_externally,
is_catchable_by_javascript(exception));
- thread_local_.catcher_ = can_be_caught_externally ?
+ thread_local_top()->catcher_ = can_be_caught_externally ?
try_catch_handler() : NULL;
// Set the exception being re-thrown.
}
-Failure* Top::ThrowIllegalOperation() {
- return Throw(Heap::illegal_access_symbol());
+Failure* Isolate::ThrowIllegalOperation() {
+ return Throw(heap_.illegal_access_symbol());
}
-void Top::ScheduleThrow(Object* exception) {
+void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
Throw(exception);
- thread_local_.scheduled_exception_ = pending_exception();
- thread_local_.external_caught_exception_ = false;
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
}
-Failure* Top::PromoteScheduledException() {
+Failure* Isolate::PromoteScheduledException() {
MaybeObject* thrown = scheduled_exception();
clear_scheduled_exception();
// Re-throw the exception to avoid getting repeated error reporting.
}
-void Top::PrintCurrentStackTrace(FILE* out) {
+void Isolate::PrintCurrentStackTrace(FILE* out) {
StackTraceFrameIterator it;
while (!it.done()) {
HandleScope scope;
// Find code position if recorded in relocation info.
JavaScriptFrame* frame = it.frame();
- int pos = frame->code()->SourcePosition(frame->pc());
+ int pos = frame->LookupCode(this)->SourcePosition(frame->pc());
Handle<Object> pos_obj(Smi::FromInt(pos));
// Fetch function and receiver.
Handle<JSFunction> fun(JSFunction::cast(frame->function()));
// current frame is the top-level frame.
it.Advance();
Handle<Object> is_top_level = it.done()
- ? Factory::true_value()
- : Factory::false_value();
+ ? factory()->true_value()
+ : factory()->false_value();
// Generate and print stack trace line.
Handle<String> line =
Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
}
-void Top::ComputeLocation(MessageLocation* target) {
- *target = MessageLocation(Handle<Script>(Heap::empty_script()), -1, -1);
+void Isolate::ComputeLocation(MessageLocation* target) {
+ *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
StackTraceFrameIterator it;
if (!it.done()) {
JavaScriptFrame* frame = it.frame();
Object* script = fun->shared()->script();
if (script->IsScript() &&
!(Script::cast(script)->source()->IsUndefined())) {
- int pos = frame->code()->SourcePosition(frame->pc());
+ int pos = frame->LookupCode(this)->SourcePosition(frame->pc());
// Compute the location from the function and the reloc info.
Handle<Script> casted_script(Script::cast(script));
*target = MessageLocation(casted_script, pos, pos + 1);
}
-bool Top::ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript) {
+bool Isolate::ShouldReportException(bool* can_be_caught_externally,
+ bool catchable_by_javascript) {
// Find the top-most try-catch handler.
StackHandler* handler =
- StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
+ StackHandler::FromAddress(Isolate::handler(thread_local_top()));
while (handler != NULL && !handler->is_try_catch()) {
handler = handler->next();
}
// Get the address of the external handler so we can compare the address to
// determine which one is closer to the top of the stack.
- Address external_handler_address = thread_local_.try_catch_handler_address();
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
// The exception has been externally caught if and only if there is
// an external handler which is on top of the top-most try-catch
}
-void Top::DoThrow(MaybeObject* exception,
- MessageLocation* location,
- const char* message) {
+void Isolate::DoThrow(MaybeObject* exception,
+ MessageLocation* location,
+ const char* message) {
ASSERT(!has_pending_exception());
HandleScope scope;
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger of exception.
if (catchable_by_javascript) {
- Debugger::OnException(exception_handle, report_exception);
+ debugger_->OnException(exception_handle, report_exception);
}
#endif
ComputeLocation(&potential_computed_location);
location = &potential_computed_location;
}
- if (!Bootstrapper::IsActive()) {
+ if (!bootstrapper()->IsActive()) {
// It's not safe to try to make message objects or collect stack
// traces while the bootstrapper is active since the infrastructure
// may not have been properly initialized.
Handle<String> stack_trace;
if (FLAG_trace_exception) stack_trace = StackTraceString();
Handle<JSArray> stack_trace_object;
- if (report_exception && capture_stack_trace_for_uncaught_exceptions) {
- stack_trace_object = Top::CaptureCurrentStackTrace(
- stack_trace_for_uncaught_exceptions_frame_limit,
- stack_trace_for_uncaught_exceptions_options);
+ if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
+ stack_trace_object = CaptureCurrentStackTrace(
+ stack_trace_for_uncaught_exceptions_frame_limit_,
+ stack_trace_for_uncaught_exceptions_options_);
}
ASSERT(is_object); // Can't use the handle unless there's a real object.
message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
}
// Save the message for reporting if the exception remains uncaught.
- thread_local_.has_pending_message_ = report_exception;
- thread_local_.pending_message_ = message;
+ thread_local_top()->has_pending_message_ = report_exception;
+ thread_local_top()->pending_message_ = message;
if (!message_obj.is_null()) {
- thread_local_.pending_message_obj_ = *message_obj;
+ thread_local_top()->pending_message_obj_ = *message_obj;
if (location != NULL) {
- thread_local_.pending_message_script_ = *location->script();
- thread_local_.pending_message_start_pos_ = location->start_pos();
- thread_local_.pending_message_end_pos_ = location->end_pos();
+ thread_local_top()->pending_message_script_ = *location->script();
+ thread_local_top()->pending_message_start_pos_ = location->start_pos();
+ thread_local_top()->pending_message_end_pos_ = location->end_pos();
}
}
// Do not forget to clean catcher_ if the currently thrown exception cannot
// be caught. If necessary, ReThrow will update the catcher.
- thread_local_.catcher_ = can_be_caught_externally ?
+ thread_local_top()->catcher_ = can_be_caught_externally ?
try_catch_handler() : NULL;
// NOTE: Notifying the debugger or generating the message
}
-bool Top::IsExternallyCaught() {
+bool Isolate::IsExternallyCaught() {
ASSERT(has_pending_exception());
- if ((thread_local_.catcher_ == NULL) ||
- (try_catch_handler() != thread_local_.catcher_)) {
+ if ((thread_local_top()->catcher_ == NULL) ||
+ (try_catch_handler() != thread_local_top()->catcher_)) {
// When throwing the exception, we found no v8::TryCatch
// which should care about this exception.
return false;
// Get the address of the external handler so we can compare the address to
// determine which one is closer to the top of the stack.
- Address external_handler_address = thread_local_.try_catch_handler_address();
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
ASSERT(external_handler_address != NULL);
// The exception has been externally caught if and only if there is
// aborted by jumps in control flow like return, break, etc., and we'll
// have another chance to set a proper v8::TryCatch.
StackHandler* handler =
- StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
+ StackHandler::FromAddress(Isolate::handler(thread_local_top()));
while (handler != NULL && handler->address() < external_handler_address) {
ASSERT(!handler->is_try_catch());
if (handler->is_try_finally()) return false;
}
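
The decision above is what ultimately feeds a v8::TryCatch on the embedder side. A minimal usage sketch, assuming the public v8.h API of this period ("script" stands for a hypothetical, already-compiled v8::Script):

    v8::TryCatch try_catch;
    v8::Handle<v8::Value> result = script->Run();
    if (result.IsEmpty()) {
      // IsExternallyCaught() decided this handler owns the exception, so
      // Exception() is populated here.
      v8::String::Utf8Value error(try_catch.Exception());
      fprintf(stderr, "uncaught exception: %s\n", *error);
    }
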
-void Top::ReportPendingMessages() {
+void Isolate::ReportPendingMessages() {
ASSERT(has_pending_exception());
// If the pending exception is OutOfMemoryException, set out_of_memory in
// the global context. Note: We have to mark the global context here
// since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
// set it.
bool external_caught = IsExternallyCaught();
- thread_local_.external_caught_exception_ = external_caught;
- HandleScope scope;
- if (thread_local_.pending_exception_ == Failure::OutOfMemoryException()) {
+ thread_local_top()->external_caught_exception_ = external_caught;
+ HandleScope scope(this);
+ if (thread_local_top()->pending_exception_ ==
+ Failure::OutOfMemoryException()) {
context()->mark_out_of_memory();
- } else if (thread_local_.pending_exception_ ==
- Heap::termination_exception()) {
+ } else if (thread_local_top()->pending_exception_ ==
+ heap_.termination_exception()) {
if (external_caught) {
try_catch_handler()->can_continue_ = false;
- try_catch_handler()->exception_ = Heap::null_value();
+ try_catch_handler()->exception_ = heap_.null_value();
}
} else {
// At this point all non-object (failure) exceptions have
// been dealt with so this shouldn't fail.
Object* pending_exception_object = pending_exception()->ToObjectUnchecked();
Handle<Object> exception(pending_exception_object);
- thread_local_.external_caught_exception_ = false;
+ thread_local_top()->external_caught_exception_ = false;
if (external_caught) {
try_catch_handler()->can_continue_ = true;
- try_catch_handler()->exception_ = thread_local_.pending_exception_;
- if (!thread_local_.pending_message_obj_->IsTheHole()) {
- try_catch_handler()->message_ = thread_local_.pending_message_obj_;
+ try_catch_handler()->exception_ = thread_local_top()->pending_exception_;
+ if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
+ try_catch_handler()->message_ =
+ thread_local_top()->pending_message_obj_;
}
}
- if (thread_local_.has_pending_message_) {
- thread_local_.has_pending_message_ = false;
- if (thread_local_.pending_message_ != NULL) {
- MessageHandler::ReportMessage(thread_local_.pending_message_);
- } else if (!thread_local_.pending_message_obj_->IsTheHole()) {
- Handle<Object> message_obj(thread_local_.pending_message_obj_);
- if (thread_local_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_.pending_message_script_);
- int start_pos = thread_local_.pending_message_start_pos_;
- int end_pos = thread_local_.pending_message_end_pos_;
+ if (thread_local_top()->has_pending_message_) {
+ thread_local_top()->has_pending_message_ = false;
+ if (thread_local_top()->pending_message_ != NULL) {
+ MessageHandler::ReportMessage(thread_local_top()->pending_message_);
+ } else if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
+ Handle<Object> message_obj(thread_local_top()->pending_message_obj_);
+ if (thread_local_top()->pending_message_script_ != NULL) {
+ Handle<Script> script(thread_local_top()->pending_message_script_);
+ int start_pos = thread_local_top()->pending_message_start_pos_;
+ int end_pos = thread_local_top()->pending_message_end_pos_;
MessageLocation location(script, start_pos, end_pos);
MessageHandler::ReportMessage(&location, message_obj);
} else {
}
}
}
- thread_local_.external_caught_exception_ = external_caught;
+ thread_local_top()->external_caught_exception_ = external_caught;
set_pending_exception(*exception);
}
clear_pending_message();
}
-void Top::TraceException(bool flag) {
- FLAG_trace_exception = flag;
+void Isolate::TraceException(bool flag) {
+ FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
}
-bool Top::OptionalRescheduleException(bool is_bottom_call) {
+bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
// Always reschedule out-of-memory exceptions.
if (!is_out_of_memory()) {
bool is_termination_exception =
- pending_exception() == Heap::termination_exception();
+ pending_exception() == heap_.termination_exception();
// Do not reschedule the exception if this is the bottom call.
bool clear_exception = is_bottom_call;
if (is_termination_exception) {
if (is_bottom_call) {
- thread_local_.external_caught_exception_ = false;
+ thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
return false;
}
- } else if (thread_local_.external_caught_exception_) {
+ } else if (thread_local_top()->external_caught_exception_) {
// If the exception is externally caught, clear it if there are no
// JavaScript frames on the way to the C++ frame that has the
// external handler.
- ASSERT(thread_local_.try_catch_handler_address() != NULL);
+ ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
Address external_handler_address =
- thread_local_.try_catch_handler_address();
+ thread_local_top()->try_catch_handler_address();
JavaScriptFrameIterator it;
if (it.done() || (it.frame()->sp() > external_handler_address)) {
clear_exception = true;
// Clear the exception if needed.
if (clear_exception) {
- thread_local_.external_caught_exception_ = false;
+ thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
return false;
}
}
// Reschedule the exception.
- thread_local_.scheduled_exception_ = pending_exception();
+ thread_local_top()->scheduled_exception_ = pending_exception();
clear_pending_exception();
return true;
}
-void Top::SetCaptureStackTraceForUncaughtExceptions(
+void Isolate::SetCaptureStackTraceForUncaughtExceptions(
bool capture,
int frame_limit,
StackTrace::StackTraceOptions options) {
- capture_stack_trace_for_uncaught_exceptions = capture;
- stack_trace_for_uncaught_exceptions_frame_limit = frame_limit;
- stack_trace_for_uncaught_exceptions_options = options;
+ capture_stack_trace_for_uncaught_exceptions_ = capture;
+ stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
+ stack_trace_for_uncaught_exceptions_options_ = options;
}
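
These flags are normally set through the public API entry point of the same name; an illustrative embedder call (v8::StackTrace::kDetailed is one of the standard StackTraceOptions values):

    // Capture up to ten frames, with full detail, for uncaught exceptions.
    v8::V8::SetCaptureStackTraceForUncaughtExceptions(
        true, 10, v8::StackTrace::kDetailed);
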
-bool Top::is_out_of_memory() {
+bool Isolate::is_out_of_memory() {
if (has_pending_exception()) {
MaybeObject* e = pending_exception();
if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
}
-Handle<Context> Top::global_context() {
- GlobalObject* global = thread_local_.context_->global();
+Handle<Context> Isolate::global_context() {
+ GlobalObject* global = thread_local_top()->context_->global();
return Handle<Context>(global->global_context());
}
-Handle<Context> Top::GetCallingGlobalContext() {
+Handle<Context> Isolate::GetCallingGlobalContext() {
JavaScriptFrameIterator it;
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::InDebugger()) {
+ if (debug_->InDebugger()) {
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
- if (context->global_context() == *Debug::debug_context()) {
+ if (context->global_context() == *debug_->debug_context()) {
it.Advance();
} else {
break;
}
-char* Top::ArchiveThread(char* to) {
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(thread_local_));
+char* Isolate::ArchiveThread(char* to) {
+ if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+ RuntimeProfiler::IsolateExitedJS(this);
+ }
+ memcpy(to, reinterpret_cast<char*>(thread_local_top()),
+ sizeof(ThreadLocalTop));
InitializeThreadLocal();
- return to + sizeof(thread_local_);
+ return to + sizeof(ThreadLocalTop);
}
-char* Top::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(thread_local_));
+char* Isolate::RestoreThread(char* from) {
+ memcpy(reinterpret_cast<char*>(thread_local_top()), from,
+ sizeof(ThreadLocalTop));
// This might be just paranoia, but it seems to be needed in case the
// thread-local state is restored on a separate OS thread.
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
- thread_local_.simulator_ = Simulator::current();
+ thread_local_top()->simulator_ = Simulator::current(this);
#elif V8_TARGET_ARCH_MIPS
- thread_local_.simulator_ = assembler::mips::Simulator::current();
+ thread_local_top()->simulator_ = Simulator::current(this);
#endif
#endif
- return from + sizeof(thread_local_);
+ if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+ RuntimeProfiler::IsolateEnteredJS(this);
+ }
+ return from + sizeof(ThreadLocalTop);
}
} } // namespace v8::internal
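
ArchiveThread()/RestoreThread() follow a flat-buffer protocol: each per-thread component copies its state into the buffer and returns the advanced write pointer, so slot order and total size must agree with ArchiveSpacePerThread(). A minimal sketch of that contract, using only names from this patch (the buffer management is illustrative):

    char* buffer = new char[Isolate::ArchiveSpacePerThread()];
    char* to = isolate->ArchiveThread(buffer);    // copy out ThreadLocalTop and
                                                  // reinitialize the live copy
    ASSERT(to == buffer + Isolate::ArchiveSpacePerThread());
    // ... later, possibly on a different OS thread that holds the lock ...
    char* from = isolate->RestoreThread(buffer);  // copy it back; fix up any
                                                  // simulator pointer
    ASSERT(from == buffer + Isolate::ArchiveSpacePerThread());
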
+++ /dev/null
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TOP_H_
-#define V8_TOP_H_
-
-#include "atomicops.h"
-#include "compilation-cache.h"
-#include "frames-inl.h"
-#include "runtime-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-class Simulator;
-
-#define RETURN_IF_SCHEDULED_EXCEPTION() \
- if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()
-
-#define RETURN_IF_EMPTY_HANDLE_VALUE(call, value) \
- if (call.is_null()) { \
- ASSERT(Top::has_pending_exception()); \
- return value; \
- }
-
-#define RETURN_IF_EMPTY_HANDLE(call) \
- RETURN_IF_EMPTY_HANDLE_VALUE(call, Failure::Exception())
-
-// Top has static variables used for JavaScript execution.
-
-class SaveContext; // Forward declaration.
-class ThreadVisitor; // Defined in v8threads.h
-class VMState; // Defined in vm-state.h
-
-class ThreadLocalTop BASE_EMBEDDED {
- public:
- // Initialize the thread data.
- void Initialize();
-
- // Get the top C++ try catch handler or NULL if none are registered.
- //
-  // This method is not guaranteed to return an address that can be
- // used for comparison with addresses into the JS stack. If such an
- // address is needed, use try_catch_handler_address.
- v8::TryCatch* TryCatchHandler();
-
- // Get the address of the top C++ try catch handler or NULL if
- // none are registered.
- //
- // This method always returns an address that can be compared to
- // pointers into the JavaScript stack. When running on actual
- // hardware, try_catch_handler_address and TryCatchHandler return
- // the same pointer. When running on a simulator with a separate JS
- // stack, try_catch_handler_address returns a JS stack address that
- // corresponds to the place on the JS stack where the C++ handler
- // would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
-
- void Free() {
- ASSERT(!has_pending_message_);
- ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_address_ == NULL);
- }
-
- // The context where the current execution method is created and for variable
- // lookups.
- Context* context_;
- int thread_id_;
- MaybeObject* pending_exception_;
- bool has_pending_message_;
- const char* pending_message_;
- Object* pending_message_obj_;
- Script* pending_message_script_;
- int pending_message_start_pos_;
- int pending_message_end_pos_;
- // Use a separate value for scheduled exceptions to preserve the
- // invariants that hold about pending_exception. We may want to
- // unify them later.
- MaybeObject* scheduled_exception_;
- bool external_caught_exception_;
- SaveContext* save_context_;
- v8::TryCatch* catcher_;
-
- // Stack.
- Address c_entry_fp_; // the frame pointer of the top c entry frame
- Address handler_; // try-blocks are chained through the stack
-
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- Simulator* simulator_;
-#elif V8_TARGET_ARCH_MIPS
- assembler::mips::Simulator* simulator_;
-#endif
-#endif // USE_SIMULATOR
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Address js_entry_sp_; // the stack pointer of the bottom js entry frame
- Address external_callback_; // the external callback we're currently in
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
- StateTag current_vm_state_;
-
- // Used for communication with the runtime profiler thread.
- // Possible values are specified in RuntimeProfilerState.
- Atomic32 runtime_profiler_state_;
-#endif
-
- // Generated code scratch locations.
- int32_t formal_count_;
-
- // Call back function to report unsafe JS accesses.
- v8::FailedAccessCheckCallback failed_access_check_callback_;
-
- private:
- Address try_catch_handler_address_;
-};
-
-#define TOP_ADDRESS_LIST(C) \
- C(handler_address) \
- C(c_entry_fp_address) \
- C(context_address) \
- C(pending_exception_address) \
- C(external_caught_exception_address)
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define TOP_ADDRESS_LIST_PROF(C) \
- C(js_entry_sp_address)
-#else
-#define TOP_ADDRESS_LIST_PROF(C)
-#endif
-
-
-class Top {
- public:
- enum AddressId {
-#define C(name) k_##name,
- TOP_ADDRESS_LIST(C)
- TOP_ADDRESS_LIST_PROF(C)
-#undef C
- k_top_address_count
- };
-
- static Address get_address_from_id(AddressId id);
-
- // Access to top context (where the current function object was created).
- static Context* context() { return thread_local_.context_; }
- static void set_context(Context* context) {
- thread_local_.context_ = context;
- }
- static Context** context_address() { return &thread_local_.context_; }
-
- static SaveContext* save_context() {return thread_local_.save_context_; }
- static void set_save_context(SaveContext* save) {
- thread_local_.save_context_ = save;
- }
-
- // Access to current thread id.
- static int thread_id() { return thread_local_.thread_id_; }
- static void set_thread_id(int id) { thread_local_.thread_id_ = id; }
-
- // Interface to pending exception.
- static MaybeObject* pending_exception() {
- ASSERT(has_pending_exception());
- return thread_local_.pending_exception_;
- }
- static bool external_caught_exception() {
- return thread_local_.external_caught_exception_;
- }
- static void set_pending_exception(MaybeObject* exception) {
- thread_local_.pending_exception_ = exception;
- }
- static void clear_pending_exception() {
- thread_local_.pending_exception_ = Heap::the_hole_value();
- }
-
- static MaybeObject** pending_exception_address() {
- return &thread_local_.pending_exception_;
- }
- static bool has_pending_exception() {
- return !thread_local_.pending_exception_->IsTheHole();
- }
- static void clear_pending_message() {
- thread_local_.has_pending_message_ = false;
- thread_local_.pending_message_ = NULL;
- thread_local_.pending_message_obj_ = Heap::the_hole_value();
- thread_local_.pending_message_script_ = NULL;
- }
- static v8::TryCatch* try_catch_handler() {
- return thread_local_.TryCatchHandler();
- }
- static Address try_catch_handler_address() {
- return thread_local_.try_catch_handler_address();
- }
- // This method is called by the api after operations that may throw
- // exceptions. If an exception was thrown and not handled by an external
- // handler the exception is scheduled to be rethrown when we return to running
- // JavaScript code. If an exception is scheduled true is returned.
- static bool OptionalRescheduleException(bool is_bottom_call);
-
-
- static bool* external_caught_exception_address() {
- return &thread_local_.external_caught_exception_;
- }
-
- static MaybeObject** scheduled_exception_address() {
- return &thread_local_.scheduled_exception_;
- }
-
- static MaybeObject* scheduled_exception() {
- ASSERT(has_scheduled_exception());
- return thread_local_.scheduled_exception_;
- }
- static bool has_scheduled_exception() {
- return !thread_local_.scheduled_exception_->IsTheHole();
- }
- static void clear_scheduled_exception() {
- thread_local_.scheduled_exception_ = Heap::the_hole_value();
- }
-
- static bool IsExternallyCaught();
-
- static void SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- // Tells whether the current context has experienced an out of memory
- // exception.
- static bool is_out_of_memory();
-
- static bool is_catchable_by_javascript(MaybeObject* exception) {
- return (exception != Failure::OutOfMemoryException()) &&
- (exception != Heap::termination_exception());
- }
-
- // JS execution stack (see frames.h).
- static Address c_entry_fp(ThreadLocalTop* thread) {
- return thread->c_entry_fp_;
- }
- static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
-
- static inline Address* c_entry_fp_address() {
- return &thread_local_.c_entry_fp_;
- }
- static inline Address* handler_address() { return &thread_local_.handler_; }
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // Bottom JS entry (see StackTracer::Trace in log.cc).
- static Address js_entry_sp(ThreadLocalTop* thread) {
- return thread->js_entry_sp_;
- }
- static inline Address* js_entry_sp_address() {
- return &thread_local_.js_entry_sp_;
- }
-
- static Address external_callback() {
- return thread_local_.external_callback_;
- }
- static void set_external_callback(Address callback) {
- thread_local_.external_callback_ = callback;
- }
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
- static StateTag current_vm_state() {
- return thread_local_.current_vm_state_;
- }
-
- static void SetCurrentVMState(StateTag state) {
- if (RuntimeProfiler::IsEnabled()) {
- if (state == JS) {
- // JS or non-JS -> JS transition.
- RuntimeProfilerState old_state = SwapRuntimeProfilerState(PROF_IN_JS);
- if (old_state == PROF_NOT_IN_JS_WAITING_FOR_JS) {
- // If the runtime profiler was waiting, we reset the eager
- // optimizing data in the compilation cache to get a fresh
- // start after not running JavaScript code for a while and
- // signal the runtime profiler so it can resume.
- CompilationCache::ResetEagerOptimizingData();
- runtime_profiler_semaphore_->Signal();
- }
- } else if (thread_local_.current_vm_state_ == JS) {
- // JS -> non-JS transition. Update the runtime profiler state.
- ASSERT(IsInJSState());
- SetRuntimeProfilerState(PROF_NOT_IN_JS);
- }
- }
- thread_local_.current_vm_state_ = state;
- }
-
- // Called in the runtime profiler thread.
- // Returns whether the current VM state is set to JS.
- static bool IsInJSState() {
- ASSERT(RuntimeProfiler::IsEnabled());
- return static_cast<RuntimeProfilerState>(
- NoBarrier_Load(&thread_local_.runtime_profiler_state_)) == PROF_IN_JS;
- }
-
- // Called in the runtime profiler thread.
-  // Waits for the VM state to transition from non-JS to JS. Returns
- // true when notified of the transition, false when the current
- // state is not the expected non-JS state.
- static bool WaitForJSState() {
- ASSERT(RuntimeProfiler::IsEnabled());
- // Try to switch to waiting state.
- RuntimeProfilerState old_state = CompareAndSwapRuntimeProfilerState(
- PROF_NOT_IN_JS, PROF_NOT_IN_JS_WAITING_FOR_JS);
- if (old_state == PROF_NOT_IN_JS) {
- runtime_profiler_semaphore_->Wait();
- return true;
- }
- return false;
- }
-
- // When shutting down we join the profiler thread. Doing so while
- // it's waiting on a semaphore will cause a deadlock, so we have to
- // wake it up first.
- static void WakeUpRuntimeProfilerThreadBeforeShutdown() {
- runtime_profiler_semaphore_->Signal();
- }
-#endif
-
- // Generated code scratch locations.
- static void* formal_count_address() { return &thread_local_.formal_count_; }
-
- static void PrintCurrentStackTrace(FILE* out);
- static void PrintStackTrace(FILE* out, char* thread_data);
- static void PrintStack(StringStream* accumulator);
- static void PrintStack();
- static Handle<String> StackTraceString();
- static Handle<JSArray> CaptureCurrentStackTrace(
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- // Returns if the top context may access the given global object. If
- // the result is false, the pending exception is guaranteed to be
- // set.
- static bool MayNamedAccess(JSObject* receiver,
- Object* key,
- v8::AccessType type);
- static bool MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type);
-
- static void SetFailedAccessCheckCallback(
- v8::FailedAccessCheckCallback callback);
- static void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
-
- // Exception throwing support. The caller should use the result
- // of Throw() as its return value.
- static Failure* Throw(Object* exception, MessageLocation* location = NULL);
- // Re-throw an exception. This involves no error reporting since
- // error reporting was handled when the exception was thrown
- // originally.
- static Failure* ReThrow(MaybeObject* exception,
- MessageLocation* location = NULL);
- static void ScheduleThrow(Object* exception);
- static void ReportPendingMessages();
- static Failure* ThrowIllegalOperation();
-
- // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
- static Failure* PromoteScheduledException();
- static void DoThrow(MaybeObject* exception,
- MessageLocation* location,
- const char* message);
- // Checks if exception should be reported and finds out if it's
- // caught externally.
- static bool ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript);
-
- // Attempts to compute the current source location, storing the
- // result in the target out parameter.
- static void ComputeLocation(MessageLocation* target);
-
- // Override command line flag.
- static void TraceException(bool flag);
-
- // Out of resource exception helpers.
- static Failure* StackOverflow();
- static Failure* TerminateExecution();
-
- // Administration
- static void Initialize();
- static void TearDown();
- static void Iterate(ObjectVisitor* v);
- static void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
- static char* Iterate(ObjectVisitor* v, char* t);
- static void IterateThread(ThreadVisitor* v);
- static void IterateThread(ThreadVisitor* v, char* t);
-
- // Returns the global object of the current context. It could be
- // a builtin object, or a js global object.
- static Handle<GlobalObject> global() {
- return Handle<GlobalObject>(context()->global());
- }
-
- // Returns the global proxy object of the current context.
- static Object* global_proxy() {
- return context()->global_proxy();
- }
-
- // Returns the current global context.
- static Handle<Context> global_context();
-
- // Returns the global context of the calling JavaScript code. That
- // is, the global context of the top-most JavaScript frame.
- static Handle<Context> GetCallingGlobalContext();
-
- static Handle<JSBuiltinsObject> builtins() {
- return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
- }
-
- static void RegisterTryCatchHandler(v8::TryCatch* that);
- static void UnregisterTryCatchHandler(v8::TryCatch* that);
-
-#define TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- static Handle<type> name() { \
- return Handle<type>(context()->global_context()->name()); \
- }
- GLOBAL_CONTEXT_FIELDS(TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR)
-#undef TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR
-
- static inline ThreadLocalTop* GetCurrentThread() { return &thread_local_; }
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
- static char* ArchiveThread(char* to);
- static char* RestoreThread(char* from);
- static void FreeThreadResources() { thread_local_.Free(); }
-
- static const char* kStackOverflowMessage;
-
- private:
-#ifdef ENABLE_VMSTATE_TRACKING
- // Set of states used when communicating with the runtime profiler.
- //
- // The set of possible transitions is divided between the VM and the
- // profiler threads.
- //
- // The VM thread can perform these transitions:
- // o IN_JS -> NOT_IN_JS
- // o NOT_IN_JS -> IN_JS
- // o NOT_IN_JS_WAITING_FOR_JS -> IN_JS notifying the profiler thread
- // using the semaphore.
- // All the above transitions are caused by VM state changes.
- //
- // The profiler thread can only perform a single transition
- // NOT_IN_JS -> NOT_IN_JS_WAITING_FOR_JS before it starts waiting on
- // the semaphore.
- enum RuntimeProfilerState {
- PROF_NOT_IN_JS,
- PROF_NOT_IN_JS_WAITING_FOR_JS,
- PROF_IN_JS
- };
-
- static void SetRuntimeProfilerState(RuntimeProfilerState state) {
- NoBarrier_Store(&thread_local_.runtime_profiler_state_, state);
- }
-
- static RuntimeProfilerState SwapRuntimeProfilerState(
- RuntimeProfilerState state) {
- return static_cast<RuntimeProfilerState>(
- NoBarrier_AtomicExchange(&thread_local_.runtime_profiler_state_,
- state));
- }
-
- static RuntimeProfilerState CompareAndSwapRuntimeProfilerState(
- RuntimeProfilerState old_state,
- RuntimeProfilerState state) {
- return static_cast<RuntimeProfilerState>(
- NoBarrier_CompareAndSwap(&thread_local_.runtime_profiler_state_,
- old_state,
- state));
- }
-
- static Semaphore* runtime_profiler_semaphore_;
-#endif // ENABLE_VMSTATE_TRACKING
-
- // The context that initiated this JS execution.
- static ThreadLocalTop thread_local_;
- static void InitializeThreadLocal();
- static void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
- static void MarkCompactPrologue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
- static void MarkCompactEpilogue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
-
- // Debug.
- // Mutex for serializing access to break control structures.
- static Mutex* break_access_;
-
- friend class SaveContext;
- friend class AssertNoContextChange;
- friend class ExecutionAccess;
- friend class ThreadLocalTop;
-
- static void FillCache();
-};
-
-
-// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
-// class as a workaround for a bug in the generated code found with these
-// versions of GCC. See V8 issue 122 for details.
-class SaveContext BASE_EMBEDDED {
- public:
- SaveContext()
- : context_(Top::context()),
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_(Top::context()),
-#endif
- prev_(Top::save_context()) {
- Top::set_save_context(this);
-
- // If there is no JS frame under the current C frame, use the value 0.
- JavaScriptFrameIterator it;
- js_sp_ = it.done() ? 0 : it.frame()->sp();
- }
-
- ~SaveContext() {
- Top::set_context(*context_);
- Top::set_save_context(prev_);
- }
-
- Handle<Context> context() { return context_; }
- SaveContext* prev() { return prev_; }
-
- // Returns true if this save context is below a given JavaScript frame.
- bool below(JavaScriptFrame* frame) {
- return (js_sp_ == 0) || (frame->sp() < js_sp_);
- }
-
- private:
- Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
- SaveContext* prev_;
- Address js_sp_; // The top JS frame's sp when saving context.
-};
-
-
-class AssertNoContextChange BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- AssertNoContextChange() :
- context_(Top::context()) {
- }
-
- ~AssertNoContextChange() {
- ASSERT(Top::context() == *context_);
- }
-
- private:
- HandleScope scope_;
- Handle<Context> context_;
-#else
- public:
- AssertNoContextChange() { }
-#endif
-};
-
-
-class ExecutionAccess BASE_EMBEDDED {
- public:
- ExecutionAccess() { Lock(); }
- ~ExecutionAccess() { Unlock(); }
-
- static void Lock() { Top::break_access_->Lock(); }
- static void Unlock() { Top::break_access_->Unlock(); }
-
- static bool TryLock() {
- return Top::break_access_->TryLock();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_TOP_H_
int entry = dictionary_->FindEntry(pos);
return entry != NumberDictionary::kNotFound
? Handle<Object>(dictionary_->ValueAt(entry))
- : Factory::undefined_value();
+ : Isolate::Current()->factory()->undefined_value();
}
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- return *GetInfo(expr->position()) == Builtins::builtin(id);
+ return *GetInfo(expr->position()) ==
+ Isolate::Current()->builtins()->builtin(id);
}
ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
Handle<String> name,
Code::Flags flags) {
+ Isolate* isolate = Isolate::Current();
Handle<Object> object = GetInfo(position);
if (object->IsUndefined() || object->IsSmi()) return NULL;
- if (*object == Builtins::builtin(Builtins::StoreIC_GlobalProxy)) {
+ if (*object == isolate->builtins()->builtin(Builtins::StoreIC_GlobalProxy)) {
// TODO(fschneider): We could collect the maps and signal that
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
} else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
ZoneMapList* types = new ZoneMapList(4);
ASSERT(object->IsCode());
- StubCache::CollectMatchingMaps(types, *name, flags);
+ isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
return types->length() > 0 ? types : NULL;
} else {
return NULL;
void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
const int kInitialCapacity = 16;
List<int> code_positions(kInitialCapacity);
CollectPositions(*code, &code_positions, &source_positions);
ASSERT(dictionary_.is_null()); // Only initialize once.
- dictionary_ = Factory::NewNumberDictionary(code_positions.length());
+ dictionary_ = isolate->factory()->NewNumberDictionary(
+ code_positions.length());
int length = code_positions.length();
ASSERT(source_positions.length() == length);
for (int i = 0; i < length; i++) {
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
RelocInfo info(code->instruction_start() + code_positions[i],
RelocInfo::CODE_TARGET, 0);
Handle<Code> target(Code::GetCodeFromTargetAddress(info.target_address()));
if (kind == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC ||
kind == Code::KEYED_EXTERNAL_ARRAY_STORE_IC) {
value = target;
- } else if (kind != Code::CALL_IC ||
- target->check_type() == RECEIVER_MAP_CHECK) {
- Handle<Map> map = Handle<Map>(target->FindFirstMap());
- if (*map == NULL) {
+ } else if (target->kind() != Code::CALL_IC ||
+ target->check_type() == RECEIVER_MAP_CHECK) {
+ Map* map = target->FindFirstMap();
+ if (map == NULL) {
value = target;
} else {
- value = map;
+ value = Handle<Map>(map);
}
} else {
ASSERT(target->kind() == Code::CALL_IC);
if (!value.is_null()) {
Handle<NumberDictionary> new_dict =
- Factory::DictionaryAtNumberPut(dictionary_, position, value);
+ isolate->factory()->DictionaryAtNumberPut(
+ dictionary_, position, value);
dictionary_ = loop_scope.CloseAndEscape(new_dict);
}
}
}
-uchar UnicodeData::kMaxCodePoint = 65533;
+const uchar UnicodeData::kMaxCodePoint = 65533;
int UnicodeData::GetByteCount() {
return kUppercaseTable0Size * sizeof(int32_t) // NOLINT
private:
friend class Test;
static int GetByteCount();
- static uchar kMaxCodePoint;
+ static const uchar kMaxCodePoint;
};
// --- U t f 8 ---
namespace v8 {
namespace internal {
+Counters::Counters() {
#define HT(name, caption) \
- HistogramTimer Counters::name = { #caption, NULL, false, 0, 0 }; \
-
- HISTOGRAM_TIMER_LIST(HT)
-#undef SR
+ HistogramTimer name = { #caption, NULL, false, 0, 0 }; \
+ name##_ = name;
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
#define SC(name, caption) \
- StatsCounter Counters::name = { "c:" #caption, NULL, false };
+ StatsCounter name = { "c:" #caption, NULL, false };\
+ name##_ = name;
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
#undef SC
-StatsCounter Counters::state_counters[] = {
+ StatsCounter state_counters[] = {
#define COUNTER_NAME(name) \
- { "c:V8.State" #name, NULL, false },
- STATE_TAG_LIST(COUNTER_NAME)
+ { "c:V8.State" #name, NULL, false },
+ STATE_TAG_LIST(COUNTER_NAME)
#undef COUNTER_NAME
-};
+ };
+
+ for (int i = 0; i < kSlidingStateWindowCounterCount; ++i) {
+ state_counters_[i] = state_counters[i];
+ }
+}
} } // namespace v8::internal
// This file contains all the v8 counters that are in use.
-class Counters : AllStatic {
+class Counters {
public:
#define HT(name, caption) \
- static HistogramTimer name;
+ HistogramTimer* name() { return &name##_; }
HISTOGRAM_TIMER_LIST(HT)
#undef HT
#define SC(name, caption) \
- static StatsCounter name;
+ StatsCounter* name() { return &name##_; }
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
HISTOGRAM_TIMER_LIST(RATE_ID)
#undef RATE_ID
#define COUNTER_ID(name, caption) k_##name,
- STATS_COUNTER_LIST_1(COUNTER_ID)
- STATS_COUNTER_LIST_2(COUNTER_ID)
+ STATS_COUNTER_LIST_1(COUNTER_ID)
+ STATS_COUNTER_LIST_2(COUNTER_ID)
#undef COUNTER_ID
#define COUNTER_ID(name) k_##name,
- STATE_TAG_LIST(COUNTER_ID)
+ STATE_TAG_LIST(COUNTER_ID)
#undef COUNTER_ID
stats_counter_count
};
+ StatsCounter* state_counters(StateTag state) {
+ return &state_counters_[state];
+ }
+
+ private:
+#define HT(name, caption) \
+ HistogramTimer name##_;
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define SC(name, caption) \
+ StatsCounter name##_;
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+ enum {
+#define COUNTER_ID(name) __##name,
+ STATE_TAG_LIST(COUNTER_ID)
+#undef COUNTER_ID
+ kSlidingStateWindowCounterCount
+ };
+
// Sliding state window counters.
- static StatsCounter state_counters[];
+ StatsCounter state_counters_[kSlidingStateWindowCounterCount];
+ friend class Isolate;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
+#define COUNTERS Isolate::Current()->counters()
+
} } // namespace v8::internal
#endif // V8_V8_COUNTERS_H_
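
Call sites migrate from the old static members to the new per-isolate accessors; the COUNTERS shorthand keeps that change mechanical. An illustrative before/after for a representative counter from the STATS_COUNTER lists:

    // Before: Counters::objs_since_last_full.Increment();
    COUNTERS->objs_since_last_full()->Increment();
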
#include "v8.h"
+#include "isolate.h"
#include "bootstrapper.h"
#include "debug.h"
#include "deoptimizer.h"
#include "log.h"
#include "runtime-profiler.h"
#include "serialize.h"
-#include "simulator.h"
-#include "stub-cache.h"
namespace v8 {
namespace internal {
bool V8::Initialize(Deserializer* des) {
- bool create_heap_objects = des == NULL;
- if (has_been_disposed_ || has_fatal_error_) return false;
- if (IsRunning()) return true;
+  // The current thread may not yet have entered an isolate to run.
+  // Note that Isolate::Current() may be non-null because for various
+  // initialization purposes an initializing thread may be assigned an
+  // isolate without actually having entered it.
+ if (i::Isolate::CurrentPerIsolateThreadData() == NULL) {
+ i::Isolate::EnterDefaultIsolate();
+ }
+
+ ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL);
+ ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id() ==
+ i::Thread::GetThreadLocalInt(i::Isolate::thread_id_key()));
+ ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
+ i::Isolate::Current());
+
+ if (IsDead()) return false;
+
+ Isolate* isolate = Isolate::Current();
+ if (isolate->IsInitialized()) return true;
#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
use_crankshaft_ = false;
// Peephole optimization might interfere with deoptimization.
FLAG_peephole_optimization = !use_crankshaft_;
+
is_running_ = true;
has_been_setup_ = true;
has_fatal_error_ = false;
has_been_disposed_ = false;
-#ifdef DEBUG
- // The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure;
-#endif
-
- // Enable logging before setting up the heap
- Logger::Setup();
-
- CpuProfiler::Setup();
- HeapProfiler::Setup();
-
- // Setup the platform OS support.
- OS::Setup();
- // Initialize other runtime facilities
-#if defined(USE_SIMULATOR)
-#if defined(V8_TARGET_ARCH_ARM)
- Simulator::Initialize();
-#elif defined(V8_TARGET_ARCH_MIPS)
- ::assembler::mips::Simulator::Initialize();
-#endif
-#endif
-
- { // NOLINT
- // Ensure that the thread has a valid stack guard. The v8::Locker object
- // will ensure this too, but we don't have to use lockers if we are only
- // using one thread.
- ExecutionAccess lock;
- StackGuard::InitThread(lock);
- }
-
- // Setup the object heap
- ASSERT(!Heap::HasBeenSetup());
- if (!Heap::Setup(create_heap_objects)) {
- SetFatalError();
- return false;
- }
-
- Bootstrapper::Initialize(create_heap_objects);
- Builtins::Setup(create_heap_objects);
- Top::Initialize();
-
- if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StartPreemption(100);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::Setup(create_heap_objects);
-#endif
- StubCache::Initialize(create_heap_objects);
-
- // If we are deserializing, read the state into the now-empty heap.
- if (des != NULL) {
- des->Deserialize();
- StubCache::Clear();
- }
-
- // Deserializing may put strange things in the root array's copy of the
- // stack guard.
- Heap::SetStackLimits();
-
- // Setup the CPU support. Must be done after heap setup and after
- // any deserialization because we have to have the initial heap
- // objects in place for creating the code object used for probing.
- CPU::Setup();
-
- Deoptimizer::Setup();
- LAllocator::Setup();
- RuntimeProfiler::Setup();
-
- // If we are deserializing, log non-function code objects and compiled
- // functions found in the snapshot.
- if (des != NULL && FLAG_log_code) {
- HandleScope scope;
- LOG(LogCodeObjects());
- LOG(LogCompiledFunctions());
- }
-
- return true;
+ return isolate->Init(des);
}
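
With setup delegated to Isolate::Init(), the old single-isolate embedding flow keeps working: the default isolate is entered on demand. A minimal sketch against the public API of this era:

    v8::V8::Initialize();  // enters the default isolate if none is entered
    v8::HandleScope scope;
    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope context_scope(context);
    // ... compile and run scripts ...
    context.Dispose();
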
void V8::TearDown() {
- if (!has_been_setup_ || has_been_disposed_) return;
-
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->IsDefaultIsolate());
- // We must stop the logger before we tear down other components.
- Logger::EnsureTickerStopped();
-
- Deoptimizer::TearDown();
-
- if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StopPreemption();
- }
-
- Builtins::TearDown();
- Bootstrapper::TearDown();
-
- Top::TearDown();
-
- HeapProfiler::TearDown();
- CpuProfiler::TearDown();
- RuntimeProfiler::TearDown();
-
- Logger::TearDown();
- Heap::TearDown();
+ if (!has_been_setup_ || has_been_disposed_) return;
+ isolate->TearDown();
is_running_ = false;
has_been_disposed_ = true;
// Used by JavaScript APIs
-uint32_t V8::Random() {
+uint32_t V8::Random(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ // TODO(isolates): move lo and hi to isolate
static random_state state = {0, 0};
return random_base(&state);
}
// Used internally by the JIT and memory allocator for security
// purposes. So we keep a different state to prevent information
// leaks that could be used in an exploit.
-uint32_t V8::RandomPrivate() {
+uint32_t V8::RandomPrivate(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ // TODO(isolates): move lo and hi to isolate
static random_state state = {0, 0};
return random_base(&state);
}
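
The {lo, hi} pair in random_state (the TODO above) feeds a multiply-with-carry generator in the style of George Marsaglia; a sketch of the mixing step (seeding from the OS omitted):

    static uint32_t random_base(random_state* state) {
      // Mix the lower 16 bits of each word, carrying into the upper 16 bits.
      state->hi = 36969 * (state->hi & 0xFFFF) + (state->hi >> 16);
      state->lo = 18273 * (state->lo & 0xFFFF) + (state->lo >> 16);
      return (state->hi << 16) + (state->lo & 0xFFFF);
    }
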
if (!FLAG_use_idle_notification) return true;
// Tell the heap that it may want to adjust.
- return Heap::IdleNotification();
+ return HEAP->IdleNotification();
}
Object* V8::FillHeapNumberWithRandom(Object* heap_number) {
- uint64_t random_bits = Random();
+ uint64_t random_bits = Random(Isolate::Current());
// Make a double* from address (heap_number + sizeof(double)).
double_int_union* r = reinterpret_cast<double_int_union*>(
reinterpret_cast<char*>(heap_number) +
static bool UseCrankshaft() { return use_crankshaft_; }
static void DisableCrankshaft() { use_crankshaft_ = false; }
// To be dead you have to have lived
+ // TODO(isolates): move IsDead to Isolate.
static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
static void SetFatalError();
bool take_snapshot = false);
// Random number generation support. Not cryptographically safe.
- static uint32_t Random();
+ static uint32_t Random(Isolate* isolate);
// We use random numbers internally in memory allocation and in the
// compilers for security. In order to prevent information leaks we
// use a separate random state for internal random number
// generation.
- static uint32_t RandomPrivate();
+ static uint32_t RandomPrivate(Isolate* isolate);
static Object* FillHeapNumberWithRandom(Object* heap_number);
// Idle notification directly from the API.
#define TRACK_MEMORY(name) \
void* operator new(size_t size) { \
void* result = ::operator new(size); \
- Logger::NewEvent(name, result, size); \
+ Logger::NewEventStatic(name, result, size); \
return result; \
} \
void operator delete(void* object) { \
- Logger::DeleteEvent(name, object); \
+ Logger::DeleteEventStatic(name, object); \
::operator delete(object); \
}
#else
namespace v8 {
-static internal::Thread::LocalStorageKey thread_state_key =
- internal::Thread::CreateThreadLocalKey();
-static internal::Thread::LocalStorageKey thread_id_key =
- internal::Thread::CreateThreadLocalKey();
-
// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
// Constructor for the Locker object. Once the Locker is constructed the
// current thread will be guaranteed to have the big V8 lock.
Locker::Locker() : has_lock_(false), top_level_(true) {
+  // TODO(isolates): When Locker has an Isolate parameter and it is
+  // provided, grab that one instead of using the current one.
+  // We pull the default isolate for a Locker constructed without a
+  // parameter. A thread should not enter an isolate before acquiring a
+  // lock, in cases which mandate using Lockers. So getting a lock is the
+  // first thing threads do in a scenario where multiple threads share an
+  // isolate. Hence, we need to access the 'locking isolate' before we can
+  // actually enter the default isolate.
+ internal::Isolate* isolate = internal::Isolate::GetDefaultIsolateForLocking();
+ ASSERT(isolate != NULL);
+
// Record that the Locker has been used at least once.
active_ = true;
// Get the big lock if necessary.
- if (!internal::ThreadManager::IsLockedByCurrentThread()) {
- internal::ThreadManager::Lock();
+ if (!isolate->thread_manager()->IsLockedByCurrentThread()) {
+ isolate->thread_manager()->Lock();
has_lock_ = true;
+
+ if (isolate->IsDefaultIsolate()) {
+ // This only enters if not yet entered.
+ internal::Isolate::EnterDefaultIsolate();
+ }
+
+ ASSERT(internal::Thread::HasThreadLocal(
+ internal::Isolate::thread_id_key()));
+
// Make sure that V8 is initialized. Archiving of threads interferes
// with deserialization by adding additional root pointers, so we must
// initialize here, before anyone can call ~Locker() or Unlocker().
- if (!internal::V8::IsRunning()) {
+ if (!isolate->IsInitialized()) {
V8::Initialize();
}
// This may be a locker within an unlocker in which case we have to
// get the saved state for this thread and restore it.
- if (internal::ThreadManager::RestoreThread()) {
+ if (isolate->thread_manager()->RestoreThread()) {
top_level_ = false;
} else {
- internal::ExecutionAccess access;
- internal::StackGuard::ClearThread(access);
- internal::StackGuard::InitThread(access);
+ internal::ExecutionAccess access(isolate);
+ isolate->stack_guard()->ClearThread(access);
+ isolate->stack_guard()->InitThread(access);
}
}
- ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
-
- // Make sure this thread is assigned a thread id.
- internal::ThreadManager::AssignId();
+ ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
}
bool Locker::IsLocked() {
- return internal::ThreadManager::IsLockedByCurrentThread();
+ return internal::Isolate::Current()->thread_manager()->
+ IsLockedByCurrentThread();
}
Locker::~Locker() {
- ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+ // TODO(isolate): this should use a field storing the isolate it
+ // locked instead.
+ internal::Isolate* isolate = internal::Isolate::Current();
+ ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
if (has_lock_) {
if (top_level_) {
- internal::ThreadManager::FreeThreadResources();
+ isolate->thread_manager()->FreeThreadResources();
} else {
- internal::ThreadManager::ArchiveThread();
+ isolate->thread_manager()->ArchiveThread();
}
- internal::ThreadManager::Unlock();
+ isolate->thread_manager()->Unlock();
}
}
Unlocker::Unlocker() {
- ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
- internal::ThreadManager::ArchiveThread();
- internal::ThreadManager::Unlock();
+ internal::Isolate* isolate = internal::Isolate::Current();
+ ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
+ isolate->thread_manager()->ArchiveThread();
+ isolate->thread_manager()->Unlock();
}
Unlocker::~Unlocker() {
- ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
- internal::ThreadManager::Lock();
- internal::ThreadManager::RestoreThread();
+ // TODO(isolates): check it's the isolate we unlocked.
+ internal::Isolate* isolate = internal::Isolate::Current();
+ ASSERT(!isolate->thread_manager()->IsLockedByCurrentThread());
+ isolate->thread_manager()->Lock();
+ isolate->thread_manager()->RestoreThread();
}
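
The public contract is unchanged: Locker still guards every entry into V8, and Unlocker temporarily yields the lock while archiving the thread's V8 state as above. The typical embedder pattern, for illustration:

    {
      v8::Locker locker;        // takes the lock, enters the default isolate
      // ... use V8 ...
      {
        v8::Unlocker unlocker;  // archives thread state and releases the lock
        // ... long-running work that must not touch V8 ...
      }                         // lock reacquired, thread state restored
    }
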
// had prepared back in the free list, since we didn't need it after all.
if (lazily_archived_thread_.IsSelf()) {
lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
- ASSERT(Thread::GetThreadLocal(thread_state_key) ==
+ ASSERT(Isolate::CurrentPerIsolateThreadData()->thread_state() ==
lazily_archived_thread_state_);
lazily_archived_thread_state_->set_id(kInvalidId);
lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
lazily_archived_thread_state_ = NULL;
- Thread::SetThreadLocal(thread_state_key, NULL);
+ Isolate::CurrentPerIsolateThreadData()->set_thread_state(NULL);
return true;
}
// Make sure that the preemption thread cannot modify the thread state while
// it is being archived or restored.
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
// If there is another thread that was lazily archived then we have to really
// archive it now.
if (lazily_archived_thread_.IsValid()) {
EagerlyArchiveThread();
}
- ThreadState* state =
- reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
- if (state == NULL) {
+ Isolate::PerIsolateThreadData* per_thread =
+ Isolate::CurrentPerIsolateThreadData();
+ if (per_thread == NULL || per_thread->thread_state() == NULL) {
// This is a new thread.
- StackGuard::InitThread(access);
+ isolate_->stack_guard()->InitThread(access);
return false;
}
+ ThreadState* state = per_thread->thread_state();
char* from = state->data();
- from = HandleScopeImplementer::RestoreThread(from);
- from = Top::RestoreThread(from);
+ from = isolate_->handle_scope_implementer()->RestoreThread(from);
+ from = isolate_->RestoreThread(from);
from = Relocatable::RestoreState(from);
#ifdef ENABLE_DEBUGGER_SUPPORT
- from = Debug::RestoreDebug(from);
+ from = isolate_->debug()->RestoreDebug(from);
#endif
- from = StackGuard::RestoreStackGuard(from);
- from = RegExpStack::RestoreStack(from);
- from = Bootstrapper::RestoreState(from);
- Thread::SetThreadLocal(thread_state_key, NULL);
+ from = isolate_->stack_guard()->RestoreStackGuard(from);
+ from = isolate_->regexp_stack()->RestoreStack(from);
+ from = isolate_->bootstrapper()->RestoreState(from);
+ per_thread->set_thread_state(NULL);
if (state->terminate_on_restore()) {
- StackGuard::TerminateExecution();
+ isolate_->stack_guard()->TerminateExecution();
state->set_terminate_on_restore(false);
}
state->set_id(kInvalidId);
static int ArchiveSpacePerThread() {
return HandleScopeImplementer::ArchiveSpacePerThread() +
- Top::ArchiveSpacePerThread() +
+ Isolate::ArchiveSpacePerThread() +
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::ArchiveSpacePerThread() +
#endif
}
-ThreadState* ThreadState::free_anchor_ = new ThreadState();
-ThreadState* ThreadState::in_use_anchor_ = new ThreadState();
-
-
-ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
- terminate_on_restore_(false),
- next_(this), previous_(this) {
+ThreadState::ThreadState(ThreadManager* thread_manager)
+ : id_(ThreadManager::kInvalidId),
+ terminate_on_restore_(false),
+ next_(this),
+ previous_(this),
+ thread_manager_(thread_manager) {
}
void ThreadState::LinkInto(List list) {
ThreadState* flying_anchor =
- list == FREE_LIST ? free_anchor_ : in_use_anchor_;
+ list == FREE_LIST ? thread_manager_->free_anchor_
+ : thread_manager_->in_use_anchor_;
next_ = flying_anchor->next_;
previous_ = flying_anchor;
flying_anchor->next_ = this;
}
-ThreadState* ThreadState::GetFree() {
+ThreadState* ThreadManager::GetFreeThreadState() {
ThreadState* gotten = free_anchor_->next_;
if (gotten == free_anchor_) {
- ThreadState* new_thread_state = new ThreadState();
+ ThreadState* new_thread_state = new ThreadState(this);
new_thread_state->AllocateSpace();
return new_thread_state;
}
// Gets the first in the list of archived threads.
-ThreadState* ThreadState::FirstInUse() {
+ThreadState* ThreadManager::FirstThreadStateInUse() {
return in_use_anchor_->Next();
}
ThreadState* ThreadState::Next() {
- if (next_ == in_use_anchor_) return NULL;
+ if (next_ == thread_manager_->in_use_anchor_) return NULL;
return next_;
}
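
Both lists are circular, doubly-linked, and headed by a sentinel ("flying anchor") node, so LinkInto()/Unlink() never need NULL checks. A generic sketch of the same idiom:

    struct Node {
      Node* next;
      Node* prev;
      Node() : next(this), prev(this) {}  // a lone node anchors itself
      void LinkAfter(Node* anchor) {
        next = anchor->next;
        prev = anchor;
        anchor->next->prev = this;
        anchor->next = this;
      }
      void Unlink() {
        prev->next = next;
        next->prev = prev;
        next = prev = this;
      }
      bool Empty() const { return next == this; }  // true for a bare anchor
    };
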
// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0).
-int ThreadManager::last_id_ = 0;
-Mutex* ThreadManager::mutex_ = OS::CreateMutex();
-ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
-ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
-ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;
+ThreadManager::ThreadManager()
+ : mutex_(OS::CreateMutex()),
+ mutex_owner_(ThreadHandle::INVALID),
+ lazily_archived_thread_(ThreadHandle::INVALID),
+ lazily_archived_thread_state_(NULL),
+ free_anchor_(NULL),
+ in_use_anchor_(NULL) {
+ free_anchor_ = new ThreadState(this);
+ in_use_anchor_ = new ThreadState(this);
+}
+
+
+ThreadManager::~ThreadManager() {
+ // TODO(isolates): Destroy mutexes.
+}
void ThreadManager::ArchiveThread() {
ASSERT(!lazily_archived_thread_.IsValid());
ASSERT(!IsArchived());
- ThreadState* state = ThreadState::GetFree();
+ ThreadState* state = GetFreeThreadState();
state->Unlink();
- Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
+ Isolate::CurrentPerIsolateThreadData()->set_thread_state(state);
lazily_archived_thread_.Initialize(ThreadHandle::SELF);
lazily_archived_thread_state_ = state;
ASSERT(state->id() == kInvalidId);
char* to = state->data();
// Ensure that data containing GC roots are archived first, and handle them
// in ThreadManager::Iterate(ObjectVisitor*).
- to = HandleScopeImplementer::ArchiveThread(to);
- to = Top::ArchiveThread(to);
+ to = isolate_->handle_scope_implementer()->ArchiveThread(to);
+ to = isolate_->ArchiveThread(to);
to = Relocatable::ArchiveState(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
- to = Debug::ArchiveDebug(to);
+ to = isolate_->debug()->ArchiveDebug(to);
#endif
- to = StackGuard::ArchiveStackGuard(to);
- to = RegExpStack::ArchiveStack(to);
- to = Bootstrapper::ArchiveState(to);
+ to = isolate_->stack_guard()->ArchiveStackGuard(to);
+ to = isolate_->regexp_stack()->ArchiveStack(to);
+ to = isolate_->bootstrapper()->ArchiveState(to);
lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
lazily_archived_thread_state_ = NULL;
}
void ThreadManager::FreeThreadResources() {
- HandleScopeImplementer::FreeThreadResources();
- Top::FreeThreadResources();
+ isolate_->handle_scope_implementer()->FreeThreadResources();
+ isolate_->FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::FreeThreadResources();
+ isolate_->debug()->FreeThreadResources();
#endif
- StackGuard::FreeThreadResources();
- RegExpStack::FreeThreadResources();
- Bootstrapper::FreeThreadResources();
+ isolate_->stack_guard()->FreeThreadResources();
+ isolate_->regexp_stack()->FreeThreadResources();
+ isolate_->bootstrapper()->FreeThreadResources();
}
bool ThreadManager::IsArchived() {
- return Thread::HasThreadLocal(thread_state_key);
+ Isolate::PerIsolateThreadData* data = Isolate::CurrentPerIsolateThreadData();
+ return data != NULL && data->thread_state() != NULL;
}
void ThreadManager::Iterate(ObjectVisitor* v) {
// Expecting no threads during serialization/deserialization
- for (ThreadState* state = ThreadState::FirstInUse();
+ for (ThreadState* state = FirstThreadStateInUse();
state != NULL;
state = state->Next()) {
char* data = state->data();
data = HandleScopeImplementer::Iterate(v, data);
- data = Top::Iterate(v, data);
+ data = isolate_->Iterate(v, data);
data = Relocatable::Iterate(v, data);
}
}
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
- for (ThreadState* state = ThreadState::FirstInUse();
+ for (ThreadState* state = FirstThreadStateInUse();
state != NULL;
state = state->Next()) {
char* data = state->data();
data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::IterateThread(v, data);
+ isolate_->IterateThread(v, data);
}
}
int ThreadManager::CurrentId() {
- return Thread::GetThreadLocalInt(thread_id_key);
-}
-
-
-void ThreadManager::AssignId() {
- if (!HasId()) {
- ASSERT(Locker::IsLocked());
- int thread_id = ++last_id_;
- ASSERT(thread_id > 0); // see the comment near last_id_ definition.
- Thread::SetThreadLocalInt(thread_id_key, thread_id);
- Top::set_thread_id(thread_id);
- }
-}
-
-
-bool ThreadManager::HasId() {
- return Thread::HasThreadLocal(thread_id_key);
+ return Thread::GetThreadLocalInt(Isolate::thread_id_key());
}
void ThreadManager::TerminateExecution(int thread_id) {
- for (ThreadState* state = ThreadState::FirstInUse();
+ for (ThreadState* state = FirstThreadStateInUse();
state != NULL;
state = state->Next()) {
if (thread_id == state->id()) {
}
-// This is the ContextSwitcher singleton. There is at most a single thread
-// running which delivers preemption events to V8 threads.
-ContextSwitcher* ContextSwitcher::singleton_ = NULL;
-
-
-ContextSwitcher::ContextSwitcher(int every_n_ms)
- : Thread("v8:CtxtSwitcher"),
+ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
+ : Thread(isolate, "v8:CtxtSwitcher"),
keep_going_(true),
sleep_ms_(every_n_ms) {
}
// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
+ Isolate* isolate = Isolate::Current();
ASSERT(Locker::IsLocked());
- if (singleton_ == NULL) {
+ if (isolate->context_switcher() == NULL) {
// If the ContextSwitcher thread is not running at the moment start it now.
- singleton_ = new ContextSwitcher(every_n_ms);
- singleton_->Start();
+ isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
+ isolate->context_switcher()->Start();
} else {
// ContextSwitcher thread is already running, so we just change the
// scheduling interval.
- singleton_->sleep_ms_ = every_n_ms;
+ isolate->context_switcher()->sleep_ms_ = every_n_ms;
}
}
// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst themselves from this point on.
void ContextSwitcher::StopPreemption() {
+ Isolate* isolate = Isolate::Current();
ASSERT(Locker::IsLocked());
- if (singleton_ != NULL) {
+ if (isolate->context_switcher() != NULL) {
// The ContextSwitcher thread is running. We need to stop it and release
// its resources.
- singleton_->keep_going_ = false;
- singleton_->Join(); // Wait for the ContextSwitcher thread to exit.
+ isolate->context_switcher()->keep_going_ = false;
+ // Wait for the ContextSwitcher thread to exit.
+ isolate->context_switcher()->Join();
// Thread has exited, now we can delete it.
- delete(singleton_);
- singleton_ = NULL;
+ delete(isolate->context_switcher());
+ isolate->set_context_switcher(NULL);
}
}
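
For context, a minimal embedder-side sketch of how this machinery is driven;
the public v8::Locker::StartPreemption/StopPreemption entry points are assumed
to forward to the ContextSwitcher calls above:

  v8::Locker locker;                // preemption requires holding the lock
  v8::Locker::StartPreemption(50);  // preempt running JavaScript every 50 ms
  // ... run JavaScript from several cooperating threads ...
  v8::Locker::StopPreemption();     // stops, joins and deletes the switcher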
void ContextSwitcher::Run() {
while (keep_going_) {
OS::Sleep(sleep_ms_);
- StackGuard::Preempt();
+ isolate()->stack_guard()->Preempt();
}
}
class ThreadState {
public:
- // Iterate over in-use states.
- static ThreadState* FirstInUse();
// Returns NULL after the last one.
ThreadState* Next();
void LinkInto(List list);
void Unlink();
- static ThreadState* GetFree();
-
// Id of thread.
void set_id(int id) { id_ = id; }
int id() { return id_; }
// Get data area for archiving a thread.
char* data() { return data_; }
private:
- ThreadState();
+ explicit ThreadState(ThreadManager* thread_manager);
void AllocateSpace();
ThreadState* next_;
ThreadState* previous_;
- // In the following two lists there is always at least one object on the list.
- // The first object is a flying anchor that is only there to simplify linking
- // and unlinking.
- // Head of linked list of free states.
- static ThreadState* free_anchor_;
- // Head of linked list of states in use.
- static ThreadState* in_use_anchor_;
+ ThreadManager* thread_manager_;
+
+ friend class ThreadManager;
};
};
-class ThreadManager : public AllStatic {
+class ThreadManager {
public:
- static void Lock();
- static void Unlock();
+ void Lock();
+ void Unlock();
- static void ArchiveThread();
- static bool RestoreThread();
- static void FreeThreadResources();
- static bool IsArchived();
+ void ArchiveThread();
+ bool RestoreThread();
+ void FreeThreadResources();
+ bool IsArchived();
- static void Iterate(ObjectVisitor* v);
- static void IterateArchivedThreads(ThreadVisitor* v);
- static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
+ void Iterate(ObjectVisitor* v);
+ void IterateArchivedThreads(ThreadVisitor* v);
+ bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
- static int CurrentId();
- static void AssignId();
- static bool HasId();
+ int CurrentId();
- static void TerminateExecution(int thread_id);
+ void TerminateExecution(int thread_id);
+
+ // Iterate over in-use states.
+ ThreadState* FirstThreadStateInUse();
+ ThreadState* GetFreeThreadState();
static const int kInvalidId = -1;
private:
- static void EagerlyArchiveThread();
+ ThreadManager();
+ ~ThreadManager();
- static int last_id_; // V8 threads are identified through an integer.
- static Mutex* mutex_;
- static ThreadHandle mutex_owner_;
- static ThreadHandle lazily_archived_thread_;
- static ThreadState* lazily_archived_thread_state_;
+ void EagerlyArchiveThread();
+
+ Mutex* mutex_;
+ ThreadHandle mutex_owner_;
+ ThreadHandle lazily_archived_thread_;
+ ThreadState* lazily_archived_thread_state_;
+
+ // In the following two lists there is always at least one object on the list.
+ // The first object is a flying anchor that is only there to simplify linking
+ // and unlinking.
+ // Head of linked list of free states.
+ ThreadState* free_anchor_;
+ // Head of linked list of states in use.
+ ThreadState* in_use_anchor_;
+
+ Isolate* isolate_;
+
+ friend class Isolate;
+ friend class ThreadState;
};
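
A rough sketch of the intended call pattern now that ThreadManager is a
per-isolate instance; the thread_manager() accessor on Isolate is assumed from
elsewhere in this patch, and this flow normally lives inside the
Locker/Unlocker implementation rather than embedder code:

  ThreadManager* tm = isolate->thread_manager();  // assumed accessor
  if (!tm->RestoreThread()) {
    // First entry of this thread into the isolate; nothing archived yet.
  }
  // ... execute JavaScript ...
  tm->ArchiveThread();  // archived lazily, completed on the next switch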
static void PreemptionReceived();
private:
- explicit ContextSwitcher(int every_n_ms);
+ explicit ContextSwitcher(Isolate* isolate, int every_n_ms);
void Run();
bool keep_going_;
int sleep_ms_;
-
- static ContextSwitcher* singleton_;
};
} } // namespace v8::internal
// True if the variable is named eval and not known to be shadowed.
bool is_possibly_eval() const {
- return IsVariable(Factory::eval_symbol()) &&
+ return IsVariable(FACTORY->eval_symbol()) &&
(mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
}
#define MINOR_VERSION 2
#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
-#define CANDIDATE_VERSION true
+// Use 1 for candidates and 0 otherwise.
+// (Boolean macro values are not supported by all preprocessors.)
+#define IS_CANDIDATE_VERSION 0
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8
// version number. This define is mainly used by the SCons build script.
#define SONAME ""
+#if IS_CANDIDATE_VERSION
+#define CANDIDATE_STRING " (candidate)"
+#else
+#define CANDIDATE_STRING ""
+#endif
+
+#define SX(x) #x
+#define S(x) SX(x)
+
+#if PATCH_LEVEL > 0
+#define VERSION_STRING \
+ S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) "." \
+ S(PATCH_LEVEL) CANDIDATE_STRING
+#else
+#define VERSION_STRING \
+ S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) \
+ CANDIDATE_STRING
+#endif
+
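The two-level SX/S pair is the standard preprocessor trick for stringifying a
macro's value rather than its name. For instance, given the definitions above:

  // SX(BUILD_NUMBER) expands to "BUILD_NUMBER" (no argument expansion);
  // S(BUILD_NUMBER)  expands to "4" (argument expanded, then stringified).
  // With PATCH_LEVEL 0 the second VERSION_STRING branch is taken, yielding
  // "<major>.2.4" followed by CANDIDATE_STRING.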
namespace v8 {
namespace internal {
int Version::minor_ = MINOR_VERSION;
int Version::build_ = BUILD_NUMBER;
int Version::patch_ = PATCH_LEVEL;
-bool Version::candidate_ = CANDIDATE_VERSION;
+bool Version::candidate_ = (IS_CANDIDATE_VERSION != 0);
const char* Version::soname_ = SONAME;
-
+const char* Version::version_string_ = VERSION_STRING;
// Calculate the V8 version string.
void Version::GetString(Vector<char> str) {
// Calculate the SONAME for the V8 shared library.
static void GetSONAME(Vector<char> str);
+ static const char* GetVersion() { return version_string_; }
+
private:
+ // NOTE: can't make these really const because of test-version.cc.
static int major_;
static int minor_;
static int build_;
static int patch_;
static bool candidate_;
static const char* soname_;
+ static const char* version_string_;
// In test-version.cc.
friend void SetVersion(int major, int minor, int build, int patch,
VirtualFrame::RegisterAllocationScope::RegisterAllocationScope(
CodeGenerator* cgen)
: cgen_(cgen),
- old_is_spilled_(SpilledScope::is_spilled_) {
- SpilledScope::is_spilled_ = false;
+ old_is_spilled_(
+ Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
+ Isolate::Current()->set_is_virtual_frame_in_spilled_scope(false);
if (old_is_spilled_) {
VirtualFrame* frame = cgen->frame();
if (frame != NULL) {
VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
- SpilledScope::is_spilled_ = old_is_spilled_;
+ Isolate::Current()->set_is_virtual_frame_in_spilled_scope(old_is_spilled_);
if (old_is_spilled_) {
VirtualFrame* frame = cgen_->frame();
if (frame != NULL) {
CodeGenerator* VirtualFrame::cgen() const {
- return CodeGeneratorScope::Current();
+ return CodeGeneratorScope::Current(Isolate::Current());
}
}
}
-VMState::VMState(StateTag tag) : previous_tag_(Top::current_vm_state()) {
+
+VMState::VMState(Isolate* isolate, StateTag tag)
+ : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Entering", StateToString(tag)));
- LOG(UncheckedStringEvent("From", StateToString(previous_tag_)));
+ LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag)));
+ LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
}
#endif
- Top::SetCurrentVMState(tag);
+ isolate_->SetCurrentVMState(tag);
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap) {
if (tag == EXTERNAL) {
// We are leaving V8.
ASSERT(previous_tag_ != EXTERNAL);
- Heap::Protect();
+ isolate_->heap()->Protect();
} else if (previous_tag_ == EXTERNAL) {
// We are entering V8.
- Heap::Unprotect();
+ isolate_->heap()->Unprotect();
}
}
#endif
VMState::~VMState() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Leaving",
- StateToString(Top::current_vm_state())));
- LOG(UncheckedStringEvent("To", StateToString(previous_tag_)));
+ LOG(isolate_,
+ UncheckedStringEvent("Leaving",
+ StateToString(isolate_->current_vm_state())));
+ LOG(isolate_,
+ UncheckedStringEvent("To", StateToString(previous_tag_)));
}
#endif // ENABLE_LOGGING_AND_PROFILING
#ifdef ENABLE_HEAP_PROTECTION
- StateTag tag = Top::current_vm_state();
+ StateTag tag = isolate_->current_vm_state();
#endif
- Top::SetCurrentVMState(previous_tag_);
+ isolate_->SetCurrentVMState(previous_tag_);
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap) {
if (tag == EXTERNAL) {
// We are reentering V8.
ASSERT(previous_tag_ != EXTERNAL);
- Heap::Unprotect();
+ isolate_->heap()->Unprotect();
} else if (previous_tag_ == EXTERNAL) {
// We are leaving V8.
- Heap::Protect();
+ isolate_->heap()->Protect();
}
}
#endif // ENABLE_HEAP_PROTECTION
#ifdef ENABLE_LOGGING_AND_PROFILING
-ExternalCallbackScope::ExternalCallbackScope(Address callback)
- : previous_callback_(Top::external_callback()) {
- Top::set_external_callback(callback);
+ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
+ : isolate_(isolate), previous_callback_(isolate->external_callback()) {
+ isolate_->set_external_callback(callback);
}
ExternalCallbackScope::~ExternalCallbackScope() {
- Top::set_external_callback(previous_callback_);
+ isolate_->set_external_callback(previous_callback_);
}
#endif // ENABLE_LOGGING_AND_PROFILING
#ifndef V8_VM_STATE_H_
#define V8_VM_STATE_H_
-#include "top.h"
+#include "isolate.h"
namespace v8 {
namespace internal {
class VMState BASE_EMBEDDED {
#ifdef ENABLE_VMSTATE_TRACKING
public:
- inline explicit VMState(StateTag tag);
+ inline VMState(Isolate* isolate, StateTag tag);
inline ~VMState();
private:
+ Isolate* isolate_;
StateTag previous_tag_;
#else
public:
- explicit VMState(StateTag state) {}
+ VMState(Isolate* isolate, StateTag state) {}
#endif
};
class ExternalCallbackScope BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
- inline explicit ExternalCallbackScope(Address callback);
+ inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
private:
+ Isolate* isolate_;
Address previous_callback_;
#else
public:
- explicit ExternalCallbackScope(Address callback) {}
+ ExternalCallbackScope(Isolate* isolate, Address callback) {}
#endif
};
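
Both classes are now RAII scopes keyed to an explicit isolate. A minimal usage
sketch (the call site is illustrative, not part of this patch):

  void CallOutToEmbedder(Isolate* isolate, Address callback) {
    VMState state(isolate, EXTERNAL);                // tag: leaving V8
    ExternalCallbackScope scope(isolate, callback);  // for profiler samples
    // ... invoke the embedder's callback ...
  }  // destructors restore the previous tag and callback in reverse order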
visitor->VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
template<typename StaticVisitor>
-void RelocInfo::Visit() {
+void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(target_object_address());
+ StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
StaticVisitor::VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
+ } else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
-// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
-uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
-uint64_t CpuFeatures::enabled_ = 0;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+CpuFeatures::CpuFeatures()
+ : supported_(kDefaultCpuFeatures),
+ enabled_(0),
+ found_by_runtime_probing_(0) {
+}
+
void CpuFeatures::Probe(bool portable) {
- ASSERT(Heap::HasBeenSetup());
+ ASSERT(HEAP->HasBeenSetup());
supported_ = kDefaultCpuFeatures;
if (portable && Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
CodeDesc desc;
assm.GetCode(&desc);
- MaybeObject* maybe_code = Heap::CreateCode(desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>());
+ Isolate* isolate = Isolate::Current();
+ MaybeObject* maybe_code =
+ isolate->heap()->CreateCode(desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>());
Object* code;
if (!maybe_code->ToObject(&code)) return;
if (!code->IsCode()) return;
- PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+ PROFILE(isolate,
+ CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
static void InitCoverageLog();
#endif
-byte* Assembler::spare_buffer_ = NULL;
-
Assembler::Assembler(void* buffer, int buffer_size)
: code_targets_(100),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
+ Isolate* isolate = Isolate::Current();
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate->assembler_spare_buffer() != NULL) {
+ buffer = isolate->assembler_spare_buffer();
+ isolate->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
Assembler::~Assembler() {
+ Isolate* isolate = Isolate::Current();
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
- Counters::reloc_info_size.Increment(desc->reloc_size);
+ COUNTERS->reloc_info_size()->Increment(desc->reloc_size);
}
void Assembler::GrowBuffer() {
+ Isolate* isolate = Isolate::Current();
ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > Heap::MaxOldGenerationSize())) {
+ (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x0F);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(value->IsHeapObject());
- ASSERT(!Heap::InNewSpace(*value));
+ ASSERT(!HEAP->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
// } else {
// // Generate standard x87 or SSE2 floating point code.
// }
-class CpuFeatures : public AllStatic {
+class CpuFeatures {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- static void Probe(bool portable);
+ void Probe(bool portable);
+
// Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
+ bool IsSupported(CpuFeature f) const {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
+ bool IsEnabled(CpuFeature f) const {
return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(CpuFeature f) {
+ explicit Scope(CpuFeature f)
+ : cpu_features_(Isolate::Current()->cpu_features()),
+ isolate_(Isolate::Current()) {
uint64_t mask = (V8_UINT64_C(1) << f);
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= mask;
+ ASSERT(cpu_features_->IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (cpu_features_->found_by_runtime_probing_ & mask) == 0);
+ old_enabled_ = cpu_features_->enabled_;
+ cpu_features_->enabled_ |= mask;
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::Current(), isolate_);
+ cpu_features_->enabled_ = old_enabled_;
}
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
uint64_t old_enabled_;
+ CpuFeatures* cpu_features_;
+ Isolate* isolate_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
private:
+ CpuFeatures();
+
// Safe defaults include SSE2 and CMOV for X64. It is always available, if
// anyone checks, but they shouldn't need to check.
+ // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+ // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
- static uint64_t supported_;
- static uint64_t enabled_;
- static uint64_t found_by_runtime_probing_;
+
+ uint64_t supported_;
+ uint64_t enabled_;
+ uint64_t found_by_runtime_probing_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
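
Feature queries now go through the isolate's CpuFeatures instance, while Scope
keeps its old spelling at call sites. A sketch of the pattern used in the
assembler hunks above:

  CpuFeatures* features = Isolate::Current()->cpu_features();
  if (features->IsSupported(SSE3)) {
    CpuFeatures::Scope enable_sse3(SSE3);  // asserts support, sets enabled_
    // ... emit SSE3 instructions (e.g. fisttp_d), which ASSERT IsEnabled ...
  }  // Scope destructor restores the previous enabled_ mask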
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
- // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
- static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
// Set expected number of arguments to zero (not changing rax).
__ movq(rbx, Immediate(0));
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ __ Jump(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET);
}
// Call the function.
if (is_api_function) {
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::HandleApiCallConstruct));
+ Handle<Code> code = Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::HandleApiCallConstruct));
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, CALL_FUNCTION);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ push(rcx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
+ __ IncrementCounter(COUNTERS->constructed_objects(), 1);
__ ret(0);
}
// Invoke the code.
if (is_construct) {
// Expects rdi to hold function pointer.
- __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
- RelocInfo::CODE_TARGET);
+ __ Call(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructCall)), RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
__ testq(rax, rax);
__ j(not_zero, &done);
__ pop(rbx);
- __ Push(Factory::undefined_value());
+ __ Push(FACTORY->undefined_value());
__ push(rbx);
__ incq(rax);
__ bind(&done);
__ j(not_zero, &function);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ __ Jump(Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET);
__ bind(&function);
}
__ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ j(not_equal,
- Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET);
ParameterCount expected(0);
__ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
__ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
// scratch2: start of next object
__ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
// fixed array.
if (initial_capacity == 0) {
__ Move(FieldOperand(result, JSArray::kElementsOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
return;
}
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, Factory::the_hole_value());
+ __ Move(scratch3, FACTORY->the_hole_value());
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
// array_size: size of array (smi)
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, Factory::empty_fixed_array());
+ __ Move(elements_array, FACTORY->empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
// elements_array_end: start of next object
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
Label not_empty_2, fill_array;
__ SmiTest(array_size);
__ j(not_zero, ¬_empty_2);
__ bind(&fill_array);
if (fill_with_hole) {
Label loop, entry;
- __ Move(scratch, Factory::the_hole_value());
+ __ Move(scratch, FACTORY->the_hole_value());
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
r8,
kPreallocatedArrayElements,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(kPointerSize);
r9,
true,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(2 * kPointerSize);
r9,
false,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(COUNTERS->array_function_native(), 1);
// rax: argc
// rbx: JSArray
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
// -----------------------------------
Label invoke, dont_adapt_arguments;
- __ IncrementCounter(&Counters::arguments_adaptors, 1);
+ __ IncrementCounter(COUNTERS->arguments_adaptors(), 1);
Label enough, too_few;
__ cmpq(rax, rbx);
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls_regs(), 1);
}
// Call the stub.
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls_regs(), 1);
}
// Call the stub.
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ __ IncrementCounter(COUNTERS->generic_binary_stub_calls_regs(), 1);
}
// Call the stub.
const char* TypeRecordingBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] == double value.
// rbx = bits of double value.
// rcx = TranscendentalCache::hash(double value).
__ movq(rax, ExternalReference::transcendental_cache_array_address());
// rax points to cache array.
- __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ __ movq(rax, Operand(rax, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ testq(rax, rax);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::Element test_elem[2];
+ TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
// rcx: encoding of subject string (1 if ascii 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
+ __ IncrementCounter(COUNTERS->regexp_entry_native(), 1);
- static const int kRegExpExecuteArguments = 7;
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
__ EnterApiExitFrame(argument_slots_on_stack);
- // Argument 7: Indicate that this is a direct call from JavaScript.
+ // Argument 8: Pass current isolate address. A 64-bit immediate cannot be
+ // stored to memory in one instruction, so stage it through kScratchRegister.
+ __ movq(kScratchRegister, ExternalReference::isolate_address());
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ kScratchRegister);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
Immediate(1));
// Argument 6: Start (high end) of backtracking stack memory area.
__ addq(r9, Operand(kScratchRegister, 0));
// Argument 6 passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
#endif
// Argument 5: static offsets vector buffer.
__ movq(r8, ExternalReference::address_of_static_offsets_vector());
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
#endif
// First four arguments are passed in registers on both Linux and Windows.
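
The renumbered stack slots all follow one rule; a worked instance of the
arithmetic already visible above (with kRegExpExecuteArguments == 8):

  // C argument N (1-based) passed on the stack lives at
  //   Operand(rsp, (argument_slots_on_stack - (8 - N) - 1) * kPointerSize)
  // N == 8 (isolate)        -> slots - 1
  // N == 7 (direct_call)    -> slots - 2
  // N == 6 (stack base)     -> slots - 3   (Win64 only)
  // N == 5 (offsets vector) -> slots - 4   (Win64 only)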
// stack overflow (on the backtrack stack) was detected in RegExp code, but
// the exception has not been created yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address);
__ movq(rbx, pending_exception_address);
__ movq(rax, Operand(rbx, 0));
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+ __ CheckMap(object, FACTORY->heap_number_map(), not_found, true);
STATIC_ASSERT(8 == kDoubleSize);
__ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
times_1,
FixedArray::kHeaderSize));
__ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
index,
times_1,
FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
+ __ IncrementCounter(COUNTERS->number_to_string_native(), 1);
}
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used.
// We cannot set rax to EQUAL until just before return because
NearLabel heap_number;
// If it's not a heap number, then return equal for (in)equality operator.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(equal, &heap_number);
if (cc_ != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
// Check if the non-smi operand is a heap number.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal. ebx (the lower half of rbx) is not zero.
__ Set(rax, argc_);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code> adaptor(Isolate::Current()->builtins()->builtin(
+ Builtins::ArgumentsAdaptorTrampoline));
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
__ lea(rcx, StackSpaceOperand(0));
+ __ movq(rdx, ExternalReference::isolate_address());
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
__ lea(rdx, StackSpaceOperand(0));
+ __ movq(r8, ExternalReference::isolate_address());
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
+ __ movq(rdx, ExternalReference::isolate_address());
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address);
__ movq(kScratchRegister, pending_exception_address);
__ movq(rax, Operand(kScratchRegister, 0));
__ movq(rdx, ExternalReference::the_hole_value_location());
// callee save as well.
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address);
__ load_rax(c_entry_fp);
__ push(rax);
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address);
__ load_rax(js_entry_sp);
__ testq(rax, rax);
__ j(not_zero, ¬_outermost_js);
// Caught exception: Store result (exception) in the pending
// exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address);
__ store_rax(pending_exception);
__ movq(rax, Failure::Exception(), RelocInfo::NONE);
__ jmp(&exit);
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
- __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ movq(kScratchRegister, ExternalReference(Isolate::k_handler_address));
__ pop(Operand(kScratchRegister, 0));
// Pop next_sp.
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
// Restore the top frame descriptor from the stack.
__ bind(&exit);
- __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ movq(kScratchRegister, ExternalReference(Isolate::k_c_entry_fp_address));
__ pop(Operand(kScratchRegister, 0));
// Restore callee-saved registers (X64 conventions).
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
__ SmiTest(rcx);
__ j(not_zero, &second_not_zero_length);
// Second string is empty, result is first string which is already in rax.
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ movq(rbx, FieldOperand(rax, String::kLengthOffset));
__ j(not_zero, &both_not_zero_length);
// First string is empty, result is second string which is in rdx.
__ movq(rax, rdx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Both strings are non-empty.
Label make_two_character_string, make_flat_ascii_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&make_two_character_string);
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
__ movq(rax, rcx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
// rdi: length of second argument
StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
__ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Handle creating a flat two byte result.
// rdi: length of second argument
StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
__ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(COUNTERS->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
// rsi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
__ movq(rsi, rdx); // Restore rsi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1);
__ ret(kArgumentsSize);
__ bind(&non_ascii_flat);
__ movq(rsi, rdx); // Restore esi.
__ bind(&return_rax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ IncrementCounter(COUNTERS->sub_string_native(), 1);
__ ret(kArgumentsSize);
// Just jump to runtime to create the sub string.
__ cmpq(rdx, rax);
__ j(not_equal, ¬_same);
__ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ IncrementCounter(COUNTERS->string_compare_native(), 1);
__ ret(2 * kPointerSize);
__ bind(¬_same);
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
// Inline comparison of ascii strings.
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ IncrementCounter(COUNTERS->string_compare_native(), 1);
// Drop arguments from the stack
__ pop(rcx);
__ addq(rsp, Immediate(2 * kPointerSize));
ASSERT_EQ(0, loop_nesting_);
loop_nesting_ = info->is_in_loop() ? 1 : 0;
- JumpTarget::set_compiling_deferred_code(false);
+ Isolate::Current()->set_jump_target_compiling_deferred_code(false);
{
CodeGenState state(this);
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
+ frame_->Push(FACTORY->the_hole_value());
StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
}
if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
+ bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
bool should_trace =
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
ASSERT(!function_return_is_shadowed_);
CodeForReturnPosition(info->function());
frame_->PrepareForReturn();
- Result undefined(Factory::undefined_value());
+ Result undefined(FACTORY->undefined_value());
if (function_return_.is_bound()) {
function_return_.Jump(&undefined);
} else {
// Process any deferred code using the register allocator.
if (!HasStackOverflow()) {
- JumpTarget::set_compiling_deferred_code(true);
+ info->isolate()->set_jump_target_compiling_deferred_code(true);
ProcessDeferred();
- JumpTarget::set_compiling_deferred_code(false);
+ info->isolate()->set_jump_target_compiling_deferred_code(false);
}
// There is no need to delete the register allocator, it is a
if (dest.false_was_fall_through()) {
// The false target was just bound.
JumpTarget loaded;
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
// There may be dangling jumps to the true target.
if (true_target.is_linked()) {
loaded.Jump();
true_target.Bind();
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
loaded.Bind();
}
// There is true, and possibly false, control flow (with true as
// the fall through).
JumpTarget loaded;
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
if (false_target.is_linked()) {
loaded.Jump();
false_target.Bind();
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
loaded.Bind();
}
loaded.Jump(); // Don't lose the current TOS.
if (true_target.is_linked()) {
true_target.Bind();
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
if (false_target.is_linked()) {
loaded.Jump();
}
}
if (false_target.is_linked()) {
false_target.Bind();
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
}
loaded.Bind();
}
// When using lazy arguments allocation, we store the arguments marker value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
- frame_->Push(Factory::arguments_marker());
+ frame_->Push(FACTORY->arguments_marker());
} else {
ArgumentsAccessStub stub(is_strict_mode()
? ArgumentsAccessStub::NEW_STRICT
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
+ Object* answer_object = HEAP->undefined_value();
switch (op) {
case Token::ADD:
// Use intptr_t to detect overflow of 32-bit int.
UNREACHABLE();
break;
}
- if (answer_object == Heap::undefined_value()) {
+ if (answer_object->IsUndefined()) {
return false;
}
frame_->Push(Handle<Object>(answer_object));
if (!left_type_info.IsNumber()) {
// Branch if not a heapnumber.
__ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
deferred->Branch(not_equal);
}
// Load integer value into answer register using truncation.
// not to be a smi.
JumpTarget not_number;
__ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
not_number.Branch(not_equal, left_side);
__ movsd(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
frame()->Dup();
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
__ nop();
__ j(not_equal, &build_args);
__ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
__ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ Handle<Code> apply_code(Isolate::Current()->builtins()->builtin(
+ Builtins::FunctionApply));
__ Cmp(rcx, apply_code);
__ j(not_equal, &build_args);
// If we have a function or a constant, we need to initialize the variable.
Expression* val = NULL;
if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
+ val = new Literal(FACTORY->the_hole_value());
} else {
val = node->fun(); // NULL if we don't have a function
}
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// Make sure that there's nothing left on the stack above the
// handler structure.
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// If we can fall off the end of the try block, unlink from the try
// chain and set the state on the frame to FALLING.
frame_->EmitPush(rsi);
frame_->EmitPush(function_info);
frame_->EmitPush(pretenure
- ? Factory::true_value()
- : Factory::false_value());
+ ? FACTORY->true_value()
+ : FACTORY->false_value());
Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
frame_->Push(&result);
}
Register target,
int registers_to_save = 0)
: size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
+ ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
set_comment("[ DeferredAllocateInNewSpace");
}
void Generate();
frame_->Push(node->constant_elements());
int length = node->values()->length();
Result clone;
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
clone = frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(COUNTERS->cow_arrays_created_stub(), 1);
} else if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
Load(function);
// Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
// Load the arguments.
int arg_count = args->length();
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
frame_->PushParameterAt(-1);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
frame_->PushParameterAt(-1);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
Condition is_smi = masm_->CheckSmi(obj.reg());
destination()->false_target()->Branch(is_smi);
- __ Move(kScratchRegister, Factory::null_value());
+ __ Move(kScratchRegister, FACTORY->null_value());
__ cmpq(obj.reg(), kScratchRegister);
destination()->true_target()->Branch(equal);
__ jmp(&entry);
__ bind(&loop);
__ movq(scratch2_, FieldOperand(map_result_, 0));
- __ Cmp(scratch2_, Factory::value_of_symbol());
+ __ Cmp(scratch2_, FACTORY->value_of_symbol());
__ j(equal, &false_result);
__ addq(map_result_, Immediate(kPointerSize));
__ bind(&entry);
// Functions have class 'Function'.
function.Bind();
- frame_->Push(Factory::function_class_symbol());
+ frame_->Push(FACTORY->function_class_symbol());
leave.Jump();
// Objects with a non-function constructor have class 'Object'.
non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
+ frame_->Push(FACTORY->Object_symbol());
leave.Jump();
// Non-JS objects have class null.
null.Bind();
- frame_->Push(Factory::null_value());
+ frame_->Push(FACTORY->null_value());
// All done.
leave.Bind();
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ Isolate::Current()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
return;
}
__ bind(&done);
deferred->BindExit();
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
ZoneList<Expression*>* args = node->arguments();
Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
+ const Runtime::Function* function = node->function();
if (function == NULL) {
// Push the builtins object found in the current global object.
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
}
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
+ frame_->SetElementAt(0, FACTORY->true_value());
}
} else if (op == Token::TYPEOF) {
expression->AsLiteral()->IsNull())) {
// Omit evaluating the value of the primitive literal.
// It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
} else {
Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
+ frame_->SetElementAt(0, FACTORY->undefined_value());
}
} else {
Result answer = frame_->Pop();
answer.ToRegister();
- if (check->Equals(Heap::number_symbol())) {
+ if (check->Equals(HEAP->number_symbol())) {
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->true_target()->Branch(is_smi);
frame_->Spill(answer.reg());
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::string_symbol())) {
+ } else if (check->Equals(HEAP->string_symbol())) {
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->false_target()->Branch(is_smi);
answer.Unuse();
destination()->Split(below); // Unsigned byte comparison needed.
- } else if (check->Equals(Heap::boolean_symbol())) {
+ } else if (check->Equals(HEAP->boolean_symbol())) {
__ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
destination()->true_target()->Branch(equal);
__ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::undefined_symbol())) {
+ } else if (check->Equals(HEAP->undefined_symbol())) {
__ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
destination()->true_target()->Branch(equal);
answer.Unuse();
destination()->Split(not_zero);
- } else if (check->Equals(Heap::function_symbol())) {
+ } else if (check->Equals(HEAP->function_symbol())) {
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->false_target()->Branch(is_smi);
frame_->Spill(answer.reg());
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::object_symbol())) {
+ } else if (check->Equals(HEAP->object_symbol())) {
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->false_target()->Branch(is_smi);
__ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
__ movq(rax, receiver_);
}
__ Move(rcx, name_);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a test rax instruction to indicate
// that the inobject property case was inlined.
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->named_load_inline_miss(), 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
// it in the IC initialization code and patch the movq instruction.
// This means that we cannot allow test instructions after calls to
// KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instruction to the
// test instruction. We use masm_-> directly here instead of the __
// 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
// be generated normally.
masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_inline_miss(), 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_inline_miss(), 1);
// Move value, receiver, and key to registers rax, rdx, and rcx, as
// the IC stub expects.
// Move value to rax, using xchg if the receiver or key is in rax.
}
// Call the IC stub.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
- masm()->movq(kScratchRegister, Factory::null_value(),
+ masm()->movq(kScratchRegister, FACTORY->null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
int offset = kMaxInt;
masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(&Counters::named_load_inline, 1);
+ __ IncrementCounter(COUNTERS->named_load_inline(), 1);
deferred->BindExit();
}
ASSERT(frame()->height() == original_height - 1);
// the __ macro for the following two instructions because it
// might introduce extra instructions.
__ bind(&patch_site);
- masm()->movq(kScratchRegister, Factory::null_value(),
+ masm()->movq(kScratchRegister, FACTORY->null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
// coverage code can interfere with the patching. Do not use a load
// from the root array to load null_value, since the load must be patched
// with the expected receiver map, which is not in the root array.
- masm_->movq(kScratchRegister, Factory::null_value(),
+ masm_->movq(kScratchRegister, FACTORY->null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
result = elements;
__ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_inline(), 1);
deferred->BindExit();
} else {
__ bind(deferred->patch_site());
// Avoid using __ to ensure the distance from patch_site
// to the map address is always the same.
- masm()->movq(kScratchRegister, Factory::fixed_array_map(),
+ masm()->movq(kScratchRegister, FACTORY->fixed_array_map(),
RelocInfo::EMBEDDED_OBJECT);
__ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
kScratchRegister);
index.scale,
FixedArray::kHeaderSize),
result.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_inline(), 1);
deferred->BindExit();
} else {
bool in_spilled_code_;
friend class VirtualFrame;
+ friend class Isolate;
friend class JumpTarget;
friend class Reference;
friend class Result;
friend class FullCodeGenSyntaxChecker;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
+ friend class InlineRuntimeFunctionsTable;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
namespace internal {
void CPU::Setup() {
- CpuFeatures::Probe(true);
+ Isolate::Current()->cpu_features()->Probe(true);
if (Serializer::enabled()) {
V8::DisableCrankshaft();
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+ rinfo()->PatchCodeWithCall(
+ Isolate::Current()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- Debug::debug_break_slot()->entry(),
+ Isolate::Current()->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- node->set_next(deoptimizing_code_list_);
- deoptimizing_code_list_ = node;
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
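// The list of deoptimizing Code objects moves from a static field into
// the per-isolate DeoptimizerData, so isolates running in parallel keep
// independent lists. A sketch of walking that list under the same
// assumptions (direct field access as above; next() assumed):
//   DeoptimizerData* data = isolate->deoptimizer_data();
//   for (DeoptimizingCodeListNode* node = data->deoptimizing_code_list_;
//        node != NULL; node = node->next()) {
//     // inspect the node's Code object here
//   }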
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
- Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ Code* continuation = Isolate::Current()->builtins()->builtin(
+ Builtins::NotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
// Set the continuation for the topmost frame.
if (is_topmost) {
Code* continuation = (bailout_type_ == EAGER)
- ? Builtins::builtin(Builtins::NotifyDeoptimized)
- : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ ? Isolate::Current()->builtins()->builtin(Builtins::NotifyDeoptimized)
+ : Isolate::Current()->builtins()->builtin(
+ Builtins::NotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
static InstructionTable instruction_table;
+
static InstructionDesc cmov_instructions[16] = {
{"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
const char* NameConverter::NameOfAddress(byte* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
#ifndef V8_X64_FRAMES_X64_H_
#define V8_X64_FRAMES_X64_H_
+#include "memory.h"
+
namespace v8 {
namespace internal {
Label* materialize_false) const {
NearLabel done;
__ bind(materialize_true);
- __ Move(result_register(), Factory::true_value());
+ __ Move(result_register(), isolate()->factory()->true_value());
__ jmp(&done);
__ bind(materialize_false);
- __ Move(result_register(), Factory::false_value());
+ __ Move(result_register(), isolate()->factory()->false_value());
__ bind(&done);
}
Label* materialize_false) const {
NearLabel done;
__ bind(materialize_true);
- __ Push(Factory::true_value());
+ __ Push(isolate()->factory()->true_value());
__ jmp(&done);
__ bind(materialize_false);
- __ Push(Factory::false_value());
+ __ Push(isolate()->factory()->false_value());
__ bind(&done);
}
prop->key()->AsLiteral()->handle()->IsSmi());
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(
- is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(is_strict_mode()
+ ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
} else {
__ push(rsi);
__ Push(info);
- __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ __ Push(pretenure
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value());
__ CallRuntime(Runtime::kNewClosure, 3);
}
context()->Plug(rax);
// load IC call.
__ movq(rax, GlobalObjectOperand());
__ Move(rcx, slot->var()->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ Move(rax, key_literal->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
// object on the stack.
__ Move(rcx, var->name());
__ movq(rax, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(rax);
__ Move(rax, key_literal->handle());
// Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
context()->Plug(rax);
}
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
if (property->emit_store()) {
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(key->id(), NO_REGISTERS);
}
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_elements());
- if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
__ CallStub(&stub);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
__ movq(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ pop(rdx);
}
__ pop(rax); // Restore value.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// rcx, and the global object on the stack.
__ Move(rcx, var->name());
__ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(
- is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(is_strict_mode()
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
} else {
__ pop(rdx);
}
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
// Restore context register.
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
// Record source code position for IC call.
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Push result (function).
__ push(rax);
// also use the full code generator.
FunctionLiteral* lit = fun->AsFunctionLiteral();
if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
+ lit->name()->Equals(isolate()->heap()->empty_string()) &&
loop_depth() == 0) {
lit->set_try_full_codegen(true);
}
__ Set(rax, arg_count);
__ movq(rdi, Operand(rsp, arg_count * kPointerSize));
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> construct_builtin(isolate()->builtins()->builtin(
+ Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
context()->Plug(rax);
}
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, Factory::function_class_symbol());
+ __ Move(rax, isolate()->factory()->function_class_symbol());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ Move(rax, Factory::Object_symbol());
+ __ Move(rax, isolate()->factory()->Object_symbol());
__ jmp(&done);
// Non-JS objects have class null.
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ isolate()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
__ j(equal, &ok);
__ bind(&fail);
- __ Move(rax, Factory::false_value());
+ __ Move(rax, isolate()->factory()->false_value());
__ jmp(&done);
__ bind(&ok);
- __ Move(rax, Factory::true_value());
+ __ Move(rax, isolate()->factory()->true_value());
__ bind(&done);
context()->Plug(rax);
// Call the JS runtime function using a call IC.
__ Move(rcx, expr->name());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
case KEYED_PROPERTY: {
__ pop(rcx);
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
__ movq(rax, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- if (check->Equals(Heap::number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(rax, if_true);
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_FUNCTION_CLASS_TYPE, rdx);
Split(above_equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
mode == RelocInfo::CODE_TARGET_CONTEXT);
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(COUNTERS->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(COUNTERS->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_full(), 1);
default:
break;
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(COUNTERS->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(COUNTERS->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_full(), 1);
default:
break;
}
rax,
NULL,
&slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
// Slow case: Jump to runtime.
// rdx: receiver
// rax: key
- __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
__ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
__ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
- __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_string);
Code::kNoExtraICState,
NORMAL,
argc);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ rax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ no_reg);
__ bind(&miss);
}
// -----------------------------------
if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(&Counters::call_miss, 1);
+ __ IncrementCounter(COUNTERS->call_miss(), 1);
} else {
- __ IncrementCounter(&Counters::keyed_call_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_miss(), 1);
}
// Get the receiver of the function from the stack; 1 ~ return address.
GenerateFastArrayLoad(
masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_smi_fast(), 1);
__ bind(&do_call);
// receiver in rdx is not used after this point.
__ SmiToInteger32(rbx, rcx);
// ebx: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
- __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_slow_load(), 1);
__ EnterInternalFrame();
__ push(rcx); // save the key
__ push(rdx); // pass the receiver
__ j(not_equal, &lookup_monomorphic_cache);
GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_lookup_dict(), 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_lookup_cache(), 1);
GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
// Fall through on miss.
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
- __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+ __ IncrementCounter(COUNTERS->keyed_call_generic_slow(), 1);
GenerateMiss(masm, argc);
__ bind(&index_string);
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
+ rdx);
// Cache miss: Jump to runtime.
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
// -- rsp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::load_miss, 1);
+ __ IncrementCounter(COUNTERS->load_miss(), 1);
__ pop(rbx);
__ push(rax); // receiver
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
*reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
+ (offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
// Patch the offset in the write-barrier code. The offset is the
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
*reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
+ (offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
// -- rsp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::keyed_load_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_miss(), 1);
__ pop(rbx);
__ push(rdx); // receiver
NOT_IN_LOOP,
MONOMORPHIC,
strict_mode);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
- __ IncrementCounter(&Counters::store_normal_hit, 1);
+ __ IncrementCounter(COUNTERS->store_normal_hit(), 1);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::store_normal_miss, 1);
+ __ IncrementCounter(COUNTERS->store_normal_miss(), 1);
GenerateMiss(masm);
}
}
-void LCodeGen::CallRuntime(Runtime::Function* function,
+void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
ASSERT(instr != NULL);
if (length == 0) return;
ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
- Factory::NewDeoptimizationInputData(length, TENURED);
+ factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
- Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
Register map = ToRegister(instr->TempAt(0));
__ movq(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- __ Move(kScratchRegister, Factory::the_hole_value());
+ __ Move(kScratchRegister, factory()->the_hole_value());
__ cmpq(map, kScratchRegister); // Patched to cached map.
__ j(not_equal, &cache_miss);
// Patched to load either true or false.
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(Builtins::LoadIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
+ arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arity, NOT_IN_LOOP);
__ Move(rcx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arity, NOT_IN_LOOP);
__ Move(rcx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
- Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> builtin(isolate()->builtins()->builtin(
+ Builtins::JSConstructCall));
__ Set(rax, instr->arity());
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(isolate()->builtins()->builtin(
info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (Heap::InNewSpace(*object)) {
+ if (heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(object);
+ factory()->NewJSGlobalPropertyCell(object);
__ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(result, Operand(result, 0));
} else {
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(Heap::number_symbol())) {
+ if (type_name->Equals(heap()->number_symbol())) {
__ JumpIfSmi(input, true_label);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::string_symbol())) {
+ } else if (type_name->Equals(heap()->string_symbol())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label);
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
- } else if (type_name->Equals(Heap::boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(Heap::function_symbol())) {
+ } else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
final_branch_condition = above_equal;
- } else if (type_name->Equals(Heap::object_symbol())) {
+ } else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label);
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
- void CallRuntime(Runtime::Function* function,
+ void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+ (instr->CanTruncateToInt32() &&
+ Isolate::Current()->cpu_features()->IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- Runtime::Function* function() const { return hydrogen()->function(); }
+ const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
: Assembler(buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ code_object_(HEAP->undefined_value()) {
}
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
Set(rax, function->nargs);
movq(rbx, ExternalReference(function));
CEntryStub ces(1);
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
+MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
int num_arguments) {
if (f->nargs >= 0 && f->nargs != num_arguments) {
IllegalOperation(num_arguments);
// Since we did not call the stub, there was no allocation failure.
// Return some non-failure object.
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// TODO(1236192): Most runtime routines don't need the number of
// Check if the function scheduled an exception.
movq(rsi, scheduled_exception_address);
- Cmp(Operand(rsi, 0), Factory::the_hole_value());
+ Cmp(Operand(rsi, 0), FACTORY->the_hole_value());
j(not_equal, &promote_scheduled_exception);
LeaveApiExitFrame();
bind(&empty_result);
// It was zero; the result is undefined.
- Move(rax, Factory::undefined_value());
+ Move(rax, FACTORY->undefined_value());
jmp(&prologue);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
movq(prev_limit_reg, rax);
+#ifdef _WIN64
+ movq(rcx, ExternalReference::isolate_address());
+#else
+ movq(rdi, ExternalReference::isolate_address());
+#endif
movq(rax, ExternalReference::delete_handle_scope_extensions());
call(rax);
movq(rax, prev_limit_reg);
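// HandleScope::DeleteExtensions now takes the isolate explicitly, which
// is why the stub loads isolate_address() into the first C-argument
// register before the call: rcx under the Win64 convention, rdi under
// the System V AMD64 ABI. Assumed shape of the C side (sketch):
//   void HandleScope::DeleteExtensions(Isolate* isolate);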
push(Immediate(0)); // NULL frame pointer.
}
// Save the current handler.
- movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ movq(kScratchRegister, ExternalReference(Isolate::k_handler_address));
push(Operand(kScratchRegister, 0));
// Link this handler.
movq(Operand(kScratchRegister, 0), rsp);
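// Top's thread statics (handler chain, current context, c_entry_fp and
// friends) now live in Isolate, so the k_* ids name per-isolate
// addresses. ExternalReference resolves them against the current
// isolate, roughly (sketch):
//   ExternalReference(Isolate::k_handler_address)
//       resolves to isolate->get_address_from_id(Isolate::k_handler_address)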
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
// Unlink this handler.
- movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ movq(kScratchRegister, ExternalReference(Isolate::k_handler_address));
pop(Operand(kScratchRegister, 0));
// Remove the remaining fields.
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
movq(rax, value);
}
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
movq(kScratchRegister, handler_address);
movq(rsp, Operand(kScratchRegister, 0));
// get next in chain
movq(rax, value);
}
// Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
movq(kScratchRegister, handler_address);
movq(rsp, Operand(kScratchRegister, 0));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address);
movq(rax, Immediate(false));
store_rax(external_caught);
// Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address);
movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
store_rax(pending_exception);
}
Condition is_smi = CheckSmi(object);
j(is_smi, &ok);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
Assert(equal, "Operand not a number");
bind(&ok);
}
push(kScratchRegister);
if (emit_debug_code()) {
movq(kScratchRegister,
- Factory::undefined_value(),
+ FACTORY->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
Check(not_equal, "code object not properly patched");
movq(r14, rax); // Backup rax in callee-save register.
}
- movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ movq(kScratchRegister, ExternalReference(Isolate::k_c_entry_fp_address));
movq(Operand(kScratchRegister, 0), rbp);
- movq(kScratchRegister, ExternalReference(Top::k_context_address));
+ movq(kScratchRegister, ExternalReference(Isolate::k_context_address));
movq(Operand(kScratchRegister, 0), rsi);
}
}
// Get the required frame alignment for the OS.
- static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
movq(kScratchRegister, Immediate(-kFrameAlignment));
void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Top::k_context_address);
+ ExternalReference context_address(Isolate::k_context_address);
movq(kScratchRegister, context_address);
movq(rsi, Operand(kScratchRegister, 0));
#ifdef DEBUG
#endif
// Clear the top frame.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+ ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address);
movq(kScratchRegister, c_entry_fp_address);
movq(Operand(kScratchRegister, 0), Immediate(0));
}
// Check the context is a global context.
if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- Factory::global_context_map());
+ FACTORY->global_context_map());
Check(equal, "JSGlobalObject::global_context should be a global context.");
}
}
}
+#ifdef _WIN64
+static const int kRegisterPassedArguments = 4;
+#else
+static const int kRegisterPassedArguments = 6;
+#endif
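// kRegisterPassedArguments is hoisted to file scope so that the slot
// computation below and CallCFunction agree on it: the Win64 convention
// passes the first four integer arguments in rcx, rdx, r8 and r9, while
// the System V AMD64 ABI passes the first six in rdi, rsi, rdx, rcx, r8
// and r9.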
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
- CheckMap(map, Factory::meta_map(), &fail, false);
+ CheckMap(map, FACTORY->meta_map(), &fail, false);
jmp(&ok);
bind(&fail);
Abort("Global functions must have initial map");
// and the caller does not reserve stack slots for them.
ASSERT(num_arguments >= 0);
#ifdef _WIN64
- static const int kMinimumStackSlots = 4;
+ const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
return num_arguments;
#else
- static const int kRegisterPassedArguments = 6;
if (num_arguments < kRegisterPassedArguments) return 0;
return num_arguments - kRegisterPassedArguments;
#endif
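// Worked examples for the computation above (stack slots, Win64 versus
// System V):
//   num_arguments = 2  ->  4 / 0
//   num_arguments = 6  ->  6 / 0
//   num_arguments = 8  ->  8 / 2
// Win64 always reserves at least four home slots for the register
// arguments; System V only needs slots past the sixth argument.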
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
ASSERT(num_arguments >= 0);
+
+ // Reserve a stack slot for the Isolate address, which is always passed
+ // as the last parameter.
+ num_arguments += 1;
+
// Make stack end at alignment and allocate space for arguments and old rsp.
movq(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frame_alignment));
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ // Pass the address of the current isolate as an additional, final
+ // parameter.
+ if (num_arguments < kRegisterPassedArguments) {
+#ifdef _WIN64
+ // First four arguments are passed in registers on Windows.
+ Register arg_to_reg[] = {rcx, rdx, r8, r9};
+#else
+ // First six arguments are passed in registers on other platforms.
+ Register arg_to_reg[] = {rdi, rsi, rdx, rcx, r8, r9};
+#endif
+ Register reg = arg_to_reg[num_arguments];
+ movq(reg, ExternalReference::isolate_address());
+ } else {
+ // Store the Isolate pointer in the stack slot just past the other
+ // parameters.
+ int argument_slots_on_stack =
+ ArgumentStackSlotsForCFunctionCall(num_arguments);
+ movq(kScratchRegister, ExternalReference::isolate_address());
+ movq(Operand(rsp, argument_slots_on_stack * kPointerSize),
+ kScratchRegister);
+ }
+
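+ // Callers size the frame with PrepareCallCFunction, which reserves the
+ // extra isolate slot. Typical usage, as a sketch (the external
+ // reference name is illustrative; argument setup elided):
+ //   __ PrepareCallCFunction(2);
+ //   __ CallCFunction(ExternalReference::some_two_arg_function(), 2);
+ // The C callee then receives the isolate as its third parameter.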
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
call(function);
ASSERT(OS::ActivationFrameAlignment() != 0);
ASSERT(num_arguments >= 0);
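+ // Match the extra isolate slot reserved in PrepareCallCFunction before
+ // restoring rsp.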
+ num_arguments += 1;
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::Function* f,
+ MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
cmpq(scratch, kScratchRegister);
j(cc, branch);
} else {
- ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
intptr_t new_space_start =
- reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+ reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
j(cc, branch);
}
}
if (!definitely_matches) {
Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Builtins::ArgumentsAdaptorTrampoline));
if (!code_constant.is_null()) {
movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
*
* The stack will have the following content, in some order, indexable from the
* frame pointer (see, e.g., kStackHighEnd):
+ * - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = Factory::NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(RegExpCodeCreateEvent(*code, *source));
+ Isolate* isolate = ISOLATE;
+ Handle<Code> code = isolate->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}
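// PROFILE now takes the isolate as its first argument so that logger and
// profiler state are looked up per isolate rather than through statics.
// A simplified sketch of the macro after this change:
//   #define PROFILE(isolate, Call) LOG(isolate, Call)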
int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
- if (StackGuard::IsStackOverflow()) {
- Top::StackOverflow();
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
return EXCEPTION;
}
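// The isolate is recovered from the regexp frame itself, so the check
// works even when entered directly from generated code. frame_entry is
// assumed to have roughly this shape (sketch):
//   template <typename T>
//   static T& frame_entry(Address re_frame, int frame_offset) {
//     return *reinterpret_cast<T*>(re_frame + frame_offset);
//   }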
Handle<String> subject,
int* offsets_vector,
int offsets_vector_length,
- int previous_index);
+ int previous_index,
+ Isolate* isolate);
static Result Execute(Code* code,
String* input,
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
static const int kRegisterOutput = kInputEnd - kPointerSize;
static const int kStackHighEnd = kRegisterOutput - kPointerSize;
static const int kDirectCall = kFrameAlign;
+ static const int kIsolate = kDirectCall + kPointerSize;
#endif
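// Layout note for the new slot: under Win64 every parameter, including
// the four register-passed ones, has a home slot, so kIsolate is simply
// the next slot past kDirectCall; under System V it is the second
// genuine stack argument (the eighth parameter overall).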
#ifdef _WIN64
void Result::ToRegister() {
ASSERT(is_valid());
if (is_constant()) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
+ CodeGenerator* code_generator =
+ CodeGeneratorScope::Current(Isolate::Current());
+ Result fresh = code_generator->allocator()->Allocate();
ASSERT(fresh.is_valid());
- CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
+ code_generator->masm()->Move(fresh.reg(), handle());
// This result becomes a copy of the fresh one.
fresh.set_type_info(type_info());
*this = fresh;
void Result::ToRegister(Register target) {
ASSERT(is_valid());
+ CodeGenerator* code_generator =
+ CodeGeneratorScope::Current(Isolate::Current());
if (!is_register() || !reg().is(target)) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
+ Result fresh = code_generator->allocator()->Allocate(target);
ASSERT(fresh.is_valid());
if (is_register()) {
- CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
+ code_generator->masm()->movq(fresh.reg(), reg());
} else {
ASSERT(is_constant());
- CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
+ code_generator->masm()->Move(fresh.reg(), handle());
}
fresh.set_type_info(type_info());
*this = fresh;
} else if (is_register() && reg().is(target)) {
- ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
- CodeGeneratorScope::Current()->frame()->Spill(target);
- ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
+ ASSERT(code_generator->has_valid_frame());
+ code_generator->frame()->Spill(target);
+ ASSERT(code_generator->allocator()->count(target) == 1);
}
ASSERT(is_register());
ASSERT(reg().is(target));
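// CodeGeneratorScope::Current is now keyed by isolate instead of a
// static, hence the explicit Isolate::Current() at the call sites above.
// One way the accessor could look (a sketch; the member name is
// illustrative):
//   static CodeGenerator* Current(Isolate* isolate) {
//     ASSERT(isolate->current_code_generator() != NULL);
//     return isolate->current_code_generator();
//   }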
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int);
+ const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
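// Every regexp entry point gains the trailing Isolate*. A call through
// the macro now looks like this (sketch; parameter names illustrative):
//   CALL_GENERATED_REGEXP_CODE(entry, input, start_offset, input_start,
//                              input_end, output, stack_base,
//                              direct_call, isolate);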
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
#define __ ACCESS_MASM(masm)
-static void ProbeTable(MacroAssembler* masm,
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
ASSERT_EQ(16, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
- ExternalReference key_offset(SCTableReference::keyReference(table));
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
Label miss;
__ movq(kScratchRegister, key_offset);
Register r0,
Register r1) {
ASSERT(name->IsSymbol());
- __ IncrementCounter(&Counters::negative_lookups, 1);
- __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+ __ IncrementCounter(COUNTERS->negative_lookups(), 1);
+ __ IncrementCounter(COUNTERS->negative_lookups_miss(), 1);
Label done;
__ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
ASSERT_EQ(kSmiTagSize, 1);
__ movq(entity_name, Operand(properties, index, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, Factory::undefined_value());
+ __ Cmp(entity_name, FACTORY->undefined_value());
// __ jmp(miss_label);
if (i != kProbes - 1) {
__ j(equal, &done);
}
__ bind(&done);
- __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+ __ DecrementCounter(COUNTERS->negative_lookups_miss(), 1);
}
Register scratch,
Register extra,
Register extra2) {
+ Isolate* isolate = Isolate::Current();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
USE(extra2); // The register extra2 is not used on the X64 platform.
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch);
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
__ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch);
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
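// With the cache tables now per-isolate instance state, the static
// SCTableReference::keyReference(table) accessor becomes an instance
// method on StubCache. One plausible shape (sketch):
//   SCTableReference key_reference(StubCache::Table table) {
//     return SCTableReference(
//         reinterpret_cast<Address>(&first_entry(table)->key));
//   }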
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Check we're still in the same context.
- __ Move(prototype, Top::global());
+ __ Move(prototype, Isolate::Current()->global());
__ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ JSFunction* function = JSFunction::cast(
+ Isolate::Current()->global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
+ ASSERT(!HEAP->InNewSpace(interceptor));
__ Move(kScratchRegister, Handle<Object>(interceptor));
__ push(kScratchRegister);
__ push(receiver);
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (Heap::InNewSpace(call_data)) {
+ if (HEAP->InNewSpace(call_data)) {
__ Move(rcx, api_call_info_handle);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
name,
holder,
miss);
- return Heap::undefined_value(); // Success.
+ return HEAP->undefined_value(); // Success.
}
}
(depth2 != kInvalidProtoDepth);
}
- __ IncrementCounter(&Counters::call_const_interceptor, 1);
+ __ IncrementCounter(COUNTERS->call_const_interceptor(), 1);
if (can_do_fast_api_call) {
- __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+ __ IncrementCounter(COUNTERS->call_const_interceptor_fast_api(), 1);
ReserveSpaceForFastApiCall(masm, scratch1);
}
FreeSpaceForFastApiCall(masm, scratch1);
}
- return Heap::undefined_value(); // Success.
+ return HEAP->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
+ code = Isolate::Current()->builtins()->builtin(Builtins::LoadIC_Miss);
} else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ code = Isolate::Current()->builtins()->builtin(Builtins::KeyedLoadIC_Miss);
}
Handle<Code> ic(code);
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, Handle<Object>(cell));
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Factory::the_hole_value());
+ FACTORY->the_hole_value());
__ j(not_equal, miss);
return cell;
}
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* lookup_result = Heap::LookupSymbol(name);
+ MaybeObject* lookup_result = HEAP->LookupSymbol(name);
if (lookup_result->IsFailure()) {
set_failure(Failure::cast(lookup_result));
return reg;
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (Heap::InNewSpace(prototype)) {
+ } else if (HEAP->InNewSpace(prototype)) {
// Get the map of the current object.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ Cmp(scratch1, Handle<Map>(current->map()));
__ j(not_equal, miss);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
__ push(receiver); // receiver
__ push(reg); // holder
- if (Heap::InNewSpace(callback_handle->data())) {
+ if (HEAP->InNewSpace(callback_handle->data())) {
__ Move(scratch1, callback_handle);
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
} else {
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
+ if (HEAP->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
- kind_);
+ MaybeObject* maybe_obj = Isolate::Current()->stub_cache()->ComputeCallMiss(
+ arguments().immediate(), kind_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
Label miss;
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
Label miss, return_undefined, call_builtin;
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
const int argc = arguments().immediate();
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
JSFunction* function,
String* name) {
// TODO(872): implement this.
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
// Check if the argument is a heap number and load its value.
__ bind(¬_smi);
- __ CheckMap(rax, Factory::heap_number_map(), &slow, true);
+ __ CheckMap(rax, FACTORY->heap_number_map(), &slow, true);
__ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return Heap::undefined_value();
- if (cell != NULL) return Heap::undefined_value();
+ if (object->IsGlobalObject()) return HEAP->undefined_value();
+ if (cell != NULL) return HEAP->undefined_value();
int depth = optimization.GetPrototypeDepthOfExpectedType(
JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Heap::undefined_value();
+ if (depth == kInvalidProtoDepth) return HEAP->undefined_value();
Label miss, miss_before_stack_reserved;
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss_before_stack_reserved);
- __ IncrementCounter(&Counters::call_const, 1);
- __ IncrementCounter(&Counters::call_const_fast_api, 1);
+ __ IncrementCounter(COUNTERS->call_const(), 1);
+ __ IncrementCounter(COUNTERS->call_const_fast_api(), 1);
// Allocate space for v8::Arguments implicit values. Must be initialized
// before calling any runtime function.
SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(&Counters::call_const, 1);
+ __ IncrementCounter(COUNTERS->call_const(), 1);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1);
+ __ IncrementCounter(COUNTERS->call_global_inline(), 1);
ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
if (V8::UseCrankshaft()) {
}
// Handle call cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->call_global_inline_miss(), 1);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
__ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
// Return the value (register rax).
- __ IncrementCounter(&Counters::named_store_global_inline, 1);
+ __ IncrementCounter(COUNTERS->named_store_global_inline(), 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ IncrementCounter(COUNTERS->named_store_global_inline_miss(), 1);
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_field(), 1);
// Check that the name has not changed.
__ Cmp(rcx, Handle<String>(name));
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ DecrementCounter(COUNTERS->keyed_store_field(), 1);
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
__ j(not_equal, &miss);
// Check that the key is within bounds.
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, Heap::empty_string());
+ return GetCode(NONEXISTENT, HEAP->empty_string());
}
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_stub, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_stub(), 1);
__ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_stub_miss(), 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_field, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_field(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_field, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_field(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_callback, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_callback(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_callback, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_constant_function(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
value, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_constant_function(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_interceptor(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
name,
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_array_length(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_array_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_string_length(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_string_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+ __ DecrementCounter(COUNTERS->keyed_load_function_prototype(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
Label generic_stub_call;
// Use r8 for holding undefined which is used in several places below.
- __ Move(r8, Factory::undefined_value());
+ __ Move(r8, FACTORY->undefined_value());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check to see whether there are any break points in the function code. If
// rbx: initial map
// rdx: JSObject (untagged)
__ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, Factory::empty_fixed_array());
+ __ Move(rbx, FACTORY->empty_fixed_array());
__ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
__ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
- __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ __ IncrementCounter(COUNTERS->constructed_objects(), 1);
+ __ IncrementCounter(COUNTERS->constructed_objects_stub(), 1);
__ ret(0);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code = Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
// Slow case: Jump to runtime.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------
// -- rax : key
// them later. First sync everything above the stack pointer so we can
// use pushes to allocate and initialize the locals.
SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = Factory::undefined_value();
+ Handle<Object> undefined = FACTORY->undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
if (count < kLocalVarBound) {
//------------------------------------------------------------------------------
// Virtual frame stub and IC calling functions.
-Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
// Name and receiver are on the top of the frame. Both are dropped.
// The IC expects name in rcx and receiver in rax.
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
Result name = Pop();
Result receiver = Pop();
PrepareForCall(0, 0);
PrepareForCall(0, 0);
MoveResultsToRegisters(&key, &receiver, rax, rdx);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
return RawCallCodeObject(ic, mode);
}
StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in rcx, value in rax, and receiver in rdx.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
Result value = Pop();
receiver.Unuse();
}
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
// and dropped by the call. The IC expects the name in rcx and the rest
// on the stack, and drops them all.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
Result name = Pop();
// Spill args, receiver, and function. The call will drop args and
// receiver.
// on the stack, and drops them all.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
- StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
Result name = Pop();
// Spill args, receiver, and function. The call will drop args and
// receiver.
// Arguments, receiver, and function are on top of the frame. The
// IC expects arg count in rax, function in rdi, and the arguments
// and receiver on the stack.
- Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::JSConstructCall));
// Duplicate the function before preparing the frame.
PushElementAt(arg_count);
Result function = Pop();
private:
bool previous_state_;
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ CodeGenerator* cgen() {
+ return CodeGeneratorScope::Current(Isolate::Current());
+ }
};
// An illegal index into the virtual frame.
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ CodeGenerator* cgen() {
+ return CodeGeneratorScope::Current(Isolate::Current());
+ }
+
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
- Result CallRuntime(Runtime::Function* f, int arg_count);
+ Result CallRuntime(const Runtime::Function* f, int arg_count);
Result CallRuntime(Runtime::FunctionId id, int arg_count);
#ifdef ENABLE_DEBUGGER_SUPPORT
StrictModeFlag strict_mode);
// Call keyed store IC. Value, key, and receiver are found on top
  // of the frame. All three are dropped.
Result CallKeyedStoreIC(StrictModeFlag strict_mode);
// Call call IC. Function name, arguments, and receiver are found on top
#ifndef V8_ZONE_INL_H_
#define V8_ZONE_INL_H_
+#include "isolate.h"
#include "zone.h"
#include "v8-counters.h"
namespace internal {
+AssertNoZoneAllocation::AssertNoZoneAllocation()
+ : prev_(Isolate::Current()->zone_allow_allocation()) {
+ Isolate::Current()->set_zone_allow_allocation(false);
+}
+
+
+AssertNoZoneAllocation::~AssertNoZoneAllocation() {
+ Isolate::Current()->set_zone_allow_allocation(prev_);
+}
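AssertNoZoneAllocation is now a scoped guard over the per-isolate flag that
Zone::New() asserts on below. A usage sketch under that reading:

// Hypothetical use: any zone allocation inside the guarded region trips the
// ASSERT in Zone::New(); the previous flag value is restored on scope exit.
{
  AssertNoZoneAllocation no_zone;
  // ... code that must not allocate from the zone ...
}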
+
+
inline void* Zone::New(int size) {
- ASSERT(AssertNoZoneAllocation::allow_allocation());
+ ASSERT(Isolate::Current()->zone_allow_allocation());
ASSERT(ZoneScope::nesting() > 0);
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
template <typename T>
T* Zone::NewArray(int length) {
- return static_cast<T*>(Zone::New(length * sizeof(T)));
+ return static_cast<T*>(New(length * sizeof(T)));
}
void Zone::adjust_segment_bytes_allocated(int delta) {
segment_bytes_allocated_ += delta;
- Counters::zone_segment_bytes.Set(segment_bytes_allocated_);
+ isolate_->counters()->zone_segment_bytes()->Set(segment_bytes_allocated_);
}
}
+// TODO(isolates): for performance reasons, this should be replaced with a new
+// operator that takes the zone in which the object should be
+// allocated.
+void* ZoneObject::operator new(size_t size) {
+ return ZONE->New(static_cast<int>(size));
+}
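Any class derived from ZoneObject picks up this operator new, so instances
land in the current isolate's zone and are reclaimed in bulk. A sketch with a
hypothetical node class (SampleNode is illustrative, not part of the patch):

class SampleNode : public ZoneObject {
 public:
  explicit SampleNode(int value) : value_(value) {}
 private:
  int value_;
};
// new SampleNode(42) allocates via ZONE->New(); there is no matching delete,
// the storage disappears wholesale in Zone::DeleteAll().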
+
+
+inline void* ZoneListAllocationPolicy::New(int size) {
+ return ZONE->New(size);
+}
+
+
+ZoneScope::ZoneScope(ZoneScopeMode mode)
+ : isolate_(Isolate::Current()),
+ mode_(mode) {
+ isolate_->zone()->scope_nesting_++;
+}
+
+
+bool ZoneScope::ShouldDeleteOnExit() {
+ return isolate_->zone()->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
+}
+
+
+int ZoneScope::nesting() {
+ return Isolate::Current()->zone()->scope_nesting_;
+}
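Taken together, the bookkeeping gives the usual stack discipline: only the
outermost DELETE_ON_EXIT scope wipes the zone when it unwinds. A sketch:

{
  ZoneScope outer(DELETE_ON_EXIT);    // scope_nesting_ becomes 1
  {
    ZoneScope inner(DELETE_ON_EXIT);  // scope_nesting_ becomes 2; its
  }                                   // destructor does not delete
}                                     // nesting back at 1: DeleteAll() runs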
+
+
} } // namespace v8::internal
#endif // V8_ZONE_INL_H_
namespace internal {
-Address Zone::position_ = 0;
-Address Zone::limit_ = 0;
-int Zone::zone_excess_limit_ = 256 * MB;
-int Zone::segment_bytes_allocated_ = 0;
+Zone::Zone()
+ : zone_excess_limit_(256 * MB),
+ segment_bytes_allocated_(0),
+ position_(0),
+ limit_(0),
+ scope_nesting_(0),
+ segment_head_(NULL) {
+}
unsigned Zone::allocation_size_ = 0;
-bool AssertNoZoneAllocation::allow_allocation_ = true;
-int ZoneScope::nesting_ = 0;
+ZoneScope::~ZoneScope() {
+ ASSERT_EQ(Isolate::Current(), isolate_);
+ if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
+ isolate_->zone()->scope_nesting_--;
+}
+
// Segments represent chunks of memory: They have starting address
// (encoded in the this pointer) and a size in bytes. Segments are
// chained together forming a LIFO structure with the newest segment
-// available as Segment::head(). Segments are allocated using malloc()
+// available as segment_head_. Segments are allocated using malloc()
// and de-allocated using free().
class Segment {
Address start() const { return address(sizeof(Segment)); }
Address end() const { return address(size_); }
- static Segment* head() { return head_; }
- static void set_head(Segment* head) { head_ = head; }
-
- // Creates a new segment, sets it size, and pushes it to the front
- // of the segment chain. Returns the new segment.
- static Segment* New(int size) {
- Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
- Zone::adjust_segment_bytes_allocated(size);
- if (result != NULL) {
- result->next_ = head_;
- result->size_ = size;
- head_ = result;
- }
- return result;
- }
-
- // Deletes the given segment. Does not touch the segment chain.
- static void Delete(Segment* segment, int size) {
- Zone::adjust_segment_bytes_allocated(-size);
- Malloced::Delete(segment);
- }
-
- static int bytes_allocated() { return bytes_allocated_; }
-
private:
// Computes the address of the nth byte in this segment.
Address address(int n) const {
return Address(this) + n;
}
- static Segment* head_;
- static int bytes_allocated_;
Segment* next_;
int size_;
+
+ friend class Zone;
};
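The header encodes everything in place: address(n) is simply this plus n
bytes, so the Segment object sits at the front of the very block it describes.
The implied layout:

//   +------------------+------------------------------+
//   | Segment header   | usable bytes                 |
//   | (next_, size_)   | start() ... end()            |
//   +------------------+------------------------------+
//   ^ this == address(0)                              ^ address(size_)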
-Segment* Segment::head_ = NULL;
-int Segment::bytes_allocated_ = 0;
+// Creates a new segment, sets its size, and pushes it to the front
+// of the segment chain. Returns the new segment.
+Segment* Zone::NewSegment(int size) {
+ Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+ adjust_segment_bytes_allocated(size);
+ if (result != NULL) {
+ result->next_ = segment_head_;
+ result->size_ = size;
+ segment_head_ = result;
+ }
+ return result;
+}
+
+
+// Deletes the given segment. Does not touch the segment chain.
+void Zone::DeleteSegment(Segment* segment, int size) {
+ adjust_segment_bytes_allocated(-size);
+ Malloced::Delete(segment);
+}
void Zone::DeleteAll() {
#endif
// Find a segment with a suitable size to keep around.
- Segment* keep = Segment::head();
+ Segment* keep = segment_head_;
while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
keep = keep->next();
}
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every segment except the one we wish to keep.
- Segment* current = Segment::head();
+ Segment* current = segment_head_;
while (current != NULL) {
Segment* next = current->next();
if (current == keep) {
// Zap the entire current segment (including the header).
memset(current, kZapDeadByte, size);
#endif
- Segment::Delete(current, size);
+ DeleteSegment(current, size);
}
current = next;
}
}
// Update the head segment to be the kept segment (if any).
- Segment::set_head(keep);
+ segment_head_ = keep;
}
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
- Segment* head = Segment::head();
+ Segment* head = segment_head_;
int old_size = (head == NULL) ? 0 : head->size();
static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
int new_size = kSegmentOverhead + size + (old_size << 1);
// requested size.
new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
}
- Segment* segment = Segment::New(new_size);
+ Segment* segment = NewSegment(new_size);
if (segment == NULL) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
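A worked example of the growth rule above, with illustrative numbers:

// Suppose the front segment holds 8 KB and a 100-byte request does not fit:
//   new_size = kSegmentOverhead + 100 + (8 KB << 1)  // a little over 16 KB
// so capacity roughly doubles per expansion, until the clamp against
// kMaximumSegmentSize (with the Max() fallback for oversized requests) and
// the kMaximumKeptSegmentSize limit on retained segments take over.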
DONT_DELETE_ON_EXIT
};
+class Segment;
// The Zone supports very fast allocation of small chunks of
// memory. The chunks cannot be deallocated individually, but instead
public:
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
- static inline void* New(int size);
+ inline void* New(int size);
template <typename T>
- static inline T* NewArray(int length);
+ inline T* NewArray(int length);
// Delete all objects and free all memory allocated in the Zone.
- static void DeleteAll();
+ void DeleteAll();
// Returns true if more memory has been allocated in zones than
// the limit allows.
- static inline bool excess_allocation();
+ inline bool excess_allocation();
- static inline void adjust_segment_bytes_allocated(int delta);
+ inline void adjust_segment_bytes_allocated(int delta);
static unsigned allocation_size_;
private:
+ friend class Isolate;
+ friend class ZoneScope;
// All pointers returned from New() have this alignment.
static const int kAlignment = kPointerSize;
static const int kMaximumKeptSegmentSize = 64 * KB;
// Report zone excess when allocation exceeds this limit.
- static int zone_excess_limit_;
+ int zone_excess_limit_;
// The number of bytes allocated in segments. Note that this number
// includes memory allocated from the OS but not yet allocated from
// the zone.
- static int segment_bytes_allocated_;
-
- // The Zone is intentionally a singleton; you should not try to
- // allocate instances of the class.
- Zone() { UNREACHABLE(); }
+ int segment_bytes_allocated_;
+ // Each isolate gets its own zone.
+ Zone();
// Expand the Zone to hold at least 'size' more bytes and allocate
// the bytes. Returns the address of the newly allocated chunk of
// memory in the Zone. Should only be called if there isn't enough
// room in the Zone already.
- static Address NewExpand(int size);
+ Address NewExpand(int size);
+
+  // Creates a new segment, sets its size, and pushes it to the front
+ // of the segment chain. Returns the new segment.
+ Segment* NewSegment(int size);
+ // Deletes the given segment. Does not touch the segment chain.
+ void DeleteSegment(Segment* segment, int size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
// is guaranteed to be aligned as dictated by kAlignment.
- static Address position_;
- static Address limit_;
+ Address position_;
+ Address limit_;
+
+ int scope_nesting_;
+
+ Segment* segment_head_;
+ Isolate* isolate_;
};
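The [position_, limit_) pair is the whole fast path: allocation just bumps
position_ until the front segment runs out. A minimal sketch of the shape
New() takes under that reading (the body shown earlier additionally rounds
the size up and asserts that allocation is allowed):

// Sketch only; not a drop-in replacement for Zone::New().
Address result = position_;
if (size > limit_ - position_) {
  result = NewExpand(size);  // slow path: chain in a fresh segment
} else {
  position_ += size;         // fast path: bump the pointer
}
return reinterpret_cast<void*>(result);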
class ZoneObject {
public:
// Allocate a new ZoneObject of 'size' bytes in the Zone.
- void* operator new(size_t size) { return Zone::New(static_cast<int>(size)); }
+ inline void* operator new(size_t size);
// Ideally, the delete operator should be private instead of
// public, but unfortunately the compiler sometimes synthesizes
class AssertNoZoneAllocation {
public:
- AssertNoZoneAllocation() : prev_(allow_allocation_) {
- allow_allocation_ = false;
- }
- ~AssertNoZoneAllocation() { allow_allocation_ = prev_; }
- static bool allow_allocation() { return allow_allocation_; }
+ inline AssertNoZoneAllocation();
+ inline ~AssertNoZoneAllocation();
private:
bool prev_;
- static bool allow_allocation_;
};
class ZoneListAllocationPolicy {
public:
// Allocate 'size' bytes of memory in the zone.
- static void* New(int size) { return Zone::New(size); }
+ static inline void* New(int size);
// De-allocation attempts are silently ignored.
static void Delete(void* p) { }
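Containers parameterized over this policy draw their backing store from the
zone, which is why Delete() can be a no-op. A usage sketch, assuming the
existing ZoneList container keeps its shape:

// Backing storage comes from the zone and is reclaimed wholesale by
// Zone::DeleteAll(), never element by element.
ZoneList<int> values(4);  // initial capacity of 4, zone-allocated
values.Add(1);
values.Add(2);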
// outer-most scope.
class ZoneScope BASE_EMBEDDED {
public:
- explicit ZoneScope(ZoneScopeMode mode) : mode_(mode) {
- nesting_++;
- }
+ // TODO(isolates): pass isolate pointer here.
+ inline explicit ZoneScope(ZoneScopeMode mode);
- virtual ~ZoneScope() {
- if (ShouldDeleteOnExit()) Zone::DeleteAll();
- --nesting_;
- }
+ virtual ~ZoneScope();
- bool ShouldDeleteOnExit() {
- return nesting_ == 1 && mode_ == DELETE_ON_EXIT;
- }
+ inline bool ShouldDeleteOnExit();
// For ZoneScopes that do not delete on exit by default, call this
// method to request deletion on exit.
mode_ = DELETE_ON_EXIT;
}
- static int nesting() { return nesting_; }
+ inline static int nesting();
private:
+ Isolate* isolate_;
ZoneScopeMode mode_;
- static int nesting_;
};
class ApiTestFuzzer: public v8::internal::Thread {
public:
void CallTest();
- explicit ApiTestFuzzer(int num)
- : test_number_(num),
+ explicit ApiTestFuzzer(v8::internal::Isolate* isolate, int num)
+ : Thread(isolate),
+ test_number_(num),
gate_(v8::internal::OS::CreateSemaphore(0)),
active_(true) {
}
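Thread subclasses now receive the owning isolate and forward it to the base
class, a pattern the GCThread and MorphThread helpers below repeat. A minimal
sketch under that assumption (WorkerThread is illustrative):

class WorkerThread : public i::Thread {
 public:
  explicit WorkerThread(i::Isolate* isolate) : Thread(isolate) {}
  virtual void Run() {
    // Body executes with the passed isolate associated to this thread.
  }
};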
ApiTestFuzzer::Fuzz();
CHECK(info.This() == info.Holder());
CHECK(info.Data()->Equals(v8::String::New("data")));
- i::Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
CHECK(info.This() == info.Holder());
CHECK(info.Data()->Equals(v8::String::New("data")));
return v8::Integer::New(17);
for (int i = 0; !iter.done(); i++) {
i::StackFrame* frame = iter.frame();
CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
- CHECK(frame->code()->IsCode());
+ i::Code* code = frame->LookupCode(i::Isolate::Current());
+ CHECK(code->IsCode());
i::Address pc = frame->pc();
- i::Code* code = frame->code();
CHECK(code->contains(pc));
iter.Advance();
}
#include "v8.h"
#include "accessors.h"
-#include "top.h"
#include "cctest.h"
static MaybeObject* AllocateAfterFailures() {
static int attempts = 0;
if (++attempts < 3) return Failure::RetryAfterGC();
+ Heap* heap = Isolate::Current()->heap();
// New space.
- NewSpace* new_space = Heap::new_space();
+ NewSpace* new_space = heap->new_space();
static const int kNewSpaceFillerSize = ByteArray::SizeFor(0);
while (new_space->Available() > kNewSpaceFillerSize) {
int available_before = static_cast<int>(new_space->Available());
- CHECK(!Heap::AllocateByteArray(0)->IsFailure());
+ CHECK(!heap->AllocateByteArray(0)->IsFailure());
if (available_before == new_space->Available()) {
// It seems that we are avoiding new space allocations when
// allocation is forced, so no need to fill up new space
break;
}
}
- CHECK(!Heap::AllocateByteArray(100)->IsFailure());
- CHECK(!Heap::AllocateFixedArray(100, NOT_TENURED)->IsFailure());
+ CHECK(!heap->AllocateByteArray(100)->IsFailure());
+ CHECK(!heap->AllocateFixedArray(100, NOT_TENURED)->IsFailure());
// Make sure we can allocate through optimized allocation functions
// for specific kinds.
- CHECK(!Heap::AllocateFixedArray(100)->IsFailure());
- CHECK(!Heap::AllocateHeapNumber(0.42)->IsFailure());
- CHECK(!Heap::AllocateArgumentsObject(Smi::FromInt(87), 10)->IsFailure());
- Object* object =
- Heap::AllocateJSObject(*Top::object_function())->ToObjectChecked();
- CHECK(!Heap::CopyJSObject(JSObject::cast(object))->IsFailure());
+ CHECK(!heap->AllocateFixedArray(100)->IsFailure());
+ CHECK(!heap->AllocateHeapNumber(0.42)->IsFailure());
+ CHECK(!heap->AllocateArgumentsObject(Smi::FromInt(87), 10)->IsFailure());
+ Object* object = heap->AllocateJSObject(
+ *Isolate::Current()->object_function())->ToObjectChecked();
+ CHECK(!heap->CopyJSObject(JSObject::cast(object))->IsFailure());
// Old data space.
- OldSpace* old_data_space = Heap::old_data_space();
+ OldSpace* old_data_space = heap->old_data_space();
static const int kOldDataSpaceFillerSize = ByteArray::SizeFor(0);
while (old_data_space->Available() > kOldDataSpaceFillerSize) {
- CHECK(!Heap::AllocateByteArray(0, TENURED)->IsFailure());
+ CHECK(!heap->AllocateByteArray(0, TENURED)->IsFailure());
}
- CHECK(!Heap::AllocateRawAsciiString(100, TENURED)->IsFailure());
+ CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
// Large object space.
- while (!Heap::OldGenerationAllocationLimitReached()) {
- CHECK(!Heap::AllocateFixedArray(10000, TENURED)->IsFailure());
+ while (!heap->OldGenerationAllocationLimitReached()) {
+ CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
}
- CHECK(!Heap::AllocateFixedArray(10000, TENURED)->IsFailure());
+ CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
// Map space.
- MapSpace* map_space = Heap::map_space();
+ MapSpace* map_space = heap->map_space();
static const int kMapSpaceFillerSize = Map::kSize;
InstanceType instance_type = JS_OBJECT_TYPE;
int instance_size = JSObject::kHeaderSize;
while (map_space->Available() > kMapSpaceFillerSize) {
- CHECK(!Heap::AllocateMap(instance_type, instance_size)->IsFailure());
+ CHECK(!heap->AllocateMap(instance_type, instance_size)->IsFailure());
}
- CHECK(!Heap::AllocateMap(instance_type, instance_size)->IsFailure());
+ CHECK(!heap->AllocateMap(instance_type, instance_size)->IsFailure());
// Test that we can allocate in old pointer space and code space.
- CHECK(!Heap::AllocateFixedArray(100, TENURED)->IsFailure());
- CHECK(!Heap::CopyCode(Builtins::builtin(Builtins::Illegal))->IsFailure());
+ CHECK(!heap->AllocateFixedArray(100, TENURED)->IsFailure());
+ CHECK(!heap->CopyCode(Isolate::Current()->builtins()->builtin(
+ Builtins::Illegal))->IsFailure());
// Return success.
return Smi::FromInt(42);
static Handle<Object> Test() {
- CALL_HEAP_FUNCTION(AllocateAfterFailures(), Object);
+ CALL_HEAP_FUNCTION(ISOLATE, AllocateAfterFailures(), Object);
}
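CALL_HEAP_FUNCTION now names the isolate explicitly. As a rough sketch of the
presumed expansion (an assumption about the macro, kept entirely in comments):

// do {
//   MaybeObject* result = AllocateAfterFailures();
//   Object* obj;
//   if (result->ToObject(&obj)) return Handle<Object>(obj);  // success
//   ISOLATE->heap()->CollectGarbage(...);                    // else GC...
// } while (retries remain);                                  // ...and retry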
v8::HandleScope scope;
env->Enter();
Handle<JSFunction> function =
- Factory::NewFunction(Factory::function_symbol(), Factory::null_value());
+ FACTORY->NewFunction(FACTORY->function_symbol(), FACTORY->null_value());
// Force the creation of an initial map and set the code to
// something empty.
- Factory::NewJSObject(function);
- function->ReplaceCode(Builtins::builtin(Builtins::EmptyFunction));
+ FACTORY->NewJSObject(function);
+ function->ReplaceCode(Isolate::Current()->builtins()->builtin(
+ Builtins::EmptyFunction));
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map());
Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
- Handle<Proxy> proxy = Factory::NewProxy(&kDescriptor);
- instance_descriptors = Factory::CopyAppendProxyDescriptor(
+ Handle<Proxy> proxy = FACTORY->NewProxy(&kDescriptor);
+ instance_descriptors = FACTORY->CopyAppendProxyDescriptor(
instance_descriptors,
- Factory::NewStringFromAscii(Vector<const char>("get", 3)),
+ FACTORY->NewStringFromAscii(Vector<const char>("get", 3)),
proxy,
static_cast<PropertyAttributes>(0));
map->set_instance_descriptors(*instance_descriptors);
TEST(CodeRange) {
const int code_range_size = 16*MB;
- CodeRange::Setup(code_range_size);
+ OS::Setup();
+ Isolate::Current()->code_range()->Setup(code_range_size);
int current_allocated = 0;
int total_allocated = 0;
List<Block> blocks(1000);
size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
- void* base = CodeRange::AllocateRawMemory(requested, &allocated);
+ void* base = Isolate::Current()->code_range()->
+ AllocateRawMemory(requested, &allocated);
blocks.Add(Block(base, static_cast<int>(allocated)));
current_allocated += static_cast<int>(allocated);
total_allocated += static_cast<int>(allocated);
} else {
// Free a block.
int index = Pseudorandom() % blocks.length();
- CodeRange::FreeRawMemory(blocks[index].base, blocks[index].size);
+ Isolate::Current()->code_range()->FreeRawMemory(
+ blocks[index].base, blocks[index].size);
current_allocated -= blocks[index].size;
if (index < blocks.length() - 1) {
blocks[index] = blocks.RemoveLast();
}
}
- CodeRange::TearDown();
+ Isolate::Current()->code_range()->TearDown();
}
#include "execution.h"
#include "snapshot.h"
#include "platform.h"
-#include "top.h"
#include "utils.h"
#include "cctest.h"
#include "parser.h"
CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestResource::dispose_count);
}
- i::CompilationCache::Clear();
- i::Heap::CollectAllGarbage(false);
+ v8::internal::Isolate::Current()->compilation_cache()->Clear();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestResource::dispose_count);
}
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
- i::CompilationCache::Clear();
- i::Heap::CollectAllGarbage(false);
+ i::Isolate::Current()->compilation_cache()->Clear();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
LocalContext env;
Local<String> source = String::New(two_byte_source);
// Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(new TestResource(two_byte_source));
CHECK(success);
Local<Script> script = Script::Compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestResource::dispose_count);
}
- i::CompilationCache::Clear();
- i::Heap::CollectAllGarbage(false);
+ i::Isolate::Current()->compilation_cache()->Clear();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestResource::dispose_count);
}
LocalContext env;
Local<String> source = v8_str(c_source);
// Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(
new TestAsciiResource(i::StrDup(c_source)));
CHECK(success);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
- i::CompilationCache::Clear();
- i::Heap::CollectAllGarbage(false);
+ i::Isolate::Current()->compilation_cache()->Clear();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
LocalContext env;
// Free some space in the new space so that we can check freshness.
- i::Heap::CollectGarbage(i::NEW_SPACE);
- i::Heap::CollectGarbage(i::NEW_SPACE);
+ HEAP->CollectGarbage(i::NEW_SPACE);
+ HEAP->CollectGarbage(i::NEW_SPACE);
uint16_t* two_byte_string = AsciiToTwoByteString("small");
Local<String> small_string = String::New(two_byte_string);
// We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(small_string->CanMakeExternal());
LocalContext env;
// Free some space in the new space so that we can check freshness.
- i::Heap::CollectGarbage(i::NEW_SPACE);
- i::Heap::CollectGarbage(i::NEW_SPACE);
+ HEAP->CollectGarbage(i::NEW_SPACE);
+ HEAP->CollectGarbage(i::NEW_SPACE);
Local<String> small_string = String::New("small");
// We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(small_string->CanMakeExternal());
String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
- i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
+ HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
CHECK(isymbol->IsSymbol());
}
- i::Heap::CollectAllGarbage(false);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
- i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
- i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
- i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
+ HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
CHECK(isymbol->IsSymbol());
}
- i::Heap::CollectAllGarbage(false);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
Local<String> string =
String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- i::Heap::CollectGarbage(i::NEW_SPACE);
- in_new_space = i::Heap::InNewSpace(*istring);
- CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
+ HEAP->CollectGarbage(i::NEW_SPACE);
+ in_new_space = HEAP->InNewSpace(*istring);
+ CHECK(in_new_space || HEAP->old_data_space()->Contains(*istring));
CHECK_EQ(0, TestResource::dispose_count);
}
- i::Heap::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+ HEAP->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, TestResource::dispose_count);
}
Local<String> string = String::NewExternal(
new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- i::Heap::CollectGarbage(i::NEW_SPACE);
- in_new_space = i::Heap::InNewSpace(*istring);
- CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
+ HEAP->CollectGarbage(i::NEW_SPACE);
+ in_new_space = HEAP->InNewSpace(*istring);
+ CHECK(in_new_space || HEAP->old_data_space()->Contains(*istring));
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
- i::Heap::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+ HEAP->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
- i::CompilationCache::Clear();
- i::Heap::CollectAllGarbage(false);
+ i::Isolate::Current()->compilation_cache()->Clear();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
CHECK_EQ(0, TestAsciiResource::dispose_count);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
- i::CompilationCache::Clear();
- i::Heap::CollectAllGarbage(false);
+ i::Isolate::Current()->compilation_cache()->Clear();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
CHECK(value->IsNumber());
CHECK_EQ(68, value->Int32Value());
}
- i::CompilationCache::Clear();
- i::Heap::CollectAllGarbage(false);
- i::Heap::CollectAllGarbage(false);
+ i::Isolate::Current()->compilation_cache()->Clear();
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
// Check reading and writing aligned pointers.
obj->SetPointerInInternalField(0, aligned);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
// Check reading and writing unaligned pointers.
obj->SetPointerInInternalField(0, unaligned);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
delete[] data;
CHECK_EQ(1, static_cast<int>(reinterpret_cast<uintptr_t>(unaligned) & 0x1));
obj->SetPointerInInternalField(0, aligned);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
obj->SetPointerInInternalField(0, unaligned);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
obj->SetInternalField(0, v8::External::Wrap(aligned));
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
obj->SetInternalField(0, v8::External::Wrap(unaligned));
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
delete[] data;
  // Ensure that the test starts with a fresh heap to test whether the hash
// code is based on the address.
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
Local<v8::Object> obj = v8::Object::New();
int hash = obj->GetIdentityHash();
int hash1 = obj->GetIdentityHash();
// objects should not be assigned the same hash code. If the test below fails
// the random number generator should be evaluated.
CHECK_NE(hash, hash2);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
int hash3 = v8::Object::New()->GetIdentityHash();
// Make sure that the identity hash is not based on the initial address of
// the object alone. If the test below fails the random number generator
v8::Local<v8::String> empty = v8_str("");
v8::Local<v8::String> prop_name = v8_str("prop_name");
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
// Make sure delete of a non-existent hidden value works
CHECK(obj->DeleteHiddenValue(key));
CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
// Make sure we do not find the hidden property.
CHECK(!obj->Has(empty));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CHECK_EQ(2003, obj->Get(empty)->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
  // Add another property and delete it afterwards to force the object
  // into slow case.
CHECK(obj->Delete(prop_name));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK(obj->DeleteHiddenValue(key));
CHECK(obj->GetHiddenValue(key).IsEmpty());
V8::AddImplicitReferences(g2s2, g2_children, 1);
}
// Do a full GC
- i::Heap::CollectGarbage(i::OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
// All object should be alive.
CHECK_EQ(0, NumberOfWeakCalls);
V8::AddImplicitReferences(g2s2, g2_children, 1);
}
- i::Heap::CollectGarbage(i::OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, NumberOfWeakCalls);
g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
- i::Heap::CollectGarbage(i::OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
}
V8::AddImplicitReferences(g3s1, g3_children, 1);
}
// Do a full GC
- i::Heap::CollectGarbage(i::OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
  // All objects should be alive.
CHECK_EQ(0, NumberOfWeakCalls);
V8::AddImplicitReferences(g3s1, g3_children, 1);
}
- i::Heap::CollectGarbage(i::OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
// All objects should be gone. 7 global handles in total.
CHECK_EQ(7, NumberOfWeakCalls);
TEST(HugeConsStringOutOfMemory) {
// It's not possible to read a snapshot into a heap with different dimensions.
if (i::Snapshot::IsEnabled()) return;
- v8::HandleScope scope;
- LocalContext context;
// Set heap limits.
static const int K = 1024;
v8::ResourceConstraints constraints;
// Execute a script that causes out of memory.
v8::V8::IgnoreOutOfMemoryException();
+ v8::HandleScope scope;
+ LocalContext context;
+
// Build huge string. This should fail with out of memory exception.
Local<Value> result = CompileRun(
"var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();"
if (try_catch.HasCaught()) {
CHECK_EQ(expected, count);
CHECK(result.IsEmpty());
- CHECK(!i::Top::has_scheduled_exception());
+ CHECK(!i::Isolate::Current()->has_scheduled_exception());
} else {
CHECK_NE(expected, count);
}
obj.Dispose();
obj.Clear();
in_scavenge = true;
- i::Heap::PerformScavenge();
+ HEAP->PerformScavenge();
in_scavenge = false;
*(reinterpret_cast<bool*>(data)) = true;
}
object_b.MakeWeak(&released_in_scavenge, &CheckIsNotInvokedInScavenge);
while (!object_a_disposed) {
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
CHECK(!released_in_scavenge);
}
CHECK_EQ(v8::Integer::New(3), args[2]);
CHECK_EQ(v8::Undefined(), args[3]);
v8::HandleScope scope;
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
return v8::Undefined();
}
v8::TryCatch try_catch;
CHECK(!o1->SetPrototype(o0));
CHECK(!try_catch.HasCaught());
- ASSERT(!i::Top::has_pending_exception());
+ ASSERT(!i::Isolate::Current()->has_pending_exception());
CHECK_EQ(42, CompileRun("function f() { return 42; }; f()")->Int32Value());
}
Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
return v8::Handle<Value>();
}
int* call_count = reinterpret_cast<int*>(v8::External::Unwrap(info.Data()));
++(*call_count);
if ((*call_count) % 20 == 0) {
- i::Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
}
return v8::Handle<Value>();
}
v8::Handle<v8::Value> DirectApiCallback(const v8::Arguments& args) {
static int count = 0;
if (count++ % 3 == 0) {
- i::Heap::CollectAllGarbage(true); // This should move the stub
+    HEAP->CollectAllGarbage(true);  // This should move the stub
GenerateSomeGarbage(); // This should ensure the old stub memory is flushed
}
return v8::Handle<v8::Value>();
v8::Handle<v8::Value> DirectGetterCallback(Local<String> name,
const v8::AccessorInfo& info) {
if (++p_getter_count % 3 == 0) {
- i::Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
GenerateSomeGarbage();
}
return v8::Handle<v8::Value>();
: RegisterThreadedTest::count();
active_tests_ = tests_being_run_ = end - start;
for (int i = 0; i < tests_being_run_; i++) {
- RegisterThreadedTest::nth(i)->fuzzer_ = new ApiTestFuzzer(i + start);
+ RegisterThreadedTest::nth(i)->fuzzer_ = new ApiTestFuzzer(
+ i::Isolate::Current(), i + start);
}
for (int i = 0; i < active_tests_; i++) {
RegisterThreadedTest::nth(i)->fuzzer_->Start();
// the first garbage collection but some of the maps have already
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
- i::Heap::CollectAllGarbage(false);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
int count = GetGlobalObjectsCount();
#ifdef DEBUG
- if (count != expected) i::Heap::TracePathToGlobal();
+ if (count != expected) HEAP->TracePathToGlobal();
#endif
CHECK_EQ(expected, count);
}
// weak callback of the first handle would be able to 'reallocate' it.
handle1.MakeWeak(NULL, NewPersistentHandleCallback);
handle2.Dispose();
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
to_be_disposed.Dispose();
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
handle.Dispose();
}
}
handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
to_be_disposed = handle2;
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
}
handle2.MakeWeak(NULL, DisposingCallback);
handle3.MakeWeak(NULL, HandleCreatingCallback);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
CheckProperties(elms->Get(v8::Integer::New(3)), elmc3, elmv3);
}
+THREADED_TEST(PropertyEnumeration2) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::New(
+ "var result = [];"
+ "result[0] = {};"
+ "result[1] = {a: 1, b: 2};"
+ "result[2] = [1, 2, 3];"
+ "var proto = {x: 1, y: 2, z: 3};"
+ "var x = { __proto__: proto, w: 0, z: 1 };"
+ "result[3] = x;"
+ "result;"))->Run();
+ v8::Handle<v8::Array> elms = obj.As<v8::Array>();
+ CHECK_EQ(4, elms->Length());
+ int elmc0 = 0;
+ const char** elmv0 = NULL;
+ CheckProperties(elms->Get(v8::Integer::New(0)), elmc0, elmv0);
+
+ v8::Handle<v8::Value> val = elms->Get(v8::Integer::New(0));
+ v8::Handle<v8::Array> props = val.As<v8::Object>()->GetPropertyNames();
+ CHECK_EQ(0, props->Length());
+ for (uint32_t i = 0; i < props->Length(); i++) {
+ printf("p[%d]\n", i);
+ }
+}
static bool NamedSetAccessBlocker(Local<v8::Object> obj,
Local<Value> name,
gc_during_regexp_ = 0;
regexp_success_ = false;
gc_success_ = false;
- GCThread gc_thread(this);
+ GCThread gc_thread(i::Isolate::Current(), this);
gc_thread.Start();
v8::Locker::StartPreemption(1);
class GCThread : public i::Thread {
public:
- explicit GCThread(RegExpInterruptTest* test)
- : test_(test) {}
+ explicit GCThread(i::Isolate* isolate, RegExpInterruptTest* test)
+ : Thread(isolate), test_(test) {}
virtual void Run() {
test_->CollectGarbage();
}
{
v8::Locker lock;
// TODO(lrn): Perhaps create some garbage before collecting.
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
gc_count_++;
}
i::OS::Sleep(1);
gc_during_apply_ = 0;
apply_success_ = false;
gc_success_ = false;
- GCThread gc_thread(this);
+ GCThread gc_thread(i::Isolate::Current(), this);
gc_thread.Start();
v8::Locker::StartPreemption(1);
class GCThread : public i::Thread {
public:
- explicit GCThread(ApplyInterruptTest* test)
- : test_(test) {}
+ explicit GCThread(i::Isolate* isolate, ApplyInterruptTest* test)
+ : Thread(isolate), test_(test) {}
virtual void Run() {
test_->CollectGarbage();
}
while (gc_during_apply_ < kRequiredGCs) {
{
v8::Locker lock;
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
gc_count_++;
}
i::OS::Sleep(1);
CHECK(i::StringShape(string).IsExternal());
if (string->IsAsciiRepresentation()) {
// Check old map is not symbol or long.
- CHECK(string->map() == i::Heap::external_ascii_string_map());
+ CHECK(string->map() == HEAP->external_ascii_string_map());
// Morph external string to be TwoByte string.
- string->set_map(i::Heap::external_string_map());
+ string->set_map(HEAP->external_string_map());
i::ExternalTwoByteString* morphed =
i::ExternalTwoByteString::cast(string);
morphed->set_resource(uc16_resource);
} else {
// Check old map is not symbol or long.
- CHECK(string->map() == i::Heap::external_string_map());
+ CHECK(string->map() == HEAP->external_string_map());
// Morph external string to be ASCII string.
- string->set_map(i::Heap::external_ascii_string_map());
+ string->set_map(HEAP->external_ascii_string_map());
i::ExternalAsciiString* morphed =
i::ExternalAsciiString::cast(string);
morphed->set_resource(ascii_resource);
i::StrLength(c_string)));
Local<String> lhs(v8::Utils::ToLocal(
- i::Factory::NewExternalStringFromAscii(&ascii_resource)));
+ FACTORY->NewExternalStringFromAscii(&ascii_resource)));
Local<String> rhs(v8::Utils::ToLocal(
- i::Factory::NewExternalStringFromAscii(&ascii_resource)));
+ FACTORY->NewExternalStringFromAscii(&ascii_resource)));
env->Global()->Set(v8_str("lhs"), lhs);
env->Global()->Set(v8_str("rhs"), rhs);
// Create the input string for the regexp - the one we are going to change
// properties of.
- input_ = i::Factory::NewExternalStringFromAscii(&ascii_resource_);
+ input_ = FACTORY->NewExternalStringFromAscii(&ascii_resource_);
// Inject the input as a global variable.
i::Handle<i::String> input_name =
- i::Factory::NewStringFromAscii(i::Vector<const char>("input", 5));
- i::Top::global_context()->global()->SetProperty(
+ FACTORY->NewStringFromAscii(i::Vector<const char>("input", 5));
+ i::Isolate::Current()->global_context()->global()->SetProperty(
*input_name,
*input_,
NONE,
i::kNonStrictMode)->ToObjectChecked();
- MorphThread morph_thread(this);
+ MorphThread morph_thread(i::Isolate::Current(), this);
morph_thread.Start();
v8::Locker::StartPreemption(1);
LongRunningRegExp();
class MorphThread : public i::Thread {
public:
- explicit MorphThread(RegExpStringModificationTest* test)
- : test_(test) {}
+ explicit MorphThread(i::Isolate* isolate,
+ RegExpStringModificationTest* test)
+ : Thread(isolate), test_(test) {}
virtual void Run() {
test_->MorphString();
}
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
i::Handle<i::ExternalPixelArray> pixels =
i::Handle<i::ExternalPixelArray>::cast(
- i::Factory::NewExternalArray(kElementCount,
+ FACTORY->NewExternalArray(kElementCount,
v8::kExternalPixelArray,
pixel_data));
- i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
pixels->set(i, i % 256);
}
- i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(i % 256, pixels->get(i));
CHECK_EQ(i % 256, pixel_data[i]);
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
i::Handle<i::ExternalPixelArray> pixels =
i::Handle<i::ExternalPixelArray>::cast(
- i::Factory::NewExternalArray(kElementCount,
- v8::kExternalPixelArray,
- pixel_data));
+ FACTORY->NewExternalArray(kElementCount,
+ v8::kExternalPixelArray,
+ pixel_data));
for (int i = 0; i < kElementCount; i++) {
pixels->set(i, i % 256);
}
static_cast<ElementType*>(malloc(kElementCount * element_size));
i::Handle<ExternalArrayClass> array =
i::Handle<ExternalArrayClass>::cast(
- i::Factory::NewExternalArray(kElementCount, array_type, array_data));
- i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
+ FACTORY->NewExternalArray(kElementCount, array_type, array_data));
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
array->set(i, static_cast<ElementType>(i));
}
- i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array->get(i)));
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array_data[i]));
" }"
"}"
"sum;");
- i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
CHECK_EQ(28, result->Int32Value());
// Make sure out-of-range loads do not throw.
static_cast<ElementType*>(malloc(kLargeElementCount * element_size));
i::Handle<ExternalArrayClass> large_array =
i::Handle<ExternalArrayClass>::cast(
- i::Factory::NewExternalArray(kLargeElementCount,
+ FACTORY->NewExternalArray(kLargeElementCount,
array_type,
array_data));
v8::Handle<v8::Object> large_obj = v8::Object::New();
static uint32_t* stack_limit;
static v8::Handle<Value> GetStackLimitCallback(const v8::Arguments& args) {
- stack_limit = reinterpret_cast<uint32_t*>(i::StackGuard::real_climit());
+ stack_limit = reinterpret_cast<uint32_t*>(
+ i::Isolate::Current()->stack_guard()->real_climit());
return v8::Undefined();
}
other_context->Enter();
CompileRun(source_simple);
other_context->Exit();
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
other_context->Enter();
CompileRun(source_eval);
other_context->Exit();
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
other_context->Enter();
CompileRun(source_exception);
other_context->Exit();
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
v8::V8::AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
v8::V8::RemoveGCPrologueCallback(PrologueCallback);
v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
" return 'Different results for ' + key1 + ': ' + r1 + ' vs. ' + r1_;"
" return 'PASSED';"
"})()";
- i::Heap::ClearJSFunctionResultCaches();
+ HEAP->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
" return 'FAILED: k0CacheSize is too small';"
" return 'PASSED';"
"})()";
- i::Heap::ClearJSFunctionResultCaches();
+ HEAP->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
" };"
" return 'PASSED';"
"})()";
- i::Heap::ClearJSFunctionResultCaches();
+ HEAP->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
" };"
" return 'PASSED';"
"})()";
- i::Heap::ClearJSFunctionResultCaches();
+ HEAP->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
" };"
" return 'PASSED';"
"})()";
- i::Heap::ClearJSFunctionResultCaches();
+ HEAP->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
void FailedAccessCheckCallbackGC(Local<v8::Object> target,
v8::AccessType type,
Local<v8::Value> data) {
- i::Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
}
v8::V8::SetFailedAccessCheckCallbackFunction(NULL);
}
+TEST(DefaultIsolateGetCurrent) {
+ CHECK(v8::Isolate::GetCurrent() != NULL);
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ CHECK(reinterpret_cast<i::Isolate*>(isolate)->IsDefaultIsolate());
+ printf("*** %s\n", "DefaultIsolateGetCurrent success");
+}
+
+TEST(IsolateNewDispose) {
+ v8::Isolate* current_isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = v8::Isolate::New();
+ CHECK(isolate != NULL);
+ CHECK(!reinterpret_cast<i::Isolate*>(isolate)->IsDefaultIsolate());
+ CHECK(current_isolate != isolate);
+ CHECK(current_isolate == v8::Isolate::GetCurrent());
+
+ v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ last_location = last_message = NULL;
+ isolate->Dispose();
+ CHECK_EQ(last_location, NULL);
+ CHECK_EQ(last_message, NULL);
+}
+
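A minimal embedder-side sketch of the isolate lifecycle these tests exercise
(not part of the patch; it assumes only the v8::Isolate API declared above,
and the helper name is illustrative):

#include <v8.h>

void UseSecondIsolate() {
  // An extra isolate must be entered before any V8 call that should run
  // inside it, and exited again before it may be disposed.
  v8::Isolate* isolate = v8::Isolate::New();
  isolate->Enter();
  {
    v8::HandleScope scope;  // Handles and contexts are per-isolate.
    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope context_scope(context);
    v8::Script::Compile(v8::String::New("1 + 1"))->Run();
    context.Dispose();
  }
  isolate->Exit();
  // Disposing while still entered is a fatal error, as
  // TEST(DisposeIsolateWhenInUse) below checks.
  isolate->Dispose();
}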
+TEST(IsolateEnterExitDefault) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Isolate* current_isolate = v8::Isolate::GetCurrent();
+ CHECK(current_isolate != NULL); // Default isolate.
+ ExpectString("'hello'", "hello");
+ current_isolate->Enter();
+ ExpectString("'still working'", "still working");
+ current_isolate->Exit();
+ ExpectString("'still working 2'", "still working 2");
+ current_isolate->Exit();
+ // The default isolate is always, well, 'default current'.
+ CHECK_EQ(v8::Isolate::GetCurrent(), current_isolate);
+ // Still working, since the default isolate is entered automatically on
+ // any thread that has no isolate and attempts to execute V8 APIs.
+ ExpectString("'still working 3'", "still working 3");
+}
+
+TEST(DisposeDefaultIsolate) {
+ v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+
+ // Run some V8 code to trigger default isolate to become 'current'.
+ v8::HandleScope scope;
+ LocalContext context;
+ ExpectString("'run some V8'", "run some V8");
+
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ CHECK(reinterpret_cast<i::Isolate*>(isolate)->IsDefaultIsolate());
+ last_location = last_message = NULL;
+ isolate->Dispose();
+ // It is not possible to dispose default isolate via Isolate API.
+ CHECK_NE(last_location, NULL);
+ CHECK_NE(last_message, NULL);
+}
+
+TEST(RunDefaultAndAnotherIsolate) {
+ v8::HandleScope scope;
+ LocalContext context;
+
+ // Enter new isolate.
+ v8::Isolate* isolate = v8::Isolate::New();
+ CHECK(isolate);
+ isolate->Enter();
+ { // Need this block because the subsequent Exit() will deallocate the heap,
+ // so all scope objects must be destructed before that happens.
+ v8::HandleScope scope_new;
+ LocalContext context_new;
+
+ // Run something in new isolate.
+ CompileRun("var foo = 153;");
+ ExpectTrue("function f() { return foo == 153; }; f()");
+ }
+ isolate->Exit();
+
+ // This runs automatically in the default isolate.
+ // Variables from the other isolate should not be available.
+ ExpectTrue("function f() {"
+ " try {"
+ " foo;"
+ " return false;"
+ " } catch(e) {"
+ " return true;"
+ " }"
+ "};"
+ "var bar = 371;"
+ "f()");
+
+ v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ last_location = last_message = NULL;
+ isolate->Dispose();
+ CHECK_EQ(last_location, NULL);
+ CHECK_EQ(last_message, NULL);
+
+ // Check that default isolate still runs.
+ ExpectTrue("function f() { return bar == 371; }; f()");
+}
+
+TEST(DisposeIsolateWhenInUse) {
+ v8::Isolate* isolate = v8::Isolate::New();
+ CHECK(isolate);
+ isolate->Enter();
+ v8::HandleScope scope;
+ LocalContext context;
+ // Run something in this isolate.
+ ExpectTrue("true");
+ v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ last_location = last_message = NULL;
+ // Still entered, should fail.
+ isolate->Dispose();
+ CHECK_NE(last_location, NULL);
+ CHECK_NE(last_message, NULL);
+}
+
+TEST(RunTwoIsolatesOnSingleThread) {
+ // Run isolate 1.
+ v8::Isolate* isolate1 = v8::Isolate::New();
+ isolate1->Enter();
+ v8::Persistent<v8::Context> context1 = v8::Context::New();
+
+ {
+ v8::Context::Scope cscope(context1);
+ v8::HandleScope scope;
+ // Run something in new isolate.
+ CompileRun("var foo = 'isolate 1';");
+ ExpectString("function f() { return foo; }; f()", "isolate 1");
+ }
+
+ // Run isolate 2.
+ v8::Isolate* isolate2 = v8::Isolate::New();
+ v8::Persistent<v8::Context> context2;
+
+ {
+ v8::Isolate::Scope iscope(isolate2);
+ context2 = v8::Context::New();
+ v8::Context::Scope cscope(context2);
+ v8::HandleScope scope;
+
+ // Run something in new isolate.
+ CompileRun("var foo = 'isolate 2';");
+ ExpectString("function f() { return foo; }; f()", "isolate 2");
+ }
+
+ {
+ v8::Context::Scope cscope(context1);
+ v8::HandleScope scope;
+ // Now again in isolate 1
+ ExpectString("function f() { return foo; }; f()", "isolate 1");
+ }
+
+ isolate1->Exit();
+
+ // Run some stuff in default isolate.
+ v8::Persistent<v8::Context> context_default = v8::Context::New();
+
+ {
+ v8::Context::Scope cscope(context_default);
+ v8::HandleScope scope;
+ // Variables from other isolates should not be available; verify that
+ // accessing them throws an exception.
+ ExpectTrue("function f() {"
+ " try {"
+ " foo;"
+ " return false;"
+ " } catch(e) {"
+ " return true;"
+ " }"
+ "};"
+ "var isDefaultIsolate = true;"
+ "f()");
+ }
+
+ isolate1->Enter();
+
+ {
+ v8::Isolate::Scope iscope(isolate2);
+ v8::Context::Scope cscope(context2);
+ v8::HandleScope scope;
+ ExpectString("function f() { return foo; }; f()", "isolate 2");
+ }
+
+ {
+ v8::Context::Scope cscope(context1);
+ v8::HandleScope scope;
+ ExpectString("function f() { return foo; }; f()", "isolate 1");
+ }
+
+ {
+ v8::Isolate::Scope iscope(isolate2);
+ context2.Dispose();
+ }
+
+ context1.Dispose();
+ isolate1->Exit();
+
+ v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ last_location = last_message = NULL;
+
+ isolate1->Dispose();
+ CHECK_EQ(last_location, NULL);
+ CHECK_EQ(last_message, NULL);
+
+ isolate2->Dispose();
+ CHECK_EQ(last_location, NULL);
+ CHECK_EQ(last_message, NULL);
+
+ // Check that default isolate still runs.
+ {
+ v8::Context::Scope cscope(context_default);
+ v8::HandleScope scope;
+ ExpectTrue("function f() { return isDefaultIsolate; }; f()");
+ }
+}
+
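The manual Enter()/Exit() pairing above can also be expressed with the RAII
helper v8::Isolate::Scope, as the isolate2 blocks in this test already do. A
small sketch of that pattern (illustrative only; the helper function is
hypothetical):

void RunInIsolate(v8::Isolate* isolate,
                  v8::Persistent<v8::Context> context,
                  const char* source) {
  v8::Isolate::Scope isolate_scope(isolate);  // Enter() now, Exit() on return.
  v8::Context::Scope context_scope(context);
  v8::HandleScope handle_scope;
  v8::Script::Compile(v8::String::New(source))->Run();
}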
+static int CalcFibonacci(v8::Isolate* isolate, int limit) {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope;
+ LocalContext context;
+ i::ScopedVector<char> code(1024);
+ i::OS::SNPrintF(code, "function fib(n) {"
+ " if (n <= 2) return 1;"
+ " return fib(n-1) + fib(n-2);"
+ "}"
+ "fib(%d)", limit);
+ Local<Value> value = CompileRun(code.start());
+ CHECK(value->IsNumber());
+ return static_cast<int>(value->NumberValue());
+}
+
+class IsolateThread : public v8::internal::Thread {
+ public:
+ explicit IsolateThread(v8::Isolate* isolate, int fib_limit)
+ : Thread(NULL),
+ isolate_(isolate),
+ fib_limit_(fib_limit),
+ result_(0) { }
+
+ void Run() {
+ result_ = CalcFibonacci(isolate_, fib_limit_);
+ }
+
+ int result() { return result_; }
+
+ private:
+ v8::Isolate* isolate_;
+ int fib_limit_;
+ int result_;
+};
+
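Internal threads are now constructed with the isolate they belong to: the
i::Thread constructor gained an Isolate* parameter (IsolateThread above passes
NULL and switches isolates itself via Isolate::Scope). A sketch of the new
subclassing pattern; the class name is illustrative:

class WorkerThread : public v8::internal::Thread {
 public:
  explicit WorkerThread(v8::internal::Isolate* isolate)
      : Thread(isolate) { }  // Associate the thread with an isolate up front.
  virtual void Run() {
    // Thread body; it may still enter other isolates with
    // v8::Isolate::Scope, as CalcFibonacci() above does.
  }
};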
+TEST(MultipleIsolatesOnIndividualThreads) {
+ v8::Isolate* isolate1 = v8::Isolate::New();
+ v8::Isolate* isolate2 = v8::Isolate::New();
+
+ IsolateThread thread1(isolate1, 21);
+ IsolateThread thread2(isolate2, 12);
+
+ // Compute some fibonacci numbers on 3 threads in 3 isolates.
+ thread1.Start();
+ thread2.Start();
+
+ int result1 = CalcFibonacci(v8::Isolate::GetCurrent(), 21);
+ int result2 = CalcFibonacci(v8::Isolate::GetCurrent(), 12);
+
+ thread1.Join();
+ thread2.Join();
+
+ // Compare results. The actual Fibonacci numbers for 12 and 21 are taken
+ // (I'm lazy!) from http://en.wikipedia.org/wiki/Fibonacci_number
+ CHECK_EQ(result1, 10946);
+ CHECK_EQ(result2, 144);
+ CHECK_EQ(result1, thread1.result());
+ CHECK_EQ(result2, thread2.result());
+
+ isolate1->Dispose();
+ isolate2->Dispose();
+}
+
+
+class InitDefaultIsolateThread : public v8::internal::Thread {
+ public:
+ enum TestCase { IgnoreOOM, SetResourceConstraints, SetFatalHandler };
+
+ explicit InitDefaultIsolateThread(TestCase testCase)
+ : Thread(NULL),
+ testCase_(testCase),
+ result_(false) { }
+
+ void Run() {
+ switch (testCase_) {
+ case IgnoreOOM:
+ v8::V8::IgnoreOutOfMemoryException();
+ break;
+
+ case SetResourceConstraints: {
+ static const int K = 1024;
+ v8::ResourceConstraints constraints;
+ constraints.set_max_young_space_size(256 * K);
+ constraints.set_max_old_space_size(4 * K * K);
+ v8::SetResourceConstraints(&constraints);
+ break;
+ }
+
+ case SetFatalHandler:
+ v8::V8::SetFatalErrorHandler(NULL);
+ break;
+ }
+ result_ = true;
+ }
+
+ bool result() { return result_; }
+
+ private:
+ TestCase testCase_;
+ bool result_;
+};
+
+
+static void InitializeTestHelper(InitDefaultIsolateThread::TestCase testCase) {
+ InitDefaultIsolateThread thread(testCase);
+ thread.Start();
+ thread.Join();
+ CHECK_EQ(thread.result(), true);
+}
+
+TEST(InitializeDefaultIsolateOnSecondaryThread1) {
+ InitializeTestHelper(InitDefaultIsolateThread::IgnoreOOM);
+}
+
+TEST(InitializeDefaultIsolateOnSecondaryThread2) {
+ InitializeTestHelper(InitDefaultIsolateThread::SetResourceConstraints);
+}
+
+TEST(InitializeDefaultIsolateOnSecondaryThread3) {
+ InitializeTestHelper(InitDefaultIsolateThread::SetFatalHandler);
+}
+
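For reference, the HEAP and FACTORY macros that replace the old static Heap::
and Factory:: entry points throughout these hunks presumably expand along the
lines of the following (the actual definitions live in the patch's isolate
header, which this excerpt does not show):

#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())

The former Top:: statics follow the same pattern and become instance accessors
on the current isolate, e.g. Top::context() turns into
Isolate::Current()->context(), and the debugger side is reached as
Isolate::Current()->debug() / ->debugger().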
TEST(StringCheckMultipleContexts) {
const char* code =
"})()",
"ReferenceError: cell is not defined");
CompileRun("cell = \"new_second\";");
- i::Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
ExpectString("readCell()", "new_second");
ExpectString("readCell()", "new_second");
}
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
// some relocated stuff here, not executed
__ RecordComment("dead code, just testing relocations");
- __ mov(r0, Operand(Factory::true_value()));
+ __ mov(r0, Operand(FACTORY->true_value()));
__ RecordComment("dead code, just testing immediate operands");
__ mov(r0, Operand(-1));
__ mov(r0, Operand(0xFF000000));
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
Label L, C;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(ip, Operand(sp));
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
Assembler assm(NULL, 0);
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
// On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
__ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
Assembler assm(NULL, 0);
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
__ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
__ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
Assembler assm(NULL, 0);
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label wrong_exception;
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->
- ToObjectChecked();
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->
- ToObjectChecked();
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
__ ret(0);
// some relocated stuff here, not executed
- __ mov(eax, Factory::true_value());
+ __ mov(eax, FACTORY->true_value());
__ jmp(NULL, RelocInfo::RUNTIME_ENTRY);
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
-
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
typedef int (*F3)(float x);
TEST(AssemblerIa323) {
- if (!CpuFeatures::IsSupported(SSE2)) return;
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
- CHECK(CpuFeatures::IsSupported(SSE2));
+ CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
{ CpuFeatures::Scope fscope(SSE2);
__ cvttss2si(eax, Operand(esp, 4));
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(Heap::CreateCode(
+ Code* code = Code::cast(HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked());
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
// Don't print the code - our disassembler can't handle cvttss2si;
// print the raw bytes instead.
Disassembler::Dump(stdout,
typedef int (*F4)(double x);
TEST(AssemblerIa324) {
- if (!CpuFeatures::IsSupported(SSE2)) return;
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
- CHECK(CpuFeatures::IsSupported(SSE2));
+ CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
__ cvttsd2si(eax, Operand(esp, 4));
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(Heap::CreateCode(
+ Code* code = Code::cast(HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked());
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
// Don't print the code - our disassembler can't handle cvttsd2si;
// print the raw bytes instead.
Disassembler::Dump(stdout,
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(Heap::CreateCode(
+ Code* code = Code::cast(HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked());
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
typedef double (*F5)(double x, double y);
TEST(AssemblerIa326) {
- if (!CpuFeatures::IsSupported(SSE2)) return;
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
- CHECK(CpuFeatures::IsSupported(SSE2));
+ CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(Heap::CreateCode(
+ Code* code = Code::cast(HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked());
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
#ifdef DEBUG
::printf("\n---\n");
// Don't print the code - our disassembler can't handle SSE instructions.
typedef double (*F6)(int x);
TEST(AssemblerIa328) {
- if (!CpuFeatures::IsSupported(SSE2)) return;
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
- CHECK(CpuFeatures::IsSupported(SSE2));
+ CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(Heap::CreateCode(
+ Code* code = Code::cast(HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked());
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(Heap::CreateCode(
+ Code* code = Code::cast(HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked());
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
TEST(AssemblerX64ReturnOperation) {
+ OS::Setup();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
}
TEST(AssemblerX64StackOperations) {
+ OS::Setup();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
}
TEST(AssemblerX64ArithmeticOperations) {
+ OS::Setup();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
}
TEST(AssemblerX64ImulOperation) {
+ OS::Setup();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
}
TEST(AssemblerX64MemoryOperands) {
+ OS::Setup();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
}
TEST(AssemblerX64ControlFlow) {
+ OS::Setup();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
}
TEST(AssemblerX64LoopImmediates) {
+ OS::Setup();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
using namespace v8::internal;
TEST(List) {
+ v8::internal::V8::Initialize(NULL);
List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
public:
typedef SamplingCircularQueue::Cell Record;
- ProducerThread(SamplingCircularQueue* scq,
+ ProducerThread(i::Isolate* isolate,
+ SamplingCircularQueue* scq,
int records_per_chunk,
Record value,
i::Semaphore* finished)
- : scq_(scq),
+ : Thread(isolate),
+ scq_(scq),
records_per_chunk_(records_per_chunk),
value_(value),
finished_(finished) { }
// Check that we are using non-reserved values.
CHECK_NE(SamplingCircularQueue::kClear, 1);
CHECK_NE(SamplingCircularQueue::kEnd, 1);
- ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore);
- ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore);
- ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore);
+ i::Isolate* isolate = i::Isolate::Current();
+ ProducerThread producer1(isolate, &scq, kRecordsPerChunk, 1, semaphore);
+ ProducerThread producer2(isolate, &scq, kRecordsPerChunk, 10, semaphore);
+ ProducerThread producer3(isolate, &scq, kRecordsPerChunk, 20, semaphore);
CHECK_EQ(NULL, scq.StartDequeue());
producer1.Start();
#include "execution.h"
#include "factory.h"
#include "platform.h"
-#include "top.h"
#include "cctest.h"
using namespace v8::internal;
static MaybeObject* GetGlobalProperty(const char* name) {
- Handle<String> symbol = Factory::LookupAsciiSymbol(name);
- return Top::context()->global()->GetProperty(*symbol);
+ Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
+ return Isolate::Current()->context()->global()->GetProperty(*symbol);
}
static void SetGlobalProperty(const char* name, Object* value) {
Handle<Object> object(value);
- Handle<String> symbol = Factory::LookupAsciiSymbol(name);
- Handle<JSObject> global(Top::context()->global());
+ Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
+ Handle<JSObject> global(Isolate::Current()->context()->global());
SetProperty(global, symbol, object, NONE, kNonStrictMode);
}
static Handle<JSFunction> Compile(const char* source) {
- Handle<String> source_code(Factory::NewStringFromUtf8(CStrVector(source)));
+ Handle<String> source_code(FACTORY->NewStringFromUtf8(CStrVector(source)));
Handle<SharedFunctionInfo> shared_function =
Compiler::Compile(source_code,
Handle<String>(),
NULL,
Handle<String>::null(),
NOT_NATIVES_CODE);
- return Factory::NewFunctionFromSharedFunctionInfo(shared_function,
- Top::global_context());
+ return FACTORY->NewFunctionFromSharedFunctionInfo(shared_function,
+ Isolate::Current()->global_context());
}
if (fun.is_null()) return -1;
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
SetGlobalProperty("x", Smi::FromInt(x));
SetGlobalProperty("y", Smi::FromInt(y));
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
SetGlobalProperty("x", Smi::FromInt(x));
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
SetGlobalProperty("n", Smi::FromInt(n));
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
Handle<JSFunction> fun = Compile(source);
if (fun.is_null()) return;
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
}
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
CHECK_EQ(511.0, GetGlobalProperty("r")->ToObjectChecked()->Number());
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Handle<Object> result =
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(has_pending_exception);
- CHECK_EQ(42.0, Top::pending_exception()->ToObjectChecked()->Number());
+ CHECK_EQ(42.0, Isolate::Current()->pending_exception()->
+ ToObjectChecked()->Number());
}
// Run the generated code to populate the global object with 'foo'.
bool has_pending_exception;
- Handle<JSObject> global(Top::context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global());
Execution::Call(fun0, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
- Object* foo_symbol = Factory::LookupAsciiSymbol("foo")->ToObjectChecked();
- MaybeObject* fun1_object =
- Top::context()->global()->GetProperty(String::cast(foo_symbol));
+ Object* foo_symbol = FACTORY->LookupAsciiSymbol("foo")->ToObjectChecked();
+ MaybeObject* fun1_object = Isolate::Current()->context()->global()->
+ GetProperty(String::cast(foo_symbol));
Handle<Object> fun1(fun1_object->ToObjectChecked());
CHECK(fun1->IsJSFunction());
Object** argv[1] = {
- Handle<Object>::cast(Factory::LookupAsciiSymbol("hello")).location()
+ Handle<Object>::cast(FACTORY->LookupAsciiSymbol("hello")).location()
};
Execution::Call(Handle<JSFunction>::cast(fun1), global, 1, argv,
&has_pending_exception);
InitializeVM();
v8::HandleScope scope;
- Handle<Script> script = Factory::NewScript(Factory::empty_string());
- script->set_source(Heap::undefined_value());
+ Handle<Script> script = FACTORY->NewScript(FACTORY->empty_string());
+ script->set_source(HEAP->undefined_value());
CHECK_EQ(-1, GetScriptLineNumber(script, 0));
CHECK_EQ(-1, GetScriptLineNumber(script, 100));
CHECK_EQ(-1, GetScriptLineNumber(script, -1));
TEST(StartStop) {
CpuProfilesCollection profiles;
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(i::Isolate::Current(), &generator);
processor.Start();
while (!processor.running()) {
i::Thread::YieldCPU();
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(i::Isolate::Current(), &generator);
processor.Start();
while (!processor.running()) {
i::Thread::YieldCPU();
// Enqueue code creation events.
i::HandleScope scope;
const char* aaa_str = "aaa";
- i::Handle<i::String> aaa_name = i::Factory::NewStringFromAscii(
+ i::Handle<i::String> aaa_name = FACTORY->NewStringFromAscii(
i::Vector<const char>(aaa_str, i::StrLength(aaa_str)));
processor.CodeCreateEvent(i::Logger::FUNCTION_TAG,
*aaa_name,
- i::Heap::empty_string(),
+ HEAP->empty_string(),
0,
ToAddress(0x1000),
0x100,
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(i::Isolate::Current(), &generator);
processor.Start();
while (!processor.running()) {
i::Thread::YieldCPU();
using namespace v8::internal;
TEST(BitVector) {
+ v8::internal::V8::Initialize(NULL);
ZoneScope zone(DELETE_ON_EXIT);
{
BitVector v(15);
inline v8::Context* operator*() { return *context_; }
inline bool IsReady() { return !context_.IsEmpty(); }
void ExposeDebug() {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// Expose the debug context global object in the global object for testing.
- Debug::Load();
- Debug::debug_context()->set_security_token(
+ debug->Load();
+ debug->debug_context()->set_security_token(
v8::Utils::OpenHandle(*context_)->security_token());
Handle<JSGlobalProxy> global(Handle<JSGlobalProxy>::cast(
v8::Utils::OpenHandle(*context_->Global())));
Handle<v8::internal::String> debug_string =
- v8::internal::Factory::LookupAsciiSymbol("debug");
+ FACTORY->LookupAsciiSymbol("debug");
SetProperty(global, debug_string,
- Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM,
+ Handle<Object>(debug->debug_context()->global_proxy()), DONT_ENUM,
::v8::internal::kNonStrictMode);
}
private:
static int SetBreakPoint(Handle<v8::internal::JSFunction> fun, int position) {
static int break_point = 0;
Handle<v8::internal::SharedFunctionInfo> shared(fun->shared());
- Debug::SetBreakPoint(
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ debug->SetBreakPoint(
shared,
Handle<Object>(v8::internal::Smi::FromInt(++break_point)),
&position);
// Clear a break point.
static void ClearBreakPoint(int break_point) {
- Debug::ClearBreakPoint(
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ debug->ClearBreakPoint(
Handle<Object>(v8::internal::Smi::FromInt(break_point)));
}
// Change break on exception.
static void ChangeBreakOnException(bool caught, bool uncaught) {
- Debug::ChangeBreakOnException(v8::internal::BreakException, caught);
- Debug::ChangeBreakOnException(v8::internal::BreakUncaughtException, uncaught);
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ debug->ChangeBreakOnException(v8::internal::BreakException, caught);
+ debug->ChangeBreakOnException(v8::internal::BreakUncaughtException, uncaught);
}
// Prepare to step to next break location.
static void PrepareStep(StepAction step_action) {
- Debug::PrepareStep(step_action, 1);
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ debug->PrepareStep(step_action, 1);
}
// Collect the currently debugged functions.
Handle<FixedArray> GetDebuggedFunctions() {
- v8::internal::DebugInfoListNode* node = Debug::debug_info_list_;
+ Debug* debug = Isolate::Current()->debug();
+
+ v8::internal::DebugInfoListNode* node = debug->debug_info_list_;
// Find the number of debugged functions.
int count = 0;
// Allocate array for the debugged functions
Handle<FixedArray> debugged_functions =
- v8::internal::Factory::NewFixedArray(count);
+ FACTORY->NewFixedArray(count);
// Run through the debug info objects and collect all functions.
count = 0;
static Handle<Code> ComputeCallDebugBreak(int argc) {
CALL_HEAP_FUNCTION(
- v8::internal::StubCache::ComputeCallDebugBreak(argc, Code::CALL_IC),
+ v8::internal::Isolate::Current(),
+ v8::internal::Isolate::Current()->stub_cache()->ComputeCallDebugBreak(
+ argc, Code::CALL_IC),
Code);
}
void CheckDebuggerUnloaded(bool check_functions) {
// Check that the debugger context is cleared and that there is no debug
// information stored for the debugger.
- CHECK(Debug::debug_context().is_null());
- CHECK_EQ(NULL, Debug::debug_info_list_);
+ CHECK(Isolate::Current()->debug()->debug_context().is_null());
+ CHECK_EQ(NULL, Isolate::Current()->debug()->debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
- Heap::CollectAllGarbage(false);
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
// Iterate the heap and check that there are no debugger-related objects left.
HeapIterator iterator;
void ForceUnloadDebugger() {
- Debugger::never_unload_debugger_ = false;
- Debugger::UnloadDebugger();
+ Isolate::Current()->debugger()->never_unload_debugger_ = false;
+ Isolate::Current()->debugger()->UnloadDebugger();
}
const char* source, const char* name,
int position, v8::internal::RelocInfo::Mode mode,
Code* debug_break) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+
// Create function and set the break point.
Handle<v8::internal::JSFunction> fun = v8::Utils::OpenHandle(
*CompileFunction(env, source, name));
// Clear the break point and check that the debug break function is no
// longer there.
ClearBreakPoint(bp);
- CHECK(!Debug::HasDebugInfo(shared));
- CHECK(Debug::EnsureDebugInfo(shared));
+ CHECK(!debug->HasDebugInfo(shared));
+ CHECK(debug->EnsureDebugInfo(shared));
TestBreakLocationIterator it2(Debug::GetDebugInfo(shared));
it2.FindBreakLocationFromPosition(position);
CHECK_EQ(mode, it2.it()->rinfo()->rmode());
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
// Count the number of breaks.
if (event == v8::Break) {
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
// Count the number of breaks.
if (event == v8::Break) {
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
if (event == v8::Break) {
for (int i = 0; checks[i].expr != NULL; i++) {
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
if (event == v8::Break) {
break_point_hit_count++;
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
if (event == v8::Break) {
break_point_hit_count++;
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
if (event == v8::Break || event == v8::Exception) {
// Check that the current function is the expected.
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
// Perform a garbage collection when a break point is hit, and continue.
// Based on the number of break points hit, either the scavenger or the
// mark-compact collector is used.
break_point_hit_count++;
if (break_point_hit_count % 2 == 0) {
// Scavenge.
- Heap::CollectGarbage(v8::internal::NEW_SPACE);
+ HEAP->CollectGarbage(v8::internal::NEW_SPACE);
} else {
// Mark sweep compact.
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
}
}
}
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
if (event == v8::Break) {
// Count the number of breaks.
// Run the garbage collector to enforce heap verification if option
// --verify-heap is set.
- Heap::CollectGarbage(v8::internal::NEW_SPACE);
+ HEAP->CollectGarbage(v8::internal::NEW_SPACE);
// Set the break flag again to come back here as soon as possible.
v8::Debug::DebugBreak();
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// When hitting a debug event listener there must be a break set.
- CHECK_NE(v8::internal::Debug::break_id(), 0);
+ CHECK_NE(debug->break_id(), 0);
if (event == v8::Break) {
if (break_point_hit_count < max_break_point_hit_count) {
// of break locations.
TEST(DebugStub) {
using ::v8::internal::Builtins;
+ using ::v8::internal::Isolate;
v8::HandleScope scope;
DebugLocalContext env;
"function f2(){x=1;}", "f2",
0,
v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
- Builtins::builtin(Builtins::StoreIC_DebugBreak));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::StoreIC_DebugBreak));
CheckDebugBreakFunction(&env,
"function f3(){var a=x;}", "f3",
0,
v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
- Builtins::builtin(Builtins::LoadIC_DebugBreak));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_DebugBreak));
// TODO(1240753): Make the test architecture independent or split
// parts of the debugger into architecture dependent files. This
"f4",
0,
v8::internal::RelocInfo::CODE_TARGET,
- Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedStoreIC_DebugBreak));
CheckDebugBreakFunction(
&env,
"function f5(){var index='propertyName'; var a={}; return a[index];}",
"f5",
0,
v8::internal::RelocInfo::CODE_TARGET,
- Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_DebugBreak));
#endif
// Check the debug break code stubs for call ICs with different number of
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
- // Run with breakpoint
+ // Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
- // Run with breakpoint
+ // Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ(1 + i * 3, break_point_hit_count);
// Scavenge and call function.
- Heap::CollectGarbage(v8::internal::NEW_SPACE);
+ HEAP->CollectGarbage(v8::internal::NEW_SPACE);
f->Call(recv, 0, NULL);
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
- Heap::CollectAllGarbage(force_compaction);
+ HEAP->CollectAllGarbage(force_compaction);
f->Call(recv, 0, NULL);
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
}
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
SetScriptBreakPointByNameFromJS("test.html", 3, -1);
DebugLocalContext env;
env.ExposeDebug();
- v8::internal::Top::TraceException(false);
+ v8::internal::Isolate::Current()->TraceException(false);
// Create functions for testing break on exception.
v8::Local<v8::Function> throws =
// For this test, we want to break on uncaught exceptions:
ChangeBreakOnException(false, true);
- v8::internal::Top::TraceException(false);
+ v8::internal::Isolate::Current()->TraceException(false);
// Create a function for checking the function when hitting a break point.
frame_count = CompileFunction(&env, frame_count_source, "frame_count");
// placing JSON debugger commands in the queue.
class MessageQueueDebuggerThread : public v8::internal::Thread {
public:
+ explicit MessageQueueDebuggerThread(v8::internal::Isolate* isolate)
+ : Thread(isolate) { }
void Run();
};
// Main thread continues running source_3 to end, waits for this thread.
}
-MessageQueueDebuggerThread message_queue_debugger_thread;
// This thread runs the v8 engine.
TEST(MessageQueues) {
+ MessageQueueDebuggerThread message_queue_debugger_thread(
+ i::Isolate::Current());
+
// Create a V8 environment
v8::HandleScope scope;
DebugLocalContext env;
class V8Thread : public v8::internal::Thread {
public:
+ explicit V8Thread(v8::internal::Isolate* isolate) : Thread(isolate) { }
void Run();
};
class DebuggerThread : public v8::internal::Thread {
public:
+ explicit DebuggerThread(v8::internal::Isolate* isolate) : Thread(isolate) { }
void Run();
};
v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer));
}
-DebuggerThread debugger_thread;
-V8Thread v8_thread;
TEST(ThreadedDebugging) {
+ DebuggerThread debugger_thread(i::Isolate::Current());
+ V8Thread v8_thread(i::Isolate::Current());
+
// Create a V8 environment
threaded_debugging_barriers.Initialize();
class BreakpointsV8Thread : public v8::internal::Thread {
public:
+ explicit BreakpointsV8Thread(v8::internal::Isolate* isolate)
+ : Thread(isolate) { }
void Run();
};
class BreakpointsDebuggerThread : public v8::internal::Thread {
public:
- explicit BreakpointsDebuggerThread(bool global_evaluate)
- : global_evaluate_(global_evaluate) {}
+ explicit BreakpointsDebuggerThread(v8::internal::Isolate* isolate,
+ bool global_evaluate)
+ : Thread(isolate), global_evaluate_(global_evaluate) {}
void Run();
private:
void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
i::FLAG_debugger_auto_break = true;
- BreakpointsDebuggerThread breakpoints_debugger_thread(global_evaluate);
- BreakpointsV8Thread breakpoints_v8_thread;
+ BreakpointsDebuggerThread breakpoints_debugger_thread(i::Isolate::Current(),
+ global_evaluate);
+ BreakpointsV8Thread breakpoints_v8_thread(i::Isolate::Current());
// Create a V8 environment
Barriers stack_allocated_breakpoints_barriers;
class HostDispatchV8Thread : public v8::internal::Thread {
public:
+ explicit HostDispatchV8Thread(v8::internal::Isolate* isolate)
+ : Thread(isolate) { }
void Run();
};
class HostDispatchDebuggerThread : public v8::internal::Thread {
public:
+ explicit HostDispatchDebuggerThread(v8::internal::Isolate* isolate)
+ : Thread(isolate) { }
void Run();
};
v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer));
}
-HostDispatchDebuggerThread host_dispatch_debugger_thread;
-HostDispatchV8Thread host_dispatch_v8_thread;
-
TEST(DebuggerHostDispatch) {
+ HostDispatchDebuggerThread host_dispatch_debugger_thread(
+ i::Isolate::Current());
+ HostDispatchV8Thread host_dispatch_v8_thread(i::Isolate::Current());
i::FLAG_debugger_auto_break = true;
// Create a V8 environment
class DebugMessageDispatchV8Thread : public v8::internal::Thread {
public:
+ explicit DebugMessageDispatchV8Thread(v8::internal::Isolate* isolate)
+ : Thread(isolate) { }
void Run();
};
class DebugMessageDispatchDebuggerThread : public v8::internal::Thread {
public:
+ explicit DebugMessageDispatchDebuggerThread(v8::internal::Isolate* isolate)
+ : Thread(isolate) { }
void Run();
};
debug_message_dispatch_barriers->barrier_2.Wait();
}
-DebugMessageDispatchDebuggerThread debug_message_dispatch_debugger_thread;
-DebugMessageDispatchV8Thread debug_message_dispatch_v8_thread;
-
TEST(DebuggerDebugMessageDispatch) {
+ DebugMessageDispatchDebuggerThread debug_message_dispatch_debugger_thread(
+ i::Isolate::Current());
+ DebugMessageDispatchV8Thread debug_message_dispatch_v8_thread(
+ i::Isolate::Current());
+
i::FLAG_debugger_auto_break = true;
// Create a V8 environment
TEST(DebuggerAgent) {
+ i::Debugger* debugger = i::Isolate::Current()->debugger();
// Make sure these ports are not used by other tests, to allow the tests to
// run in parallel.
const int kPort1 = 5858;
i::Socket::Setup();
// Test starting and stopping the agent without any client connection.
- i::Debugger::StartAgent("test", kPort1);
- i::Debugger::StopAgent();
+ debugger->StartAgent("test", kPort1);
+ debugger->StopAgent();
// Test starting the agent, connecting a client and shutting down the agent
// with the client connected.
- ok = i::Debugger::StartAgent("test", kPort2);
+ ok = debugger->StartAgent("test", kPort2);
CHECK(ok);
- i::Debugger::WaitForAgent();
+ debugger->WaitForAgent();
i::Socket* client = i::OS::CreateSocket();
ok = client->Connect("localhost", port2_str);
CHECK(ok);
- i::Debugger::StopAgent();
+ debugger->StopAgent();
delete client;
// Test starting and stopping the agent with the required port already
i::Socket* server = i::OS::CreateSocket();
server->Bind(kPort3);
- i::Debugger::StartAgent("test", kPort3);
- i::Debugger::StopAgent();
+ debugger->StartAgent("test", kPort3);
+ debugger->StopAgent();
delete server;
}
class DebuggerAgentProtocolServerThread : public i::Thread {
public:
- explicit DebuggerAgentProtocolServerThread(int port)
- : port_(port), server_(NULL), client_(NULL),
+ explicit DebuggerAgentProtocolServerThread(i::Isolate* isolate, int port)
+ : Thread(isolate), port_(port), server_(NULL), client_(NULL),
listening_(OS::CreateSemaphore(0)) {
}
~DebuggerAgentProtocolServerThread() {
// Create a socket server to receive a debugger agent message.
DebuggerAgentProtocolServerThread* server =
- new DebuggerAgentProtocolServerThread(kPort);
+ new DebuggerAgentProtocolServerThread(i::Isolate::Current(), kPort);
server->Start();
server->WaitForListening();
// Test that scripts collected are reported through the debug event listener.
TEST(ScriptCollectedEvent) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
break_point_hit_count = 0;
script_collected_count = 0;
v8::HandleScope scope;
DebugLocalContext env;
// Request the loaded scripts to initialize the debugger script cache.
- Debug::GetLoadedScripts();
+ debug->GetLoadedScripts();
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
script_collected_count = 0;
v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
// Do garbage collection to collect the script above which is no longer
// referenced.
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, script_collected_count);
// Test that GetEventContext doesn't fail and return empty handle for
// ScriptCollected events.
TEST(ScriptCollectedEventContext) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
script_collected_message_count = 0;
v8::HandleScope scope;
DebugLocalContext env;
// Request the loaded scripts to initialize the debugger script cache.
- Debug::GetLoadedScripts();
+ debug->GetLoadedScripts();
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
{
// Do garbage collection to collect the script above which is no longer
// referenced.
- Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, script_collected_message_count);
static void BreakMessageHandler(const v8::Debug::Message& message) {
+ i::Isolate* isolate = i::Isolate::Current();
if (message.IsEvent() && message.GetEvent() == v8::Break) {
// Count the number of breaks.
break_point_hit_count++;
} else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
v8::HandleScope scope;
- bool is_debug_break = i::StackGuard::IsDebugBreak();
+ bool is_debug_break = isolate->stack_guard()->IsDebugBreak();
// Force DebugBreak flag while serializer is working.
- i::StackGuard::DebugBreak();
+ isolate->stack_guard()->DebugBreak();
// Force serialization to trigger some internal JS execution.
v8::Handle<v8::String> json = message.GetJSON();
// Restore previous state.
if (is_debug_break) {
- i::StackGuard::DebugBreak();
+ isolate->stack_guard()->DebugBreak();
} else {
- i::StackGuard::Continue(i::DEBUGBREAK);
+ isolate->stack_guard()->Continue(i::DEBUGBREAK);
}
}
}
TEST(CallingContextIsNotDebugContext) {
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// Create and enter a debugee context.
v8::HandleScope scope;
DebugLocalContext env;
// Save handles to the debugger and debugee contexts to be used in
// NamedGetterWithCallingContextCheck.
debugee_context = v8::Local<v8::Context>(*env);
- debugger_context = v8::Utils::ToLocal(Debug::debug_context());
+ debugger_context = v8::Utils::ToLocal(debug->debug_context());
// Create object with 'a' property accessor.
v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
InitializeIfNeeded();
// A retry after a GC may pollute the counts, so perform gc now
// to avoid that.
- v8::internal::Heap::CollectGarbage(v8::internal::NEW_SPACE);
+ HEAP->CollectGarbage(v8::internal::NEW_SPACE);
HandleScope scope;
TryCatch catcher;
catcher.SetVerbose(true);
#include "v8.h"
#include "api.h"
+#include "cctest.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
+#include "isolate.h"
#include "platform.h"
#include "stub-cache.h"
-#include "cctest.h"
-
-using ::v8::internal::Handle;
-using ::v8::internal::Object;
-using ::v8::internal::JSFunction;
using ::v8::internal::Deoptimizer;
using ::v8::internal::EmbeddedVector;
+using ::v8::internal::Handle;
+using ::v8::internal::Isolate;
+using ::v8::internal::JSFunction;
using ::v8::internal::OS;
+using ::v8::internal::Object;
// Size of temp buffer for formatting small strings.
#define SMALL_STRING_BUFFER_SIZE 80
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
// Test lazy deoptimization of a simple function. Call the function after the
// deoptimization while it is still activated further down the stack.
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
// Test lazy deoptimization of a simple function with some arguments. Call the
// function after the deoptimization while it is still activated further down
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
{
AlwaysOptimizeAllowNativesSyntaxNoInlining options;
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK(result->IsString());
v8::String::AsciiValue ascii(result);
CHECK_EQ("a+an X", *ascii);
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
"13a06000 movne r6, #0");
// mov -> movw.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
COMPARE(mov(r5, Operand(0x01234), LeaveCC, ne),
"13015234 movwne r5, #4660");
// We only disassemble one instruction so the eor instruction is not here.
TEST(Type3) {
SETUP();
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
COMPARE(ubfx(r0, r1, 5, 10),
"e7e902d1 ubfx r0, r1, #5, #10");
COMPARE(ubfx(r1, r0, 5, 10),
TEST(Vfp) {
SETUP();
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
COMPARE(vmov(d0, d1),
"eeb00b41 vmov.f64 d0, d1");
__ sub(Operand(eax), Immediate(12345678));
__ xor_(eax, 12345678);
__ and_(eax, 12345678);
- Handle<FixedArray> foo = Factory::NewFixedArray(10, TENURED);
+ Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
__ cmp(eax, foo);
// ---- This one caused crash
__ cmp(edx, 3);
__ cmp(edx, Operand(esp, 4));
__ cmp(Operand(ebp, ecx, times_4, 0), Immediate(1000));
- Handle<FixedArray> foo2 = Factory::NewFixedArray(10, TENURED);
+ Handle<FixedArray> foo2 = FACTORY->NewFixedArray(10, TENURED);
__ cmp(ebx, foo2);
__ cmpb(ebx, Operand(ebp, ecx, times_2, 0));
__ cmpb(Operand(ebp, ecx, times_2, 0), ebx);
__ xor_(edx, 3);
__ nop();
{
- CHECK(CpuFeatures::IsSupported(CPUID));
+ CHECK(Isolate::Current()->cpu_features()->IsSupported(CPUID));
CpuFeatures::Scope fscope(CPUID);
__ cpuid();
}
{
- CHECK(CpuFeatures::IsSupported(RDTSC));
+ CHECK(Isolate::Current()->cpu_features()->IsSupported(RDTSC));
CpuFeatures::Scope fscope(RDTSC);
__ rdtsc();
}
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
__ j(zero, &Ljcc, taken);
__ j(zero, &Ljcc, not_taken);
- // __ mov(Operand::StaticVariable(Top::handler_address()), eax);
+ // __ mov(Operand::StaticVariable(Isolate::handler_address()), eax);
// 0xD9 instructions
__ nop();
__ fwait();
__ nop();
{
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
// cmov.
{
- if (CpuFeatures::IsSupported(CMOV)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
CpuFeatures::Scope use_cmov(CMOV);
__ cmov(overflow, eax, Operand(eax, 0));
__ cmov(no_overflow, eax, Operand(eax, 1));
// andpd, cmpltsd, movaps, psllq, psrlq, por.
{
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ andpd(xmm0, xmm1);
__ andpd(xmm1, xmm2);
}
{
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
__ pextrd(Operand(eax), xmm0, 1);
__ pinsrd(xmm1, Operand(eax), 0);
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
using ::v8::internal::Factory;
using ::v8::internal::Handle;
using ::v8::internal::Heap;
+using ::v8::internal::Isolate;
using ::v8::internal::JSFunction;
using ::v8::internal::Object;
using ::v8::internal::Runtime;
// Find the position of a given func source substring in the source.
Handle<String> func_pos_str =
- Factory::NewStringFromAscii(CStrVector(func_pos_src));
- int func_pos = Runtime::StringMatch(script_src, func_pos_str, 0);
+ FACTORY->NewStringFromAscii(CStrVector(func_pos_src));
+ int func_pos = Runtime::StringMatch(Isolate::Current(),
+ script_src,
+ func_pos_str,
+ 0);
CHECK_NE(0, func_pos);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Obtain SharedFunctionInfo for the function.
Object* shared_func_info_ptr =
- Runtime::FindSharedFunctionInfoInScript(i_script, func_pos);
- CHECK(shared_func_info_ptr != Heap::undefined_value());
+ Runtime::FindSharedFunctionInfoInScript(Isolate::Current(),
+ i_script,
+ func_pos);
+ CHECK(shared_func_info_ptr != HEAP->undefined_value());
Handle<SharedFunctionInfo> shared_func_info(
SharedFunctionInfo::cast(shared_func_info_ptr));
public:
ConstructorHeapProfileTestHelper()
: i::ConstructorHeapProfile(),
- f_name_(i::Factory::NewStringFromAscii(i::CStrVector("F"))),
+ f_name_(FACTORY->NewStringFromAscii(i::CStrVector("F"))),
f_count_(0) {
}
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
JSObjectsRetainerTree tree;
- JSObjectsCluster function(i::Heap::function_class_symbol());
- JSObjectsCluster a(*i::Factory::NewStringFromAscii(i::CStrVector("A")));
- JSObjectsCluster b(*i::Factory::NewStringFromAscii(i::CStrVector("B")));
+ JSObjectsCluster function(HEAP->function_class_symbol());
+ JSObjectsCluster a(*FACTORY->NewStringFromAscii(i::CStrVector("A")));
+ JSObjectsCluster b(*FACTORY->NewStringFromAscii(i::CStrVector("B")));
// o1 <- Function
JSObjectsCluster o1 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100, &function);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100, &function);
// o2 <- Function
JSObjectsCluster o2 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x200, &function);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x200, &function);
// o3 <- A, B
JSObjectsCluster o3 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &a, &b);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x300, &a, &b);
// o4 <- B, A
JSObjectsCluster o4 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x400, &b, &a);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x400, &b, &a);
// o5 <- A, B, Function
JSObjectsCluster o5 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x500,
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x500,
&a, &b, &function);
ClustersCoarser coarser;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
JSObjectsRetainerTree tree;
- JSObjectsCluster function(i::Heap::function_class_symbol());
+ JSObjectsCluster function(HEAP->function_class_symbol());
// o1 <- Function
JSObjectsCluster o1 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100, &function);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100, &function);
// a1 <- Function
JSObjectsCluster a1 =
- AddHeapObjectToTree(&tree, i::Heap::Array_symbol(), 0x1000, &function);
+ AddHeapObjectToTree(&tree, HEAP->Array_symbol(), 0x1000, &function);
// o2 <- Function
JSObjectsCluster o2 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x200, &function);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x200, &function);
// a2 <- Function
JSObjectsCluster a2 =
- AddHeapObjectToTree(&tree, i::Heap::Array_symbol(), 0x2000, &function);
+ AddHeapObjectToTree(&tree, HEAP->Array_symbol(), 0x2000, &function);
ClustersCoarser coarser;
coarser.Process(&tree);
// o21 ~ o22, and o11 ~ o12.
JSObjectsCluster o =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100);
JSObjectsCluster o11 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x110, &o);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x110, &o);
JSObjectsCluster o12 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x120, &o);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x120, &o);
JSObjectsCluster o21 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x210, &o11);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x210, &o11);
JSObjectsCluster o22 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x220, &o12);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x220, &o12);
JSObjectsCluster p =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &o21);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x300, &o21);
JSObjectsCluster q =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x310, &o21, &o22);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x310, &o21, &o22);
JSObjectsCluster r =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x320, &o22);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x320, &o22);
ClustersCoarser coarser;
coarser.Process(&tree);
// we expect that coarser will deduce equivalences: p ~ q ~ r, o1 ~ o2;
JSObjectsCluster o =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x100);
JSObjectsCluster o1 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x110, &o);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x110, &o);
JSObjectsCluster o2 =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x120, &o);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x120, &o);
JSObjectsCluster p =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &o1);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x300, &o1);
AddSelfReferenceToTree(&tree, &p);
JSObjectsCluster q =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x310, &o1, &o2);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x310, &o1, &o2);
AddSelfReferenceToTree(&tree, &q);
JSObjectsCluster r =
- AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x320, &o2);
+ AddHeapObjectToTree(&tree, HEAP->Object_symbol(), 0x320, &o2);
AddSelfReferenceToTree(&tree, &r);
ClustersCoarser coarser;
const v8::HeapSnapshot* snapshot1 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("s1"));
- i::Heap::CollectAllGarbage(true); // Enforce compaction.
+ HEAP->CollectAllGarbage(true); // Enforce compaction.
const v8::HeapSnapshot* snapshot2 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("s2"));
static void CheckMap(Map* map, int type, int instance_size) {
CHECK(map->IsHeapObject());
#ifdef DEBUG
- CHECK(Heap::Contains(map));
+ CHECK(HEAP->Contains(map));
#endif
- CHECK_EQ(Heap::meta_map(), map->map());
+ CHECK_EQ(HEAP->meta_map(), map->map());
CHECK_EQ(type, map->instance_type());
CHECK_EQ(instance_size, map->instance_size());
}
TEST(HeapMaps) {
InitializeVM();
- CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
- CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
- CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- CheckMap(Heap::string_map(), STRING_TYPE, kVariableSizeSentinel);
+ CheckMap(HEAP->meta_map(), MAP_TYPE, Map::kSize);
+ CheckMap(HEAP->heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
+ CheckMap(HEAP->fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ CheckMap(HEAP->string_map(), STRING_TYPE, kVariableSizeSentinel);
}
static void CheckNumber(double value, const char* string) {
- Object* obj = Heap::NumberFromDouble(value)->ToObjectChecked();
+ Object* obj = HEAP->NumberFromDouble(value)->ToObjectChecked();
CHECK(obj->IsNumber());
bool exc;
Object* print_string = *Execution::ToString(Handle<Object>(obj), &exc);
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
+ Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(code->IsCode());
HeapObject* obj = HeapObject::cast(code);
Address obj_addr = obj->address();
for (int i = 0; i < obj->Size(); i += kPointerSize) {
- Object* found = Heap::FindCodeObject(obj_addr + i);
+ Object* found = HEAP->FindCodeObject(obj_addr + i);
CHECK_EQ(code, found);
}
- Object* copy = Heap::CreateCode(
+ Object* copy = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
CHECK(copy->IsCode());
HeapObject* obj_copy = HeapObject::cast(copy);
- Object* not_right = Heap::FindCodeObject(obj_copy->address() +
+ Object* not_right = HEAP->FindCodeObject(obj_copy->address() +
obj_copy->Size() / 2);
CHECK(not_right != code);
}
InitializeVM();
v8::HandleScope sc;
- Object* value = Heap::NumberFromDouble(1.000123)->ToObjectChecked();
+ Object* value = HEAP->NumberFromDouble(1.000123)->ToObjectChecked();
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(1.000123, value->Number());
- value = Heap::NumberFromDouble(1.0)->ToObjectChecked();
+ value = HEAP->NumberFromDouble(1.0)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(1.0, value->Number());
- value = Heap::NumberFromInt32(1024)->ToObjectChecked();
+ value = HEAP->NumberFromInt32(1024)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(1024.0, value->Number());
- value = Heap::NumberFromInt32(Smi::kMinValue)->ToObjectChecked();
+ value = HEAP->NumberFromInt32(Smi::kMinValue)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMinValue, Smi::cast(value)->value());
- value = Heap::NumberFromInt32(Smi::kMaxValue)->ToObjectChecked();
+ value = HEAP->NumberFromInt32(Smi::kMaxValue)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
#ifndef V8_TARGET_ARCH_X64
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
- value = Heap::NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
+ value = HEAP->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(static_cast<double>(Smi::kMinValue - 1), value->Number());
#endif
MaybeObject* maybe_value =
- Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
+ HEAP->NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
value = maybe_value->ToObjectChecked();
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
value->Number());
// nan oddball checks
- CHECK(Heap::nan_value()->IsNumber());
- CHECK(isnan(Heap::nan_value()->Number()));
+ CHECK(HEAP->nan_value()->IsNumber());
+ CHECK(isnan(HEAP->nan_value()->Number()));
- Handle<String> s = Factory::NewStringFromAscii(CStrVector("fisk hest "));
+ Handle<String> s = FACTORY->NewStringFromAscii(CStrVector("fisk hest "));
CHECK(s->IsString());
CHECK_EQ(10, s->length());
- String* object_symbol = String::cast(Heap::Object_symbol());
- CHECK(Top::context()->global()->HasLocalProperty(object_symbol));
+ String* object_symbol = String::cast(HEAP->Object_symbol());
+ CHECK(
+ Isolate::Current()->context()->global()->HasLocalProperty(object_symbol));
// Check ToString for oddballs
- CheckOddball(Heap::true_value(), "true");
- CheckOddball(Heap::false_value(), "false");
- CheckOddball(Heap::null_value(), "null");
- CheckOddball(Heap::undefined_value(), "undefined");
+ CheckOddball(HEAP->true_value(), "true");
+ CheckOddball(HEAP->false_value(), "false");
+ CheckOddball(HEAP->null_value(), "null");
+ CheckOddball(HEAP->undefined_value(), "undefined");
// Check ToString for Smis
CheckSmi(0, "0");
v8::HandleScope sc;
// Check GC.
- Heap::CollectGarbage(NEW_SPACE);
+ HEAP->CollectGarbage(NEW_SPACE);
- Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
- Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
- Handle<String> prop_namex = Factory::LookupAsciiSymbol("theSlotx");
- Handle<String> obj_name = Factory::LookupAsciiSymbol("theObject");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
+ Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
+ Handle<String> prop_namex = FACTORY->LookupAsciiSymbol("theSlotx");
+ Handle<String> obj_name = FACTORY->LookupAsciiSymbol("theObject");
{
v8::HandleScope inner_scope;
// Allocate a function and keep it in global object's property.
Handle<JSFunction> function =
- Factory::NewFunction(name, Factory::undefined_value());
+ FACTORY->NewFunction(name, FACTORY->undefined_value());
Handle<Map> initial_map =
- Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Top::context()->global()->SetProperty(
+ Isolate::Current()->context()->global()->SetProperty(
*name, *function, NONE, kNonStrictMode)->ToObjectChecked();
// Allocate an object. Unrooted after leaving the scope.
- Handle<JSObject> obj = Factory::NewJSObject(function);
+ Handle<JSObject> obj = FACTORY->NewJSObject(function);
obj->SetProperty(
*prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
obj->SetProperty(
CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
}
- Heap::CollectGarbage(NEW_SPACE);
+ HEAP->CollectGarbage(NEW_SPACE);
// Function should be alive.
- CHECK(Top::context()->global()->HasLocalProperty(*name));
+ CHECK(Isolate::Current()->context()->global()->HasLocalProperty(*name));
// Check function is retained.
- Object* func_value =
- Top::context()->global()->GetProperty(*name)->ToObjectChecked();
+ Object* func_value = Isolate::Current()->context()->global()->
+ GetProperty(*name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
{
HandleScope inner_scope;
// Allocate another object, make it reachable from global.
- Handle<JSObject> obj = Factory::NewJSObject(function);
- Top::context()->global()->SetProperty(
+ Handle<JSObject> obj = FACTORY->NewJSObject(function);
+ Isolate::Current()->context()->global()->SetProperty(
*obj_name, *obj, NONE, kNonStrictMode)->ToObjectChecked();
obj->SetProperty(
*prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
}
// After gc, it should survive.
- Heap::CollectGarbage(NEW_SPACE);
+ HEAP->CollectGarbage(NEW_SPACE);
- CHECK(Top::context()->global()->HasLocalProperty(*obj_name));
- CHECK(Top::context()->global()->GetProperty(*obj_name)->ToObjectChecked()->
- IsJSObject());
- Object* obj =
- Top::context()->global()->GetProperty(*obj_name)->ToObjectChecked();
+ CHECK(Isolate::Current()->context()->global()->HasLocalProperty(*obj_name));
+ CHECK(Isolate::Current()->context()->global()->
+ GetProperty(*obj_name)->ToObjectChecked()->IsJSObject());
+ Object* obj = Isolate::Current()->context()->global()->
+ GetProperty(*obj_name)->ToObjectChecked();
JSObject* js_obj = JSObject::cast(obj);
CHECK_EQ(Smi::FromInt(23), js_obj->GetProperty(*prop_name));
}
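// The static Top class is folded into Isolate (note the -#include "top.h"
// hunk further down), so every former Top::context() access becomes a lookup
// through the current isolate:
//
//   // old: static, process-wide Top
//   Top::context()->global()->GetProperty(*name)->ToObjectChecked();
//   // new: Isolate::Current() returns the isolate entered on this thread
//   Isolate::Current()->context()->global()->
//       GetProperty(*name)->ToObjectChecked();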
static void VerifyStringAllocation(const char* string) {
v8::HandleScope scope;
- Handle<String> s = Factory::NewStringFromUtf8(CStrVector(string));
+ Handle<String> s = FACTORY->NewStringFromUtf8(CStrVector(string));
CHECK_EQ(StrLength(string), s->length());
for (int index = 0; index < s->length(); index++) {
CHECK_EQ(static_cast<uint16_t>(string[index]), s->Get(index));
v8::HandleScope scope;
const char* name = "Kasper the spunky";
- Handle<String> string = Factory::NewStringFromAscii(CStrVector(name));
+ Handle<String> string = FACTORY->NewStringFromAscii(CStrVector(name));
CHECK_EQ(StrLength(name), string->length());
}
TEST(GlobalHandles) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
InitializeVM();
Handle<Object> h1;
{
HandleScope scope;
- Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
- Handle<Object> u = Factory::NewNumber(1.12344);
+ Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = FACTORY->NewNumber(1.12344);
- h1 = GlobalHandles::Create(*i);
- h2 = GlobalHandles::Create(*u);
- h3 = GlobalHandles::Create(*i);
- h4 = GlobalHandles::Create(*u);
+ h1 = global_handles->Create(*i);
+ h2 = global_handles->Create(*u);
+ h3 = global_handles->Create(*i);
+ h4 = global_handles->Create(*u);
}
  // After gc, the handles should survive.
- Heap::CollectGarbage(NEW_SPACE);
+ HEAP->CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
CHECK((*h4)->IsHeapNumber());
CHECK_EQ(*h3, *h1);
- GlobalHandles::Destroy(h1.location());
- GlobalHandles::Destroy(h3.location());
+ global_handles->Destroy(h1.location());
+ global_handles->Destroy(h3.location());
CHECK_EQ(*h4, *h2);
- GlobalHandles::Destroy(h2.location());
- GlobalHandles::Destroy(h4.location());
+ global_handles->Destroy(h2.location());
+ global_handles->Destroy(h4.location());
}
TEST(WeakGlobalHandlesScavenge) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
InitializeVM();
WeakPointerCleared = false;
{
HandleScope scope;
- Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
- Handle<Object> u = Factory::NewNumber(1.12344);
+ Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = FACTORY->NewNumber(1.12344);
- h1 = GlobalHandles::Create(*i);
- h2 = GlobalHandles::Create(*u);
+ h1 = global_handles->Create(*i);
+ h2 = global_handles->Create(*u);
}
- GlobalHandles::MakeWeak(h2.location(),
- reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback);
+ global_handles->MakeWeak(h2.location(),
+ reinterpret_cast<void*>(1234),
+ &TestWeakGlobalHandleCallback);
// Scavenge treats weak pointers as normal roots.
- Heap::PerformScavenge();
+ HEAP->PerformScavenge();
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
CHECK(!WeakPointerCleared);
- CHECK(!GlobalHandles::IsNearDeath(h2.location()));
- CHECK(!GlobalHandles::IsNearDeath(h1.location()));
+ CHECK(!global_handles->IsNearDeath(h2.location()));
+ CHECK(!global_handles->IsNearDeath(h1.location()));
- GlobalHandles::Destroy(h1.location());
- GlobalHandles::Destroy(h2.location());
+ global_handles->Destroy(h1.location());
+ global_handles->Destroy(h2.location());
}
TEST(WeakGlobalHandlesMark) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
InitializeVM();
WeakPointerCleared = false;
{
HandleScope scope;
- Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
- Handle<Object> u = Factory::NewNumber(1.12344);
+ Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = FACTORY->NewNumber(1.12344);
- h1 = GlobalHandles::Create(*i);
- h2 = GlobalHandles::Create(*u);
+ h1 = global_handles->Create(*i);
+ h2 = global_handles->Create(*u);
}
- Heap::CollectGarbage(OLD_POINTER_SPACE);
- Heap::CollectGarbage(NEW_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(NEW_SPACE);
// Make sure the object is promoted.
- GlobalHandles::MakeWeak(h2.location(),
- reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback);
+ global_handles->MakeWeak(h2.location(),
+ reinterpret_cast<void*>(1234),
+ &TestWeakGlobalHandleCallback);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
CHECK((*h1)->IsString());
CHECK(WeakPointerCleared);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
- GlobalHandles::Destroy(h1.location());
+ global_handles->Destroy(h1.location());
}
TEST(DeleteWeakGlobalHandle) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
InitializeVM();
WeakPointerCleared = false;
{
HandleScope scope;
- Handle<Object> i = Factory::NewStringFromAscii(CStrVector("fisk"));
- h = GlobalHandles::Create(*i);
+ Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
+ h = global_handles->Create(*i);
}
- GlobalHandles::MakeWeak(h.location(),
- reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback);
+ global_handles->MakeWeak(h.location(),
+ reinterpret_cast<void*>(1234),
+ &TestWeakGlobalHandleCallback);
  // Scavenge does not recognize weak references.
- Heap::PerformScavenge();
+ HEAP->PerformScavenge();
CHECK(!WeakPointerCleared);
  // Mark-compact treats weak references properly.
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
CHECK(WeakPointerCleared);
}
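// The weak-handle tests above rely on WeakPointerCleared and
// TestWeakGlobalHandleCallback, defined elsewhere in this test file. A
// plausible sketch, inferred from how the tests use them (the 1234 parameter
// matches the MakeWeak calls above):
//
//   static bool WeakPointerCleared = false;
//
//   static void TestWeakGlobalHandleCallback(v8::Persistent<v8::Value> handle,
//                                            void* id) {
//     // Record that the GC ran the weak callback, then release the handle.
//     if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
//     handle.Dispose();
//   }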
static void CheckSymbols(const char** strings) {
for (const char* string = *strings; *strings != 0; string = *strings++) {
Object* a;
- MaybeObject* maybe_a = Heap::LookupAsciiSymbol(string);
+ MaybeObject* maybe_a = HEAP->LookupAsciiSymbol(string);
// LookupAsciiSymbol may return a failure if a GC is needed.
if (!maybe_a->ToObject(&a)) continue;
CHECK(a->IsSymbol());
Object* b;
- MaybeObject *maybe_b = Heap::LookupAsciiSymbol(string);
+ MaybeObject* maybe_b = HEAP->LookupAsciiSymbol(string);
if (!maybe_b->ToObject(&b)) continue;
CHECK_EQ(b, a);
CHECK(String::cast(b)->IsEqualTo(CStrVector(string)));
InitializeVM();
v8::HandleScope sc;
- Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
Handle<JSFunction> function =
- Factory::NewFunction(name, Factory::undefined_value());
+ FACTORY->NewFunction(name, FACTORY->undefined_value());
Handle<Map> initial_map =
- Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
- Handle<JSObject> obj = Factory::NewJSObject(function);
+ Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
+ Handle<JSObject> obj = FACTORY->NewJSObject(function);
obj->SetProperty(
*prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
InitializeVM();
v8::HandleScope sc;
- String* object_symbol = String::cast(Heap::Object_symbol());
- Object* raw_object =
- Top::context()->global()->GetProperty(object_symbol)->ToObjectChecked();
+ String* object_symbol = String::cast(HEAP->Object_symbol());
+ Object* raw_object = Isolate::Current()->context()->global()->
+ GetProperty(object_symbol)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
- Handle<JSObject> obj = Factory::NewJSObject(constructor);
- Handle<String> first = Factory::LookupAsciiSymbol("first");
- Handle<String> second = Factory::LookupAsciiSymbol("second");
+ Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
+ Handle<String> first = FACTORY->LookupAsciiSymbol("first");
+ Handle<String> second = FACTORY->LookupAsciiSymbol("second");
// check for empty
CHECK(!obj->HasLocalProperty(*first));
// check string and symbol match
static const char* string1 = "fisk";
- Handle<String> s1 = Factory::NewStringFromAscii(CStrVector(string1));
+ Handle<String> s1 = FACTORY->NewStringFromAscii(CStrVector(string1));
obj->SetProperty(
*s1, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- Handle<String> s1_symbol = Factory::LookupAsciiSymbol(string1);
+ Handle<String> s1_symbol = FACTORY->LookupAsciiSymbol(string1);
CHECK(obj->HasLocalProperty(*s1_symbol));
// check symbol and string match
static const char* string2 = "fugl";
- Handle<String> s2_symbol = Factory::LookupAsciiSymbol(string2);
+ Handle<String> s2_symbol = FACTORY->LookupAsciiSymbol(string2);
obj->SetProperty(
*s2_symbol, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- Handle<String> s2 = Factory::NewStringFromAscii(CStrVector(string2));
+ Handle<String> s2 = FACTORY->NewStringFromAscii(CStrVector(string2));
CHECK(obj->HasLocalProperty(*s2));
}
InitializeVM();
v8::HandleScope sc;
- Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
Handle<JSFunction> function =
- Factory::NewFunction(name, Factory::undefined_value());
+ FACTORY->NewFunction(name, FACTORY->undefined_value());
Handle<Map> initial_map =
- Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
- Handle<JSObject> obj = Factory::NewJSObject(function);
+ Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
+ Handle<JSObject> obj = FACTORY->NewJSObject(function);
  // Set a property.
obj->SetProperty(
InitializeVM();
v8::HandleScope sc;
- Handle<String> name = Factory::LookupAsciiSymbol("Array");
- Object* raw_object =
- Top::context()->global()->GetProperty(*name)->ToObjectChecked();
+ Handle<String> name = FACTORY->LookupAsciiSymbol("Array");
+ Object* raw_object = Isolate::Current()->context()->global()->
+ GetProperty(*name)->ToObjectChecked();
Handle<JSFunction> function = Handle<JSFunction>(
JSFunction::cast(raw_object));
// Allocate the object.
- Handle<JSObject> object = Factory::NewJSObject(function);
+ Handle<JSObject> object = FACTORY->NewJSObject(function);
Handle<JSArray> array = Handle<JSArray>::cast(object);
  // We just initialized the VM, so no heap allocation failure yet.
Object* ok = array->Initialize(0)->ToObjectChecked();
// Set array length with larger than smi value.
Handle<Object> length =
- Factory::NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
+ FACTORY->NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
ok = array->SetElementsLength(*length)->ToObjectChecked();
uint32_t int_length = 0;
InitializeVM();
v8::HandleScope sc;
- String* object_symbol = String::cast(Heap::Object_symbol());
- Object* raw_object =
- Top::context()->global()->GetProperty(object_symbol)->ToObjectChecked();
+ String* object_symbol = String::cast(HEAP->Object_symbol());
+ Object* raw_object = Isolate::Current()->context()->global()->
+ GetProperty(object_symbol)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
- Handle<JSObject> obj = Factory::NewJSObject(constructor);
- Handle<String> first = Factory::LookupAsciiSymbol("first");
- Handle<String> second = Factory::LookupAsciiSymbol("second");
+ Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
+ Handle<String> first = FACTORY->LookupAsciiSymbol("first");
+ Handle<String> second = FACTORY->LookupAsciiSymbol("second");
obj->SetProperty(
*first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
non_ascii[3 * i + 2] = chars[2];
}
Handle<String> non_ascii_sym =
- Factory::LookupSymbol(Vector<const char>(non_ascii, 3 * length));
+ FACTORY->LookupSymbol(Vector<const char>(non_ascii, 3 * length));
CHECK_EQ(length, non_ascii_sym->length());
Handle<String> ascii_sym =
- Factory::LookupSymbol(Vector<const char>(ascii, length));
+ FACTORY->LookupSymbol(Vector<const char>(ascii, length));
CHECK_EQ(length, ascii_sym->length());
Handle<String> non_ascii_str =
- Factory::NewStringFromUtf8(Vector<const char>(non_ascii, 3 * length));
+ FACTORY->NewStringFromUtf8(Vector<const char>(non_ascii, 3 * length));
non_ascii_str->Hash();
CHECK_EQ(length, non_ascii_str->length());
Handle<String> ascii_str =
- Factory::NewStringFromUtf8(Vector<const char>(ascii, length));
+ FACTORY->NewStringFromUtf8(Vector<const char>(ascii, length));
ascii_str->Hash();
CHECK_EQ(length, ascii_str->length());
DeleteArray(non_ascii);
int next_objs_index = 0;
// Allocate a JS array to OLD_POINTER_SPACE and NEW_SPACE
- objs[next_objs_index++] = Factory::NewJSArray(10);
- objs[next_objs_index++] = Factory::NewJSArray(10, TENURED);
+ objs[next_objs_index++] = FACTORY->NewJSArray(10);
+ objs[next_objs_index++] = FACTORY->NewJSArray(10, TENURED);
// Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
objs[next_objs_index++] =
- Factory::NewStringFromAscii(CStrVector("abcdefghij"));
+ FACTORY->NewStringFromAscii(CStrVector("abcdefghij"));
objs[next_objs_index++] =
- Factory::NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
+ FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
- int large_size = Heap::MaxObjectSizeInPagedSpace() + 1;
+ int large_size = HEAP->MaxObjectSizeInPagedSpace() + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
objs[next_objs_index++] =
- Factory::NewStringFromAscii(CStrVector(str), TENURED);
+ FACTORY->NewStringFromAscii(CStrVector(str), TENURED);
delete[] str;
// Add a Map object to look for.
TEST(LargeObjectSpaceContains) {
InitializeVM();
- Heap::CollectGarbage(NEW_SPACE);
+ HEAP->CollectGarbage(NEW_SPACE);
- Address current_top = Heap::new_space()->top();
+ Address current_top = HEAP->new_space()->top();
Page* page = Page::FromAddress(current_top);
Address current_page = page->address();
Address next_page = current_page + Page::kPageSize;
kPointerSize;
CHECK_EQ(bytes_to_allocate, FixedArray::SizeFor(n_elements));
FixedArray* array = FixedArray::cast(
- Heap::AllocateFixedArray(n_elements)->ToObjectChecked());
+ HEAP->AllocateFixedArray(n_elements)->ToObjectChecked());
int index = n_elements - 1;
CHECK_EQ(flags_ptr,
// CHECK(Page::FromAddress(next_page)->IsLargeObjectPage());
HeapObject* addr = HeapObject::FromAddress(next_page + 2 * kPointerSize);
- CHECK(Heap::new_space()->Contains(addr));
- CHECK(!Heap::lo_space()->Contains(addr));
+ CHECK(HEAP->new_space()->Contains(addr));
+ CHECK(!HEAP->lo_space()->Contains(addr));
}
// Increase the chance of 'bump-the-pointer' allocation in old space.
bool force_compaction = true;
- Heap::CollectAllGarbage(force_compaction);
+ HEAP->CollectAllGarbage(force_compaction);
v8::HandleScope scope;
// that region dirty marks are updated correctly.
// Step 1: prepare a map for the object. We add 1 inobject property to it.
- Handle<JSFunction> object_ctor(Top::global_context()->object_function());
+ Handle<JSFunction> object_ctor(
+ Isolate::Current()->global_context()->object_function());
CHECK(object_ctor->has_initial_map());
Handle<Map> object_map(object_ctor->initial_map());
// Create a map with single inobject property.
- Handle<Map> my_map = Factory::CopyMap(object_map, 1);
+ Handle<Map> my_map = FACTORY->CopyMap(object_map, 1);
int n_properties = my_map->inobject_properties();
CHECK_GT(n_properties, 0);
// just enough room to allocate JSObject and thus fill the newspace.
int allocation_amount = Min(FixedArray::kMaxSize,
- Heap::MaxObjectSizeInNewSpace());
+ HEAP->MaxObjectSizeInNewSpace());
int allocation_len = LenFromSize(allocation_amount);
- NewSpace* new_space = Heap::new_space();
+ NewSpace* new_space = HEAP->new_space();
Address* top_addr = new_space->allocation_top_address();
Address* limit_addr = new_space->allocation_limit_address();
while ((*limit_addr - *top_addr) > allocation_amount) {
- CHECK(!Heap::always_allocate());
- Object* array =
- Heap::AllocateFixedArray(allocation_len)->ToObjectChecked();
+ CHECK(!HEAP->always_allocate());
+ Object* array = HEAP->AllocateFixedArray(allocation_len)->ToObjectChecked();
+ CHECK(!array->IsFailure());
CHECK(new_space->Contains(array));
}
int fixed_array_len = LenFromSize(to_fill);
CHECK(fixed_array_len < FixedArray::kMaxLength);
- CHECK(!Heap::always_allocate());
- Object* array =
- Heap::AllocateFixedArray(fixed_array_len)->ToObjectChecked();
+ CHECK(!HEAP->always_allocate());
+ Object* array = HEAP->AllocateFixedArray(fixed_array_len)->ToObjectChecked();
+ CHECK(!array->IsFailure());
CHECK(new_space->Contains(array));
- Object* object = Heap::AllocateJSObjectFromMap(*my_map)->ToObjectChecked();
+ Object* object = HEAP->AllocateJSObjectFromMap(*my_map)->ToObjectChecked();
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
// Step 4: clone jsobject, but force always allocate first to create a clone
// in old pointer space.
- Address old_pointer_space_top = Heap::old_pointer_space()->top();
+ Address old_pointer_space_top = HEAP->old_pointer_space()->top();
AlwaysAllocateScope aa_scope;
- Object* clone_obj = Heap::CopyJSObject(jsobject)->ToObjectChecked();
+ Object* clone_obj = HEAP->CopyJSObject(jsobject)->ToObjectChecked();
JSObject* clone = JSObject::cast(clone_obj);
if (clone->address() != old_pointer_space_top) {
    // Alas, it was allocated from the free list, so we cannot do the checks.
return;
}
- CHECK(Heap::old_pointer_space()->Contains(clone->address()));
+ CHECK(HEAP->old_pointer_space()->Contains(clone->address()));
// Step 5: verify validity of region dirty marks.
Address clone_addr = clone->address();
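// LenFromSize, used when filling the new space above, is a local helper
// defined earlier in this test file; inferred from its uses next to
// FixedArray::SizeFor (size == kHeaderSize + len * kPointerSize), it is
// presumably the inverse:
//
//   static int LenFromSize(int size) {
//     return (size - FixedArray::kHeaderSize) / kPointerSize;
//   }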
" var z = x + y;"
"};"
"foo()";
- Handle<String> foo_name = Factory::LookupAsciiSymbol("foo");
+ Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope;
}
// Check function is compiled.
- Object* func_value =
- Top::context()->global()->GetProperty(*foo_name)->ToObjectChecked();
+ Object* func_value = Isolate::Current()->context()->global()->
+ GetProperty(*foo_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
CHECK(function->shared()->is_compiled());
- Heap::CollectAllGarbage(true);
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
CHECK(function->shared()->is_compiled());
- Heap::CollectAllGarbage(true);
- Heap::CollectAllGarbage(true);
- Heap::CollectAllGarbage(true);
- Heap::CollectAllGarbage(true);
- Heap::CollectAllGarbage(true);
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
// foo should no longer be in the compilation cache
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
// Count the number of global contexts in the weak list of global contexts.
static int CountGlobalContexts() {
int count = 0;
- Object* object = Heap::global_contexts_list();
+ Object* object = HEAP->global_contexts_list();
while (!object->IsUndefined()) {
count++;
object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
TEST(TestInternalWeakLists) {
+ v8::V8::Initialize();
+
static const int kNumTestContexts = 10;
v8::HandleScope scope;
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- Heap::PerformScavenge();
+ HEAP->PerformScavenge();
CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
}
// Mark compact handles the weak references.
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
// Get rid of f3 and f5 in the same way.
CompileRun("f3=null");
for (int j = 0; j < 10; j++) {
- Heap::PerformScavenge();
+ HEAP->PerformScavenge();
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
}
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
- Heap::PerformScavenge();
+ HEAP->PerformScavenge();
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
}
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
ctx[i]->Exit();
}
// Force compilation cache cleanup.
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
// Dispose the global contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- Heap::PerformScavenge();
+ HEAP->PerformScavenge();
CHECK_EQ(kNumTestContexts - i, CountGlobalContexts());
}
// Mark compact handles the weak references.
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
}
// causing a GC after the specified number of elements.
static int CountGlobalContextsWithGC(int n) {
int count = 0;
- Handle<Object> object(Heap::global_contexts_list());
+ Handle<Object> object(HEAP->global_contexts_list());
while (!object->IsUndefined()) {
count++;
- if (count == n) Heap::CollectAllGarbage(true);
+ if (count == n) HEAP->CollectAllGarbage(true);
object =
Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
}
while (object->IsJSFunction() &&
!Handle<JSFunction>::cast(object)->IsBuiltin()) {
count++;
- if (count == n) Heap::CollectAllGarbage(true);
+ if (count == n) HEAP->CollectAllGarbage(true);
object = Handle<Object>(
Object::cast(JSFunction::cast(*object)->next_function_link()));
}
TEST(TestInternalWeakListsTraverseWithGC) {
+ v8::V8::Initialize();
+
static const int kNumTestContexts = 10;
v8::HandleScope scope;
TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
InitializeVM();
- intptr_t size_of_objects_1 = Heap::SizeOfObjects();
+ intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
intptr_t size_of_objects_2 = 0;
for (HeapObject* obj = iterator.next();
InitializeVM();
v8::HandleScope scope;
CompileRun("a = {}; b = {};");
- v8::Handle<Object> a(Top::context()->global()->GetProperty(
- *Factory::LookupAsciiSymbol("a"))->ToObjectChecked());
- v8::Handle<Object> b(Top::context()->global()->GetProperty(
- *Factory::LookupAsciiSymbol("b"))->ToObjectChecked());
+ v8::Handle<Object> a(ISOLATE->context()->global()->GetProperty(
+ *FACTORY->LookupAsciiSymbol("a"))->ToObjectChecked());
+ v8::Handle<Object> b(ISOLATE->context()->global()->GetProperty(
+ *FACTORY->LookupAsciiSymbol("b"))->ToObjectChecked());
CHECK_NE(*a, *b);
{
HeapIteratorTestHelper helper(*a, *b);
CHECK(helper.a_found());
CHECK(helper.b_found());
}
- CHECK(Top::context()->global()->DeleteProperty(
- *Factory::LookupAsciiSymbol("a"), JSObject::FORCE_DELETION));
+ CHECK(ISOLATE->context()->global()->DeleteProperty(
+ *FACTORY->LookupAsciiSymbol("a"), JSObject::FORCE_DELETION));
// We ensure that GC will not happen, so our raw pointer stays valid.
AssertNoAllocation no_alloc;
Object* a_saved = *a;
// --- T h e A c t u a l T e s t s
TEST(LiveEditDiffer) {
+ v8::internal::V8::Initialize(NULL);
CompareStrings("zz1zzz12zz123zzz", "zzzzzzzzzz", 6);
CompareStrings("zz1zzz12zz123zzz", "zz0zzz0zz0zzz", 9);
CompareStrings("123456789", "987654321", 16);
#include "api.h"
#include "codegen.h"
#include "log.h"
-#include "top.h"
+#include "isolate.h"
#include "cctest.h"
#include "disassembler.h"
#include "register-allocator-inl.h"
using v8::internal::byte;
using v8::internal::Address;
using v8::internal::Handle;
+using v8::internal::Isolate;
using v8::internal::JSFunction;
using v8::internal::StackTracer;
using v8::internal::TickSample;
-using v8::internal::Top;
namespace i = v8::internal;
// sp is only used to define stack high bound
trace_env.sample->sp =
reinterpret_cast<Address>(trace_env.sample) - 10240;
- StackTracer::Trace(trace_env.sample);
+ StackTracer::Trace(Isolate::Current(), trace_env.sample);
}
// Hide c_entry_fp to emulate the situation when sampling is done while
// pure JS code is being executed.
static void DoTraceHideCEntryFPAddress(Address fp) {
- v8::internal::Address saved_c_frame_fp = *(Top::c_entry_fp_address());
+ v8::internal::Address saved_c_frame_fp =
+ *(Isolate::Current()->c_entry_fp_address());
CHECK(saved_c_frame_fp);
- *(Top::c_entry_fp_address()) = 0;
+ *(Isolate::Current()->c_entry_fp_address()) = 0;
DoTrace(fp);
- *(Top::c_entry_fp_address()) = saved_c_frame_fp;
+ *(Isolate::Current()->c_entry_fp_address()) = saved_c_frame_fp;
}
static Address GetJsEntrySp() {
- CHECK_NE(NULL, Top::GetCurrentThread());
- return Top::js_entry_sp(Top::GetCurrentThread());
+ CHECK_NE(NULL, i::Isolate::Current()->thread_local_top());
+ return Isolate::js_entry_sp(i::Isolate::Current()->thread_local_top());
}
// This test verifies that stack tracing works when called during
// execution of a native function called from JS code. In this case,
-// StackTracer uses Top::c_entry_fp as a starting point for stack
+// StackTracer uses Isolate::c_entry_fp as a starting point for stack
// walking.
TEST(CFromJSStackTrace) {
TickSample sample;
// This test verifies that stack tracing works when called during
// execution of JS code. However, as calling StackTracer requires
// entering native code, we can only emulate pure JS by erasing
-// Top::c_entry_fp value. In this case, StackTracer uses passed frame
+// Isolate::c_entry_fp value. In this case, StackTracer uses passed frame
// pointer value as a starting point for stack walking.
TEST(PureJSStackTrace) {
// This test does not pass with inlining enabled since inlined functions
TEST(PureCStackTrace) {
TickSample sample;
InitTraceEnv(&sample);
+ InitializeVM();
// Check that sampler doesn't crash
CHECK_EQ(10, CFunc(10));
}
// Log to memory buffer.
i::FLAG_logfile = "*";
i::FLAG_log = true;
- Logger::Setup();
+ LOGGER->Setup();
}
static void TearDown() {
- Logger::TearDown();
+ LOGGER->TearDown();
}
TEST(EmptyLog) {
SetUp();
- CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
- CHECK_EQ(0, Logger::GetLogLines(100, NULL, 0));
- CHECK_EQ(0, Logger::GetLogLines(0, NULL, 100));
- CHECK_EQ(0, Logger::GetLogLines(100, NULL, 100));
+ CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 0));
+ CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 0));
+ CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 100));
+ CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 100));
TearDown();
}
TEST(GetMessages) {
SetUp();
- Logger::StringEvent("aaa", "bbb");
- Logger::StringEvent("cccc", "dddd");
- CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
+ LOGGER->StringEvent("aaa", "bbb");
+ LOGGER->StringEvent("cccc", "dddd");
+ CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 0));
char log_lines[100];
memset(log_lines, 0, sizeof(log_lines));
// See Logger::StringEvent.
const char* line_1 = "aaa,\"bbb\"\n";
const int line_1_len = StrLength(line_1);
// The exact size.
- CHECK_EQ(line_1_len, Logger::GetLogLines(0, log_lines, line_1_len));
+ CHECK_EQ(line_1_len, LOGGER->GetLogLines(0, log_lines, line_1_len));
CHECK_EQ(line_1, log_lines);
memset(log_lines, 0, sizeof(log_lines));
// A bit more than the first line length.
- CHECK_EQ(line_1_len, Logger::GetLogLines(0, log_lines, line_1_len + 3));
+ CHECK_EQ(line_1_len, LOGGER->GetLogLines(0, log_lines, line_1_len + 3));
log_lines[line_1_len] = '\0';
CHECK_EQ(line_1, log_lines);
memset(log_lines, 0, sizeof(log_lines));
const char* line_2 = "cccc,\"dddd\"\n";
const int line_2_len = StrLength(line_2);
// Now start with line_2 beginning.
- CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 0));
- CHECK_EQ(line_2_len, Logger::GetLogLines(line_1_len, log_lines, line_2_len));
+ CHECK_EQ(0, LOGGER->GetLogLines(line_1_len, log_lines, 0));
+ CHECK_EQ(line_2_len, LOGGER->GetLogLines(line_1_len, log_lines, line_2_len));
CHECK_EQ(line_2, log_lines);
memset(log_lines, 0, sizeof(log_lines));
CHECK_EQ(line_2_len,
- Logger::GetLogLines(line_1_len, log_lines, line_2_len + 3));
+ LOGGER->GetLogLines(line_1_len, log_lines, line_2_len + 3));
CHECK_EQ(line_2, log_lines);
memset(log_lines, 0, sizeof(log_lines));
// Now get entire buffer contents.
const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
const int all_lines_len = StrLength(all_lines);
- CHECK_EQ(all_lines_len, Logger::GetLogLines(0, log_lines, all_lines_len));
+ CHECK_EQ(all_lines_len, LOGGER->GetLogLines(0, log_lines, all_lines_len));
CHECK_EQ(all_lines, log_lines);
memset(log_lines, 0, sizeof(log_lines));
- CHECK_EQ(all_lines_len, Logger::GetLogLines(0, log_lines, all_lines_len + 3));
+ CHECK_EQ(all_lines_len, LOGGER->GetLogLines(0, log_lines, all_lines_len + 3));
CHECK_EQ(all_lines, log_lines);
memset(log_lines, 0, sizeof(log_lines));
TearDown();
static int GetLogLines(int start_pos, i::Vector<char>* buffer) {
- return Logger::GetLogLines(start_pos, buffer->start(), buffer->length());
+ return LOGGER->GetLogLines(start_pos, buffer->start(), buffer->length());
}
TEST(BeyondWritePosition) {
SetUp();
- Logger::StringEvent("aaa", "bbb");
- Logger::StringEvent("cccc", "dddd");
+ LOGGER->StringEvent("aaa", "bbb");
+ LOGGER->StringEvent("cccc", "dddd");
// See Logger::StringEvent.
const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
const int all_lines_len = StrLength(all_lines);
EmbeddedVector<char, 100> buffer;
const int beyond_write_pos = all_lines_len;
- CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos, buffer.start(), 1));
+ CHECK_EQ(0, LOGGER->GetLogLines(beyond_write_pos, buffer.start(), 1));
CHECK_EQ(0, GetLogLines(beyond_write_pos, &buffer));
- CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos + 1, buffer.start(), 1));
+ CHECK_EQ(0, LOGGER->GetLogLines(beyond_write_pos + 1, buffer.start(), 1));
CHECK_EQ(0, GetLogLines(beyond_write_pos + 1, &buffer));
- CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos + 100, buffer.start(), 1));
+ CHECK_EQ(0, LOGGER->GetLogLines(beyond_write_pos + 100, buffer.start(), 1));
CHECK_EQ(0, GetLogLines(beyond_write_pos + 100, &buffer));
- CHECK_EQ(0, Logger::GetLogLines(10 * 1024 * 1024, buffer.start(), 1));
+ CHECK_EQ(0, LOGGER->GetLogLines(10 * 1024 * 1024, buffer.start(), 1));
CHECK_EQ(0, GetLogLines(10 * 1024 * 1024, &buffer));
TearDown();
}
// Log to stdout
i::FLAG_logfile = "-";
i::FLAG_log = true;
- Logger::Setup();
- CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
- CHECK_EQ(0, Logger::GetLogLines(100, NULL, 0));
- CHECK_EQ(0, Logger::GetLogLines(0, NULL, 100));
- CHECK_EQ(0, Logger::GetLogLines(100, NULL, 100));
- Logger::TearDown();
+ LOGGER->Setup();
+ CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 0));
+ CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 0));
+ CHECK_EQ(0, LOGGER->GetLogLines(0, NULL, 100));
+ CHECK_EQ(0, LOGGER->GetLogLines(100, NULL, 100));
+ LOGGER->TearDown();
}
class LoggerTestHelper : public AllStatic {
public:
- static bool IsSamplerActive() { return Logger::IsProfilerSamplerActive(); }
+ static bool IsSamplerActive() { return LOGGER->IsProfilerSamplerActive(); }
static void ResetSamplesTaken() {
- reinterpret_cast<Sampler*>(Logger::ticker_)->ResetSamplesTaken();
+ reinterpret_cast<Sampler*>(LOGGER->ticker_)->ResetSamplesTaken();
}
static bool has_samples_taken() {
- return reinterpret_cast<Sampler*>(Logger::ticker_)->samples_taken() > 0;
+ return reinterpret_cast<Sampler*>(LOGGER->ticker_)->samples_taken() > 0;
}
};
need_to_set_up_logger_(i::V8::IsRunning()),
scope_(),
env_(v8::Context::New()) {
- if (need_to_set_up_logger_) Logger::Setup();
+ if (need_to_set_up_logger_) LOGGER->Setup();
env_->Enter();
}
~ScopedLoggerInitializer() {
env_->Exit();
- Logger::TearDown();
+ LOGGER->TearDown();
i::FLAG_prof_lazy = saved_prof_lazy_;
i::FLAG_prof = saved_prof_;
i::FLAG_prof_auto = saved_prof_auto_;
!LoggerTestHelper::IsSamplerActive());
LoggerTestHelper::ResetSamplesTaken();
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
CHECK(LoggerTestHelper::IsSamplerActive());
// Verify that the current map of compiled functions has been logged.
i::OS::Sleep(1);
}
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
CHECK(i::RuntimeProfiler::IsEnabled() ||
!LoggerTestHelper::IsSamplerActive());
class LoopingThread : public v8::internal::Thread {
public:
- LoopingThread()
- : v8::internal::Thread(),
+ explicit LoopingThread(v8::internal::Isolate* isolate)
+ : v8::internal::Thread(isolate),
semaphore_(v8::internal::OS::CreateSemaphore(0)),
run_(true) {
}
class LoopingJsThread : public LoopingThread {
public:
+ explicit LoopingJsThread(v8::internal::Isolate* isolate)
+ : LoopingThread(isolate) { }
void RunLoop() {
v8::Locker locker;
- CHECK(v8::internal::ThreadManager::HasId());
+ CHECK(i::Isolate::Current() != NULL);
+ CHECK_GT(i::Isolate::Current()->thread_manager()->CurrentId(), 0);
SetV8ThreadId();
while (IsRunning()) {
v8::HandleScope scope;
class LoopingNonJsThread : public LoopingThread {
public:
+ explicit LoopingNonJsThread(v8::internal::Isolate* isolate)
+ : LoopingThread(isolate) { }
void RunLoop() {
v8::Locker locker;
v8::Unlocker unlocker;
    // Now the thread has V8's id, but will not run VM code.
- CHECK(v8::internal::ThreadManager::HasId());
+ CHECK(i::Isolate::Current() != NULL);
+ CHECK_GT(i::Isolate::Current()->thread_manager()->CurrentId(), 0);
double i = 10;
SignalRunning();
while (IsRunning()) {
class TestSampler : public v8::internal::Sampler {
public:
- TestSampler()
- : Sampler(0, true, true),
+ explicit TestSampler(v8::internal::Isolate* isolate)
+ : Sampler(isolate, 0, true, true),
semaphore_(v8::internal::OS::CreateSemaphore(0)),
was_sample_stack_called_(false) {
}
TestSampler* sampler = NULL;
{
v8::Locker locker;
- sampler = new TestSampler();
+ sampler = new TestSampler(v8::internal::Isolate::Current());
sampler->Start();
CHECK(sampler->IsActive());
}
- LoopingJsThread jsThread;
+ LoopingJsThread jsThread(v8::internal::Isolate::Current());
jsThread.Start();
- LoopingNonJsThread nonJsThread;
+ LoopingNonJsThread nonJsThread(v8::internal::Isolate::Current());
nonJsThread.Start();
CHECK(!sampler->WasSampleStackCalled());
i_source->set_resource(NULL);
// Must not crash.
- i::Logger::LogCompiledFunctions();
+ LOGGER->LogCompiledFunctions();
}
initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
CompileAndRunScript("Obj.prototype.method1.toString();");
- i::Logger::LogCompiledFunctions();
+ LOGGER->LogCompiledFunctions();
CHECK_GT(matcher.GetNextChunk(), 0);
const char* callback_rec = "code-creation,Callback,";
inst->SetAccessor(v8::String::New("prop1"), Prop1Getter, Prop1Setter);
inst->SetAccessor(v8::String::New("prop2"), Prop2Getter);
- i::Logger::LogAccessorCallbacks();
+ LOGGER->LogAccessorCallbacks();
CHECK_GT(matcher.GetNextChunk(), 0);
matcher.PrintBuffer();
const char* close_tag = "close-tag,";
// Check compatibility with the old style behavior.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
CHECK_EQ(NULL, matcher.Find(open_tag));
CHECK_EQ(NULL, matcher.Find(close_tag));
const char* close_tag1 = "close-tag,1\n";
// Check non-nested tag case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
CHECK_GT(matcher.GetNextChunk(), 0);
CHECK(matcher.IsInSequence(open_tag1, close_tag1));
const char* close_tag2 = "close-tag,2\n";
// Check nested tags case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
CHECK_GT(matcher.GetNextChunk(), 0);
// open_tag1 < open_tag2 < close_tag2 < close_tag1
CHECK(matcher.IsInSequence(open_tag1, open_tag2));
CHECK(matcher.IsInSequence(close_tag2, close_tag1));
// Check overlapped tags case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
CHECK_GT(matcher.GetNextChunk(), 0);
// open_tag1 < open_tag2 < close_tag1 < close_tag2
CHECK(matcher.IsInSequence(open_tag1, open_tag2));
const char* close_tag3 = "close-tag,3\n";
// Check pausing overflow case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 3);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 3);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 3);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 3);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
// There must be no tags, because logging is disabled.
CHECK_EQ(NULL, matcher.Find(open_tag3));
CHECK_EQ(NULL, matcher.Find(close_tag3));
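// A minimal sketch of the tag contract these checks exercise, restating
// only what the assertions above establish (the tag value 7 is
// hypothetical): a module stays active until every tag that resumed it
// has been paused.
//
//   LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 7);  // CPU module on
//   CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
//   LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 7);   // CPU module off
//   CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());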
TEST(IsLoggingPreserved) {
ScopedLoggerInitializer initialize_logger(false);
- CHECK(Logger::is_logging());
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK(Logger::is_logging());
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK(Logger::is_logging());
+ CHECK(LOGGER->is_logging());
+ LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK(LOGGER->is_logging());
+ LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK(LOGGER->is_logging());
- CHECK(Logger::is_logging());
- Logger::ResumeProfiler(
+ CHECK(LOGGER->is_logging());
+ LOGGER->ResumeProfiler(
v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
- CHECK(Logger::is_logging());
- Logger::PauseProfiler(
+ CHECK(LOGGER->is_logging());
+ LOGGER->PauseProfiler(
v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
- CHECK(Logger::is_logging());
+ CHECK(LOGGER->is_logging());
- CHECK(Logger::is_logging());
- Logger::ResumeProfiler(
+ CHECK(LOGGER->is_logging());
+ LOGGER->ResumeProfiler(
v8::PROFILER_MODULE_CPU |
v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
- CHECK(Logger::is_logging());
- Logger::PauseProfiler(
+ CHECK(LOGGER->is_logging());
+ LOGGER->PauseProfiler(
v8::PROFILER_MODULE_CPU |
v8::PROFILER_MODULE_HEAP_STATS | v8::PROFILER_MODULE_JS_CONSTRUCTORS, 1);
- CHECK(Logger::is_logging());
+ CHECK(LOGGER->is_logging());
}
" obj.test =\n"
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
EmbeddedVector<char, 204800> buffer;
int log_size;
}
// Iterate the heap to find compiled functions; this will write to the log.
- i::Logger::LogCompiledFunctions();
+ LOGGER->LogCompiledFunctions();
char* new_log_start = buffer.start() + log_size;
- const int new_log_size = Logger::GetLogLines(
+ const int new_log_size = LOGGER->GetLogLines(
log_size, new_log_start, buffer.length() - log_size);
CHECK_GT(new_log_size, 0);
CHECK_GT(buffer.length(), log_size + new_log_size);
CHECK(results_equal);
env->Exit();
- Logger::TearDown();
+ LOGGER->TearDown();
i::FLAG_always_compact = saved_always_compact;
}
// Test that we can move a Smi value literally into a register.
TEST(SmiMove) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
// Test that we can compare smis for equality (and more).
TEST(SmiCompare) {
- v8::V8::Initialize();
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(Integer32ToSmi) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
TEST(Integer64PlusConstantToSmi) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
TEST(SmiCheck) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
TEST(SmiNeg) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
}
TEST(SmiAdd) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
TEST(SmiSub) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiMul) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
TEST(SmiDiv) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiMod) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
}
TEST(SmiIndex) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiSelectNonSmi) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiAnd) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiOr) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiXor) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiNot) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiShiftLeft) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiShiftLogicalRight) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(SmiShiftArithmeticRight) {
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
- v8::V8::Initialize();
+ v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
TEST(OperandOffset) {
+ v8::internal::V8::Initialize(NULL);
int data[256];
for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
#include "global-handles.h"
#include "snapshot.h"
-#include "top.h"
#include "cctest.h"
using namespace v8::internal;
// from new space.
FLAG_gc_global = true;
FLAG_always_compact = true;
- Heap::ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+ HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
InitializeVM();
// Allocate a fixed array in the new space.
int array_size =
- (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
+ (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
(kPointerSize * 4);
- Object* obj = Heap::AllocateFixedArray(array_size)->ToObjectChecked();
+ Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
// Array should be in the new space.
- CHECK(Heap::InSpace(*array, NEW_SPACE));
+ CHECK(HEAP->InSpace(*array, NEW_SPACE));
// Call the m-c collector, so array becomes an old object.
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
// The array now sits in the old space.
- CHECK(Heap::InSpace(*array, OLD_POINTER_SPACE));
+ CHECK(HEAP->InSpace(*array, OLD_POINTER_SPACE));
}
TEST(NoPromotion) {
- Heap::ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+ HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
// Test the situation where some objects in new space are promoted to
// the old space.
v8::HandleScope sc;
// Do a mark compact GC to shrink the heap.
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
// Allocate a big Fixed array in the new space.
- int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
+ int size = (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
kPointerSize;
- Object* obj = Heap::AllocateFixedArray(size)->ToObjectChecked();
+ Object* obj = HEAP->AllocateFixedArray(size)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
// Array still stays in the new space.
- CHECK(Heap::InSpace(*array, NEW_SPACE));
+ CHECK(HEAP->InSpace(*array, NEW_SPACE));
// Allocate objects in the old space until out of memory.
FixedArray* host = *array;
while (true) {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(100, TENURED);
+ { MaybeObject* maybe_obj = HEAP->AllocateFixedArray(100, TENURED);
if (!maybe_obj->ToObject(&obj)) break;
}
}
// Call mark compact GC, and it should pass.
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
// The array should not be promoted because the old space is full.
- CHECK(Heap::InSpace(*array, NEW_SPACE));
+ CHECK(HEAP->InSpace(*array, NEW_SPACE));
}
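// The allocation idiom used throughout these heap tests, as a commented
// sketch (the size 100 is illustrative): raw allocations return a
// MaybeObject*, which either yields an Object* through ToObject() or
// signals failure; ToObjectChecked() is the assert-on-failure shorthand.
//
//   Object* result;
//   MaybeObject* maybe = HEAP->AllocateFixedArray(100, TENURED);
//   if (!maybe->ToObject(&result)) {
//     // Allocation failed; trigger a GC or bail out, as the caller sees fit.
//   }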
v8::HandleScope sc;
// Call mark-compact when the heap is empty.
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
// Keep allocating garbage in new space until allocation fails.
const int ARRAY_SIZE = 100;
Object* array;
MaybeObject* maybe_array;
do {
- maybe_array = Heap::AllocateFixedArray(ARRAY_SIZE);
+ maybe_array = HEAP->AllocateFixedArray(ARRAY_SIZE);
} while (maybe_array->ToObject(&array));
- Heap::CollectGarbage(NEW_SPACE);
+ HEAP->CollectGarbage(NEW_SPACE);
- array = Heap::AllocateFixedArray(ARRAY_SIZE)->ToObjectChecked();
+ array = HEAP->AllocateFixedArray(ARRAY_SIZE)->ToObjectChecked();
// Keep allocating maps until allocation fails.
Object* mapp;
MaybeObject* maybe_mapp;
do {
- maybe_mapp = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ maybe_mapp = HEAP->AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
} while (maybe_mapp->ToObject(&mapp));
- Heap::CollectGarbage(MAP_SPACE);
- mapp = Heap::AllocateMap(JS_OBJECT_TYPE,
+ HEAP->CollectGarbage(MAP_SPACE);
+ mapp = HEAP->AllocateMap(JS_OBJECT_TYPE,
JSObject::kHeaderSize)->ToObjectChecked();
// Allocate some garbage.
String* func_name =
- String::cast(Heap::LookupAsciiSymbol("theFunction")->ToObjectChecked());
+ String::cast(HEAP->LookupAsciiSymbol("theFunction")->ToObjectChecked());
SharedFunctionInfo* function_share = SharedFunctionInfo::cast(
- Heap::AllocateSharedFunctionInfo(func_name)->ToObjectChecked());
+ HEAP->AllocateSharedFunctionInfo(func_name)->ToObjectChecked());
JSFunction* function = JSFunction::cast(
- Heap::AllocateFunction(*Top::function_map(),
+ HEAP->AllocateFunction(*Isolate::Current()->function_map(),
function_share,
- Heap::undefined_value())->ToObjectChecked());
+ HEAP->undefined_value())->ToObjectChecked());
Map* initial_map =
- Map::cast(Heap::AllocateMap(JS_OBJECT_TYPE,
+ Map::cast(HEAP->AllocateMap(JS_OBJECT_TYPE,
JSObject::kHeaderSize)->ToObjectChecked());
function->set_initial_map(initial_map);
- Top::context()->global()->SetProperty(func_name,
- function,
- NONE,
- kNonStrictMode)->ToObjectChecked();
+ Isolate::Current()->context()->global()->SetProperty(
+ func_name, function, NONE, kNonStrictMode)->ToObjectChecked();
- JSObject* obj =
- JSObject::cast(Heap::AllocateJSObject(function)->ToObjectChecked());
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ JSObject* obj = JSObject::cast(
+ HEAP->AllocateJSObject(function)->ToObjectChecked());
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
func_name =
- String::cast(Heap::LookupAsciiSymbol("theFunction")->ToObjectChecked());
- CHECK(Top::context()->global()->HasLocalProperty(func_name));
- Object* func_value =
- Top::context()->global()->GetProperty(func_name)->ToObjectChecked();
+ String::cast(HEAP->LookupAsciiSymbol("theFunction")->ToObjectChecked());
+ CHECK(Isolate::Current()->context()->global()->HasLocalProperty(func_name));
+ Object* func_value = Isolate::Current()->context()->global()->
+ GetProperty(func_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
function = JSFunction::cast(func_value);
- obj = JSObject::cast(Heap::AllocateJSObject(function)->ToObjectChecked());
+ obj = JSObject::cast(HEAP->AllocateJSObject(function)->ToObjectChecked());
String* obj_name =
- String::cast(Heap::LookupAsciiSymbol("theObject")->ToObjectChecked());
- Top::context()->global()->SetProperty(obj_name,
- obj,
- NONE,
- kNonStrictMode)->ToObjectChecked();
+ String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
+ Isolate::Current()->context()->global()->SetProperty(
+ obj_name, obj, NONE, kNonStrictMode)->ToObjectChecked();
String* prop_name =
- String::cast(Heap::LookupAsciiSymbol("theSlot")->ToObjectChecked());
+ String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
obj->SetProperty(prop_name,
Smi::FromInt(23),
NONE,
kNonStrictMode)->ToObjectChecked();
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
obj_name =
- String::cast(Heap::LookupAsciiSymbol("theObject")->ToObjectChecked());
- CHECK(Top::context()->global()->HasLocalProperty(obj_name));
- CHECK(Top::context()->global()->
- GetProperty(obj_name)->ToObjectChecked()->IsJSObject());
- obj = JSObject::cast(
- Top::context()->global()->GetProperty(obj_name)->ToObjectChecked());
+ String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
+ CHECK(Isolate::Current()->context()->global()->HasLocalProperty(obj_name));
+ CHECK(Isolate::Current()->context()->global()->
+ GetProperty(obj_name)->ToObjectChecked()->IsJSObject());
+ obj = JSObject::cast(Isolate::Current()->context()->global()->
+ GetProperty(obj_name)->ToObjectChecked());
prop_name =
- String::cast(Heap::LookupAsciiSymbol("theSlot")->ToObjectChecked());
- CHECK(obj->GetProperty(prop_name)->ToObjectChecked() == Smi::FromInt(23));
+ String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
+ CHECK(obj->GetProperty(prop_name) == Smi::FromInt(23));
}
static Handle<Map> CreateMap() {
- return Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ return FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
}
v8::HandleScope sc;
// keep allocating maps while pointers are still encodable and thus
// mark compact is permitted.
- Handle<JSObject> root = Factory::NewJSObjectFromMap(CreateMap());
+ Handle<JSObject> root = FACTORY->NewJSObjectFromMap(CreateMap());
do {
Handle<Map> map = CreateMap();
map->set_prototype(*root);
- root = Factory::NewJSObjectFromMap(map);
- } while (Heap::map_space()->MapPointersEncodable());
+ root = FACTORY->NewJSObjectFromMap(map);
+ } while (HEAP->map_space()->MapPointersEncodable());
}
// Now, as we don't have any handles to the just-allocated maps, we should
// be able to trigger map compaction.
// To give an additional chance to fail, try to force compaction which
// should be impossible right now.
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
// And now map pointers should be encodable again.
- CHECK(Heap::map_space()->MapPointersEncodable());
+ CHECK(HEAP->map_space()->MapPointersEncodable());
}
TEST(GCCallback) {
InitializeVM();
- Heap::SetGlobalGCPrologueCallback(&GCPrologueCallbackFunc);
- Heap::SetGlobalGCEpilogueCallback(&GCEpilogueCallbackFunc);
+ HEAP->SetGlobalGCPrologueCallback(&GCPrologueCallbackFunc);
+ HEAP->SetGlobalGCEpilogueCallback(&GCEpilogueCallbackFunc);
// Scavenge does not call GC callback functions.
- Heap::PerformScavenge();
+ HEAP->PerformScavenge();
CHECK_EQ(0, gc_starts);
CHECK_EQ(gc_ends, gc_starts);
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
CHECK_EQ(1, gc_starts);
CHECK_EQ(gc_ends, gc_starts);
}
}
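// How the counters above are wired up, as a sketch (the callback bodies
// are assumed; only the registration calls appear in this test):
//
//   static void GCPrologueCallbackFunc() { gc_starts++; }
//   static void GCEpilogueCallbackFunc() { gc_ends++; }
//   HEAP->SetGlobalGCPrologueCallback(&GCPrologueCallbackFunc);
//   HEAP->SetGlobalGCEpilogueCallback(&GCEpilogueCallbackFunc);
//   HEAP->CollectGarbage(OLD_POINTER_SPACE);  // fires both callbacks
//   HEAP->PerformScavenge();                  // fires neither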
TEST(ObjectGroups) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
InitializeVM();
NumberOfWeakCalls = 0;
v8::HandleScope handle_scope;
Handle<Object> g1s1 =
- GlobalHandles::Create(Heap::AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1s2 =
- GlobalHandles::Create(Heap::AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1c1 =
- GlobalHandles::Create(Heap::AllocateFixedArray(1)->ToObjectChecked());
- GlobalHandles::MakeWeak(g1s1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- GlobalHandles::MakeWeak(g1s2.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- GlobalHandles::MakeWeak(g1c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->MakeWeak(g1s1.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+ global_handles->MakeWeak(g1s2.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+ global_handles->MakeWeak(g1c1.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
Handle<Object> g2s1 =
- GlobalHandles::Create(Heap::AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s2 =
- GlobalHandles::Create(Heap::AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2c1 =
- GlobalHandles::Create(Heap::AllocateFixedArray(1)->ToObjectChecked());
- GlobalHandles::MakeWeak(g2s1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- GlobalHandles::MakeWeak(g2s2.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- GlobalHandles::MakeWeak(g2c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
-
- Handle<Object> root = GlobalHandles::Create(*g1s1); // make a root.
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->MakeWeak(g2s1.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+ global_handles->MakeWeak(g2s2.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+ global_handles->MakeWeak(g2c1.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+
+ Handle<Object> root = global_handles->Create(*g1s1); // make a root.
// Connect group 1 and 2, make a cycle.
Handle<FixedArray>::cast(g1s2)->set(0, *g2s2);
Object** g1_children[] = { g1c1.location() };
Object** g2_objects[] = { g2s1.location(), g2s2.location() };
Object** g2_children[] = { g2c1.location() };
- GlobalHandles::AddObjectGroup(g1_objects, 2, NULL);
- GlobalHandles::AddImplicitReferences(HeapObject::cast(*g1s1),
- g1_children, 1);
- GlobalHandles::AddObjectGroup(g2_objects, 2, NULL);
- GlobalHandles::AddImplicitReferences(HeapObject::cast(*g2s2),
- g2_children, 1);
+ global_handles->AddObjectGroup(g1_objects, 2, NULL);
+ global_handles->AddImplicitReferences(HeapObject::cast(*g1s1),
+ g1_children, 1);
+ global_handles->AddObjectGroup(g2_objects, 2, NULL);
+ global_handles->AddImplicitReferences(HeapObject::cast(*g2s2),
+ g2_children, 1);
}
// Do a full GC
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
// All objects should be alive.
CHECK_EQ(0, NumberOfWeakCalls);
// Weaken the root.
- GlobalHandles::MakeWeak(root.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ global_handles->MakeWeak(root.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
// But make children strong roots: all the objects (except for children)
// should be collectable now.
- GlobalHandles::ClearWeakness(g1c1.location());
- GlobalHandles::ClearWeakness(g2c1.location());
+ global_handles->ClearWeakness(g1c1.location());
+ global_handles->ClearWeakness(g2c1.location());
// Object groups are cleared after each GC, so rebuild them.
{
Object** g1_children[] = { g1c1.location() };
Object** g2_objects[] = { g2s1.location(), g2s2.location() };
Object** g2_children[] = { g2c1.location() };
- GlobalHandles::AddObjectGroup(g1_objects, 2, NULL);
- GlobalHandles::AddImplicitReferences(HeapObject::cast(*g1s1),
+ global_handles->AddObjectGroup(g1_objects, 2, NULL);
+ global_handles->AddImplicitReferences(HeapObject::cast(*g1s1),
g1_children, 1);
- GlobalHandles::AddObjectGroup(g2_objects, 2, NULL);
- GlobalHandles::AddImplicitReferences(HeapObject::cast(*g2s2),
+ global_handles->AddObjectGroup(g2_objects, 2, NULL);
+ global_handles->AddImplicitReferences(HeapObject::cast(*g2s2),
g2_children, 1);
}
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, NumberOfWeakCalls);
// And now make children weak again and collect them.
- GlobalHandles::MakeWeak(g1c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- GlobalHandles::MakeWeak(g2c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
-
- Heap::CollectGarbage(OLD_POINTER_SPACE);
+ global_handles->MakeWeak(g1c1.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+ global_handles->MakeWeak(g2c1.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
}
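// The weak-handle lifecycle this test walks through twice, condensed into
// a sketch using only the per-isolate calls introduced by this patch
// (handle and array names are illustrative):
//
//   GlobalHandles* gh = Isolate::Current()->global_handles();
//   Handle<Object> h =
//       gh->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
//   gh->MakeWeak(h.location(), reinterpret_cast<void*>(1234),
//                &WeakPointerCallback);
//   Object** group[] = { h.location() };
//   gh->AddObjectGroup(group, 1, NULL);  // valid for a single GC only
//   HEAP->CollectGarbage(OLD_POINTER_SPACE);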
#include "v8.h"
+#include "isolate.h"
#include "token.h"
#include "scanner.h"
#include "parser.h"
// Parser/Scanner needs a stack limit.
int marker;
- i::StackGuard::SetStackLimit(
+ i::Isolate::Current()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
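// (The address of the local `marker` approximates the current stack top;
// subtracting 128KB sets the limit safely below it, giving the parser
// bounded headroom without any platform-specific stack query.)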
for (int i = 0; tests[i]; i++) {
v8::Persistent<v8::Context> context = v8::Context::New();
v8::Context::Scope context_scope(context);
int marker;
- i::StackGuard::SetStackLimit(
+ i::Isolate::Current()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
// Source containing functions that might be lazily compiled and all types
TEST(StandAlonePreParser) {
int marker;
- i::StackGuard::SetStackLimit(
+ i::Isolate::Current()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
const char* programs[] = {
NULL
};
- uintptr_t stack_limit = i::StackGuard::real_climit();
+ uintptr_t stack_limit = ISOLATE->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
const char* program = programs[i];
i::Utf8ToUC16CharacterStream stream(
reinterpret_cast<const i::byte*>(program),
static_cast<unsigned>(strlen(program)));
i::CompleteParserRecorder log;
- i::V8JavaScriptScanner scanner;
+ i::V8JavaScriptScanner scanner(ISOLATE);
scanner.Initialize(&stream);
v8::preparser::PreParser::PreParseResult result =
TEST(RegressChromium62639) {
int marker;
- i::StackGuard::SetStackLimit(
+ ISOLATE->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
const char* program = "var x = 'something';\n"
// the block could be lazily compiled, and an extra, unexpected,
// entry was added to the data.
int marker;
- i::StackGuard::SetStackLimit(
+ ISOLATE->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
const char* program =
TEST(PreParseOverflow) {
int marker;
- i::StackGuard::SetStackLimit(
+ ISOLATE->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
size_t kProgramSize = 1024 * 1024;
memset(*program, '(', kProgramSize);
program[kProgramSize] = '\0';
- uintptr_t stack_limit = i::StackGuard::real_climit();
+ uintptr_t stack_limit = ISOLATE->stack_guard()->real_climit();
i::Utf8ToUC16CharacterStream stream(
reinterpret_cast<const i::byte*>(*program),
static_cast<unsigned>(kProgramSize));
i::CompleteParserRecorder log;
- i::V8JavaScriptScanner scanner;
+ i::V8JavaScriptScanner scanner(ISOLATE);
scanner.Initialize(&stream);
}
i::Vector<const char> ascii_vector(ascii_source, static_cast<int>(length));
i::Handle<i::String> ascii_string(
- i::Factory::NewStringFromAscii(ascii_vector));
+ FACTORY->NewStringFromAscii(ascii_vector));
TestExternalResource resource(*uc16_buffer, length);
i::Handle<i::String> uc16_string(
- i::Factory::NewExternalStringFromTwoByte(&resource));
+ FACTORY->NewExternalStringFromTwoByte(&resource));
i::ExternalTwoByteStringUC16CharacterStream uc16_stream(
i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
- i::V8JavaScriptScanner scanner;
+ i::V8JavaScriptScanner scanner(ISOLATE);
scanner.Initialize(stream);
int i = 0;
i::Utf8ToUC16CharacterStream stream(
reinterpret_cast<const i::byte*>(re_source),
static_cast<unsigned>(strlen(re_source)));
- i::V8JavaScriptScanner scanner;
+ i::V8JavaScriptScanner scanner(ISOLATE);
scanner.Initialize(&stream);
i::Token::Value start = scanner.peek();
TEST(VirtualMemory) {
+ OS::Setup();
VirtualMemory* vm = new VirtualMemory(1 * MB);
CHECK(vm->IsReserved());
void* block_addr = vm->address();
TEST(VirtualMemory) {
+ OS::Setup();
VirtualMemory* vm = new VirtualMemory(1 * MB);
CHECK(vm->IsReserved());
void* block_addr = vm->address();
CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
}
CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
- i::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
V8::Initialize(NULL);
v8::HandleScope scope;
ZoneScope zone_scope(DELETE_ON_EXIT);
- FlatStringReader reader(CStrVector(input));
+ FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
return v8::internal::RegExpParser::ParseRegExp(&reader, false, &result);
}
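// (FlatStringReader now takes an explicit isolate, in line with the rest
// of this patch: helpers that previously reached static state are handed
// the Isolate they should operate on.)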
V8::Initialize(NULL);
v8::HandleScope scope;
ZoneScope zone_scope(DELETE_ON_EXIT);
- FlatStringReader reader(CStrVector(input));
+ FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
CHECK(result.tree != NULL);
v8::HandleScope scope;
unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
ZoneScope zone_scope(DELETE_ON_EXIT);
- FlatStringReader reader(CStrVector(input));
+ FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
CHECK(result.tree != NULL);
v8::HandleScope scope;
unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
ZoneScope zone_scope(DELETE_ON_EXIT);
- FlatStringReader reader(CStrVector(input));
+ FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
CHECK(result.tree != NULL);
V8::Initialize(NULL);
v8::HandleScope scope;
ZoneScope zone_scope(DELETE_ON_EXIT);
- FlatStringReader reader(CStrVector(input));
+ FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
CHECK(!v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
CHECK(result.tree == NULL);
TEST(CharacterClassEscapes) {
+ v8::internal::V8::Initialize(NULL);
TestCharacterClassEscapes('.', IsRegExpNewline);
TestCharacterClassEscapes('d', IsDigit);
TestCharacterClassEscapes('D', NotDigit);
static RegExpNode* Compile(const char* input, bool multiline, bool is_ascii) {
V8::Initialize(NULL);
- FlatStringReader reader(CStrVector(input));
+ FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData compile_data;
if (!v8::internal::RegExpParser::ParseRegExp(&reader, multiline,
&compile_data))
return NULL;
- Handle<String> pattern = Factory::NewStringFromUtf8(CStrVector(input));
+ Handle<String> pattern = FACTORY->NewStringFromUtf8(CStrVector(input));
RegExpEngine::Compile(&compile_data, false, multiline, pattern, is_ascii);
return compile_data.node;
}
TEST(SplayTreeSimple) {
+ v8::internal::V8::Initialize(NULL);
static const unsigned kLimit = 1000;
ZoneScope zone_scope(DELETE_ON_EXIT);
ZoneSplayTree<TestConfig> tree;
TEST(DispatchTableConstruction) {
+ v8::internal::V8::Initialize(NULL);
// Initialize test data.
static const int kLimit = 1000;
static const int kRangeCount = 8;
class ContextInitializer {
public:
ContextInitializer()
- : env_(), scope_(), zone_(DELETE_ON_EXIT), stack_guard_() {
+ : env_(), scope_(), zone_(DELETE_ON_EXIT) {
env_ = v8::Context::New();
env_->Enter();
}
v8::Persistent<v8::Context> env_;
v8::HandleScope scope_;
v8::internal::ZoneScope zone_;
- v8::internal::StackGuard stack_guard_;
};
start_offset,
input_start,
input_end,
- captures);
+ captures,
+ Isolate::Current());
}
m.Succeed();
- Handle<String> source = Factory::NewStringFromAscii(CStrVector(""));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector(""));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
int captures[4] = {42, 37, 87, 117};
- Handle<String> input = Factory::NewStringFromAscii(CStrVector("foofoo"));
+ Handle<String> input = FACTORY->NewStringFromAscii(CStrVector("foofoo"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
const byte* start_adr =
reinterpret_cast<const byte*>(seq_input->GetCharsAddress());
m.Bind(&fail);
m.Fail();
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("^foo"));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("^foo"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
int captures[4] = {42, 37, 87, 117};
- Handle<String> input = Factory::NewStringFromAscii(CStrVector("foofoo"));
+ Handle<String> input = FACTORY->NewStringFromAscii(CStrVector("foofoo"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
CHECK_EQ(-1, captures[2]);
CHECK_EQ(-1, captures[3]);
- input = Factory::NewStringFromAscii(CStrVector("barbarbar"));
+ input = FACTORY->NewStringFromAscii(CStrVector("barbarbar"));
seq_input = Handle<SeqAsciiString>::cast(input);
start_adr = seq_input->GetCharsAddress();
m.Bind(&fail);
m.Fail();
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("^foo"));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("^foo"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
int captures[4] = {42, 37, 87, 117};
const uc16 input_data[6] = {'f', 'o', 'o', 'f', 'o', '\xa0'};
Handle<String> input =
- Factory::NewStringFromTwoByte(Vector<const uc16>(input_data, 6));
+ FACTORY->NewStringFromTwoByte(Vector<const uc16>(input_data, 6));
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
CHECK_EQ(-1, captures[3]);
const uc16 input_data2[9] = {'b', 'a', 'r', 'b', 'a', 'r', 'b', 'a', '\xa0'};
- input = Factory::NewStringFromTwoByte(Vector<const uc16>(input_data2, 9));
+ input = FACTORY->NewStringFromTwoByte(Vector<const uc16>(input_data2, 9));
seq_input = Handle<SeqTwoByteString>::cast(input);
start_adr = seq_input->GetCharsAddress();
m.Bind(&backtrack);
m.Fail();
- Handle<String> source = Factory::NewStringFromAscii(CStrVector(".........."));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector(".........."));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
- Handle<String> input = Factory::NewStringFromAscii(CStrVector("foofoo"));
+ Handle<String> input = FACTORY->NewStringFromAscii(CStrVector("foofoo"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
m.Bind(&missing_match);
m.Fail();
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("^(..)..\1"));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("^(..)..\1"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
- Handle<String> input = Factory::NewStringFromAscii(CStrVector("fooofo"));
+ Handle<String> input = FACTORY->NewStringFromAscii(CStrVector("fooofo"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
m.Bind(&missing_match);
m.Fail();
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("^(..)..\1"));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("^(..)..\1"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
const uc16 input_data[6] = {'f', 0x2028, 'o', 'o', 'f', 0x2028};
Handle<String> input =
- Factory::NewStringFromTwoByte(Vector<const uc16>(input_data, 6));
+ FACTORY->NewStringFromTwoByte(Vector<const uc16>(input_data, 6));
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
m.CheckNotCharacter('b', &fail);
m.Succeed();
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("(^f|ob)"));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("(^f|ob)"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
- Handle<String> input = Factory::NewStringFromAscii(CStrVector("foobar"));
+ Handle<String> input = FACTORY->NewStringFromAscii(CStrVector("foobar"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
m.Succeed();
Handle<String> source =
- Factory::NewStringFromAscii(CStrVector("^(abc)\1\1(?!\1)...(?!\1)"));
+ FACTORY->NewStringFromAscii(CStrVector("^(abc)\1\1(?!\1)...(?!\1)"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
Handle<String> input =
- Factory::NewStringFromAscii(CStrVector("aBcAbCABCxYzab"));
+ FACTORY->NewStringFromAscii(CStrVector("aBcAbCABCxYzab"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
m.Fail();
Handle<String> source =
- Factory::NewStringFromAscii(CStrVector("<loop test>"));
+ FACTORY->NewStringFromAscii(CStrVector("<loop test>"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
// String long enough for test (content doesn't matter).
Handle<String> input =
- Factory::NewStringFromAscii(CStrVector("foofoofoofoofoo"));
+ FACTORY->NewStringFromAscii(CStrVector("foofoofoofoofoo"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
m.GoTo(&loop);
Handle<String> source =
- Factory::NewStringFromAscii(CStrVector("<stack overflow test>"));
+ FACTORY->NewStringFromAscii(CStrVector("<stack overflow test>"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
// String long enough for test (content doesn't matter).
Handle<String> input =
- Factory::NewStringFromAscii(CStrVector("dummy"));
+ FACTORY->NewStringFromAscii(CStrVector("dummy"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NULL);
CHECK_EQ(NativeRegExpMacroAssembler::EXCEPTION, result);
- CHECK(Top::has_pending_exception());
- Top::clear_pending_exception();
+ CHECK(Isolate::Current()->has_pending_exception());
+ Isolate::Current()->clear_pending_exception();
}
m.Succeed();
Handle<String> source =
- Factory::NewStringFromAscii(CStrVector("<huge register space test>"));
+ FACTORY->NewStringFromAscii(CStrVector("<huge register space test>"));
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
// String long enough for test (content doesn't matter).
Handle<String> input =
- Factory::NewStringFromAscii(CStrVector("sample text"));
+ FACTORY->NewStringFromAscii(CStrVector("sample text"));
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
CHECK_EQ(0, captures[0]);
CHECK_EQ(42, captures[1]);
- Top::clear_pending_exception();
+ Isolate::Current()->clear_pending_exception();
}
#else // V8_INTERPRETED_REGEXP
v8::HandleScope scope;
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("^f(o)o"));
+ Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("^f(o)o"));
Handle<ByteArray> array = Handle<ByteArray>::cast(m.GetCode(source));
int captures[5];
const uc16 str1[] = {'f', 'o', 'o', 'b', 'a', 'r'};
Handle<String> f1_16 =
- Factory::NewStringFromTwoByte(Vector<const uc16>(str1, 6));
+ FACTORY->NewStringFromTwoByte(Vector<const uc16>(str1, 6));
CHECK(IrregexpInterpreter::Match(array, f1_16, captures, 0));
CHECK_EQ(0, captures[0]);
const uc16 str2[] = {'b', 'a', 'r', 'f', 'o', 'o'};
Handle<String> f2_16 =
- Factory::NewStringFromTwoByte(Vector<const uc16>(str2, 6));
+ FACTORY->NewStringFromTwoByte(Vector<const uc16>(str2, 6));
CHECK(!IrregexpInterpreter::Match(array, f2_16, captures, 0));
CHECK_EQ(42, captures[0]);
TEST(AddInverseToTable) {
+ v8::internal::V8::Initialize(NULL);
static const int kLimit = 1000;
static const int kRangeCount = 16;
for (int t = 0; t < 10; t++) {
TEST(CharacterRangeCaseIndependence) {
+ v8::internal::V8::Initialize(NULL);
TestSimpleRangeCaseIndependence(CharacterRange::Singleton('a'),
CharacterRange::Singleton('A'));
TestSimpleRangeCaseIndependence(CharacterRange::Singleton('z'),
TEST(CharClassDifference) {
+ v8::internal::V8::Initialize(NULL);
ZoneScope zone_scope(DELETE_ON_EXIT);
ZoneList<CharacterRange>* base = new ZoneList<CharacterRange>(1);
base->Add(CharacterRange::Everything());
TEST(CanonicalizeCharacterSets) {
+ v8::internal::V8::Initialize(NULL);
ZoneScope scope(DELETE_ON_EXIT);
ZoneList<CharacterRange>* list = new ZoneList<CharacterRange>(4);
CharacterSet set(list);
}
TEST(CharacterRangeMerge) {
+ v8::internal::V8::Initialize(NULL);
ZoneScope zone_scope(DELETE_ON_EXIT);
ZoneList<CharacterRange> l1(4);
ZoneList<CharacterRange> l2(4);
TEST(ExternalReferenceEncoder) {
- StatsTable::SetCounterFunction(counter_function);
- Heap::Setup(false);
+ OS::Setup();
+ i::Isolate::Current()->stats_table()->SetCounterFunction(counter_function);
+ HEAP->Setup(false);
ExternalReferenceEncoder encoder;
CHECK_EQ(make_code(BUILTIN, Builtins::ArrayCode),
Encode(encoder, Builtins::ArrayCode));
CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty),
Encode(encoder, IC_Utility(IC::kLoadCallbackProperty)));
ExternalReference keyed_load_function_prototype =
- ExternalReference(&Counters::keyed_load_function_prototype);
+ ExternalReference(COUNTERS->keyed_load_function_prototype());
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
encoder.Encode(keyed_load_function_prototype.address()));
ExternalReference the_hole_value_location =
TEST(ExternalReferenceDecoder) {
- StatsTable::SetCounterFunction(counter_function);
- Heap::Setup(false);
+ OS::Setup();
+ i::Isolate::Current()->stats_table()->SetCounterFunction(counter_function);
+ HEAP->Setup(false);
ExternalReferenceDecoder decoder;
CHECK_EQ(AddressOf(Builtins::ArrayCode),
decoder.Decode(make_code(BUILTIN, Builtins::ArrayCode)));
CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)),
decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty)));
ExternalReference keyed_load_function =
- ExternalReference(&Counters::keyed_load_function_prototype);
+ ExternalReference(COUNTERS->keyed_load_function_prototype());
CHECK_EQ(keyed_load_function.address(),
decoder.Decode(
make_code(STATS_COUNTER,
static void SanityCheck() {
v8::HandleScope scope;
#ifdef DEBUG
- Heap::Verify();
+ HEAP->Verify();
#endif
- CHECK(Top::global()->IsJSObject());
- CHECK(Top::global_context()->IsContext());
- CHECK(Heap::symbol_table()->IsSymbolTable());
- CHECK(!Factory::LookupAsciiSymbol("Empty")->IsFailure());
+ CHECK(Isolate::Current()->global()->IsJSObject());
+ CHECK(Isolate::Current()->global_context()->IsContext());
+ CHECK(HEAP->symbol_table()->IsSymbolTable());
+ CHECK(!FACTORY->LookupAsciiSymbol("Empty")->IsFailure());
}
// serialize a snapshot in a VM that is booted from a snapshot.
if (!Snapshot::IsEnabled()) {
v8::HandleScope scope;
-
Deserialize();
v8::Persistent<v8::Context> env = v8::Context::New();
DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
if (!Snapshot::IsEnabled()) {
v8::HandleScope scope;
-
Deserialize();
v8::Persistent<v8::Context> env = v8::Context::New();
DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
if (!Snapshot::IsEnabled()) {
v8::HandleScope scope;
-
Deserialize();
v8::Persistent<v8::Context> env = v8::Context::New();
SerializeTwice) {
if (!Snapshot::IsEnabled()) {
v8::HandleScope scope;
-
Deserialize();
v8::Persistent<v8::Context> env = v8::Context::New();
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Bootstrapper::NativesSourceLookup(i);
+ Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
}
}
- Heap::CollectAllGarbage(true);
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
Object* raw_foo;
{
#undef fscanf
#endif
fclose(fp);
- Heap::ReserveSpace(new_size,
+ HEAP->ReserveSpace(new_size,
pointer_size,
data_size,
code_size,
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Bootstrapper::NativesSourceLookup(i);
+ Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of env.
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
for (int size = 1000; size < 5 * MB; size += size >> 1) {
int new_space_size = (size < new_space_max) ? size : new_space_max;
- Heap::ReserveSpace(
+ HEAP->ReserveSpace(
new_space_size,
size, // Old pointer space.
size, // Old data space.
i + kSmallFixedArraySize <= new_space_size;
i += kSmallFixedArraySize) {
Object* obj =
- Heap::AllocateFixedArray(kSmallFixedArrayLength)->ToObjectChecked();
+ HEAP->AllocateFixedArray(kSmallFixedArrayLength)->ToObjectChecked();
if (new_last != NULL) {
CHECK(reinterpret_cast<char*>(obj) ==
reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
for (int i = 0;
i + kSmallFixedArraySize <= size;
i += kSmallFixedArraySize) {
- Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength,
+ Object* obj = HEAP->AllocateFixedArray(kSmallFixedArrayLength,
TENURED)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
Object* data_last = NULL;
for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
- Object* obj = Heap::AllocateRawAsciiString(kSmallStringLength,
+ Object* obj = HEAP->AllocateRawAsciiString(kSmallStringLength,
TENURED)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
Object* map_last = NULL;
for (int i = 0; i + kMapSize <= size; i += kMapSize) {
- Object* obj = Heap::AllocateMap(JS_OBJECT_TYPE,
+ Object* obj = HEAP->AllocateMap(JS_OBJECT_TYPE,
42 * kPointerSize)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kMapSize) % Page::kPageSize;
AlwaysAllocateScope always;
int large_object_array_length =
(size - FixedArray::kHeaderSize) / kPointerSize;
- Object* obj = Heap::AllocateFixedArray(large_object_array_length,
+ Object* obj = HEAP->AllocateFixedArray(large_object_array_length,
TENURED)->ToObjectChecked();
CHECK(!obj->IsFailure());
}
class SocketListenerThread : public Thread {
public:
- explicit SocketListenerThread(int port, int data_size)
- : port_(port), data_size_(data_size), server_(NULL), client_(NULL),
- listening_(OS::CreateSemaphore(0)) {
+ explicit SocketListenerThread(Isolate* isolate, int port, int data_size)
+ : Thread(isolate), port_(port), data_size_(data_size), server_(NULL),
+ client_(NULL), listening_(OS::CreateSemaphore(0)) {
data_ = new char[data_size_];
}
~SocketListenerThread() {
OS::SNPrintF(Vector<char>(port_str, kPortBuferLen), "%d", port);
// Create a socket listener.
- SocketListenerThread* listener = new SocketListenerThread(port, len);
+ SocketListenerThread* listener = new SocketListenerThread(Isolate::Current(),
+ port, len);
listener->Start();
listener->WaitForListening();
Address page_start = RoundUp(start, Page::kPageSize);
Page* p = Page::FromAddress(page_start);
+ // An initialized Page has a heap pointer, normally set by the memory allocator.
+ p->heap_ = HEAP;
CHECK(p->address() == page_start);
CHECK(p->is_valid());
TEST(MemoryAllocator) {
- CHECK(Heap::ConfigureHeapDefault());
- CHECK(MemoryAllocator::Setup(Heap::MaxReserved(), Heap::MaxExecutableSize()));
-
- OldSpace faked_space(Heap::MaxReserved(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
+ OS::Setup();
+ Isolate* isolate = Isolate::Current();
+ CHECK(HEAP->ConfigureHeapDefault());
+ CHECK(isolate->memory_allocator()->Setup(HEAP->MaxReserved(),
+ HEAP->MaxExecutableSize()));
+
+ OldSpace faked_space(HEAP,
+ HEAP->MaxReserved(),
+ OLD_POINTER_SPACE,
+ NOT_EXECUTABLE);
int total_pages = 0;
int requested = MemoryAllocator::kPagesPerChunk;
int allocated;
// If we request n pages, we should get n or n - 1.
Page* first_page =
- MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
+ isolate->memory_allocator()->AllocatePages(
+ requested, &allocated, &faked_space);
CHECK(first_page->is_valid());
CHECK(allocated == requested || allocated == requested - 1);
total_pages += allocated;
Page* last_page = first_page;
for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
- CHECK(MemoryAllocator::IsPageInSpace(p, &faked_space));
+ CHECK(isolate->memory_allocator()->IsPageInSpace(p, &faked_space));
last_page = p;
}
// Again, we should get n or n - 1 pages.
Page* others =
- MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
+ isolate->memory_allocator()->AllocatePages(
+ requested, &allocated, &faked_space);
CHECK(others->is_valid());
CHECK(allocated == requested || allocated == requested - 1);
total_pages += allocated;
- MemoryAllocator::SetNextPage(last_page, others);
+ isolate->memory_allocator()->SetNextPage(last_page, others);
int page_count = 0;
for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
- CHECK(MemoryAllocator::IsPageInSpace(p, &faked_space));
+ CHECK(isolate->memory_allocator()->IsPageInSpace(p, &faked_space));
page_count++;
}
CHECK(total_pages == page_count);
// Freeing pages at the first chunk starting at or after the second page
// should free the entire second chunk. It will return the page it was passed
// (since the second page was in the first chunk).
- Page* free_return = MemoryAllocator::FreePages(second_page);
+ Page* free_return = isolate->memory_allocator()->FreePages(second_page);
CHECK(free_return == second_page);
- MemoryAllocator::SetNextPage(first_page, free_return);
+ isolate->memory_allocator()->SetNextPage(first_page, free_return);
// Freeing pages in the first chunk starting at the first page should free
// the first chunk and return an invalid page.
- Page* invalid_page = MemoryAllocator::FreePages(first_page);
+ Page* invalid_page = isolate->memory_allocator()->FreePages(first_page);
CHECK(!invalid_page->is_valid());
- MemoryAllocator::TearDown();
+ isolate->memory_allocator()->TearDown();
}
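// Condensed, the per-isolate allocator protocol this test exercises
// (`requested` and `faked_space` are the test's own locals):
//
//   MemoryAllocator* ma = Isolate::Current()->memory_allocator();
//   int allocated;
//   Page* first = ma->AllocatePages(requested, &allocated, &faked_space);
//   // allocated is requested or requested - 1, chunk granularity permitting
//   ma->FreePages(first);  // releases the chunk, returns an invalid page
//   ma->TearDown();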
TEST(NewSpace) {
- CHECK(Heap::ConfigureHeapDefault());
- CHECK(MemoryAllocator::Setup(Heap::MaxReserved(), Heap::MaxExecutableSize()));
+ OS::Setup();
+ CHECK(HEAP->ConfigureHeapDefault());
+ CHECK(Isolate::Current()->memory_allocator()->Setup(
+ HEAP->MaxReserved(), HEAP->MaxExecutableSize()));
- NewSpace new_space;
+ NewSpace new_space(HEAP);
void* chunk =
- MemoryAllocator::ReserveInitialChunk(4 * Heap::ReservedSemiSpaceSize());
+ Isolate::Current()->memory_allocator()->ReserveInitialChunk(
+ 4 * HEAP->ReservedSemiSpaceSize());
CHECK(chunk != NULL);
Address start = RoundUp(static_cast<Address>(chunk),
- 2 * Heap::ReservedSemiSpaceSize());
- CHECK(new_space.Setup(start, 2 * Heap::ReservedSemiSpaceSize()));
+ 2 * HEAP->ReservedSemiSpaceSize());
+ CHECK(new_space.Setup(start, 2 * HEAP->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetup());
while (new_space.Available() >= Page::kMaxHeapObjectSize) {
}
new_space.TearDown();
- MemoryAllocator::TearDown();
+ Isolate::Current()->memory_allocator()->TearDown();
}
TEST(OldSpace) {
- CHECK(Heap::ConfigureHeapDefault());
- CHECK(MemoryAllocator::Setup(Heap::MaxReserved(), Heap::MaxExecutableSize()));
+ OS::Setup();
+ CHECK(HEAP->ConfigureHeapDefault());
+ CHECK(Isolate::Current()->memory_allocator()->Setup(
+ HEAP->MaxReserved(), HEAP->MaxExecutableSize()));
- OldSpace* s = new OldSpace(Heap::MaxOldGenerationSize(),
+ OldSpace* s = new OldSpace(HEAP,
+ HEAP->MaxOldGenerationSize(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
CHECK(s != NULL);
void* chunk =
- MemoryAllocator::ReserveInitialChunk(4 * Heap::ReservedSemiSpaceSize());
+ Isolate::Current()->memory_allocator()->ReserveInitialChunk(
+ 4 * HEAP->ReservedSemiSpaceSize());
CHECK(chunk != NULL);
Address start = static_cast<Address>(chunk);
- size_t size = RoundUp(start, 2 * Heap::ReservedSemiSpaceSize()) - start;
+ size_t size = RoundUp(start, 2 * HEAP->ReservedSemiSpaceSize()) - start;
CHECK(s->Setup(start, size));
s->TearDown();
delete s;
- MemoryAllocator::TearDown();
+ Isolate::Current()->memory_allocator()->TearDown();
}
TEST(LargeObjectSpace) {
- CHECK(Heap::Setup(false));
+ OS::Setup();
+ CHECK(HEAP->Setup(false));
- LargeObjectSpace* lo = Heap::lo_space();
+ LargeObjectSpace* lo = HEAP->lo_space();
CHECK(lo != NULL);
Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0));
lo->TearDown();
delete lo;
- MemoryAllocator::TearDown();
+ Isolate::Current()->memory_allocator()->TearDown();
}
buf[j] = gen() % 65536;
}
building_blocks[i] =
- Factory::NewStringFromTwoByte(Vector<const uc16>(buf, len));
+ FACTORY->NewStringFromTwoByte(Vector<const uc16>(buf, len));
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
buf[j] = gen() % 128;
}
building_blocks[i] =
- Factory::NewStringFromAscii(Vector<const char>(buf, len));
+ FACTORY->NewStringFromAscii(Vector<const char>(buf, len));
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
break;
}
case 2: {
- uc16* buf = Zone::NewArray<uc16>(len);
+ uc16* buf = ZONE->NewArray<uc16>(len);
for (int j = 0; j < len; j++) {
buf[j] = gen() % 65536;
}
Resource* resource = new Resource(Vector<const uc16>(buf, len));
- building_blocks[i] = Factory::NewExternalStringFromTwoByte(resource);
+ building_blocks[i] = FACTORY->NewExternalStringFromTwoByte(resource);
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
buf[j] = gen() % 128;
}
building_blocks[i] =
- Factory::NewStringFromAscii(Vector<const char>(buf, len));
+ FACTORY->NewStringFromAscii(Vector<const char>(buf, len));
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
static Handle<String> ConstructLeft(
Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
int depth) {
- Handle<String> answer = Factory::NewStringFromAscii(CStrVector(""));
+ Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
for (int i = 0; i < depth; i++) {
- answer = Factory::NewConsString(
+ answer = FACTORY->NewConsString(
answer,
building_blocks[i % NUMBER_OF_BUILDING_BLOCKS]);
}
static Handle<String> ConstructRight(
Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
int depth) {
- Handle<String> answer = Factory::NewStringFromAscii(CStrVector(""));
+ Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
for (int i = depth - 1; i >= 0; i--) {
- answer = Factory::NewConsString(
+ answer = FACTORY->NewConsString(
building_blocks[i % NUMBER_OF_BUILDING_BLOCKS],
answer);
}
return building_blocks[from % NUMBER_OF_BUILDING_BLOCKS];
}
if (to - from == 2) {
- return Factory::NewConsString(
+ return FACTORY->NewConsString(
building_blocks[from % NUMBER_OF_BUILDING_BLOCKS],
building_blocks[(from+1) % NUMBER_OF_BUILDING_BLOCKS]);
}
ConstructBalancedHelper(building_blocks, from, from + ((to - from) / 2));
Handle<String> part2 =
ConstructBalancedHelper(building_blocks, from + ((to - from) / 2), to);
- return Factory::NewConsString(part1, part2);
+ return FACTORY->NewConsString(part1, part2);
}
foo[i] = "foo "[i % 4];
}
Handle<String> string =
- Factory::NewStringFromAscii(Vector<const char>(foo, DEEP_ASCII_DEPTH));
- Handle<String> foo_string = Factory::NewStringFromAscii(CStrVector("foo"));
+ FACTORY->NewStringFromAscii(Vector<const char>(foo, DEEP_ASCII_DEPTH));
+ Handle<String> foo_string = FACTORY->NewStringFromAscii(CStrVector("foo"));
for (int i = 0; i < DEEP_ASCII_DEPTH; i += 10) {
- string = Factory::NewConsString(string, foo_string);
+ string = FACTORY->NewConsString(string, foo_string);
}
- Handle<String> flat_string = Factory::NewConsString(string, foo_string);
+ Handle<String> flat_string = FACTORY->NewConsString(string, foo_string);
FlattenString(flat_string);
for (int i = 0; i < 500; i++) {
// Generate short ascii and non-ascii external strings.
for (int i = 0; i <= kMaxLength; i++) {
- char* ascii = Zone::NewArray<char>(i + 1);
+ char* ascii = ZONE->NewArray<char>(i + 1);
for (int j = 0; j < i; j++) {
ascii[j] = 'a';
}
v8::String::NewExternal(ascii_resource);
ascii_external_strings->Set(v8::Integer::New(i), ascii_external_string);
- uc16* non_ascii = Zone::NewArray<uc16>(i + 1);
+ uc16* non_ascii = ZONE->NewArray<uc16>(i + 1);
for (int j = 0; j < i; j++) {
non_ascii[j] = 0x1234;
}
Handle<Smi> fortytwo(Smi::FromInt(42));
Handle<Smi> thirtyseven(Smi::FromInt(37));
Handle<Object> results[] = {
- Factory::undefined_value(),
+ FACTORY->undefined_value(),
fortytwo,
- Factory::undefined_value(),
- Factory::undefined_value(),
+ FACTORY->undefined_value(),
+ FACTORY->undefined_value(),
thirtyseven,
fortytwo,
thirtyseven // Bug yielded 42 here.
class TerminatorThread : public v8::internal::Thread {
+ public:
+ explicit TerminatorThread(i::Isolate* isolate) : Thread(isolate) { }
void Run() {
semaphore->Wait();
CHECK(!v8::V8::IsExecutionTerminating());
// from the side by another thread.
TEST(TerminateOnlyV8ThreadFromOtherThread) {
semaphore = v8::internal::OS::CreateSemaphore(0);
- TerminatorThread thread;
+ TerminatorThread thread(i::Isolate::Current());
thread.Start();
v8::HandleScope scope;
class LoopingThread : public v8::internal::Thread {
public:
+ explicit LoopingThread(i::Isolate* isolate) : Thread(isolate) { }
void Run() {
v8::Locker locker;
v8::HandleScope scope;
v8::Locker::StartPreemption(1);
semaphore = v8::internal::OS::CreateSemaphore(0);
}
- LoopingThread thread1;
+ LoopingThread thread1(i::Isolate::Current());
thread1.Start();
- LoopingThread thread2;
+ LoopingThread thread2(i::Isolate::Current());
thread2.Start();
// Wait until both threads have signaled the semaphore.
semaphore->Wait();
class ThreadA: public v8::internal::Thread {
public:
+ explicit ThreadA(i::Isolate* isolate) : Thread(isolate) { }
void Run() {
v8::Locker locker;
v8::HandleScope scope;
class ThreadB: public v8::internal::Thread {
public:
+ explicit ThreadB(i::Isolate* isolate) : Thread(isolate) { }
void Run() {
do {
{
v8::Context::Scope context_scope(v8::Context::New());
// Clear the caches by forcing major GC.
- v8::internal::Heap::CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
turn = SECOND_TIME_FILL_CACHE;
break;
}
TEST(JSFunctionResultCachesInTwoThreads) {
v8::V8::Initialize();
- ThreadA threadA;
- ThreadB threadB;
+ ThreadA threadA(i::Isolate::Current());
+ ThreadB threadB(i::Isolate::Current());
threadA.Start();
threadB.Start();
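// The recurring pattern in this patch: every v8::internal::Thread subclass
// now names its isolate at construction. A minimal sketch (WorkerThread and
// its body are illustrative):
//
//   class WorkerThread : public v8::internal::Thread {
//    public:
//     explicit WorkerThread(i::Isolate* isolate) : Thread(isolate) { }
//     void Run() { /* runs against the bound isolate */ }
//   };
//   WorkerThread worker(i::Isolate::Current());
//   worker.Start();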
TEST(MemCopy) {
- V8::Initialize(NULL);
+ OS::Setup();
const int N = kMinComplexMemCopy + 128;
Vector<byte> buffer1 = Vector<byte>::New(N);
Vector<byte> buffer2 = Vector<byte>::New(N);
class MjsunitTestCase(test.TestCase):
- def __init__(self, path, file, mode, context, config):
+ def __init__(self, path, file, mode, context, config, isolates):
super(MjsunitTestCase, self).__init__(context, path, mode)
self.file = file
self.config = config
self.self_script = False
+ self.isolates = isolates
def GetLabel(self):
return "%s %s" % (self.mode, self.GetName())
def GetName(self):
- return self.path[-1]
+ return self.path[-1] + ["", "-isolates"][self.isolates]
- def GetCommand(self):
+ def TestsIsolates(self):
+ return self.isolates
+
+ def GetVmCommand(self, source):
result = self.config.context.GetVmCommand(self, self.mode)
- source = open(self.file).read()
flags_match = FLAGS_PATTERN.search(source)
if flags_match:
result += flags_match.group(1).strip().split()
+ return result
+
+ def GetVmArguments(self, source):
+ result = []
additional_files = []
files_match = FILES_PATTERN.search(source)
# Accept several lines of 'Files:'
result += [framework, self.file]
return result
+ def GetCommand(self):
+ source = open(self.file).read()
+ result = self.GetVmCommand(source)
+ result += self.GetVmArguments(source)
+ if self.isolates:
+ result.append("--isolate")
+ result += self.GetVmArguments(source)
+ return result
+
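When self.isolates is set, GetCommand() above emits the VM arguments twice with a literal --isolate flag in between, so the shell runs the same test a second time in a fresh isolate. Roughly, with illustrative values:

    # Sketch of the assembled command; concrete names are hypothetical.
    #   GetVmCommand(...)   -> ['shell', '--expose-gc']
    #   GetVmArguments(...) -> ['mjsunit.js', 'foo.js']
    #   GetCommand()        -> ['shell', '--expose-gc',
    #                           'mjsunit.js', 'foo.js',
    #                           '--isolate',
    #                           'mjsunit.js', 'foo.js']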
def GetSource(self):
return open(self.file).read()
for test in all_tests:
if self.Contains(path, test):
file_path = join(self.root, reduce(join, test[1:], "") + ".js")
- result.append(MjsunitTestCase(test, file_path, mode, self.context, self))
+ result.append(MjsunitTestCase(test, file_path, mode, self.context, self, False))
+ result.append(MjsunitTestCase(test, file_path, mode, self.context, self, True))
return result
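ListTests therefore registers every .js file twice: once as a plain run and once with isolates enabled; the isolates variant is filtered out later unless the runner is invoked with --isolates.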
def GetBuildRequirements(self):
'../../src/jump-target.h',
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
+ '../../src/isolate.cc',
+ '../../src/isolate.h',
'../../src/list-inl.h',
'../../src/list.h',
'../../src/lithium.cc',
def IsNegative(self):
return False
+ def TestsIsolates(self):
+ return False
+
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def CheckedUnlink(name):
- try:
- os.unlink(name)
- except OSError, e:
- PrintError("os.unlink() " + str(e))
-
+  # On Windows, when tests run in parallel with -jN, os.unlink() often
+  # fails on the temp file; the cause is unclear, so retry with an
+  # increasing delay before reporting the error.
+  # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
+  retry_count = 0
+  while retry_count < 30:
+    try:
+      os.unlink(name)
+      return
+    except OSError, e:
+      retry_count += 1
+      time.sleep(retry_count * 0.1)
+  PrintError("os.unlink() " + str(e))
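The backoff above is linear: attempt k sleeps k * 0.1 seconds, so 30 failed attempts cost at most 0.1 + 0.2 + ... + 3.0 = 46.5 seconds before the error is printed.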
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
self.case = case
self.outcomes = outcomes
+ def TestsIsolates(self):
+ return self.case.TestsIsolates()
+
class Configuration(object):
"""The parsed contents of a configuration file"""
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
+ result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+ if not options.isolates:
+ cases_to_run = [c for c in cases_to_run if not c.TestsIsolates()]
if len(cases_to_run) == 0:
print "No tests to run."
return 0