This code is copyrighted by Marc Alexander Lehmann. Both are dually
licensed under MIT and GPL2.
- - JSMin JavaScript minifier, located at tools/jsmin.py. This code is
- copyrighted by Douglas Crockford and Baruch Even and has an MIT license.
-
- parseUri, a URI parser, is located in lib/http.js. This is just a small
snippet. It is copyrighted 2007 by Steven Levithan and released under an
MIT license.
distclean:
@-rm -rf build/
- @-rm -f *.pyc
+ @-find tools | egrep --colour=never ".pyc$" | xargs rm
check:
@tools/waf-light check
+2009-10-07: Version 1.3.14
+
+ Added GetRealNamedProperty to the API to lookup real properties
+ located on the object or in the prototype chain skipping any
+ interceptors.
+
+ Fix the stack limits setting API to work correctly with threads. The
+ stack limit now needs to be set to each thread which is used with V8.
+
+ Remove the high-priority flag from IdleNotification()
+
+ Ensure V8 is initialized before locking and unlocking threads.
+
+ Implemented a new JavaScript minifier for compressing the source of
+ the built-in JavaScript. This removes non-Open Source code from Douglas
+ Crockford from the project.
+
+ Added a missing optimization in StringCharAt.
+
+ Fixed some flaky socket tests.
+
+ Change by Alexander Botero-Lowry to fix profiler sampling on FreeBSD
+ in 64-bit mode.
+
+ Fixed memory leaks in the thread management code.
+
+ Fixed the result of assignment to a pixel array. The assigned value
+ is now the result.
+
+ Error reporting for invalid left-hand sides in for-in statements, pre-
+ and postfix count expressions, and assignments now matches the JSC
+ behavior in Safari 4.
+
+ Follow the spec in disallowing function declarations without a name.
+
+ Always allocate code objects within a 2 GB range. On x64 architecture
+ this is used to enable near calls (32-bit displacement) in Code objects.
+
+ Optimized array construction ported to x64 and ARM architectures.
+
+ [ES5] Changed Object.keys to return strings for element indices.
+
+
2009-09-23: Version 1.3.13
Fixed uninitialized memory problem.
This code is copyrighted by Sun Microsystems Inc. and released
under a 3-clause BSD license.
- - JSMin JavaScript minifier, located at tools/jsmin.py. This code is
- copyrighted by Douglas Crockford and Baruch Even and released under
- an MIT license.
-
- - Valgrind client API header, located at third_party/valgrind/valgrind.h
- This is release under the BSD license.
-
- Valgrind client API header, located at third_party/valgrind/valgrind.h
This is release under the BSD license.
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
namespace internal {
class Object;
+class Arguments;
}
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- Handle<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+ Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+
+ /**
+ * If result.IsEmpty() no real property was located on the object or
+ * in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ Local<Value> GetRealNamedProperty(Handle<String> key);
/** Tests for a named lookup interceptor.*/
bool HasNamedLookupInterceptor();
*/
class V8EXPORT AccessorInfo {
public:
- inline AccessorInfo(Local<Object> self,
- Local<Value> data,
- Local<Object> holder)
- : self_(self), data_(data), holder_(holder) { }
+ inline AccessorInfo(internal::Object** args)
+ : args_(args) { }
inline Local<Value> Data() const;
inline Local<Object> This() const;
inline Local<Object> Holder() const;
private:
- Local<Object> self_;
- Local<Value> data_;
- Local<Object> holder_;
+ internal::Object** args_;
};
/**
* A FunctionTemplate is used to create functions at runtime. There
* can only be one function created from a FunctionTemplate in a
- * context.
+ * context. The lifetime of the created function is equal to the
+ * lifetime of the context. So in case the embedder needs to create
+ * temporary functions that can be collected, using Scripts is
+ * preferred.
*
* A FunctionTemplate can have properties, these properties are added to the
* function object when it is created.
/**
- * A set of constraints that specifies the limits of the runtime's
- * memory use.
+ * A set of constraints that specifies the limits of the runtime's memory use.
+ * You must set the heap size before initializing the VM - the size cannot be
+ * adjusted after the VM is initialized.
+ *
+ * If you are using threads then you should hold the V8::Locker lock while
+ * setting the stack limit and you must set a non-default stack limit separately
+ * for each thread.
*/
class V8EXPORT ResourceConstraints {
public:
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
+ // Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
private:
int max_young_space_size_;
/**
* Initializes from snapshot if possible. Otherwise, attempts to
- * initialize from scratch.
+ * initialize from scratch. This function is called implicitly if
+ * you use the API without calling it first.
*/
static bool Initialize();
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* This call can be used repeatedly if the embedder remains idle.
- * \param is_high_priority tells whether the embedder is high priority.
* Returns true if the embedder should stop calling IdleNotification
* until real work has been done. This indicates that V8 has done
* as much cleanup as it will be able to do.
*/
- static bool IdleNotification(bool is_high_priority);
+ static bool IdleNotification();
/**
* Optional notification that the system is running low on memory.
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
-
+
static inline bool HasSmiTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
}
-
+
static inline int SmiValue(internal::Object* value) {
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
}
-
+
static inline bool IsExternalTwoByteString(int instance_type) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
}
-Local<Value> AccessorInfo::Data() const {
- return data_;
-}
-
-
-Local<Object> AccessorInfo::This() const {
- return self_;
-}
-
-
-Local<Object> AccessorInfo::Holder() const {
- return holder_;
-}
-
-
template <class T>
Local<T> HandleScope::Close(Handle<T> value) {
internal::Object** before = reinterpret_cast<internal::Object**>(*value);
}
+Local<Value> AccessorInfo::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[-3]));
+}
+
+
+Local<Object> AccessorInfo::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[0]));
+}
+
+
+Local<Object> AccessorInfo::Holder() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[-1]));
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
SOURCES = {
'all': [
'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
- 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc',
- 'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc',
- 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc',
- 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
- 'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
- 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
- 'hashmap.cc', 'heap.cc', 'heap-profiler.cc', 'ic.cc',
- 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
- 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
- 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
- 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
- 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc',
- 'runtime.cc', 'scanner.cc', 'scopeinfo.cc', 'scopes.cc',
- 'serialize.cc', 'snapshot-common.cc', 'spaces.cc',
- 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+ 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
+ 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
+ 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
+ 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
+ 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
+ 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
+ 'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
+ 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
+ 'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
+ 'property.cc', 'regexp-macro-assembler.cc',
+ 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+ 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+ 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
+ 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
- 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc',
- 'arm/codegen-arm.cc', 'arm/constants-arm.cc', 'arm/cpu-arm.cc',
- 'arm/disasm-arm.cc', 'arm/debug-arm.cc', 'arm/frames-arm.cc',
- 'arm/ic-arm.cc', 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
- 'arm/regexp-macro-assembler-arm.cc',
- 'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
- 'arm/virtual-frame-arm.cc'
+ 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
+ 'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
+ 'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
+ 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
+ 'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
+ 'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
],
'arch:ia32': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
- 'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc',
- 'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc'
+ 'ia32/regexp-macro-assembler-ia32.cc',
+ 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
+ 'ia32/virtual-frame-ia32.cc'
],
'arch:x64': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
- 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
- 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
- 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
- 'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc',
- 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
+ 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
+ 'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
+ 'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
+ 'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
+ 'x64/virtual-frame-x64.cc'
],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
thread_local.DecrementCallDepth(); \
if (has_pending_exception) { \
if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
- if (!thread_local.IgnoreOutOfMemory()) \
+ if (!thread_local.ignore_out_of_memory()) \
i::V8::FatalProcessOutOfMemory(NULL); \
} \
bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
bool SetResourceConstraints(ResourceConstraints* constraints) {
- bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(),
- constraints->max_old_space_size());
- if (!result) return false;
+ int semispace_size = constraints->max_young_space_size();
+ int old_gen_size = constraints->max_old_space_size();
+ if (semispace_size != 0 || old_gen_size != 0) {
+ bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+ if (!result) return false;
+ }
if (constraints->stack_limit() != NULL) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
i::StackGuard::SetStackLimit(limit);
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::Set()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::ForceSet()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
ON_BAILOUT("v8::Object::ForceDelete()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE();
}
-Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
+Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Handle<String> key) {
ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
return Local<Value>());
}
+Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local<Value>());
+ ENTER_V8;
+ i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::LookupResult lookup;
+ self_obj->LookupRealNamedProperty(*key_obj, &lookup);
+ if (lookup.IsValid()) {
+ PropertyAttributes attributes;
+ i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
+ &lookup,
+ *key_obj,
+ &attributes));
+ return Utils::ToLocal(result);
+ }
+ return Local<Value>(); // No real property was found in prototype chain.
+}
+
+
// Turns on access checks by copying the map and setting the check flag.
// Because the object gets a new map, existing inline cache caching
// the old map of this object will fail.
void v8::Object::TurnOnAccessCheck() {
ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Map> new_map =
int v8::Object::GetIdentityHash() {
ON_BAILOUT("v8::Object::GetIdentityHash()", return 0);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
v8::Handle<v8::Value> value) {
ON_BAILOUT("v8::Object::SetHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ON_BAILOUT("v8::DeleteHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
if (hidden_props->IsUndefined()) {
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT("v8::SetElementsToPixelData()", return);
ENTER_V8;
+ HandleScope scope;
if (!ApiCheck(i::Smi::IsValid(length),
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlattenIfNotFlat();
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
if (end < 0) return 0;
- write_input_buffer.Reset(start, *str);
- int i;
- for (i = 0; i < end; i++)
- buffer[i] = write_input_buffer.GetNext();
- if (length == -1 || i < length)
- buffer[i] = '\0';
- return i;
+ i::String::WriteToFlat(*str, buffer, start, end);
+ if (length == -1 || end < length)
+ buffer[end] = '\0';
+ return end;
}
}
-bool v8::V8::IdleNotification(bool is_high_priority) {
- if (!i::V8::IsRunning()) return false;
- return i::V8::IdleNotification(is_high_priority);
+bool v8::V8::IdleNotification() {
+ // Returning true tells the caller that it need not
+ // continue to call IdleNotification.
+ if (!i::V8::IsRunning()) return true;
+ return i::V8::IdleNotification();
}
v8::Local<v8::Context> Context::GetCalling() {
if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
- i::Handle<i::Context> context(i::Top::GetCallingGlobalContext());
+ i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+ if (calling.is_null()) return Local<Context>();
+ i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
}
void V8::IgnoreOutOfMemoryException() {
- thread_local.SetIgnoreOutOfMemory(true);
+ thread_local.set_ignore_out_of_memory(true);
}
}
+void HandleScopeImplementer::FreeThreadResources() {
+ thread_local.Free();
+}
+
+
char* HandleScopeImplementer::ArchiveThread(char* storage) {
return thread_local.ArchiveThreadHelper(storage);
}
handle_scope_data_ = *current;
memcpy(storage, this, sizeof(*this));
- Initialize();
+ ResetAfterArchive();
current->Initialize();
return storage + ArchiveSpacePerThread();
void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
// Iterate over all handles in the blocks except for the last.
- for (int i = Blocks()->length() - 2; i >= 0; --i) {
- Object** block = Blocks()->at(i);
+ for (int i = blocks()->length() - 2; i >= 0; --i) {
+ Object** block = blocks()->at(i);
v->VisitPointers(block, &block[kHandleBlockSize]);
}
// Iterate over live handles in the last block (if any).
- if (!Blocks()->is_empty()) {
- v->VisitPointers(Blocks()->last(), handle_scope_data_.next);
+ if (!blocks()->is_empty()) {
+ v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
if (!saved_contexts_.is_empty()) {
public:
HandleScopeImplementer()
- : blocks(0),
+ : blocks_(0),
entered_contexts_(0),
- saved_contexts_(0) {
- Initialize();
- }
-
- void Initialize() {
- blocks.Initialize(0);
- entered_contexts_.Initialize(0);
- saved_contexts_.Initialize(0);
- spare = NULL;
- ignore_out_of_memory = false;
- call_depth = 0;
- }
+ saved_contexts_(0),
+ spare_(NULL),
+ ignore_out_of_memory_(false),
+ call_depth_(0) { }
static HandleScopeImplementer* instance();
static int ArchiveSpacePerThread();
static char* RestoreThread(char* from);
static char* ArchiveThread(char* to);
+ static void FreeThreadResources();
// Garbage collection support.
static void Iterate(v8::internal::ObjectVisitor* v);
inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(int extensions);
- inline void IncrementCallDepth() {call_depth++;}
- inline void DecrementCallDepth() {call_depth--;}
- inline bool CallDepthIsZero() { return call_depth == 0; }
+ inline void IncrementCallDepth() {call_depth_++;}
+ inline void DecrementCallDepth() {call_depth_--;}
+ inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Object> context);
inline bool LeaveLastContext();
inline Context* RestoreContext();
inline bool HasSavedContexts();
- inline List<internal::Object**>* Blocks() { return &blocks; }
-
- inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
- inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
+ inline List<internal::Object**>* blocks() { return &blocks_; }
+ inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
+ inline void set_ignore_out_of_memory(bool value) {
+ ignore_out_of_memory_ = value;
+ }
private:
- List<internal::Object**> blocks;
- Object** spare;
- int call_depth;
+ void ResetAfterArchive() {
+ blocks_.Initialize(0);
+ entered_contexts_.Initialize(0);
+ saved_contexts_.Initialize(0);
+ spare_ = NULL;
+ ignore_out_of_memory_ = false;
+ call_depth_ = 0;
+ }
+
+ void Free() {
+ ASSERT(blocks_.length() == 0);
+ ASSERT(entered_contexts_.length() == 0);
+ ASSERT(saved_contexts_.length() == 0);
+ blocks_.Free();
+ entered_contexts_.Free();
+ saved_contexts_.Free();
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
+ }
+ ASSERT(call_depth_ == 0);
+ }
+
+ List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
List<Handle<Object> > entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
- bool ignore_out_of_memory;
+ Object** spare_;
+ bool ignore_out_of_memory_;
+ int call_depth_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block = (spare != NULL) ?
- spare :
+ internal::Object** block = (spare_ != NULL) ?
+ spare_ :
NewArray<internal::Object*>(kHandleBlockSize);
- spare = NULL;
+ spare_ = NULL;
return block;
}
void HandleScopeImplementer::DeleteExtensions(int extensions) {
- if (spare != NULL) {
- DeleteArray(spare);
- spare = NULL;
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
}
for (int i = extensions; i > 1; --i) {
- internal::Object** block = blocks.RemoveLast();
+ internal::Object** block = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(block,
&block[kHandleBlockSize]);
#endif
DeleteArray(block);
}
- spare = blocks.RemoveLast();
+ spare_ = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(
- spare,
- &spare[kHandleBlockSize]);
+ spare_,
+ &spare_[kHandleBlockSize]);
#endif
}
class Arguments BASE_EMBEDDED {
public:
+ Arguments(int length, Object** arguments)
+ : length_(length), arguments_(arguments) { }
+
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
return arguments_[-index];
// Get the total number of arguments including the receiver.
int length() const { return length_; }
+ Object** arguments() { return arguments_; }
+
private:
int length_;
Object** arguments_;
};
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+class CustomArguments : public Relocatable {
+ public:
+ inline CustomArguments(Object *data,
+ JSObject *self,
+ JSObject *holder) {
+ values_[3] = self;
+ values_[2] = holder;
+ values_[1] = Smi::FromInt(0);
+ values_[0] = data;
+ }
+ void IterateInstance(ObjectVisitor* v);
+ Object** end() { return values_ + 3; }
+ private:
+ Object* values_[4];
+};
+
+
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
str(src, MemOperand(sp, 4, NegPreIndex), cond);
}
- void pop(Register dst) {
- ldr(dst, MemOperand(sp, 4, PostIndex), al);
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
}
void pop() {
__ str(r1, MemOperand(ip, 0));
// The actual argument count has already been loaded into register
- // r0, but JumpToBuiltin expects r0 to contain the number of
+ // r0, but JumpToRuntime expects r0 to contain the number of
// arguments including the receiver.
__ add(r0, r0, Operand(1));
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+
+ __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements and
+// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
+// below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity > 0);
+ // Load the initial map from the array function.
+ __ ldr(scratch1, FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(scratch3, Operand(0));
+ __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, MemOperand(result, JSArray::kSize));
+ __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array (untagged)
+ // scratch2: start of next object
+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ __ mov(scratch3, Operand(initial_capacity));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+
+ // Fill the FixedArray with the hole value.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < initial_capacity; i++) {
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ }
+}
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array_storage and elements_array_end
+// (see below for when that is not the case). If the parameter fill_with_holes
+// is true the allocated elements backing store is filled with the hole values
+// otherwise it is left uninitialized. When the backing store is filled the
+// register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array_storage,
+ Register elements_array_end,
+ Register scratch1,
+ Register scratch2,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ ldr(elements_array_storage,
+ FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ tst(array_size, array_size);
+ __ b(nz, ¬_empty);
+
+ // If an empty array is requested allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize +
+ FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ elements_array_end,
+ scratch1,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested number of elements.
+ __ bind(¬_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ mov(elements_array_end,
+ Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
+ __ add(elements_array_end,
+ elements_array_end,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ AllocateInNewSpace(elements_array_end,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array_storage: initial map
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // array_size: size of array (smi)
+ __ add(elements_array_storage, result, Operand(JSArray::kSize));
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(elements_array_storage,
+ elements_array_storage,
+ Operand(~kHeapObjectTagMask));
+ // Initialize the fixed array and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // elements_array_storage: elements array (untagged)
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ // Convert array_size from smi to value.
+ __ mov(array_size,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ tst(array_size, array_size);
+ // Length of the FixedArray is the number of pre-allocated elements if
+ // the actual JSArray has length 0 and the size of the JSArray for non-empty
+ // JSArrays. The length of a FixedArray is not stored as a smi.
+ __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(array_size,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+
+ // Calculate elements array and elements array end.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // array_size: size of elements array
+ __ add(elements_array_end,
+ elements_array_storage,
+ Operand(array_size, LSL, kPointerSizeLog2));
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // elements_array_end: start of next object
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ str(scratch1,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(elements_array_storage, elements_array_end);
+ __ b(lt, &loop);
+ }
+}
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// r0: argc
+// r1: constructor (built-in Array function)
+// lr: return address
+// sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in r1 needs to be preserved for
+// entering the generic code. In both cases argc in r0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label *call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments or one.
+ __ cmp(r0, Operand(0));
+ __ b(ne, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ JSArray::kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+ // Setup return value, remove receiver from stack and return.
+ __ mov(r0, r2);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
+
+ // Check for one argument. Bail out if argument is not smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmp(r0, Operand(1));
+ __ b(ne, &argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
+ __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
+ __ b(ne, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ ASSERT(kSmiTag == 0);
+ __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+ __ b(ge, call_generic_code);
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+ // Setup return value, remove receiver and argument from stack and return.
+ __ mov(r0, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Jump(lr);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convert argc to a smi.
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: last argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+
+ // Fill arguments as array elements. Copy from the top of the stack (last
+ // element) to the array backing store filling it backwards. Note:
+ // elements_array_end points after the backing store therefore PreIndex is
+ // used when filling the backing store.
+ // r0: argc
+ // r3: JSArray
+ // r4: elements_array storage start (untagged)
+ // r5: elements_array_end (untagged)
+ // sp[0]: last argument
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ cmp(r4, r5);
+ __ b(lt, &loop);
+
+ // Remove caller arguments and receiver from the stack, setup return value and
+ // return.
+ // r0: argc
+ // r3: JSArray
+ // sp[0]: receiver
+ __ add(sp, sp, Operand(kPointerSize));
+ __ mov(r0, r3);
+ __ Jump(lr);
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // Just jump to the generic array code.
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, r1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ Jump(array_code, RelocInfo::CODE_TARGET);
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // Just jump to the generic construct code.
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function which
+ // always has a map.
+ GenerateLoadArrayFunction(masm, r2);
+ __ cmp(r1, r2);
+ __ Assert(eq, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
// r5: start of next object
// r7: undefined
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateObjectInNewSpace(r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// r1: constructor
+++ /dev/null
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-arm.h" // Include after codegen-inl.h.
-#include "macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- StackCheckStub stub;
- __ CallStub(&stub);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ add(sp, sp, Operand((count + 1) * kPointerSize));
- __ Jump(lr);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
-
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Discard key and receiver.
- __ add(sp, sp, Operand(2 * kPointerSize));
- } else {
- key()->Get(masm, r2);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ pop(); // Discard receiver.
- }
- location()->Set(masm, r0);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Move left to r1 and right to r0.
- left()->Get(masm, r1);
- right()->Get(masm, r0);
- GenericBinaryOpStub stub(op(), mode);
- __ CallStub(&stub);
- location()->Set(masm, r0);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value()->Get(masm, r0);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, Operand(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ mov(ip, Operand(handle_));
- __ push(ip);
-}
-
-
-static MemOperand ToMemOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return MemOperand(fp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return MemOperand(r0);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ mov(ip, Operand(handle_));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ ldr(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ str(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ ldr(ip, ToMemOperand(this));
- __ push(ip); // Push will not destroy ip.
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // Double dispatch.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ ldr(ip, ToMemOperand(this));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(reg, r0);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(r0, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(r0);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, r0);
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ str(r0, ToMemOperand(loc));
- case STACK:
- __ pop(ip);
- __ str(ip, ToMemOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Call");
+ Expression* function = node->expression();
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
// Standard function call.
-
// Check if the function is a variable or a property.
- Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
Property* property = function->AsProperty();
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ // Prepare stack for call to resolved function.
+ LoadAndSpill(function);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ frame_->EmitPush(r2); // Slot for receiver
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Prepare stack for call to ResolvePossiblyDirectEval.
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+ frame_->EmitPush(r1);
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ frame_->EmitPush(r1);
+ } else {
+ frame_->EmitPush(r2);
+ }
+
+ // Resolve the call.
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up stack with the right values for the function and the receiver.
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+ __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+ __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ frame_->CallStub(&call_function, arg_count + 1);
+
+ __ ldr(cp, frame_->Context());
+ // Remove the function from the stack.
+ frame_->Drop();
+ frame_->EmitPush(r0);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare stack for call to resolved function.
- LoadAndSpill(function);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r2); // Slot for receiver
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
- }
-
- // Prepare stack for call to ResolvePossiblyDirectEval.
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
- frame_->EmitPush(r1);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
-
- // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up stack with the right values for the function and the receiver.
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
- __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Call the function.
- CodeForSourcePosition(node->position());
-
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- frame_->CallStub(&call_function, arg_count + 1);
-
- __ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
- frame_->EmitPush(r0);
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
Register scratch2) { // Another scratch register.
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
// Get heap number map and store it in the allocated object.
__ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
// r5: Address of heap number for result.
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
+ __ AlignStack(0);
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
__ Call(r5);
+ __ pop(r4); // Address of heap number.
+ __ cmp(r4, Operand(Smi::FromInt(0)));
+ __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
// Store answer in the overwritable heap number.
- __ pop(r4);
#if !defined(USE_ARM_EABI)
// Double returned in fp coprocessor register 0 and 1, encoded as register
// cr8. Offsets must be divisible by 4 for coprocessor so we need to
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// Align the stack at this point. After this point we have 5 pushes,
// so in fact we have to unalign here! See also the assert on the
- // alignment immediately below.
-#if defined(V8_HOST_ARCH_ARM)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so we will always align at
- // this point here.
- int activation_frame_alignment = 2 * kPointerSize;
-#endif // defined(V8_HOST_ARCH_ARM)
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- mov(r7, Operand(Smi::FromInt(0)));
- tst(sp, Operand(activation_frame_alignment - 1));
- push(r7, eq); // Conditional push instruction.
- }
+ // alignment in AlignStack.
+ AlignStack(1);
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
}
+void MacroAssembler::AlignStack(int offset) {
+#if defined(V8_HOST_ARCH_ARM)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_ARM)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so we will always align at
+ // this point here.
+ int activation_frame_alignment = 2 * kPointerSize;
+#endif // defined(V8_HOST_ARCH_ARM)
+ if (activation_frame_alignment != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(activation_frame_alignment == 2 * kPointerSize);
+ mov(r7, Operand(Smi::FromInt(0)));
+ tst(sp, Operand(activation_frame_alignment - offset));
+ push(r7, eq); // Conditional push instruction.
+ }
+}
+
+
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
int argc = Builtins::GetArgumentsCount(id);
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
int argc = Builtins::GetArgumentsCount(id);
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(StackFrame::Type type);
+ // Align the stack by optionally pushing a Smi zero.
+ void AlignStack(int offset);
// ---------------------------------------------------------------------------
// JavaScript invokes
// bytes). If the new space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
 // tag_allocated_object is true the result is tagged as a heap object.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of parameters.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& builtin);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in the register rdi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
Simulator::Simulator() {
- ASSERT(initialized_);
+ Initialize();
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
// Get the active Simulator for the current thread.
Simulator* Simulator::current() {
+ Initialize();
Simulator* sim = reinterpret_cast<Simulator*>(
v8::internal::Thread::GetThreadLocal(simulator_key));
if (sim == NULL) {
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
+#include "allocation.h"
+
#if defined(__arm__)
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) - limit)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on arm uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (assembler::arm::Simulator::current()->StackLimit())
-
-
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
} } // namespace assembler::arm
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+// stack will not cause stack overflow errors; the simulator ignores the input.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return assembler::arm::Simulator::current()->StackLimit();
+ }
+};
+
+
#endif // defined(__arm__)
#endif // V8_ARM_SIMULATOR_ARM_H_
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateObjectInNewSpace(r3,
- r4,
- r5,
- r6,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(r3,
+ r4,
+ r5,
+ r6,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
QuickSort(a, high_start, to);
}
+ var length;
+
// Copies elements in the range 0..length from obj's prototype chain
// to obj itself, if obj has holes. Returns one more than the maximal index
// of a prototype property.
return first_undefined;
}
- var length = ToUint32(this.length);
+ length = ToUint32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
INLINE(Address target_address());
INLINE(void set_target_address(Address target));
INLINE(Object* target_object());
+ INLINE(Handle<Object> target_object_handle(Assembler* origin));
INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target));
ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
Call Call::sentinel_(NULL, NULL, 0);
-CallEval CallEval::sentinel_(NULL, NULL, 0);
// ----------------------------------------------------------------------------
V(Throw) \
V(Property) \
V(Call) \
- V(CallEval) \
V(CallNew) \
V(CallRuntime) \
V(UnaryOperation) \
class AstNode: public ZoneObject {
public:
- AstNode(): statement_pos_(RelocInfo::kNoPosition) { }
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
};
class Statement: public AstNode {
public:
+ Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
bool IsEmpty() { return AsEmptyStatement() != NULL; }
+
+ void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
+ int statement_pos() const { return statement_pos_; }
+
+ private:
+ int statement_pos_;
};
class Call: public Expression {
public:
- Call(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos)
- : expression_(expression),
- arguments_(arguments),
- pos_(pos) { }
+ Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+ : expression_(expression), arguments_(arguments), pos_(pos) { }
virtual void Accept(AstVisitor* v);
};
-class CallNew: public Call {
+class CallNew: public Expression {
public:
CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : Call(expression, arguments, pos) { }
-
- virtual void Accept(AstVisitor* v);
-};
-
-
-// The CallEval class represents a call of the form 'eval(...)' where eval
-// cannot be seen to be overwritten at compile time. It is potentially a
-// direct (i.e. not aliased) eval call. The real nature of the call is
-// determined at runtime.
-class CallEval: public Call {
- public:
- CallEval(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : Call(expression, arguments, pos) { }
+ : expression_(expression), arguments_(arguments), pos_(pos) { }
virtual void Accept(AstVisitor* v);
- static CallEval* sentinel() { return &sentinel_; }
+ Expression* expression() const { return expression_; }
+ ZoneList<Expression*>* arguments() const { return arguments_; }
+ int position() { return pos_; }
private:
- static CallEval sentinel_;
+ Expression* expression_;
+ ZoneList<Expression*>* arguments_;
+ int pos_;
};
}
Code* code = Code::cast(code_[i]);
Address pc = code->instruction_start() + pc_[i];
- bool is_pc_relative = Bootstrapper::FixupFlagsIsPCRelative::decode(flags);
+ RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
-
if (use_code_object) {
- if (is_pc_relative) {
- Assembler::set_target_address_at(
- pc, reinterpret_cast<Address>(f->code()));
- } else {
- *reinterpret_cast<Object**>(pc) = f->code();
- }
+ target.set_target_object(f->code());
} else {
- Assembler::set_target_address_at(pc, f->code()->instruction_start());
+ target.set_target_address(f->code()->instruction_start());
}
-
LOG(StringEvent("resolved", name));
}
Clear();
}
+// Called when the top-level V8 mutex is destroyed.
+void Bootstrapper::FreeThreadResources() {
+ ASSERT(Genesis::current() == NULL);
+}
+
+
// Reserve space for statics needing saving and restoring.
int Genesis::ArchiveSpacePerThread() {
return sizeof(current_);
static bool IsActive();
// Encoding/decoding support for fixup flags.
- class FixupFlagsIsPCRelative: public BitField<bool, 0, 1> {};
- class FixupFlagsUseCodeObject: public BitField<bool, 1, 1> {};
- class FixupFlagsArgumentsCount: public BitField<uint32_t, 2, 32-2> {};
+ class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
+ class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {};
// Support for thread preemption.
static int ArchiveSpacePerThread();
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
+ static void FreeThreadResources();
};
}} // namespace v8::internal
}
// Optimize the case where there are no parameters passed.
- if (args.length() == 1) return array->Initialize(4);
+ if (args.length() == 1) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
// Take the arguments as elements.
int number_of_elements = args.length() - 1;
+++ /dev/null
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "cfg.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-CfgGlobals* CfgGlobals::top_ = NULL;
-
-
-CfgGlobals::CfgGlobals(FunctionLiteral* fun)
- : global_fun_(fun),
- global_exit_(new ExitNode()),
- nowhere_(new Nowhere()),
-#ifdef DEBUG
- node_counter_(0),
- temp_counter_(0),
-#endif
- previous_(top_) {
- top_ = this;
-}
-
-
-#define BAILOUT(reason) \
- do { return NULL; } while (false)
-
-Cfg* Cfg::Build() {
- FunctionLiteral* fun = CfgGlobals::current()->fun();
- if (fun->scope()->num_heap_slots() > 0) {
- BAILOUT("function has context slots");
- }
- if (fun->scope()->num_stack_slots() > kBitsPerPointer) {
- BAILOUT("function has too many locals");
- }
- if (fun->scope()->num_parameters() > kBitsPerPointer - 1) {
- BAILOUT("function has too many parameters");
- }
- if (fun->scope()->arguments() != NULL) {
- BAILOUT("function uses .arguments");
- }
-
- ZoneList<Statement*>* body = fun->body();
- if (body->is_empty()) {
- BAILOUT("empty function body");
- }
-
- StatementCfgBuilder builder;
- builder.VisitStatements(body);
- Cfg* graph = builder.graph();
- if (graph == NULL) {
- BAILOUT("unsupported statement type");
- }
- if (graph->is_empty()) {
- BAILOUT("function body produces empty cfg");
- }
- if (graph->has_exit()) {
- BAILOUT("control path without explicit return");
- }
- graph->PrependEntryNode();
- return graph;
-}
-
-#undef BAILOUT
-
-
-void Cfg::PrependEntryNode() {
- ASSERT(!is_empty());
- entry_ = new EntryNode(InstructionBlock::cast(entry()));
-}
-
-
-void Cfg::Append(Instruction* instr) {
- ASSERT(is_empty() || has_exit());
- if (is_empty()) {
- entry_ = exit_ = new InstructionBlock();
- }
- InstructionBlock::cast(exit_)->Append(instr);
-}
-
-
-void Cfg::AppendReturnInstruction(Value* value) {
- Append(new ReturnInstr(value));
- ExitNode* global_exit = CfgGlobals::current()->exit();
- InstructionBlock::cast(exit_)->set_successor(global_exit);
- exit_ = NULL;
-}
-
-
-void Cfg::Concatenate(Cfg* other) {
- ASSERT(is_empty() || has_exit());
- if (other->is_empty()) return;
-
- if (is_empty()) {
- entry_ = other->entry();
- exit_ = other->exit();
- } else {
- // We have a pair of nonempty fragments and this has an available exit.
- // Destructively glue the fragments together.
- InstructionBlock* first = InstructionBlock::cast(exit_);
- InstructionBlock* second = InstructionBlock::cast(other->entry());
- first->instructions()->AddAll(*second->instructions());
- if (second->successor() != NULL) {
- first->set_successor(second->successor());
- exit_ = other->exit();
- }
- }
-}
-
-
-void InstructionBlock::Unmark() {
- if (is_marked_) {
- is_marked_ = false;
- successor_->Unmark();
- }
-}
-
-
-void EntryNode::Unmark() {
- if (is_marked_) {
- is_marked_ = false;
- successor_->Unmark();
- }
-}
-
-
-void ExitNode::Unmark() {
- is_marked_ = false;
-}
-
-
-Handle<Code> Cfg::Compile(Handle<Script> script) {
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler* masm = new MacroAssembler(NULL, kInitialBufferSize);
- entry()->Compile(masm);
- entry()->Unmark();
- CodeDesc desc;
- masm->GetCode(&desc);
- FunctionLiteral* fun = CfgGlobals::current()->fun();
- ZoneScopeInfo info(fun->scope());
- InLoopFlag in_loop = fun->loop_nesting() ? IN_LOOP : NOT_IN_LOOP;
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
- Handle<Code> code = Factory::NewCode(desc, &info, flags, masm->CodeObject());
-
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(*code, masm);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code) {
- // Print the source code if available.
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(fun->start_position());
- // fun->end_position() points to the last character in the
- // stream. We need to compensate by adding one to calculate the
- // length.
- int source_len = fun->end_position() - fun->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
- }
- PrintF("\n\n");
- }
- PrintF("--- Code ---\n");
- code->Disassemble(*fun->name()->ToCString());
- }
-#endif
-
- return code;
-}
-
-
-void ZeroOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where(TempLocation::STACK);
-}
-
-
-void OneOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where((temp == value_)
- ? TempLocation::ACCUMULATOR
- : TempLocation::STACK);
-}
-
-
-void TwoOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where((temp == value0_ || temp == value1_)
- ? TempLocation::ACCUMULATOR
- : TempLocation::STACK);
-}
-
-
-void PositionInstr::Compile(MacroAssembler* masm) {
- if (FLAG_debug_info && pos_ != RelocInfo::kNoPosition) {
- masm->RecordStatementPosition(pos_);
- masm->RecordPosition(pos_);
- }
-}
-
-
-void MoveInstr::Compile(MacroAssembler* masm) {
- location()->Move(masm, value());
-}
-
-
-// The expression builder should not be used for declarations or statements.
-void ExpressionCfgBuilder::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-#define DEFINE_VISIT(type) \
- void ExpressionCfgBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
-STATEMENT_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-// Macros (temporarily) handling unsupported expression types.
-#define BAILOUT(reason) \
- do { \
- graph_ = NULL; \
- return; \
- } while (false)
-
-void ExpressionCfgBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- BAILOUT("FunctionLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* expr) {
- BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitConditional(Conditional* expr) {
- BAILOUT("Conditional");
-}
-
-
-void ExpressionCfgBuilder::VisitSlot(Slot* expr) {
- BAILOUT("Slot");
-}
-
-
-void ExpressionCfgBuilder::VisitVariableProxy(VariableProxy* expr) {
- Expression* rewrite = expr->var()->rewrite();
- if (rewrite == NULL || rewrite->AsSlot() == NULL) {
- BAILOUT("unsupported variable (not a slot)");
- }
- Slot* slot = rewrite->AsSlot();
- if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
- BAILOUT("unsupported slot type (not a parameter or local)");
- }
- // Ignore the passed destination.
- value_ = new SlotLocation(slot->type(), slot->index());
-}
-
-
-void ExpressionCfgBuilder::VisitLiteral(Literal* expr) {
- // Ignore the passed destination.
- value_ = new Constant(expr->handle());
-}
-
-
-void ExpressionCfgBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- BAILOUT("RegExpLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- BAILOUT("ObjectLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- BAILOUT("ArrayLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
-}
-
-
-void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
- if (expr->op() != Token::ASSIGN && expr->op() != Token::INIT_VAR) {
- BAILOUT("unsupported compound assignment");
- }
- Expression* lhs = expr->target();
- if (lhs->AsProperty() != NULL) {
- BAILOUT("unsupported property assignment");
- }
-
- Variable* var = lhs->AsVariableProxy()->AsVariable();
- if (var == NULL) {
- BAILOUT("unsupported invalid left-hand side");
- }
- if (var->is_global()) {
- BAILOUT("unsupported global variable");
- }
- Slot* slot = var->slot();
- ASSERT(slot != NULL);
- if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
- BAILOUT("unsupported slot lhs (not a parameter or local)");
- }
-
- // Parameter and local slot assignments.
- ExpressionCfgBuilder builder;
- SlotLocation* loc = new SlotLocation(slot->type(), slot->index());
- builder.Build(expr->value(), loc);
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in assignment");
- }
- // If the expression did not come back in the slot location, append
- // a move to the CFG.
- graph_ = builder.graph();
- if (builder.value() != loc) {
- graph()->Append(new MoveInstr(loc, builder.value()));
- }
- // Record the assignment.
- assigned_vars_.AddElement(loc);
- // Ignore the destination passed to us.
- value_ = loc;
-}
-
-
-void ExpressionCfgBuilder::VisitThrow(Throw* expr) {
- BAILOUT("Throw");
-}
-
-
-void ExpressionCfgBuilder::VisitProperty(Property* expr) {
- ExpressionCfgBuilder object, key;
- object.Build(expr->obj(), NULL);
- if (object.graph() == NULL) {
- BAILOUT("unsupported object subexpression in propload");
- }
- key.Build(expr->key(), NULL);
- if (key.graph() == NULL) {
- BAILOUT("unsupported key subexpression in propload");
- }
-
- if (destination_ == NULL) destination_ = new TempLocation();
-
- graph_ = object.graph();
- // Insert a move to a fresh temporary if the object value is in a slot
- // that's assigned in the key.
- Location* temp = NULL;
- if (object.value()->is_slot() &&
- key.assigned_vars()->Contains(SlotLocation::cast(object.value()))) {
- temp = new TempLocation();
- graph()->Append(new MoveInstr(temp, object.value()));
- }
- graph()->Concatenate(key.graph());
- graph()->Append(new PropLoadInstr(destination_,
- temp == NULL ? object.value() : temp,
- key.value()));
-
- assigned_vars_ = *object.assigned_vars();
- assigned_vars()->Union(key.assigned_vars());
-
- value_ = destination_;
-}
-
-
-void ExpressionCfgBuilder::VisitCall(Call* expr) {
- BAILOUT("Call");
-}
-
-
-void ExpressionCfgBuilder::VisitCallEval(CallEval* expr) {
- BAILOUT("CallEval");
-}
-
-
-void ExpressionCfgBuilder::VisitCallNew(CallNew* expr) {
- BAILOUT("CallNew");
-}
-
-
-void ExpressionCfgBuilder::VisitCallRuntime(CallRuntime* expr) {
- BAILOUT("CallRuntime");
-}
-
-
-void ExpressionCfgBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- BAILOUT("UnaryOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitCountOperation(CountOperation* expr) {
- BAILOUT("CountOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- Token::Value op = expr->op();
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- BAILOUT("unsupported binary operation");
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- ExpressionCfgBuilder left, right;
- left.Build(expr->left(), NULL);
- if (left.graph() == NULL) {
- BAILOUT("unsupported left subexpression in binop");
- }
- right.Build(expr->right(), NULL);
- if (right.graph() == NULL) {
- BAILOUT("unsupported right subexpression in binop");
- }
-
- if (destination_ == NULL) destination_ = new TempLocation();
-
- graph_ = left.graph();
- // Insert a move to a fresh temporary if the left value is in a
- // slot that's assigned on the right.
- Location* temp = NULL;
- if (left.value()->is_slot() &&
- right.assigned_vars()->Contains(SlotLocation::cast(left.value()))) {
- temp = new TempLocation();
- graph()->Append(new MoveInstr(temp, left.value()));
- }
- graph()->Concatenate(right.graph());
- graph()->Append(new BinaryOpInstr(destination_, op,
- temp == NULL ? left.value() : temp,
- right.value()));
-
- assigned_vars_ = *left.assigned_vars();
- assigned_vars()->Union(right.assigned_vars());
-
- value_ = destination_;
- return;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void ExpressionCfgBuilder::VisitCompareOperation(CompareOperation* expr) {
- BAILOUT("CompareOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
-}
-
-#undef BAILOUT
-
-
-// Macros (temporarily) handling unsupported statement types.
-#define BAILOUT(reason) \
- do { \
- graph_ = NULL; \
- return; \
- } while (false)
-
-#define CHECK_BAILOUT() \
- if (graph() == NULL) { return; } else {}
-
-void StatementCfgBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT();
- if (!graph()->has_exit()) return;
- }
-}
-
-
-// The statement builder should not be used for declarations or expressions.
-void StatementCfgBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
-
-#define DEFINE_VISIT(type) \
- void StatementCfgBuilder::Visit##type(type* expr) { UNREACHABLE(); }
-EXPRESSION_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-void StatementCfgBuilder::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void StatementCfgBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- ExpressionCfgBuilder builder;
- builder.Build(stmt->expression(), CfgGlobals::current()->nowhere());
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in expression statement");
- }
- graph()->Append(new PositionInstr(stmt->statement_pos()));
- graph()->Concatenate(builder.graph());
-}
-
-
-void StatementCfgBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void StatementCfgBuilder::VisitIfStatement(IfStatement* stmt) {
- BAILOUT("IfStatement");
-}
-
-
-void StatementCfgBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- BAILOUT("ContinueStatement");
-}
-
-
-void StatementCfgBuilder::VisitBreakStatement(BreakStatement* stmt) {
- BAILOUT("BreakStatement");
-}
-
-
-void StatementCfgBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- ExpressionCfgBuilder builder;
- builder.Build(stmt->expression(), NULL);
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in return statement");
- }
-
- graph()->Append(new PositionInstr(stmt->statement_pos()));
- graph()->Concatenate(builder.graph());
- graph()->AppendReturnInstruction(builder.value());
-}
-
-
-void StatementCfgBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
-}
-
-
-void StatementCfgBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
-}
-
-
-void StatementCfgBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void StatementCfgBuilder::VisitLoopStatement(LoopStatement* stmt) {
- BAILOUT("LoopStatement");
-}
-
-
-void StatementCfgBuilder::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void StatementCfgBuilder::VisitTryCatch(TryCatch* stmt) {
- BAILOUT("TryCatch");
-}
-
-
-void StatementCfgBuilder::VisitTryFinally(TryFinally* stmt) {
- BAILOUT("TryFinally");
-}
-
-
-void StatementCfgBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
-}
-
-
-#ifdef DEBUG
-// CFG printing support (via depth-first, preorder block traversal).
-
-void Cfg::Print() {
- entry_->Print();
- entry_->Unmark();
-}
-
-
-void Constant::Print() {
- PrintF("Constant ");
- handle_->Print();
-}
-
-
-void Nowhere::Print() {
- PrintF("Nowhere");
-}
-
-
-void SlotLocation::Print() {
- PrintF("Slot ");
- switch (type_) {
- case Slot::PARAMETER:
- PrintF("(PARAMETER, %d)", index_);
- break;
- case Slot::LOCAL:
- PrintF("(LOCAL, %d)", index_);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Print() {
- PrintF("Temp %d", number());
-}
-
-
-void OneOperandInstruction::Print() {
- PrintF("(");
- location()->Print();
- PrintF(", ");
- value_->Print();
- PrintF(")");
-}
-
-
-void TwoOperandInstruction::Print() {
- PrintF("(");
- location()->Print();
- PrintF(", ");
- value0_->Print();
- PrintF(", ");
- value1_->Print();
- PrintF(")");
-}
-
-
-void MoveInstr::Print() {
- PrintF("Move ");
- OneOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void PropLoadInstr::Print() {
- PrintF("PropLoad ");
- TwoOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void BinaryOpInstr::Print() {
- switch (op()) {
- case Token::OR:
- // Two character operand.
- PrintF("BinaryOp[OR] ");
- break;
- case Token::AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Three character operands.
- PrintF("BinaryOp[%s] ", Token::Name(op()));
- break;
- case Token::COMMA:
- // Five character operand.
- PrintF("BinaryOp[COMMA] ");
- break;
- case Token::BIT_OR:
- // Six character operand.
- PrintF("BinaryOp[BIT_OR] ");
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Seven character operands.
- PrintF("BinaryOp[%s] ", Token::Name(op()));
- break;
- default:
- UNREACHABLE();
- }
- TwoOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void ReturnInstr::Print() {
- PrintF("Return ");
- OneOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void InstructionBlock::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- PrintF("L%d:\n", number());
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- instructions_[i]->Print();
- }
- PrintF("Goto L%d\n\n", successor_->number());
- successor_->Print();
- }
-}
-
-
-void EntryNode::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- successor_->Print();
- }
-}
-
-
-void ExitNode::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- PrintF("L%d:\nExit\n\n", number());
- }
-}
-
-#endif // DEBUG
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CFG_H_
-#define V8_CFG_H_
-
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-class ExitNode;
-class Location;
-
-// Translate a source AST into a control-flow graph (CFG). The CFG contains
-// single-entry, single-exit blocks of straight-line instructions and
-// administrative nodes.
-//
-// Instructions are described by the following grammar.
-//
-// <Instruction> ::=
-// Move <Location> <Value>
-// | PropLoad <Location> <Value> <Value>
-// | BinaryOp <Location> Token::Value <Value> <Value>
-// | Return Nowhere <Value>
-// | Position <Int>
-//
-// Values are trivial expressions:
-//
-// <Value> ::= Constant | <Location>
-//
-// Locations are storable values ('lvalues'). They can be slots,
-// compiler-generated temporaries, or the special location 'Nowhere'
-// indicating that no value is needed.
-//
-// <Location> ::=
-// SlotLocation Slot::Type <Index>
-// | TempLocation
-// | Nowhere
-
-
-// Administrative nodes: There are several types of 'administrative' nodes
-// that do not contain instructions and do not necessarily have a single
-// predecessor and a single successor.
-//
-// EntryNode: there is a distinguished entry node that has no predecessors
-// and a single successor.
-//
-// ExitNode: there is a distinguished exit node that has arbitrarily many
-// predecessors and no successor.
-//
-// JoinNode: join nodes have multiple predecessors and a single successor.
-//
-// BranchNode: branch nodes have a single predecessor and multiple
-// successors.
-
-
-// A convenient class to keep 'global' values when building a CFG. Since
-// CFG construction can be invoked recursively, CFG globals are stacked.
-class CfgGlobals BASE_EMBEDDED {
- public:
- explicit CfgGlobals(FunctionLiteral* fun);
-
- ~CfgGlobals() { top_ = previous_; }
-
- static CfgGlobals* current() {
- ASSERT(top_ != NULL);
- return top_;
- }
-
- // The function currently being compiled.
- FunctionLiteral* fun() { return global_fun_; }
-
- // The shared global exit node for all exits from the function.
- ExitNode* exit() { return global_exit_; }
-
- // A singleton.
- Location* nowhere() { return nowhere_; }
-
-#ifdef DEBUG
- int next_node_number() { return node_counter_++; }
- int next_temp_number() { return temp_counter_++; }
-#endif
-
- private:
- static CfgGlobals* top_;
- FunctionLiteral* global_fun_;
- ExitNode* global_exit_;
- Location* nowhere_;
-
-#ifdef DEBUG
- // Used to number nodes and temporaries when printing.
- int node_counter_;
- int temp_counter_;
-#endif
-
- CfgGlobals* previous_;
-};
-
-
-class SlotLocation;
-
-// Values represent trivial source expressions: ones with no side effects
-// and that do not require code to be generated.
-class Value : public ZoneObject {
- public:
- virtual ~Value() {}
-
- // Predicates:
-
- virtual bool is_temporary() { return false; }
- virtual bool is_slot() { return false; }
- virtual bool is_constant() { return false; }
-
- // True if the value is a temporary allocated to the stack in
- // fast-compilation mode.
- virtual bool is_on_stack() { return false; }
-
- // Support for fast-compilation mode:
-
- // Move the value into a register.
- virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
- // Push the value on the stack.
- virtual void Push(MacroAssembler* masm) = 0;
-
- // Move the value into a slot location.
- virtual void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-};
-
-
-// A compile-time constant that appeared as a literal in the source AST.
-class Constant : public Value {
- public:
- explicit Constant(Handle<Object> handle) : handle_(handle) {}
-
- // Cast accessor.
- static Constant* cast(Value* value) {
- ASSERT(value->is_constant());
- return reinterpret_cast<Constant*>(value);
- }
-
- // Accessors.
- Handle<Object> handle() { return handle_; }
-
- // Predicates.
- bool is_constant() { return true; }
-
- // Support for fast-compilation mode.
- void Get(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Handle<Object> handle_;
-};
-
-
-// Locations are values that can be stored into ('lvalues').
-class Location : public Value {
- public:
- virtual ~Location() {}
-
- // Static factory function returning the singleton nowhere location.
- static Location* Nowhere() {
- return CfgGlobals::current()->nowhere();
- }
-
- // Support for fast-compilation mode:
-
- // Assumes temporaries have been allocated.
- virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
- // Store the value in a register to the location. Assumes temporaries
- // have been allocated.
- virtual void Set(MacroAssembler* masm, Register reg) = 0;
-
- // Assumes temporaries have been allocated, and if the value is a
- // temporary it was not allocated to the stack.
- virtual void Push(MacroAssembler* masm) = 0;
-
- // Emit code to move a value into this location.
- virtual void Move(MacroAssembler* masm, Value* value) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-};
-
-
-// Nowhere is a special (singleton) location that indicates the value of a
-// computation is not needed (though its side effects are).
-class Nowhere : public Location {
- public:
- // We should not try to emit code to read Nowhere.
- void Get(MacroAssembler* masm, Register reg) { UNREACHABLE(); }
- void Push(MacroAssembler* masm) { UNREACHABLE(); }
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { UNREACHABLE(); }
-
- // Setting Nowhere is ignored.
- void Set(MacroAssembler* masm, Register reg) {}
- void Move(MacroAssembler* masm, Value* value) {}
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Nowhere() {}
-
- friend class CfgGlobals;
-};
-
-
-// SlotLocations represent parameters and stack-allocated (i.e.,
-// non-context) local variables.
-class SlotLocation : public Location {
- public:
- SlotLocation(Slot::Type type, int index) : type_(type), index_(index) {}
-
- // Cast accessor.
- static SlotLocation* cast(Value* value) {
- ASSERT(value->is_slot());
- return reinterpret_cast<SlotLocation*>(value);
- }
-
- // Accessors.
- Slot::Type type() { return type_; }
- int index() { return index_; }
-
- // Predicates.
- bool is_slot() { return true; }
-
- // Support for fast-compilation mode.
- void Get(MacroAssembler* masm, Register reg);
- void Set(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void Move(MacroAssembler* masm, Value* value);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Slot::Type type_;
- int index_;
-};
-
-
-// TempLocations represent compiler generated temporaries. They are
-// allocated to registers or memory either before code generation (in the
-// optimized-for-speed compiler) or on the fly during code generation (in
-// the optimized-for-space compiler).
-class TempLocation : public Location {
- public:
- // Fast-compilation mode allocation decisions.
- enum Where {
- NOT_ALLOCATED, // Not yet allocated.
- ACCUMULATOR, // Allocated to the dedicated accumulator register.
- STACK // " " " " stack.
- };
-
- TempLocation() : where_(NOT_ALLOCATED) {
-#ifdef DEBUG
- number_ = -1;
-#endif
- }
-
- // Cast accessor.
- static TempLocation* cast(Value* value) {
- ASSERT(value->is_temporary());
- return reinterpret_cast<TempLocation*>(value);
- }
-
- // Accessors.
- Where where() { return where_; }
- void set_where(Where where) {
- ASSERT(where_ == TempLocation::NOT_ALLOCATED);
- where_ = where;
- }
-
- // Predicates.
- bool is_on_stack() { return where_ == STACK; }
- bool is_temporary() { return true; }
-
- // Support for fast-compilation mode. Assume the temp has been allocated.
- void Get(MacroAssembler* masm, Register reg);
- void Set(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void Move(MacroAssembler* masm, Value* value);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- int number() {
- if (number_ == -1) number_ = CfgGlobals::current()->next_temp_number();
- return number_;
- }
-
- void Print();
-#endif
-
- private:
- Where where_;
-
-#ifdef DEBUG
- int number_;
-#endif
-};
-
-
-// Instructions are computations. They represent non-trivial source
-// expressions: typically ones that have side effects and require code to
-// be generated.
-class Instruction : public ZoneObject {
- public:
- // Accessors.
- Location* location() { return location_; }
- void set_location(Location* location) { location_ = location; }
-
- // Support for fast-compilation mode:
-
- // Emit code to perform the instruction.
- virtual void Compile(MacroAssembler* masm) = 0;
-
- // Allocate a temporary which is the result of the immediate predecessor
- // instruction. It is allocated to the accumulator register if it is used
- // as an operand to this instruction, otherwise to the stack.
- virtual void FastAllocate(TempLocation* temp) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- protected:
- // Every instruction has a location where its result is stored (which may
- // be Nowhere).
- explicit Instruction(Location* location) : location_(location) {}
-
- virtual ~Instruction() {}
-
- Location* location_;
-};
-
-
-// Base class of instructions that have no input operands.
-class ZeroOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands (nothing).
- virtual void Print() {}
-#endif
-
- protected:
- explicit ZeroOperandInstruction(Location* loc) : Instruction(loc) {}
-};
-
-
-// Base class of instructions that have a single input operand.
-class OneOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands.
- virtual void Print();
-#endif
-
- protected:
- OneOperandInstruction(Location* loc, Value* value)
- : Instruction(loc), value_(value) {
- }
-
- Value* value_;
-};
-
-
-// Base class of instructions that have two input operands.
-class TwoOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands.
- virtual void Print();
-#endif
-
- protected:
- TwoOperandInstruction(Location* loc, Value* value0, Value* value1)
- : Instruction(loc), value0_(value0), value1_(value1) {
- }
-
- Value* value0_;
- Value* value1_;
-};
-
-
-// A phantom instruction that indicates the start of a statement. It
-// causes the statement position to be recorded in the relocation
-// information but generates no code.
-class PositionInstr : public ZeroOperandInstruction {
- public:
- explicit PositionInstr(int pos)
- : ZeroOperandInstruction(CfgGlobals::current()->nowhere()), pos_(pos) {
- }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
- // This should not be called. The last instruction of the previous
- // statement should not have a temporary as its location.
- void FastAllocate(TempLocation* temp) { UNREACHABLE(); }
-
-#ifdef DEBUG
- // Printing support. Print nothing.
- void Print() {}
-#endif
-
- private:
- int pos_;
-};
-
-
-// Move a value to a location.
-class MoveInstr : public OneOperandInstruction {
- public:
- MoveInstr(Location* loc, Value* value)
- : OneOperandInstruction(loc, value) {
- }
-
- // Accessors.
- Value* value() { return value_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- // Printing support.
- void Print();
-#endif
-};
-
-
-// Load a property from a receiver, leaving the result in a location.
-class PropLoadInstr : public TwoOperandInstruction {
- public:
- PropLoadInstr(Location* loc, Value* object, Value* key)
- : TwoOperandInstruction(loc, object, key) {
- }
-
- // Accessors.
- Value* object() { return value0_; }
- Value* key() { return value1_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// Perform a (non-short-circuited) binary operation on a pair of values,
-// leaving the result in a location.
-class BinaryOpInstr : public TwoOperandInstruction {
- public:
- BinaryOpInstr(Location* loc, Token::Value op, Value* left, Value* right)
- : TwoOperandInstruction(loc, left, right), op_(op) {
- }
-
- // Accessors.
- Value* left() { return value0_; }
- Value* right() { return value1_; }
- Token::Value op() { return op_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Token::Value op_;
-};
-
-
-// Return a value. Has the side effect of moving its value into the return
-// value register. Can only occur as the last instruction in an instruction
-// block, and implies that the block is closed (cannot have instructions
-// appended or graph fragments concatenated to the end) and that the block's
-// successor is the global exit node for the current function.
-class ReturnInstr : public OneOperandInstruction {
- public:
- explicit ReturnInstr(Value* value)
- : OneOperandInstruction(CfgGlobals::current()->nowhere(), value) {
- }
-
- virtual ~ReturnInstr() {}
-
- // Accessors.
- Value* value() { return value_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// Nodes make up control-flow graphs.
-class CfgNode : public ZoneObject {
- public:
- CfgNode() : is_marked_(false) {
-#ifdef DEBUG
- number_ = -1;
-#endif
- }
-
- virtual ~CfgNode() {}
-
- // Because CFGs contain cycles, nodes support marking during traversal
- // (e.g., for printing or compilation). The traversal functions will mark
- // unmarked nodes and backtrack if they encounter a marked one. After a
- // traversal, the graph should be explicitly unmarked by calling Unmark on
- // the entry node.
- bool is_marked() { return is_marked_; }
- virtual void Unmark() = 0;
-
- // Predicates:
-
- // True if the node is an instruction block.
- virtual bool is_block() { return false; }
-
- // Support for fast-compilation mode. Emit the instructions or control
- // flow represented by the node.
- virtual void Compile(MacroAssembler* masm) = 0;
-
-#ifdef DEBUG
- int number() {
- if (number_ == -1) number_ = CfgGlobals::current()->next_node_number();
- return number_;
- }
-
- virtual void Print() = 0;
-#endif
-
- protected:
- bool is_marked_;
-
-#ifdef DEBUG
- int number_;
-#endif
-};
-
-
-// A block is a single-entry, single-exit block of instructions.
-class InstructionBlock : public CfgNode {
- public:
- InstructionBlock() : successor_(NULL), instructions_(4) {}
-
- virtual ~InstructionBlock() {}
-
- void Unmark();
-
- // Cast accessor.
- static InstructionBlock* cast(CfgNode* node) {
- ASSERT(node->is_block());
- return reinterpret_cast<InstructionBlock*>(node);
- }
-
- bool is_block() { return true; }
-
- // Accessors.
- CfgNode* successor() { return successor_; }
-
- void set_successor(CfgNode* succ) {
- ASSERT(successor_ == NULL);
- successor_ = succ;
- }
-
- ZoneList<Instruction*>* instructions() { return &instructions_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
- // Add an instruction to the end of the block.
- void Append(Instruction* instr) { instructions_.Add(instr); }
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- CfgNode* successor_;
- ZoneList<Instruction*> instructions_;
-};
-
-
-// An entry node (one per function).
-class EntryNode : public CfgNode {
- public:
- explicit EntryNode(InstructionBlock* succ) : successor_(succ) {}
-
- virtual ~EntryNode() {}
-
- void Unmark();
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- InstructionBlock* successor_;
-};
-
-
-// An exit node (one per function).
-class ExitNode : public CfgNode {
- public:
- ExitNode() {}
-
- virtual ~ExitNode() {}
-
- void Unmark();
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// A CFG consists of a linked structure of nodes. Nodes are linked by
-// pointing to their successors, always beginning with a (single) entry node
-// (not necessarily of type EntryNode). If it is still possible to add
-// nodes to the end of the graph (i.e., there is a (single) path that does
-// not end with the global exit node), then the CFG has an exit node as
-// well.
-//
-// The empty CFG is represented by a NULL entry and a NULL exit.
-//
-// We use the term 'open fragment' to mean a CFG whose entry and exits are
-// both instruction blocks. It is always possible to add instructions and
-// nodes to the beginning or end of an open fragment.
-//
-// We use the term 'closed fragment' to mean a CFG whose entry is an
-// instruction block and whose exit is NULL (all paths go to the global
-// exit).
-//
-// We use the term 'fragment' to refer to a CFG that is known to be an open
-// or closed fragment.
-class Cfg : public ZoneObject {
- public:
- // Create an empty CFG fragment.
- Cfg() : entry_(NULL), exit_(NULL) {}
-
- // Build the CFG for a function. The returned CFG begins with an
- // EntryNode and all paths end with the ExitNode.
- static Cfg* Build();
-
- // The entry and exit nodes of the CFG (not necessarily EntryNode and
- // ExitNode).
- CfgNode* entry() { return entry_; }
- CfgNode* exit() { return exit_; }
-
- // True if the CFG has no nodes.
- bool is_empty() { return entry_ == NULL; }
-
- // True if the CFG has an available exit node (i.e., it can be appended or
- // concatenated to).
- bool has_exit() { return exit_ != NULL; }
-
- // Add an EntryNode to a CFG fragment. It is no longer a fragment
- // (instructions can no longer be prepended).
- void PrependEntryNode();
-
- // Append an instruction to the end of an open fragment.
- void Append(Instruction* instr);
-
- // Appends a return instruction to the end of an open fragment and make
- // it a closed fragment (the exit's successor becomes global exit node).
- void AppendReturnInstruction(Value* value);
-
- // Glue another CFG fragment to the end of this (open) fragment.
- void Concatenate(Cfg* other);
-
- // Support for compilation. Compile the entire CFG.
- Handle<Code> Compile(Handle<Script> script);
-
-#ifdef DEBUG
- // Support for printing.
- void Print();
-#endif
-
- private:
- // Entry and exit nodes.
- CfgNode* entry_;
- CfgNode* exit_;
-};
-
-
-// An implementation of a set of locations (currently slot locations), most
-// of the operations are destructive.
-class LocationSet BASE_EMBEDDED {
- public:
- // Construct an empty location set.
- LocationSet() : parameters_(0), locals_(0) {}
-
- // Raw accessors.
- uintptr_t parameters() { return parameters_; }
- uintptr_t locals() { return locals_; }
-
- // Make this the empty set.
- void Empty() {
- parameters_ = locals_ = 0;
- }
-
- // Insert an element.
- void AddElement(SlotLocation* location) {
- if (location->type() == Slot::PARAMETER) {
- // Parameter indexes begin with -1 ('this').
- ASSERT(location->index() < kBitsPerPointer - 1);
- parameters_ |= (1 << (location->index() + 1));
- } else {
- ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kBitsPerPointer);
- locals_ |= (1 << location->index());
- }
- }
-
- // (Destructively) compute the union with another set.
- void Union(LocationSet* other) {
- parameters_ |= other->parameters();
- locals_ |= other->locals();
- }
-
- bool Contains(SlotLocation* location) {
- if (location->type() == Slot::PARAMETER) {
- ASSERT(location->index() < kBitsPerPointer - 1);
- return (parameters_ & (1 << (location->index() + 1)));
- } else {
- ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kBitsPerPointer);
- return (locals_ & (1 << location->index()));
- }
- }
-
- private:
- uintptr_t parameters_;
- uintptr_t locals_;
-};
-
-
-// An ExpressionCfgBuilder traverses an expression and returns an open CFG
-// fragment (currently a possibly empty list of instructions represented by
-// a singleton instruction block) and the expression's value.
-//
-// Failure to build the CFG is indicated by a NULL CFG.
-class ExpressionCfgBuilder : public AstVisitor {
- public:
- ExpressionCfgBuilder() : destination_(NULL), value_(NULL), graph_(NULL) {}
-
- // Result accessors.
- Value* value() { return value_; }
- Cfg* graph() { return graph_; }
- LocationSet* assigned_vars() { return &assigned_vars_; }
-
- // Build the cfg for an expression and remember its value. The
- // destination is a 'hint' where the value should go which may be ignored.
- // NULL is used to indicate no preference.
- //
- // Concretely, if the expression needs to generate a temporary for its
- // value, it should use the passed destination or generate one if NULL.
- void Build(Expression* expr, Location* destination) {
- value_ = NULL;
- graph_ = new Cfg();
- assigned_vars_.Empty();
- destination_ = destination;
- Visit(expr);
- }
-
- // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- // State for the visitor. Input parameter:
- Location* destination_;
-
- // Output parameters:
- Value* value_;
- Cfg* graph_;
- LocationSet assigned_vars_;
-};
-
-
-// A StatementCfgBuilder maintains a CFG fragment accumulator. When it
-// visits a statement, it concatenates the CFG for the statement to the end
-// of the accumulator.
-class StatementCfgBuilder : public AstVisitor {
- public:
- StatementCfgBuilder() : graph_(new Cfg()) {}
-
- Cfg* graph() { return graph_; }
-
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- // State for the visitor. Input/output parameter:
- Cfg* graph_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CFG_H_
}
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- int pos = fun->start_position();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
+static inline void RecordPositions(CodeGenerator* cgen, int pos) {
+ if (pos != RelocInfo::kNoPosition) {
+ cgen->masm()->RecordStatementPosition(pos);
+ cgen->masm()->RecordPosition(pos);
}
}
+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) RecordPositions(this, fun->start_position());
+}
+
+
void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- int pos = fun->end_position();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
- }
+ if (FLAG_debug_info) RecordPositions(this, fun->end_position());
}
-void CodeGenerator::CodeForStatementPosition(AstNode* node) {
- if (FLAG_debug_info) {
- int pos = node->statement_pos();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
- }
+void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
+ if (FLAG_debug_info) RecordPositions(this, stmt->statement_pos());
}
void CodeGenerator::CodeForSourcePosition(int pos) {
- if (FLAG_debug_info) {
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordPosition(pos);
- }
+ if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+ masm()->RecordPosition(pos);
}
}
#include "v8.h"
#include "bootstrapper.h"
-#include "cfg.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "compiler.h"
return Handle<Code>::null();
}
- if (FLAG_multipass) {
- CfgGlobals scope(literal);
- Cfg* cfg = Cfg::Build();
-#ifdef DEBUG
- if (FLAG_print_cfg && cfg != NULL) {
- SmartPointer<char> name = literal->name()->ToCString();
- PrintF("Function \"%s\":\n", *name);
- cfg->Print();
- PrintF("\n");
- }
-#endif
- if (cfg != NULL) {
- return cfg->Compile(script);
- }
- }
-
// Generate code and return it.
Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
return result;
ScriptDataImpl* pre_data) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- // Make sure we have an initial stack limit.
- StackGuard guard;
PostponeInterruptsScope postpone;
ASSERT(!i::Top::global_context().is_null());
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
- // Make sure we have an initial stack limit.
- StackGuard guard;
PostponeInterruptsScope postpone;
// Compute name, source code and script data.
// Accept connections on the bound port.
while (!terminate_) {
bool ok = server_->Listen(1);
+ listening_->Signal();
if (ok) {
// Accept the new connection.
Socket* client = server_->Accept();
}
+void DebuggerAgent::WaitUntilListening() {
+ listening_->Wait();
+}
+
void DebuggerAgent::CreateSession(Socket* client) {
ScopedLock with(session_access_);
: name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)) {
+ terminate_now_(OS::CreateSemaphore(0)),
+ listening_(OS::CreateSemaphore(0)) {
ASSERT(instance_ == NULL);
instance_ = this;
}
}
void Shutdown();
+ void WaitUntilListening();
private:
void Run();
Mutex* session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination.
+ Semaphore* listening_;
static DebuggerAgent* instance_;
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// jsminify this file, js2c: jsmin
-
// Default number of frames to include in the response to backtrace request.
const kDefaultBacktraceLength = 10;
// Regular expression to skip "crud" at the beginning of a source line which is
// not really code. Currently the regular expression matches whitespace and
// comments.
-const sourceLineBeginningSkip = /^(?:[ \v\h]*(?:\/\*.*?\*\/)*)*/;
+const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
// Debug events which can occour in the V8 JavaScript engine. These originate
// from the API include file debug.h.
if (!script.sourceColumnStart_) {
script.sourceColumnStart_ = new Array(script.lineCount());
}
-
+
// Fill cache if needed and get column where the actual source starts.
if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
script.sourceColumnStart_[line] =
// Convert the line and column into an absolute position within the script.
var pos = Debug.findScriptSourcePosition(script, this.line(), column);
-
+
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
if (pos === null) return;
-
+
// Create a break point object and set the break point.
break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
break_point.setIgnoreCount(this.ignoreCount());
// Returns the character position in a script based on a line number and an
// optional position within that line.
Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
- var location = script.locationFromLine(opt_line, opt_column);
+ var location = script.locationFromLine(opt_line, opt_column);
return location ? location.position : null;
}
o.body = { uncaught: this.uncaught_,
exception: MakeMirror(this.exception_)
};
-
+
// Exceptions might happen whithout any JavaScript frames.
if (this.exec_state_.frameCount() > 0) {
o.body.sourceLine = this.sourceLine();
function ProtocolMessage(request) {
// Update sequence number.
this.seq = next_response_seq++;
-
+
if (request) {
// If message is based on a request this is a response. Fill the initial
// response from the request.
response.failed('Missing argument "groupId"');
return;
}
-
+
var cleared_break_points = [];
var new_script_break_points = [];
for (var i = 0; i < script_break_points.length; i++) {
if (index < 0 || this.exec_state_.frameCount() <= index) {
return response.failed('Invalid frame number');
}
-
+
this.exec_state_.setSelectedFrame(request.arguments.number);
}
response.body = this.exec_state_.frame();
// Get the frame for which the scopes are requested.
var frame = this.frameForScopeRequest_(request);
-
+
// Fill all scopes for this frame.
var total_scopes = frame.scopeCount();
var scopes = [];
includeSource = %ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
-
+
// Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
}
+void Debugger::WaitForAgent() {
+ if (agent_ != NULL)
+ agent_->WaitUntilListening();
+}
+
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
static char* ArchiveDebug(char* to);
static char* RestoreDebug(char* from);
static int ArchiveSpacePerThread();
+ static void FreeThreadResources() { }
// Mirror cache handling.
static void ClearMirrorCache();
// Stop the debugger agent.
static void StopAgent();
+ // Blocks until the agent has started listening for connections
+ static void WaitForAgent();
+
// Unload the debugger if possible. Only called when no debugger is currently
// active.
static void UnloadDebugger();
// Entering JavaScript.
VMState state(JS);
- // Guard the stack against too much recursion.
- StackGuard guard;
-
// Placeholder for return value.
Object* value = reinterpret_cast<Object*>(kZapValue);
StackGuard::ThreadLocal StackGuard::thread_local_;
-StackGuard::StackGuard() {
- // NOTE: Overall the StackGuard code assumes that the stack grows towards
- // lower addresses.
- ExecutionAccess access;
- if (thread_local_.nesting_++ == 0) {
- // Initial StackGuard is being set. We will set the stack limits based on
- // the current stack pointer allowing the stack to grow kLimitSize from
- // here.
-
- // Ensure that either the stack limits are unset (kIllegalLimit) or that
- // they indicate a pending interruption. The interrupt limit will be
- // temporarily reset through the code below and reestablished if the
- // interrupt flags indicate that an interrupt is pending.
- ASSERT(thread_local_.jslimit_ == kIllegalLimit ||
- (thread_local_.jslimit_ == kInterruptLimit &&
- thread_local_.interrupt_flags_ != 0));
- ASSERT(thread_local_.climit_ == kIllegalLimit ||
- (thread_local_.climit_ == kInterruptLimit &&
- thread_local_.interrupt_flags_ != 0));
-
- uintptr_t limit = GENERATED_CODE_STACK_LIMIT(kLimitSize);
- thread_local_.initial_jslimit_ = thread_local_.jslimit_ = limit;
- Heap::SetStackLimit(limit);
- // NOTE: The check for overflow is not safe as there is no guarantee that
- // the running thread has its stack in all memory up to address 0x00000000.
- thread_local_.initial_climit_ = thread_local_.climit_ =
- reinterpret_cast<uintptr_t>(this) >= kLimitSize ?
- reinterpret_cast<uintptr_t>(this) - kLimitSize : 0;
-
- if (thread_local_.interrupt_flags_ != 0) {
- set_limits(kInterruptLimit, access);
- }
- }
- // Ensure that proper limits have been set.
- ASSERT(thread_local_.jslimit_ != kIllegalLimit &&
- thread_local_.climit_ != kIllegalLimit);
- ASSERT(thread_local_.initial_jslimit_ != kIllegalLimit &&
- thread_local_.initial_climit_ != kIllegalLimit);
-}
-
-
-StackGuard::~StackGuard() {
- ExecutionAccess access;
- if (--thread_local_.nesting_ == 0) {
- set_limits(kIllegalLimit, access);
- }
-}
-
-
bool StackGuard::IsStackOverflow() {
ExecutionAccess access;
return (thread_local_.jslimit_ != kInterruptLimit &&
ExecutionAccess access;
// If the current limits are special (eg due to a pending interrupt) then
// leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
- thread_local_.jslimit_ = limit;
- Heap::SetStackLimit(limit);
+ thread_local_.jslimit_ = jslimit;
+ Heap::SetStackLimit(jslimit);
}
if (thread_local_.climit_ == thread_local_.initial_climit_) {
thread_local_.climit_ = limit;
}
thread_local_.initial_climit_ = limit;
- thread_local_.initial_jslimit_ = limit;
+ thread_local_.initial_jslimit_ = jslimit;
}
}
+static internal::Thread::LocalStorageKey stack_limit_key =
+ internal::Thread::CreateThreadLocalKey();
+
+
+void StackGuard::FreeThreadResources() {
+ Thread::SetThreadLocal(
+ stack_limit_key,
+ reinterpret_cast<void*>(thread_local_.initial_climit_));
+}
+
+
+void StackGuard::ThreadLocal::Clear() {
+ initial_jslimit_ = kIllegalLimit;
+ jslimit_ = kIllegalLimit;
+ initial_climit_ = kIllegalLimit;
+ climit_ = kIllegalLimit;
+ nesting_ = 0;
+ postpone_interrupts_nesting_ = 0;
+ interrupt_flags_ = 0;
+ Heap::SetStackLimit(kIllegalLimit);
+}
+
+
+void StackGuard::ThreadLocal::Initialize() {
+ if (initial_climit_ == kIllegalLimit) {
+ // Takes the address of the limit variable in order to find out where
+ // the top of stack is right now.
+ intptr_t limit = reinterpret_cast<intptr_t>(&limit) - kLimitSize;
+ initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ initial_climit_ = limit;
+ climit_ = limit;
+ Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
+ }
+ nesting_ = 0;
+ postpone_interrupts_nesting_ = 0;
+ interrupt_flags_ = 0;
+}
+
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+ thread_local_.Clear();
+}
+
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+ thread_local_.Initialize();
+ void* stored_limit = Thread::GetThreadLocal(stack_limit_key);
+ // You should hold the ExecutionAccess lock when you call this.
+ if (stored_limit != NULL) {
+ StackGuard::SetStackLimit(reinterpret_cast<intptr_t>(stored_limit));
+ }
+}
+
+
// --- C a l l s t o n a t i v e s ---
#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
class ExecutionAccess;
-// Stack guards are used to limit the number of nested invocations of
-// JavaScript and the stack size used in each invocation.
-class StackGuard BASE_EMBEDDED {
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class StackGuard : public AllStatic {
public:
- StackGuard();
-
- ~StackGuard();
-
+ // Pass the address beyond which the stack should not grow. The stack
+ // is assumed to grow downwards.
static void SetStackLimit(uintptr_t limit);
static Address address_of_jslimit() {
static char* ArchiveStackGuard(char* to);
static char* RestoreStackGuard(char* from);
static int ArchiveSpacePerThread();
+ static void FreeThreadResources();
+ // Sets up the default stack guard for this thread if it has not
+ // already been set up.
+ static void InitThread(const ExecutionAccess& lock);
+ // Clears the stack guard for this thread so it does not look as if
+ // it has been set up.
+ static void ClearThread(const ExecutionAccess& lock);
static bool IsStackOverflow();
static bool IsPreempted();
#endif
static void Continue(InterruptFlag after_what);
+ // This provides an asynchronous read of the stack limit for the current
+ // thread. There are no locks protecting this, but it is assumed that you
+ // have the global V8 lock if you are using multiple V8 threads.
+ static uintptr_t climit() {
+ return thread_local_.climit_;
+ }
+
static uintptr_t jslimit() {
return thread_local_.jslimit_;
}
// You should hold the ExecutionAccess lock when calling this method.
static bool IsSet(const ExecutionAccess& lock);
- // This provides an asynchronous read of the stack limit for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- static uintptr_t climit() {
- return thread_local_.climit_;
- }
-
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
Heap::SetStackLimit(value);
// Reset limits to initial values. For example after handling interrupt.
// You should hold the ExecutionAccess lock when calling this method.
static void reset_limits(const ExecutionAccess& lock) {
- if (thread_local_.nesting_ == 0) {
- // No limits have been set yet.
- set_limits(kIllegalLimit, lock);
- } else {
- thread_local_.jslimit_ = thread_local_.initial_jslimit_;
- Heap::SetStackLimit(thread_local_.jslimit_);
- thread_local_.climit_ = thread_local_.initial_climit_;
- }
+ thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+ Heap::SetStackLimit(thread_local_.jslimit_);
+ thread_local_.climit_ = thread_local_.initial_climit_;
}
// Enable or disable interrupts.
static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xffffffffffffffff);
+ static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xffffffff;
+ static const uintptr_t kIllegalLimit = 0xfffffff8;
#endif
class ThreadLocal {
public:
- ThreadLocal()
- : initial_jslimit_(kIllegalLimit),
- jslimit_(kIllegalLimit),
- initial_climit_(kIllegalLimit),
- climit_(kIllegalLimit),
- nesting_(0),
- postpone_interrupts_nesting_(0),
- interrupt_flags_(0) {
- Heap::SetStackLimit(kIllegalLimit);
- }
+ ThreadLocal() { Clear(); }
+ // You should hold the ExecutionAccess lock when you call Initialize or
+ // Clear.
+ void Initialize();
+ void Clear();
uintptr_t initial_jslimit_;
uintptr_t jslimit_;
uintptr_t initial_climit_;
}
+Handle<String> Factory::NumberToString(Handle<Object> number) {
+ CALL_HEAP_FUNCTION(Heap::NumberToString(*number), String);
+}
+
+
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
Handle<Object> value,
PropertyAttributes attributes);
+ static Handle<String> NumberToString(Handle<Object> number);
+
enum ApiInstanceType {
JavaScriptObject,
InnerGlobalObject,
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"Minimum length for automatic enable preparsing")
-DEFINE_bool(multipass, false, "use the multipass code generator")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_cfg, false, "print control-flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
#include "accessors.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
int HandleScope::NumberOfHandles() {
- int n = HandleScopeImplementer::instance()->Blocks()->length();
+ int n = HandleScopeImplementer::instance()->blocks()->length();
if (n == 0) return 0;
return ((n - 1) * kHandleBlockSize) +
- (current_.next - HandleScopeImplementer::instance()->Blocks()->last());
+ (current_.next - HandleScopeImplementer::instance()->blocks()->last());
}
HandleScopeImplementer* impl = HandleScopeImplementer::instance();
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
- if (!impl->Blocks()->is_empty()) {
- Object** limit = &impl->Blocks()->last()[kHandleBlockSize];
+ if (!impl->blocks()->is_empty()) {
+ Object** limit = &impl->blocks()->last()[kHandleBlockSize];
if (current_.limit != limit) {
current_.limit = limit;
}
result = impl->GetSpareOrNewBlock();
// Add the extension to the global list of blocks, but count the
// extension as part of the current scope.
- impl->Blocks()->Add(result);
+ impl->blocks()->Add(result);
current_.extensions++;
current_.limit = &result[kHandleBlockSize];
}
}
+void CustomArguments::IterateInstance(ObjectVisitor* v) {
+ v->VisitPointers(values_, values_ + 4);
+}
+
+
// Compute the property keys from the interceptor.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- Handle<Object> data(interceptor->data());
- v8::AccessorInfo info(
- v8::Utils::ToLocal(receiver),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(object));
+ CustomArguments args(interceptor->data(), *receiver, *object);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::NamedPropertyEnumerator enum_fun =
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- Handle<Object> data(interceptor->data());
- v8::AccessorInfo info(
- v8::Utils::ToLocal(receiver),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(object));
+ CustomArguments args(interceptor->data(), *receiver, *object);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumerator enum_fun =
if (obj->IsJSObject()) {
JSObject* js_obj = JSObject::cast(obj);
String* constructor = JSObject::cast(js_obj)->constructor_name();
- // Differentiate Array, Function, and Object instances.
+ // Differentiate Object and Array instances.
if (fine_grain && (constructor == Heap::Object_symbol() ||
- constructor == Heap::Array_symbol() ||
- constructor == Heap::function_class_symbol())) {
+ constructor == Heap::Array_symbol())) {
return JSObjectsCluster(constructor, obj);
} else {
return JSObjectsCluster(constructor);
};
-class RetainerTreePrinter BASE_EMBEDDED {
+// Visitor for printing a cluster tree.
+class ClusterTreePrinter BASE_EMBEDDED {
public:
- explicit RetainerTreePrinter(StringStream* stream) : stream_(stream) {}
+ explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size) {
Print(stream_, cluster, number_and_size);
}
static void Print(StringStream* stream,
const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& numNNber_and_size);
+ const NumberAndSizeInfo& number_and_size);
private:
StringStream* stream_;
};
-void RetainerTreePrinter::Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
+void ClusterTreePrinter::Print(StringStream* stream,
+ const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
stream->Put(',');
cluster.Print(stream);
stream->Add(";%d", number_and_size.number());
}
+// Visitor for printing a retainer tree.
+class SimpleRetainerTreePrinter BASE_EMBEDDED {
+ public:
+ explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
+ : printer_(printer) {}
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+ RetainerHeapProfile::Printer* printer_;
+};
+
+
+void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ ClusterTreePrinter retainers_printer(&stream);
+ tree->ForEach(&retainers_printer);
+ printer_->PrintRetainers(cluster, stream);
+}
+
+
+// Visitor for aggregating references count of equivalent clusters.
+class RetainersAggregator BASE_EMBEDDED {
+ public:
+ RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
+ : coarser_(coarser), dest_tree_(dest_tree) {}
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ ClustersCoarser* coarser_;
+ JSObjectsClusterTree* dest_tree_;
+};
+
+
+void RetainersAggregator::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+ if (eq.is_null()) eq = cluster;
+ JSObjectsClusterTree::Locator loc;
+ dest_tree_->Insert(eq, &loc);
+ NumberAndSizeInfo aggregated_number = loc.value();
+ aggregated_number.increment_number(number_and_size.number());
+ loc.set_value(aggregated_number);
+}
+
+
+// Visitor for printing a retainer tree. Aggregates equivalent retainer clusters.
+class AggregatingRetainerTreePrinter BASE_EMBEDDED {
+ public:
+ AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
+ RetainerHeapProfile::Printer* printer)
+ : coarser_(coarser), printer_(printer) {}
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+ ClustersCoarser* coarser_;
+ RetainerHeapProfile::Printer* printer_;
+};
+
+
+void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
+ JSObjectsClusterTree dest_tree_;
+ RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
+ tree->ForEach(&retainers_aggregator);
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ ClusterTreePrinter retainers_printer(&stream);
+ dest_tree_.ForEach(&retainers_printer);
+ printer_->PrintRetainers(cluster, stream);
+}
+
+
+// A helper class for building a retainer tree that aggregates
+// all equivalent clusters.
+class RetainerTreeAggregator BASE_EMBEDDED {
+ public:
+ explicit RetainerTreeAggregator(ClustersCoarser* coarser)
+ : coarser_(coarser) {}
+ void Process(JSObjectsRetainerTree* input_tree) {
+ input_tree->ForEach(this);
+ }
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+ JSObjectsRetainerTree& output_tree() { return output_tree_; }
+
+ private:
+ ClustersCoarser* coarser_;
+ JSObjectsRetainerTree output_tree_;
+};
+
+
+void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+ if (eq.is_null()) return;
+ JSObjectsRetainerTree::Locator loc;
+ if (output_tree_.Insert(eq, &loc)) {
+ loc.set_value(new JSObjectsClusterTree());
+ }
+ RetainersAggregator retainers_aggregator(coarser_, loc.value());
+ tree->ForEach(&retainers_aggregator);
+}
+
} // namespace
accumulator->Add("(roots)");
} else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
accumulator->Add("(global property)");
+ } else if (constructor_ == FromSpecialCase(SELF)) {
+ accumulator->Add("(self)");
} else {
SmartPointer<char> s_name(
constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
ClustersCoarser::ClustersCoarser()
- : zscope_(DELETE_ON_EXIT),
- sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
- current_pair_(NULL) {
+ : zscope_(DELETE_ON_EXIT),
+ sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
+ current_pair_(NULL),
+ current_set_(NULL),
+ self_(NULL) {
}
ASSERT(current_pair_ == NULL);
current_pair_ = &pair;
current_set_ = new JSObjectsRetainerTree();
+ self_ = &cluster;
tree->ForEach(this);
sim_list_.Add(pair);
current_pair_ = NULL;
current_set_ = NULL;
+ self_ = NULL;
}
const NumberAndSizeInfo& number_and_size) {
ASSERT(current_pair_ != NULL);
ASSERT(current_set_ != NULL);
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+ ASSERT(self_ != NULL);
JSObjectsRetainerTree::Locator loc;
+ if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
+ current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
+ return;
+ }
+ JSObjectsCluster eq = GetCoarseEquivalent(cluster);
if (!eq.is_null()) {
if (current_set_->Find(eq, &loc)) return;
current_pair_->refs.Add(eq);
int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
tree->ForEach(this);
- // To sort similarity list properly, references list of a cluster is
- // required to be sorted, thus 'O1 <- A, B' and 'O2 <- B, A' would
- // be considered equivalent. But we don't sort them explicitly
- // because we know that they come from a splay tree traversal, so
- // they are already sorted.
+ sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
sim_list_.Sort(ClusterBackRefsCmp);
return FillEqualityTree();
}
bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
// Return true for coarsible clusters that have a non-identical equivalent.
- return cluster.can_be_coarsed() &&
- JSObjectsCluster::Compare(cluster, GetCoarseEquivalent(cluster)) != 0;
+ if (!cluster.can_be_coarsed()) return false;
+ JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+ return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
}
RetainerHeapProfile::RetainerHeapProfile()
- : zscope_(DELETE_ON_EXIT),
- coarse_cluster_tree_(NULL),
- current_printer_(NULL),
- current_stream_(NULL) {
+ : zscope_(DELETE_ON_EXIT) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
Heap::IterateRoots(&extractor);
void RetainerHeapProfile::DebugPrintStats(
RetainerHeapProfile::Printer* printer) {
coarser_.Process(&retainers_tree_);
- ASSERT(current_printer_ == NULL);
- current_printer_ = printer;
- retainers_tree_.ForEach(this);
- current_printer_ = NULL;
+ // Print clusters that have no equivalents, aggregating their retainers.
+ AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
+ retainers_tree_.ForEach(&agg_printer);
+ // Now aggregate clusters that have equivalents...
+ RetainerTreeAggregator aggregator(&coarser_);
+ aggregator.Process(&retainers_tree_);
+ // ...and print them.
+ SimpleRetainerTreePrinter s_printer(printer);
+ aggregator.output_tree().ForEach(&s_printer);
}
}
-void RetainerHeapProfile::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- // First level of retainer graph.
- if (coarser_.HasAnEquivalent(cluster)) return;
- ASSERT(current_stream_ == NULL);
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- current_stream_ = &stream;
- ASSERT(coarse_cluster_tree_ == NULL);
- coarse_cluster_tree_ = new JSObjectsClusterTree();
- tree->ForEach(this);
- // Print aggregated counts and sizes.
- RetainerTreePrinter printer(current_stream_);
- coarse_cluster_tree_->ForEach(&printer);
- coarse_cluster_tree_ = NULL;
- current_printer_->PrintRetainers(cluster, stream);
- current_stream_ = NULL;
-}
-
-
-void RetainerHeapProfile::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ASSERT(coarse_cluster_tree_ != NULL);
- ASSERT(current_stream_ != NULL);
- JSObjectsCluster eq = coarser_.GetCoarseEquivalent(cluster);
- if (eq.is_null()) {
- RetainerTreePrinter::Print(current_stream_, cluster, number_and_size);
- } else {
- // Aggregate counts and sizes for equivalent clusters.
- JSObjectsClusterTree::Locator loc;
- coarse_cluster_tree_->Insert(eq, &loc);
- NumberAndSizeInfo eq_number_and_size = loc.value();
- eq_number_and_size.increment_number(number_and_size.number());
- loc.set_value(eq_number_and_size);
- }
-}
-
-
//
// HeapProfiler class implementation.
//
// These special cases are used in retainer profile.
enum SpecialCase {
ROOTS = 1,
- GLOBAL_PROPERTY = 2
+ GLOBAL_PROPERTY = 2,
+ SELF = 3 // This case is used in ClustersCoarser only.
};
JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
(a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
: cons_cmp;
}
+ static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
+ return Compare(*a, *b);
+ }
bool is_null() const { return constructor_ == NULL; }
bool can_be_coarsed() const { return instance_ != NULL; }
switch (special) {
case ROOTS: return Heap::result_symbol();
case GLOBAL_PROPERTY: return Heap::code_symbol();
+ case SELF: return Heap::catch_var_symbol();
default:
UNREACHABLE();
return NULL;
ClusterBackRefs& operator=(const ClusterBackRefs& src);
static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
+ void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
+ static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
JSObjectsCluster cluster;
ZoneList<JSObjectsCluster> refs;
EqualityTree eq_tree_;
ClusterBackRefs* current_pair_;
JSObjectsRetainerTree* current_set_;
+ const JSObjectsCluster* self_;
};
void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
private:
- // Limit on the number of retainers to be printed per cluster.
- static const int kMaxRetainersToPrint = 50;
ZoneScope zscope_;
JSObjectsRetainerTree retainers_tree_;
ClustersCoarser coarser_;
- // TODO(mnaganov): Use some helper class to hold these state variables.
- JSObjectsClusterTree* coarse_cluster_tree_;
- Printer* current_printer_;
- StringStream* current_stream_;
- public:
- // Used by JSObjectsRetainerTree::ForEach.
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
};
int Heap::semispace_size_ = 512*KB;
int Heap::old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
+size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::semispace_size_ = 16*MB;
int Heap::old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
+size_t Heap::code_range_size_ = 256*MB;
#else
int Heap::semispace_size_ = 8*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
+size_t Heap::code_range_size_ = 0;
#endif
GCCallback Heap::global_gc_prologue_callback_ = NULL;
DisableAssertNoAllocation allow_allocation;
GlobalHandles::PostGarbageCollectionProcessing();
}
- // Update flat string readers.
- FlatStringReader::PostGarbageCollectionProcessing();
+ // Update relocatables.
+ Relocatable::PostGarbageCollectionProcessing();
}
// spaces.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
Object* Heap::AllocateHeapNumber(double value) {
// Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
+ if (always_allocate()) return AllocateHeapNumber(value, TENURED);
+
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
}
+Object* Heap::NumberToString(Object* number) {
+ Object* cached = GetNumberStringCache(number);
+ if (cached != undefined_value()) {
+ return cached;
+ }
+
+ char arr[100];
+ Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ const char* str;
+ if (number->IsSmi()) {
+ int num = Smi::cast(number)->value();
+ str = IntToCString(num, buffer);
+ } else {
+ double num = HeapNumber::cast(number)->value();
+ str = DoubleToCString(num, buffer);
+ }
+ Object* result = AllocateStringFromAscii(CStrVector(str));
+
+ if (!result->IsFailure()) {
+ SetNumberStringCache(number, String::cast(result));
+ }
+ return result;
+}
+
+
Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
return SmiOrNumberFromDouble(value,
true /* number object must be new */,
AllocationSpace space =
size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = LO_SPACE;
+
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object
HeapObject::cast(result)->set_map(code_map());
Code* code = Code::cast(result);
+ ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_size(desc.reloc_size);
code->set_sinfo_size(sinfo_size);
obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
+ ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
new_code->Relocate(new_addr - old_addr);
return new_code;
}
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
int size = SeqAsciiString::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
int size = SeqTwoByteString::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
Object* Heap::AllocateRawFixedArray(int length) {
// Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
+ if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length);
return size <= kMaxObjectSizeInNewSpace
ASSERT(empty_fixed_array()->IsFixedArray());
if (length == 0) return empty_fixed_array();
+ // New space can't cope with forced allocation.
+ if (always_allocate()) pretenure = TENURED;
+
int size = FixedArray::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (pretenure != TENURED) {
SYNCHRONIZE_TAG("bootstrapper");
Top::Iterate(v);
SYNCHRONIZE_TAG("top");
+ Relocatable::Iterate(v);
+ SYNCHRONIZE_TAG("relocatable");
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::Iterate(v);
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
+ // On 64-bit platform(s), we put all code objects in a 2 GB range of
+ // virtual address space, so that they can call each other with near calls.
+ if (code_range_size_ > 0) {
+ if (!CodeRange::Setup(code_range_size_)) {
+ return false;
+ }
+ }
+
code_space_ =
new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
kRootListLength
};
+ static Object* NumberToString(Object* number);
+
private:
static int semispace_size_;
static int initial_semispace_size_;
static int young_generation_size_;
static int old_generation_size_;
+ static size_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return *reinterpret_cast<Object**>(pc_);
+ return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(pc_);
}
Object** RelocInfo::target_object_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
+ return &Memory::Object_at(pc_);
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- *reinterpret_cast<Object**>(pc_) = target;
+ Memory::Object_at(pc_) = target;
}
__ mov(Operand::StaticVariable(passed), edi);
// The actual argument count has already been loaded into register
- // eax, but JumpToBuiltin expects eax to contain the number of
+ // eax, but JumpToRuntime expects eax to contain the number of
// arguments including the receiver.
__ inc(eax);
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id));
}
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
- __ AllocateObjectInNewSpace(edi,
- ebx,
- edi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
- __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// ebx: JSObject
// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter holes is larger than zero an elements backing
-// store is allocated with this size and filled with the hole values. Otherwise
-// the elements backing store is set to the empty FixedArray.
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
static void AllocateEmptyJSArray(MacroAssembler* masm,
Register array_function,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
- int holes,
+ int initial_capacity,
Label* gc_required) {
- ASSERT(holes >= 0);
+ ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ mov(scratch1, FieldOperand(array_function,
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
int size = JSArray::kSize;
- if (holes > 0) {
- size += FixedArray::SizeFor(holes);
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
}
- __ AllocateObjectInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// If no storage is requested for the elements array just set the empty
// fixed array.
- if (holes == 0) {
+ if (initial_capacity == 0) {
__ mov(FieldOperand(result, JSArray::kElementsOffset),
Factory::empty_fixed_array());
return;
// scratch2: start of next object
__ mov(FieldOperand(scratch1, JSObject::kMapOffset),
Factory::fixed_array_map());
- __ mov(FieldOperand(scratch1, Array::kLengthOffset), Immediate(holes));
+ __ mov(FieldOperand(scratch1, Array::kLengthOffset),
+ Immediate(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- if (holes <= kLoopUnfoldLimit) {
+ if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
__ mov(scratch3, Factory::the_hole_value());
- for (int i = 0; i < holes; i++) {
+ for (int i = 0; i < initial_capacity; i++) {
__ mov(FieldOperand(scratch1,
FixedArray::kHeaderSize + i * kPointerSize),
scratch3);
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateObjectInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
__ jmp(&allocated);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
__ bind(¬_empty);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ AllocateObjectInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size, // array_size is a smi.
- array_size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
+ __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ times_half_pointer_size, // array_size is a smi.
+ array_size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
+++ /dev/null
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-ia32.h"
-#include "macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Label deferred_enter, deferred_exit;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ push(ebp);
- __ mov(ebp, esp);
- __ push(esi);
- __ push(edi);
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ Set(eax, Immediate(Factory::undefined_value()));
- for (int i = 0; i < count; i++) {
- __ push(eax);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(below, &deferred_enter);
- __ bind(&deferred_exit);
- }
- }
- successor_->Compile(masm);
- if (FLAG_check_stack) {
- Comment cmnt(masm, "[ Deferred Stack Check");
- __ bind(&deferred_enter);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ jmp(&deferred_exit);
- }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
- __ mov(esp, ebp);
- __ pop(ebp);
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ret((count + 1) * kPointerSize);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
- // A test eax instruction after the call indicates to the IC code that it
- // was inlined. Ensure there is not one here.
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ pop(ebx); // Discard key.
- } else {
- key()->Get(masm, ecx);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- __ pop(ebx); // Discard receiver.
- location()->Set(masm, eax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Push both operands and call the specialized stub.
- if (!left()->is_on_stack()) left()->Push(masm);
- right()->Push(masm);
- GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
- __ CallStub(&stub);
- location()->Set(masm, eax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value_->Get(masm, eax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, Immediate(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ push(Immediate(handle_));
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return Operand(ebp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(ebp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return Operand(eax);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ mov(ToOperand(loc), Immediate(handle_));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ mov(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // We dispatch to the value because in some cases (temp or constant)
- // we can use a single instruction.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- // The accumulator is not live across a MoveInstr.
- __ mov(eax, ToOperand(this));
- __ mov(ToOperand(loc), eax);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(eax)) __ mov(reg, eax);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(eax)) __ mov(eax, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(eax);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, eax);
- break;
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ mov(ToOperand(loc), eax);
- break;
- case STACK:
- __ pop(ToOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
DeleteFrame();
+#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
}
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
- CodeForStatementPosition(node);
-
Load(node->exception());
Result result = frame_->CallRuntime(Runtime::kThrow, 1);
frame_->Push(&result);
void CodeGenerator::VisitCall(Call* node) {
Comment cmnt(masm_, "[ Call");
+ Expression* function = node->expression();
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
-
// Check if the function is a variable or a property.
- Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
Property* property = function->AsProperty();
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ mov(result.reg(),
+ FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
-
- // Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ mov(result.reg(),
- FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
- frame_->SetElementAt(arg_count, &result);
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-}
-
-
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Register scratch2,
Register result) {
// Allocate heap number in new space.
- __ AllocateObjectInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
// Set the map.
__ mov(FieldOperand(result, HeapObject::kMapOffset),
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
__ sar(ebx, kSmiTagSize); // Untag the index.
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
+ __ mov(edx, eax); // Save the value.
__ sar(eax, kSmiTagSize); // Untag the value.
{ // Clamp the value to [0..255].
- Label done, check_255;
- __ cmp(eax, 0);
- __ j(greater_equal, &check_255);
- __ mov(eax, Immediate(0));
- __ jmp(&done);
- __ bind(&check_255);
- __ cmp(eax, 255);
- __ j(less_equal, &done);
+ Label done, is_negative;
+ __ test(eax, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ j(negative, &is_negative);
__ mov(eax, Immediate(255));
+ __ jmp(&done);
+ __ bind(&is_negative);
+ __ xor_(eax, Operand(eax)); // Clear eax.
__ bind(&done);
}
__ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ __ mov(eax, edx); // Return the original value.
__ ret(0);
// Extra capacity case: Check if there is extra capacity to
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
}
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
// should remove this need and make the runtime routine entry code
// smarter.
Set(eax, Immediate(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
CEntryStub ces(1);
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
unresolved_.Add(entry);
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
unresolved_.Add(entry);
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the contnt of result is known to be
// the allocation top on entry (could be result_end from a previous call to
- // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+ // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of arguments.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& ext);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& ext);
// ---------------------------------------------------------------------------
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+  //     at runtime, and puts it in the register edi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
#ifndef V8_IA32_SIMULATOR_IA32_H_
#define V8_IA32_SIMULATOR_IA32_H_
+#include "allocation.h"
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
// ebx: initial map
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
- __ AllocateObjectInNewSpace(ecx,
- edx,
- ecx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(ecx,
+ edx,
+ ecx,
+ no_reg,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
INLINE(explicit List(int capacity)) { Initialize(capacity); }
INLINE(~List()) { DeleteData(data_); }
+ // Deallocates memory used by the list and leaves the list in a consistent
+ // empty state.
+ void Free() {
+ DeleteData(data_);
+ Initialize(0);
+ }
+
INLINE(void* operator new(size_t size)) { return P::New(size); }
INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
void Log::Close() {
if (Write == WriteToFile) {
- fclose(output_handle_);
+ if (output_handle_ != NULL) fclose(output_handle_);
output_handle_ = NULL;
} else if (Write == WriteToMemory) {
delete output_buffer_;
};
-// Flags used for the AllocateObjectInNewSpace functions.
+// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
rinfo->IsCallInstruction());
HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
MarkCompactCollector::MarkObject(code);
- // When compacting we convert the call to a real object pointer.
- if (IsCompacting()) rinfo->set_call_object(code);
}
private:
reinterpret_cast<Code*>(target)->instruction_start());
}
+ void VisitDebugTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ VisitPointer(&target);
+ rinfo->set_call_address(
+ reinterpret_cast<Code*>(target)->instruction_start());
+ }
+
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
static Object*& Object_at(Address addr) {
return *reinterpret_cast<Object**>(addr);
}
+
+ static Handle<Object>& Object_Handle_at(Address addr) {
+ return *reinterpret_cast<Handle<Object>*>(addr);
+ }
};
} } // namespace v8::internal
var kVowelSounds = 0;
var kCapitalVowelSounds = 0;
+// If this object gets passed to an error constructor the error will
+// get an accessor for .message that constructs a descriptive error
+// message on access.
+var kAddMessageAccessorsMarker = { };
+
function GetInstanceName(cons) {
if (cons.length == 0) {
// ----------------------------------------------------------------------------
// Error implementation
-// If this object gets passed to an error constructor the error will
-// get an accessor for .message that constructs a descriptive error
-// message on access.
-var kAddMessageAccessorsMarker = { };
-
// Defines accessors for a property that is calculated the first time
// the property is read.
function DefineOneShotAccessor(obj, name, fun) {
}
for (var i = 0; i < frames.length; i++) {
var frame = frames[i];
+ var line;
try {
- var line = FormatSourcePosition(frame);
+ line = FormatSourcePosition(frame);
} catch (e) {
try {
- var line = "<error: " + e + ">";
+ line = "<error: " + e + ">";
} catch (ee) {
// Any code that reaches this point is seriously nasty!
- var line = "<error>";
+ line = "<error>";
}
}
lines.push(" at " + line);
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// jsminify this file, js2c: jsmin
-
// Touch the RegExp and Date functions to make sure that date-delay.js and
// regexp-delay.js has been loaded. This is required as the mirrors use
// functions within these files through the builtins object.
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope;
- Handle<JSObject> self(JSObject::cast(receiver));
- Handle<JSObject> holder_handle(JSObject::cast(holder));
+ JSObject* self = JSObject::cast(receiver);
+ JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
- Handle<Object> fun_data(data->data());
- LOG(ApiNamedPropertyAccess("load", *self, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(self),
- v8::Utils::ToLocal(fun_data),
- v8::Utils::ToLocal(holder_handle));
+ LOG(ApiNamedPropertyAccess("load", self, name));
+ CustomArguments args(data->data(), self, holder_handle);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
Handle<Object> value_handle(value);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- Handle<Object> data_handle(interceptor->data());
LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::NamedPropertySetter setter =
v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
v8::Handle<v8::Value> result;
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
Handle<String> key(name);
- Handle<Object> fun_data(data->data());
LOG(ApiNamedPropertyAccess("store", this, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(self),
- v8::Utils::ToLocal(fun_data),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(data->data(), this, JSObject::cast(holder));
+ v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
VMState state(EXTERNAL);
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- Handle<Object> data_handle(interceptor->data());
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
if (!interceptor->deleter()->IsUndefined()) {
v8::NamedPropertyDeleter deleter =
v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
- Handle<Object> data_handle(interceptor->data());
LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
v8::IndexedPropertyDeleter deleter =
v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
Handle<JSObject> this_handle(this);
- Handle<Object> data_handle(interceptor->data());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
}
-FlatStringReader* FlatStringReader::top_ = NULL;
+Relocatable* Relocatable::top_ = NULL;
+
+
+void Relocatable::PostGarbageCollectionProcessing() {
+ Relocatable* current = top_;
+ while (current != NULL) {
+ current->PostGarbageCollection();
+ current = current->prev_;
+ }
+}
+
+
+// Reserve space for statics needing saving and restoring.
+int Relocatable::ArchiveSpacePerThread() {
+ return sizeof(top_);
+}
+
+
+// Archive statics that are thread local.
+char* Relocatable::ArchiveState(char* to) {
+ *reinterpret_cast<Relocatable**>(to) = top_;
+ top_ = NULL;
+ return to + ArchiveSpacePerThread();
+}
+
+
+// Restore statics that are thread local.
+char* Relocatable::RestoreState(char* from) {
+ top_ = *reinterpret_cast<Relocatable**>(from);
+ return from + ArchiveSpacePerThread();
+}
+
+
+char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
+ Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
+ Iterate(v, top);
+ return thread_storage + ArchiveSpacePerThread();
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v) {
+ Iterate(v, top_);
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
+ Relocatable* current = top;
+ while (current != NULL) {
+ current->IterateInstance(v);
+ current = current->prev_;
+ }
+}
FlatStringReader::FlatStringReader(Handle<String> str)
: str_(str.location()),
- length_(str->length()),
- prev_(top_) {
- top_ = this;
- RefreshState();
+ length_(str->length()) {
+ PostGarbageCollection();
}
FlatStringReader::FlatStringReader(Vector<const char> input)
- : str_(NULL),
+ : str_(0),
is_ascii_(true),
length_(input.length()),
- start_(input.start()),
- prev_(top_) {
- top_ = this;
-}
+ start_(input.start()) { }
-FlatStringReader::~FlatStringReader() {
- ASSERT_EQ(top_, this);
- top_ = prev_;
-}
-
-
-void FlatStringReader::RefreshState() {
+void FlatStringReader::PostGarbageCollection() {
if (str_ == NULL) return;
Handle<String> str(str_);
ASSERT(str->IsFlat());
}
-void FlatStringReader::PostGarbageCollectionProcessing() {
- FlatStringReader* current = top_;
- while (current != NULL) {
- current->RefreshState();
- current = current->prev_;
- }
-}
-
-
void StringInputBuffer::Seek(unsigned pos) {
Reset(pos, input_);
}
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::kApplyMask;
+ Assembler* origin = desc.origin; // Needed to find target_object on X64.
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+ Handle<Object> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(*p);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
// pointers to the first instruction in the code object
- Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+ Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(code->instruction_start());
} else {
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
- Handle<Object> data_handle(interceptor->data());
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
if (!interceptor->setter()->IsUndefined()) {
v8::IndexedPropertySetter setter =
v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
- Handle<Object> data_handle(interceptor->data());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
Handle<JSObject> holder_handle(this);
if (!interceptor->getter()->IsUndefined()) {
- Handle<Object> data_handle(interceptor->data());
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- Handle<Object> data_handle(interceptor->data());
if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST(V) \
+#define INSTANCE_TYPE_LIST_ALL(V) \
V(SHORT_SYMBOL_TYPE) \
V(MEDIUM_SYMBOL_TYPE) \
V(LONG_SYMBOL_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(SIGNATURE_INFO_TYPE) \
V(TYPE_SWITCH_INFO_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(BREAK_POINT_INFO_TYPE) \
V(SCRIPT_TYPE) \
\
V(JS_VALUE_TYPE) \
\
V(JS_FUNCTION_TYPE) \
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
+ V(DEBUG_INFO_TYPE) \
+ V(BREAK_POINT_INFO_TYPE)
+#else
+#define INSTANCE_TYPE_LIST_DEBUGGER(V)
+#endif
+
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_ALL(V) \
+ INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
+#ifdef ENABLE_DEBUGGER_SUPPORT
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
+#endif
SCRIPT_TYPE,
JS_VALUE_TYPE,
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSymbol();
+#ifdef DEBUG
+ // See objects-inl.h for more details
inline bool IsSeqString();
inline bool IsSlicedString();
inline bool IsExternalString();
- inline bool IsConsString();
inline bool IsExternalTwoByteString();
inline bool IsExternalAsciiString();
inline bool IsSeqTwoByteString();
inline bool IsSeqAsciiString();
+#endif // DEBUG
+ inline bool IsConsString();
inline bool IsNumber();
inline bool IsByteArray();
};
+// Utility superclass for stack-allocated objects that must be updated
+// on gc. It provides two ways for the gc to update instances, either
+// iterating or updating after gc.
+class Relocatable BASE_EMBEDDED {
+ public:
+ inline Relocatable() : prev_(top_) { top_ = this; }
+ virtual ~Relocatable() {
+ ASSERT_EQ(top_, this);
+ top_ = prev_;
+ }
+ virtual void IterateInstance(ObjectVisitor* v) { }
+ virtual void PostGarbageCollection() { }
+
+ static void PostGarbageCollectionProcessing();
+ static int ArchiveSpacePerThread();
+ static char* ArchiveState(char* to);
+ static char* RestoreState(char* from);
+ static void Iterate(ObjectVisitor* v);
+ static void Iterate(ObjectVisitor* v, Relocatable* top);
+ static char* Iterate(ObjectVisitor* v, char* t);
+ private:
+ static Relocatable* top_;
+ Relocatable* prev_;
+};
+
+
// A flat string reader provides random access to the contents of a
// string independent of the character width of the string. The handle
// must be valid as long as the reader is being used.
-class FlatStringReader BASE_EMBEDDED {
+class FlatStringReader : public Relocatable {
public:
explicit FlatStringReader(Handle<String> str);
explicit FlatStringReader(Vector<const char> input);
- ~FlatStringReader();
- void RefreshState();
+ void PostGarbageCollection();
inline uc32 Get(int index);
int length() { return length_; }
- static void PostGarbageCollectionProcessing();
private:
String** str_;
bool is_ascii_;
int length_;
const void* start_;
- FlatStringReader* prev_;
- static FlatStringReader* top_;
};
void JSArrayVerify();
#endif
+ // Number of element slots to pre-allocate for an empty array.
+ static const int kPreallocatedArrayElements = 4;
+
// Layout description.
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
return Call::sentinel();
}
- virtual Expression* NewCallEval(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return CallEval::sentinel();
- }
-
virtual Statement* EmptyStatement() {
return NULL;
}
return new Call(expression, arguments, pos);
}
- virtual Expression* NewCallEval(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return new CallEval(expression, arguments, pos);
- }
-
virtual Statement* EmptyStatement();
};
bool Parser::PreParseProgram(Handle<String> source,
unibrow::CharacterStream* stream) {
HistogramTimerScope timer(&Counters::pre_parse);
- StackGuard guard;
AssertNoZoneAllocation assert_no_zone_allocation;
AssertNoAllocation assert_no_allocation;
NoHandleAllocation no_handle_allocation;
Statement* Parser::ParseFunctionDeclaration(bool* ok) {
- // Parse a function literal. We may or may not have a function name.
- // If we have a name we use it as the variable name for the function
- // (a function declaration) and not as the function name of a function
- // expression.
-
+ // FunctionDeclaration ::
+ // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
-
- Handle<String> name;
- if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
- FunctionLiteral* fun = ParseFunctionLiteral(name, function_token_position,
- DECLARATION, CHECK_OK);
-
- if (name.is_null()) {
- // We don't have a name - it is always an anonymous function
- // expression.
- return NEW(ExpressionStatement(fun));
- } else {
- // We have a name so even if we're not at the top-level of the
- // global or a function scope, we treat is as such and introduce
- // the function with it's initial value upon entering the
- // corresponding scope.
- Declare(name, Variable::VAR, fun, true, CHECK_OK);
- return factory()->EmptyStatement();
- }
+ Handle<String> name = ParseIdentifier(CHECK_OK);
+ FunctionLiteral* fun = ParseFunctionLiteral(name,
+ function_token_position,
+ DECLARATION,
+ CHECK_OK);
+  // Even if we're not at the top-level of the global or a function
+  // scope, we treat it as such and introduce the function with its
+  // initial value upon entering the corresponding scope.
+ Declare(name, Variable::VAR, fun, true, CHECK_OK);
+ return factory()->EmptyStatement();
}
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
if (peek() == Token::IN) {
- // Report syntax error if the expression is an invalid
- // left-hand side expression.
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report
+ // the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- if (expression != NULL && expression->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
- expression = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_for_in",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+ expression = NewThrowReferenceError(type);
}
ForInStatement* loop = NEW(ForInStatement(labels));
Target target(this, loop);
return expression;
}
+ // Signal a reference error if the expression is an invalid left-hand
+ // side expression. We could report this as a syntax error here but
+ // for compatibility with JSC we choose to report the error at
+ // runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- if (expression != NULL && expression->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
- expression = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- //
- // NOTE: KJS sometimes delay the error reporting to runtime. If
- // we want to be completely compatible we should do the same.
- // For example: "(x++) = 42" gives a reference error at runtime
- // with KJS whereas we report a syntax error at compile time.
- ReportMessage("invalid_lhs_in_assignment", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+ expression = NewThrowReferenceError(type);
}
-
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
- Expression* x = ParseUnaryExpression(CHECK_OK);
+ Expression* expression = ParseUnaryExpression(CHECK_OK);
// Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber()) {
- double x_val = x->AsLiteral()->handle()->Number();
+ if (expression != NULL && expression->AsLiteral() &&
+ expression->AsLiteral()->handle()->IsNumber()) {
+ double value = expression->AsLiteral()->handle()->Number();
switch (op) {
case Token::ADD:
- return x;
+ return expression;
case Token::SUB:
- return NewNumberLiteral(-x_val);
+ return NewNumberLiteral(-value);
case Token::BIT_NOT:
- return NewNumberLiteral(~DoubleToInt32(x_val));
+ return NewNumberLiteral(~DoubleToInt32(value));
default: break;
}
}
- return NEW(UnaryOperation(op, x));
+ return NEW(UnaryOperation(op, expression));
} else if (Token::IsCountOp(op)) {
op = Next();
- Expression* x = ParseUnaryExpression(CHECK_OK);
- if (x == NULL || !x->IsValidLeftHandSide()) {
- if (x != NULL && x->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
- x = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_prefix_op", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Expression* expression = ParseUnaryExpression(CHECK_OK);
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report the
+ // error at runtime.
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+ expression = NewThrowReferenceError(type);
}
- return NEW(CountOperation(true /* prefix */, op, x));
+ return NEW(CountOperation(true /* prefix */, op, expression));
} else {
return ParsePostfixExpression(ok);
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
- Expression* result = ParseLeftHandSideExpression(CHECK_OK);
+ Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
- if (result == NULL || !result->IsValidLeftHandSide()) {
- if (result != NULL && result->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
- result = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_postfix_op",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report the
+ // error at runtime.
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+ expression = NewThrowReferenceError(type);
}
Token::Value next = Next();
- result = NEW(CountOperation(false /* postfix */, next, result));
+ expression = NEW(CountOperation(false /* postfix */, next, expression));
}
- return result;
+ return expression;
}
// declared in the current scope chain. These calls are marked as
// potentially direct eval calls. Whether they are actually direct calls
// to eval is determined at run time.
-
- bool is_potentially_direct_eval = false;
if (!is_pre_parsing_) {
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
Variable* var = top_scope_->Lookup(name);
if (var == NULL) {
top_scope_->RecordEvalCall();
- is_potentially_direct_eval = true;
}
}
}
-
- if (is_potentially_direct_eval) {
- result = factory()->NewCallEval(result, args, pos);
- } else {
- result = factory()->NewCall(result, args, pos);
- }
+ result = factory()->NewCall(result, args, pos);
break;
}
bool multiline,
RegExpCompileData* result) {
ASSERT(result != NULL);
- // Make sure we have a stack guard.
- StackGuard guard;
RegExpParser parser(input, &result->error, multiline);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
-#if defined (__arm__) || defined(__thumb__)
- sample.pc = mcontext.mc_r15;
- sample.sp = mcontext.mc_r13;
- sample.fp = mcontext.mc_r11;
-#else
+#if V8_HOST_ARCH_IA32
sample.pc = mcontext.mc_eip;
sample.sp = mcontext.mc_esp;
sample.fp = mcontext.mc_ebp;
+#elif V8_HOST_ARCH_X64
+ sample.pc = mcontext.mc_rip;
+ sample.sp = mcontext.mc_rsp;
+ sample.fp = mcontext.mc_rbp;
+#elif V8_HOST_ARCH_ARM
+ sample.pc = mcontext.mc_r15;
+ sample.sp = mcontext.mc_r13;
+ sample.fp = mcontext.mc_r11;
#endif
active_sampler_->SampleStack(&sample);
}
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
+#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
}
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the user-
+// defined tag 255. This helps identify V8-allocated regions in memory analysis
+// tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase = mmap(NULL, msize, prot,
+ MAP_PRIVATE | MAP_ANON,
+ kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory(size_t size) {
}
-void PrettyPrinter::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void PrettyPrinter::VisitCallNew(CallNew* node) {
Print("new (");
Visit(node->expression());
}
-void AstPrinter::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void AstPrinter::VisitCallNew(CallNew* node) {
IndentedScope indent("CALL NEW");
Visit(node->expression());
}
+void RegExpStack::ThreadLocal::Free() {
+ if (thread_local_.memory_size_ > 0) {
+ DeleteArray(thread_local_.memory_);
+ thread_local_ = ThreadLocal();
+ }
+}
+
+
Address RegExpStack::EnsureCapacity(size_t size) {
if (size > kMaximumStackSize) return NULL;
if (size < kMinimumStackSize) size = kMinimumStackSize;
static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); }
static char* ArchiveStack(char* to);
static char* RestoreStack(char* from);
+ static void FreeThreadResources() { thread_local_.Free(); }
private:
// Artificial limit used when no memory has been allocated.
Address memory_;
size_t memory_size_;
Address limit_;
+ void Free();
};
// Resets the buffer if it has grown beyond the default/minimum size.
}
-void AstOptimizer::VisitCallEval(CallEval* node) {
- Visit(node->expression());
- OptimizeArguments(node->arguments());
-}
-
-
void AstOptimizer::VisitCallNew(CallNew* node) {
Visit(node->expression());
OptimizeArguments(node->arguments());
}
-void Processor::VisitCallEval(CallEval* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
void Processor::VisitCallNew(CallNew* node) {
USE(node);
UNREACHABLE();
// Some fast paths through GetKeysInFixedArrayFor reuse a cached
// property array and since the result is mutable we have to create
// a fresh clone on each invocation.
- Handle<FixedArray> copy = Factory::NewFixedArray(contents->length());
- contents->CopyTo(0, *copy, 0, contents->length());
+ int length = contents->length();
+ Handle<FixedArray> copy = Factory::NewFixedArray(length);
+ for (int i = 0; i < length; i++) {
+ Object* entry = contents->get(i);
+ if (entry->IsString()) {
+ copy->set(i, entry);
+ } else {
+ ASSERT(entry->IsNumber());
+ HandleScope scope;
+ Handle<Object> entry_handle(entry);
+ Handle<Object> entry_str = Factory::NumberToString(entry_handle);
+ copy->set(i, *entry_str);
+ }
+ }
return *Factory::NewJSArrayWithElements(copy);
}
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- Object* cached = Heap::GetNumberStringCache(number);
- if (cached != Heap::undefined_value()) {
- return cached;
- }
-
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Smi::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = HeapNumber::cast(number)->value();
- str = DoubleToCString(num, buffer);
- }
- Object* result = Heap::AllocateStringFromAscii(CStrVector(str));
-
- if (!result->IsFailure()) {
- Heap::SetNumberStringCache(number, String::cast(result));
- }
- return result;
+ return Heap::NumberToString(number);
}
// the function being debugged.
// function(arguments,__source__) {return eval(__source__);}
static const char* source_str =
- "function(arguments,__source__){return eval(__source__);}";
+ "(function(arguments,__source__){return eval(__source__);})";
static const int source_str_length = strlen(source_str);
Handle<String> function_source =
Factory::NewStringFromAscii(Vector<const char>(source_str,
serializer_(serializer),
reference_encoder_(serializer->reference_encoder_),
offsets_(8),
- addresses_(8) {
+ addresses_(8),
+ offsets_32_bit_(0),
+ data_32_bit_(0) {
}
virtual void VisitPointers(Object** start, Object** end) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Address encoded_target = serializer_->GetSavedAddress(target);
- offsets_.Add(rinfo->target_address_address() - obj_address_);
- addresses_.Add(encoded_target);
+ // All calls and jumps are to code objects that encode into 32 bits.
+ offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_);
+ uint32_t small_target =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target));
+ ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target);
+ data_32_bit_.Add(small_target);
}
for (int i = 0; i < offsets_.length(); i++) {
memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
}
+ for (int i = 0; i < offsets_32_bit_.length(); i++) {
+ memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i],
+ sizeof(uint32_t));
+ }
}
private:
ExternalReferenceEncoder* reference_encoder_;
List<int> offsets_;
List<Address> addresses_;
+ // Some updates are 32-bit even on a 64-bit platform.
+ // We keep a separate list of them on 64-bit platforms.
+ List<int> offsets_32_bit_;
+ List<uint32_t> data_32_bit_;
};
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
- CHECK(HandleScopeImplementer::instance()->Blocks()->is_empty());
+ CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
// We need a counter function during serialization to resolve the
// references to counters in the code on the heap.
// No active threads.
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
- ASSERT(HandleScopeImplementer::instance()->Blocks()->is_empty());
+ ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
reference_decoder_ = new ExternalReferenceDecoder();
// By setting linear allocation only, we forbid the use of free list
// allocation which is not predicted by SimulatedAddress.
void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
+ // On all platforms, the encoded code object address is only 32 bits.
+ Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at(
+ reinterpret_cast<Address>(rinfo->target_object_address())));
Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
rinfo->set_target_address(target_object->instruction_start());
}
obj->IterateBody(type, size, this);
if (type == CODE_TYPE) {
- Code* code = Code::cast(obj);
- LOG(CodeMoveEvent(a, code->address()));
+ LOG(CodeMoveEvent(a, obj->address()));
}
objects_++;
return o;
// Encoded addresses of HeapObjects always have 'HeapObject' tags.
ASSERT(o->IsHeapObject());
-
switch (GetSpace(encoded)) {
// For Map space and Old space, we cache the known Pages in map_pages,
// old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
#endif
// -----------------------------------------------------------------------------
+// CodeRange
+
+List<CodeRange::FreeBlock> CodeRange::free_list_(0);
+List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
+int CodeRange::current_allocation_block_index_ = 0;
+VirtualMemory* CodeRange::code_range_ = NULL;
+
+
+bool CodeRange::Setup(const size_t requested) {
+ ASSERT(code_range_ == NULL);
+
+ code_range_ = new VirtualMemory(requested);
+ CHECK(code_range_ != NULL);
+ if (!code_range_->IsReserved()) {
+ delete code_range_;
+ code_range_ = NULL;
+ return false;
+ }
+
+ // We are sure that we have mapped a block of requested addresses.
+ ASSERT(code_range_->size() == requested);
+ LOG(NewEvent("CodeRange", code_range_->address(), requested));
+ allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
+ current_allocation_block_index_ = 0;
+ return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+ const FreeBlock* right) {
+ // The entire point of CodeRange is that the difference between two
+ // addresses in the range can be represented as a signed 32-bit int,
+ // so the cast is semantically correct.
+ return static_cast<int>(left->start - right->start);
+}
+
+
+void CodeRange::GetNextAllocationBlock(size_t requested) {
+ for (current_allocation_block_index_++;
+ current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_++) {
+ if (requested <= allocation_list_[current_allocation_block_index_].size) {
+ return; // Found a large enough allocation block.
+ }
+ }
+
+ // Sort and merge the free blocks on the free list and the allocation list.
+ free_list_.AddAll(allocation_list_);
+ allocation_list_.Clear();
+ free_list_.Sort(&CompareFreeBlockAddress);
+ for (int i = 0; i < free_list_.length();) {
+ FreeBlock merged = free_list_[i];
+ i++;
+ // Add adjacent free blocks to the current merged block.
+ while (i < free_list_.length() &&
+ free_list_[i].start == merged.start + merged.size) {
+ merged.size += free_list_[i].size;
+ i++;
+ }
+ if (merged.size > 0) {
+ allocation_list_.Add(merged);
+ }
+ }
+ free_list_.Clear();
+
+ for (current_allocation_block_index_ = 0;
+ current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_++) {
+ if (requested <= allocation_list_[current_allocation_block_index_].size) {
+ return; // Found a large enough allocation block.
+ }
+ }
+
+ // Code range is full or too fragmented.
+ V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+}
+
+
+
+void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+ ASSERT(current_allocation_block_index_ < allocation_list_.length());
+ if (requested > allocation_list_[current_allocation_block_index_].size) {
+ // Find an allocation block large enough. This function call may
+ // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
+ GetNextAllocationBlock(requested);
+ }
+ // Commit the requested memory at the start of the current allocation block.
+ *allocated = RoundUp(requested, Page::kPageSize);
+ FreeBlock current = allocation_list_[current_allocation_block_index_];
+ if (*allocated >= current.size - Page::kPageSize) {
+ // Don't leave a small free block, useless for a large object or chunk.
+ *allocated = current.size;
+ }
+ ASSERT(*allocated <= current.size);
+ if (!code_range_->Commit(current.start, *allocated, true)) {
+ *allocated = 0;
+ return NULL;
+ }
+ allocation_list_[current_allocation_block_index_].start += *allocated;
+ allocation_list_[current_allocation_block_index_].size -= *allocated;
+ if (*allocated == current.size) {
+ GetNextAllocationBlock(0); // This block is used up, get the next one.
+ }
+ return current.start;
+}
+
+
+void CodeRange::FreeRawMemory(void* address, size_t length) {
+ free_list_.Add(FreeBlock(address, length));
+ code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+ delete code_range_; // Frees all memory in the virtual memory range.
+ code_range_ = NULL;
+ free_list_.Free();
+ allocation_list_.Free();
+}
+
+
+// -----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
size_t* allocated,
Executability executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
-
- void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
+ void* mem;
+ if (executable == EXECUTABLE && CodeRange::exists()) {
+ mem = CodeRange::AllocateRawMemory(requested, allocated);
+ } else {
+ mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
+ }
int alloced = *allocated;
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
- OS::Free(mem, length);
+ if (CodeRange::contains(static_cast<Address>(mem))) {
+ CodeRange::FreeRawMemory(mem, length);
+ } else {
+ OS::Free(mem, length);
+ }
Counters::memory_allocated.Decrement(length);
size_ -= length;
ASSERT(size_ >= 0);
// ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be allocated
+// from a 2 GB range of memory, so that they can call each other using 32-bit
+// displacements. This happens automatically on 32-bit platforms, where 32-bit
+// displacements cover the entire 4GB virtual address space. On 64-bit
+// platforms, we support this using the CodeRange object, which reserves and
+// manages a range of virtual memory.
+class CodeRange : public AllStatic {
+ public:
+ // Reserves a range of virtual memory, but does not commit any of it.
+ // Can only be called once, at heap initialization time.
+ // Returns false on failure.
+ static bool Setup(const size_t requested_size);
+
+ // Frees the range of virtual memory, and frees the data structures used to
+ // manage it.
+ static void TearDown();
+
+ static bool exists() { return code_range_ != NULL; }
+ static bool contains(Address address) {
+ if (code_range_ == NULL) return false;
+ Address start = static_cast<Address>(code_range_->address());
+ return start <= address && address < start + code_range_->size();
+ }
+
+ // Allocates a chunk of memory from the large-object portion of
+ // the code range. On platforms with no separate code range, should
+ // not be called.
+ static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+ static void FreeRawMemory(void* buf, size_t length);
+
+ private:
+ // The reserved range of virtual memory that all code objects are put in.
+ static VirtualMemory* code_range_;
+ // Plain old data class, just a struct plus a constructor.
+ class FreeBlock {
+ public:
+ FreeBlock(Address start_arg, size_t size_arg)
+ : start(start_arg), size(size_arg) {}
+ FreeBlock(void* start_arg, size_t size_arg)
+ : start(static_cast<Address>(start_arg)), size(size_arg) {}
+
+ Address start;
+ size_t size;
+ };
+
+ // Freed blocks of memory are added to the free list. When the allocation
+ // list is exhausted, the free list is sorted and merged to make the new
+ // allocation list.
+ static List<FreeBlock> free_list_;
+ // Memory is allocated from the free blocks on the allocation list.
+ // The block at current_allocation_block_index_ is the current block.
+ static List<FreeBlock> allocation_list_;
+ static int current_allocation_block_index_;
+
+ // Finds a block on the allocation list that contains at least the
+ // requested amount of memory. If none is found, sorts and merges
+ // the existing free memory blocks, and searches again.
+ // If none can be found, terminates V8 with FatalProcessOutOfMemory.
+ static void GetNextAllocationBlock(size_t requested);
+ // Compares the start addresses of two free blocks.
+ static int CompareFreeBlockAddress(const FreeBlock* left,
+ const FreeBlock* right);
+};
+
+
+// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// function returns an invalid page pointer (NULL). The caller must check
// whether the returned page is valid (by calling Page::is_valid()). It is
// guaranteed that allocated pages have contiguous addresses. The actual
- // number of allocated page is returned in the output parameter
- // allocated_pages.
+ // number of allocated pages is returned in the output parameter
+ // allocated_pages. If the PagedSpace owner is executable and there is
+ // a code range, the pages are allocated from the code range.
static Page* AllocatePages(int requested_pages, int* allocated_pages,
PagedSpace* owner);
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of heap.
+ // If the flag is EXECUTABLE and a code range exists, the requested
+ // memory is allocated from the code range. If a code range exists
+ // and the freed memory is in it, the code range manages the freed memory.
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable);
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
- var char_code = %_FastCharCodeAt(this, index);
+ var char_code = %_FastCharCodeAt(this, pos);
if (!%_IsSmi(char_code)) {
var subject = ToString(this);
var index = TO_INTEGER(pos);
}
+// This has the same size as the lastMatchInfo array, and can be used for
+// functions that expect that structure to be returned. It is used when the
+// needle is a string rather than a regexp. In this case we can't update
+// lastMatchArray without erroneously affecting the properties on the global
+// RegExp object.
+var reusableMatchInfo = [2, "", "", -1, -1];
+
+
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
var subject = ToString(this);
}
-// This has the same size as the lastMatchInfo array, and can be used for
-// functions that expect that structure to be returned. It is used when the
-// needle is a string rather than a regexp. In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
// Helper function for regular expressions in String.prototype.replace.
function StringReplaceRegExp(subject, regexp, replace) {
replace = ToString(replace);
// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
// should be 'abcd' and not 'dddd' (or anything else).
function StringReplaceRegExpWithFunction(subject, regexp, replace) {
- var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(lastMatchInfo)) return subject;
+ var matchInfo = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(matchInfo)) return subject;
var result = new ReplaceResultBuilder(subject);
// There's at least one match. If the regexp is global, we have to loop
if (regexp.global) {
var previous = 0;
do {
- result.addSpecialSlice(previous, lastMatchInfo[CAPTURE0]);
- var startOfMatch = lastMatchInfo[CAPTURE0];
- previous = lastMatchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
- // Can't use lastMatchInfo any more from here, since the function could
+ result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
+ var startOfMatch = matchInfo[CAPTURE0];
+ previous = matchInfo[CAPTURE1];
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ // Can't use matchInfo any more from here, since the function could
// overwrite it.
// Continue with the next match.
// Increment previous if we matched an empty string, as per ECMA-262
// Per ECMA-262 15.10.6.2, if the previous index is greater than the
// string length, there is no match
- lastMatchInfo = (previous > subject.length)
+ matchInfo = (previous > subject.length)
? null
: DoRegExpExec(regexp, subject, previous);
- } while (!IS_NULL(lastMatchInfo));
+ } while (!IS_NULL(matchInfo));
// Tack on the final right substring after the last match, if necessary.
if (previous < subject.length) {
result.addSpecialSlice(previous, subject.length);
}
} else { // Not a global regexp, no need to loop.
- result.addSpecialSlice(0, lastMatchInfo[CAPTURE0]);
- var endOfMatch = lastMatchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
- // Can't use lastMatchInfo any more from here, since the function could
+ result.addSpecialSlice(0, matchInfo[CAPTURE0]);
+ var endOfMatch = matchInfo[CAPTURE1];
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ // Can't use matchInfo any more from here, since the function could
// overwrite it.
result.addSpecialSlice(endOfMatch, subject.length);
}
// Helper function to apply a string replacement function once.
-function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
+function ApplyReplacementFunction(replace, matchInfo, subject) {
// Compute the parameter list consisting of the match, captures, index,
// and subject for the replace function invocation.
- var index = lastMatchInfo[CAPTURE0];
+ var index = matchInfo[CAPTURE0];
// The number of captures plus one for the match.
- var m = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
+ var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
if (m == 1) {
- var s = CaptureString(subject, lastMatchInfo, 0);
+ var s = CaptureString(subject, matchInfo, 0);
// Don't call directly to avoid exposing the built-in global object.
return replace.call(null, s, index, subject);
}
var parameters = $Array(m + 2);
for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, lastMatchInfo, j);
+ parameters[j] = CaptureString(subject, matchInfo, j);
}
parameters[j] = index;
parameters[j + 1] = subject;
return result;
}
- var lastMatchInfo = splitMatch(separator, subject, currentIndex, startIndex);
+ var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
- if (IS_NULL(lastMatchInfo)) {
+ if (IS_NULL(matchInfo)) {
result[result.length] = subject.slice(currentIndex, length);
return result;
}
- var endIndex = lastMatchInfo[CAPTURE1];
+ var endIndex = matchInfo[CAPTURE1];
// We ignore a zero-length match at the currentIndex.
if (startIndex === endIndex && endIndex === currentIndex) {
continue;
}
- result[result.length] = SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
+ result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
if (result.length === limit) return result;
- for (var i = 2; i < NUMBER_OF_CAPTURES(lastMatchInfo); i += 2) {
- var start = lastMatchInfo[CAPTURE(i)];
- var end = lastMatchInfo[CAPTURE(i + 1)];
+ for (var i = 2; i < NUMBER_OF_CAPTURES(matchInfo); i += 2) {
+ var start = matchInfo[CAPTURE(i)];
+ var end = matchInfo[CAPTURE(i + 1)];
if (start != -1 && end != -1) {
result[result.length] = SubString(subject, start, end);
} else {
// ECMA-262 section 15.5.4.14
-// Helper function used by split. This version returns the lastMatchInfo
+// Helper function used by split. This version returns the matchInfo
// instead of allocating a new array with basically the same information.
function splitMatch(separator, subject, current_index, start_index) {
if (IS_REGEXP(separator)) {
- var lastMatchInfo = DoRegExpExec(separator, subject, start_index);
- if (lastMatchInfo == null) return null;
+ var matchInfo = DoRegExpExec(separator, subject, start_index);
+ if (matchInfo == null) return null;
// Section 15.5.4.14 paragraph two says that we do not allow zero length
// matches at the end of the string.
- if (lastMatchInfo[CAPTURE0] === subject.length) return null;
- return lastMatchInfo;
+ if (matchInfo[CAPTURE0] === subject.length) return null;
+ return matchInfo;
}
var separatorIndex = subject.indexOf(separator, start_index);
Object* LoadCallbackProperty(Arguments args) {
- Handle<JSObject> recv = args.at<JSObject>(0);
- Handle<JSObject> holder = args.at<JSObject>(1);
AccessorInfo* callback = AccessorInfo::cast(args[2]);
- Handle<Object> data = args.at<Object>(3);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
- Handle<String> name = args.at<String>(4);
- // NOTE: If we can align the structure of an AccessorInfo with the
- // locations of the arguments to this function maybe we don't have
- // to explicitly create the structure but can just pass a pointer
- // into the stack.
- LOG(ApiNamedPropertyAccess("load", *recv, *name));
- v8::AccessorInfo info(v8::Utils::ToLocal(recv),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(holder));
+ v8::AccessorInfo info(args.arguments());
HandleScope scope;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
- result = fun(v8::Utils::ToLocal(name), info);
+ result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
if (result.IsEmpty()) return Heap::undefined_value();
Object* StoreCallbackProperty(Arguments args) {
- Handle<JSObject> recv = args.at<JSObject>(0);
+ JSObject* recv = JSObject::cast(args[0]);
AccessorInfo* callback = AccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
Handle<String> name = args.at<String>(2);
Handle<Object> value = args.at<Object>(3);
HandleScope scope;
- Handle<Object> data(callback->data());
- LOG(ApiNamedPropertyAccess("store", *recv, *name));
- v8::AccessorInfo info(v8::Utils::ToLocal(recv),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(recv));
+ LOG(ApiNamedPropertyAccess("store", recv, *name));
+ CustomArguments custom_args(callback->data(), recv, recv);
+ v8::AccessorInfo info(custom_args.end());
{
// Leaving JavaScript.
VMState state(EXTERNAL);
* provide any value for the given name.
*/
Object* LoadPropertyWithInterceptorOnly(Arguments args) {
- Handle<JSObject> receiver_handle = args.at<JSObject>(0);
- Handle<JSObject> holder_handle = args.at<JSObject>(1);
+ JSObject* receiver_handle = JSObject::cast(args[0]);
+ JSObject* holder_handle = JSObject::cast(args[1]);
Handle<String> name_handle = args.at<String>(2);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
- Handle<Object> data_handle = args.at<Object>(4);
+ Object* data_handle = args[4];
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
{
// Use the interceptor getter.
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(data_handle, receiver_handle, holder_handle);
+ v8::AccessorInfo info(args.end());
HandleScope scope;
v8::Handle<v8::Value> r;
{
{
// Use the interceptor getter.
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(*data_handle, *receiver_handle, *holder_handle);
+ v8::AccessorInfo info(args.end());
HandleScope scope;
v8::Handle<v8::Value> r;
{
thread_local_.stack_is_cooked_ = false;
thread_local_.try_catch_handler_ = NULL;
thread_local_.context_ = NULL;
- thread_local_.thread_id_ = ThreadManager::kInvalidId;
+ int id = ThreadManager::CurrentId();
+ thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
thread_local_.external_caught_exception_ = false;
thread_local_.failed_access_check_callback_ = NULL;
clear_pending_exception();
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
+
+ void Free() {
+ ASSERT(!has_pending_message_);
+ ASSERT(!external_caught_exception_);
+ ASSERT(try_catch_handler_ == NULL);
+ }
};
#define TOP_ADDRESS_LIST(C) \
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
static char* ArchiveThread(char* to);
static char* RestoreThread(char* from);
+ static void FreeThreadResources() { thread_local_.Free(); }
static const char* kStackOverflowMessage;
// Expect $String = global.String;
+// Lazily initialized.
+var hexCharArray = 0;
+var hexCharCodeArray = 0;
+
+
function URIAddEncodedOctetToBuffer(octet, result, index) {
result[index++] = 37; // Char code of '%'.
result[index++] = hexCharCodeArray[octet >> 4];
}
-// Lazily initialized.
-var hexCharArray = 0;
-var hexCharCodeArray = 0;
-
-
function HexValueOf(c) {
var code = c.charCodeAt(0);
public:
static bool Traverse(AstNode* node);
- void VisitBlock(Block* node);
- void VisitDeclaration(Declaration* node);
- void VisitExpressionStatement(ExpressionStatement* node);
- void VisitEmptyStatement(EmptyStatement* node);
- void VisitIfStatement(IfStatement* node);
- void VisitContinueStatement(ContinueStatement* node);
- void VisitBreakStatement(BreakStatement* node);
- void VisitReturnStatement(ReturnStatement* node);
- void VisitWithEnterStatement(WithEnterStatement* node);
- void VisitWithExitStatement(WithExitStatement* node);
- void VisitSwitchStatement(SwitchStatement* node);
- void VisitLoopStatement(LoopStatement* node);
- void VisitForInStatement(ForInStatement* node);
- void VisitTryCatch(TryCatch* node);
- void VisitTryFinally(TryFinally* node);
- void VisitDebuggerStatement(DebuggerStatement* node);
- void VisitFunctionLiteral(FunctionLiteral* node);
- void VisitFunctionBoilerplateLiteral(FunctionBoilerplateLiteral* node);
- void VisitConditional(Conditional* node);
- void VisitSlot(Slot* node);
- void VisitVariable(Variable* node);
- void VisitVariableProxy(VariableProxy* node);
- void VisitLiteral(Literal* node);
- void VisitRegExpLiteral(RegExpLiteral* node);
- void VisitObjectLiteral(ObjectLiteral* node);
- void VisitArrayLiteral(ArrayLiteral* node);
- void VisitCatchExtensionObject(CatchExtensionObject* node);
- void VisitAssignment(Assignment* node);
- void VisitThrow(Throw* node);
- void VisitProperty(Property* node);
- void VisitCall(Call* node);
- void VisitCallEval(CallEval* node);
- void VisitCallNew(CallNew* node);
- void VisitCallRuntime(CallRuntime* node);
- void VisitUnaryOperation(UnaryOperation* node);
- void VisitCountOperation(CountOperation* node);
- void VisitBinaryOperation(BinaryOperation* node);
- void VisitCompareOperation(CompareOperation* node);
- void VisitThisFunction(ThisFunction* node);
+ // AST node visit functions.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ void VisitVariable(Variable* var);
private:
int weight_;
}
-void UsageComputer::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void UsageComputer::VisitCallNew(CallNew* node) {
- VisitCall(node);
+ Read(node->expression());
+ ReadList(node->arguments());
}
FILE* f = OS::FOpen(filename, "wb");
if (f == NULL) {
if (verbose) {
- OS::PrintError("Cannot open file %s for reading.\n", filename);
+ OS::PrintError("Cannot open file %s for writing.\n", filename);
}
return 0;
}
::assembler::arm::Simulator::Initialize();
#endif
+ { // NOLINT
+ // Ensure that the thread has a valid stack guard. The v8::Locker object
+ // will ensure this too, but we don't have to use lockers if we are only
+ // using one thread.
+ ExecutionAccess lock;
+ StackGuard::InitThread(lock);
+ }
+
// Setup the object heap
ASSERT(!Heap::HasBeenSetup());
if (!Heap::Setup(create_heap_objects)) {
}
-bool V8::IdleNotification(bool is_high_priority) {
- if (!FLAG_use_idle_notification) return false;
- // Ignore high priority instances of V8.
- if (is_high_priority) return false;
+bool V8::IdleNotification() {
+ // Returning true tells the caller that there is no need to call
+ // IdleNotification again.
+ if (!FLAG_use_idle_notification) return true;
// Tell the heap that it may want to adjust.
return Heap::IdleNotification();
static Smi* RandomPositiveSmi();
// Idle notification directly from the API.
- static bool IdleNotification(bool is_high_priority);
+ static bool IdleNotification();
private:
// True if engine is currently running
if (!internal::ThreadManager::IsLockedByCurrentThread()) {
internal::ThreadManager::Lock();
has_lock_ = true;
+ // Make sure that V8 is initialized. Archiving of threads interferes
+ // with deserialization by adding additional root pointers, so we must
+ // initialize here, before anyone can call ~Locker() or Unlocker().
+ if (!internal::V8::IsRunning()) {
+ V8::Initialize();
+ }
// This may be a locker within an unlocker in which case we have to
// get the saved state for this thread and restore it.
if (internal::ThreadManager::RestoreThread()) {
top_level_ = false;
+ } else {
+ internal::ExecutionAccess access;
+ internal::StackGuard::ClearThread(access);
+ internal::StackGuard::InitThread(access);
}
}
ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
Locker::~Locker() {
ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
if (has_lock_) {
- if (!top_level_) {
+ if (top_level_) {
+ internal::ThreadManager::FreeThreadResources();
+ } else {
internal::ThreadManager::ArchiveThread();
}
internal::ThreadManager::Unlock();
ThreadState* state =
reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
if (state == NULL) {
+ // This is a new thread.
+ StackGuard::InitThread(access);
return false;
}
char* from = state->data();
from = HandleScopeImplementer::RestoreThread(from);
from = Top::RestoreThread(from);
+ from = Relocatable::RestoreState(from);
#ifdef ENABLE_DEBUGGER_SUPPORT
from = Debug::RestoreDebug(from);
#endif
#endif
StackGuard::ArchiveSpacePerThread() +
RegExpStack::ArchiveSpacePerThread() +
- Bootstrapper::ArchiveSpacePerThread();
+ Bootstrapper::ArchiveSpacePerThread() +
+ Relocatable::ArchiveSpacePerThread();
}
// in ThreadManager::Iterate(ObjectVisitor*).
to = HandleScopeImplementer::ArchiveThread(to);
to = Top::ArchiveThread(to);
+ to = Relocatable::ArchiveState(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
to = Debug::ArchiveDebug(to);
#endif
}
+void ThreadManager::FreeThreadResources() {
+ HandleScopeImplementer::FreeThreadResources();
+ Top::FreeThreadResources();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug::FreeThreadResources();
+#endif
+ StackGuard::FreeThreadResources();
+ RegExpStack::FreeThreadResources();
+ Bootstrapper::FreeThreadResources();
+}
+
+
bool ThreadManager::IsArchived() {
return Thread::HasThreadLocal(thread_state_key);
}
char* data = state->data();
data = HandleScopeImplementer::Iterate(v, data);
data = Top::Iterate(v, data);
+ data = Relocatable::Iterate(v, data);
}
}
static void ArchiveThread();
static bool RestoreThread();
+ static void FreeThreadResources();
static bool IsArchived();
static void Iterate(ObjectVisitor* v);
UseCount* var_uses() { return &var_uses_; }
UseCount* obj_uses() { return &obj_uses_; }
- bool IsVariable(Handle<String> n) {
+ bool IsVariable(Handle<String> n) const {
return !is_this() && name().is_identical_to(n);
}
bool is_this() const { return kind_ == THIS; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
+ // True if the variable is named eval and not known to be shadowed.
+ bool is_possibly_eval() const {
+ return IsVariable(Factory::eval_symbol()) &&
+ (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+ }
+
Variable* local_if_not_shadowed() const {
ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
return local_if_not_shadowed_;
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 13
+#define BUILD_NUMBER 14
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
}
+void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ RecordRelocInfo(rmode);
+ int current = code_targets_.length();
+ if (current > 0 && code_targets_.last().is_identical_to(target)) {
+ // Optimization if we keep jumping to the same code target.
+ emitl(current - 1);
+ } else {
+ code_targets_.Add(target);
+ emitl(current);
+ }
+}
+
+
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}
Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(pc);
+ return Memory::int32_at(pc) + pc + 4;
}
void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(pc) = target;
- CPU::FlushICache(pc, sizeof(intptr_t));
+ Memory::int32_at(pc) = target - pc - 4;
+ CPU::FlushICache(pc, sizeof(int32_t));
}
+Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
+ return code_targets_[Memory::int32_at(pc)];
+}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
- intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
- *p += delta; // relocate entry
+ Memory::Address_at(pc_) += delta;
+ } else if (IsCodeTarget(rmode_)) {
+ Memory::int32_at(pc_) -= delta;
+ } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted).
+ Memory::int32_at(pc_ + 1) -= delta; // relocate entry
}
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
+ if (IsCodeTarget(rmode_)) {
+ return Assembler::target_address_at(pc_);
+ } else {
+ return Memory::Address_at(pc_);
+ }
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
+ if (IsCodeTarget(rmode_)) {
+ Assembler::set_target_address_at(pc_, target);
+ } else {
+ Memory::Address_at(pc_) = target;
+ }
}
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return *reinterpret_cast<Object**>(pc_);
+ return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ if (rmode_ == EMBEDDED_OBJECT) {
+ return Memory::Object_Handle_at(pc_);
+ } else {
+ return origin->code_target_object_handle_at(pc_);
+ }
}
Address RelocInfo::call_address() {
ASSERT(IsCallInstruction());
- return Assembler::target_address_at(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset);
+ return Memory::Address_at(
+ pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsCallInstruction());
- Assembler::set_target_address_at(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset,
- target);
+ Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
+ target;
}
byte* Assembler::spare_buffer_ = NULL;
-Assembler::Assembler(void* buffer, int buffer_size) {
+Assembler::Assembler(void* buffer, int buffer_size)
+ : code_targets_(100) {
if (buffer == NULL) {
// do our own buffer management
if (buffer_size <= kMinimalBufferSize) {
}
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp
+ emit(0xE8);
+ emit_code_target(target, rmode);
+}
+
+
void Assembler::call(Register adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
}
+void Assembler::j(Condition cc,
+ Handle<Code> target,
+ RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint4(cc));
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emit_code_target(target, rmode);
+}
+
+
void Assembler::jmp(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
}
+void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emit_code_target(target, rmode);
+}
+
+
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
}
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
-
+const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
+ 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::JS_RETURN;
} } // namespace v8::internal
// Assembler functions are invoked in between GetCode() calls.
void GetCode(CodeDesc* desc);
- // Read/Modify the code target in the branch/call instruction at pc.
- // On the x64 architecture, the address is absolute, not relative.
+ // Read/Modify the code target in the relative branch/call instruction at pc.
+ // On the x64 architecture, we use relative jumps with a 32-bit displacement
+ // to jump to other Code objects in the Code space in the heap.
+ // Jumps to C functions are done indirectly through a 64-bit register holding
+ // the absolute address of the target.
+ // These functions convert between absolute Addresses of Code objects and
+ // the relative displacements stored in the code.
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
-
+ inline Handle<Object> code_target_object_handle_at(Address pc);
// Distance between the address of the code target in the call instruction
- // and the return address. Checked in the debug build.
- static const int kCallTargetAddressOffset = 3 + kPointerSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to (movq = REX.W 0xB8+r.).
- static const int kPatchReturnSequenceAddressOffset = 2;
-
+ // and the return address pushed on the stack.
+ static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
+  // Distance between the start of the JS return sequence and where the
+  // 32-bit displacement of a near call would be, relative to the pushed
+  // return address. TODO(X64): Use return sequence length instead.
+  // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
+  static const int kPatchReturnSequenceAddressOffset = 13 - 4;
+ // TODO(X64): Rename this, removing the "Real", after changing the above.
+ static const int kRealPatchReturnSequenceAddressOffset = 2;
// ---------------------------------------------------------------------------
// Code generation
//
immediate_arithmetic_op(0x4, dst, src);
}
+ void andl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+
void decq(Register dst);
void decq(const Operand& dst);
void decl(Register dst);
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
+ void call(Handle<Code> target, RelocInfo::Mode rmode);
// Call near absolute indirect, address in register
void call(Register adr);
// Jumps
// Jump short or near relative.
+ // Use a 32-bit signed displacement.
void jmp(Label* L); // unconditional jump to L
+ void jmp(Handle<Code> target, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
void jmp(Register adr);
// Conditional jumps
void j(Condition cc, Label* L);
+ void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Floating-point operations
void fld(int i);
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
- // Writes a doubleword of data in the code stream.
- // Used for inline tables, e.g., jump-tables.
- // void dd(uint32_t data);
-
- // Writes a quadword of data in the code stream.
- // Used for inline tables, e.g., jump-tables.
- // void dd(uint64_t data, RelocInfo::Mode reloc_info);
-
int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; }
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
- inline void emit(Handle<Object> handle);
inline void emitq(uint64_t x, RelocInfo::Mode rmode);
inline void emitw(uint16_t x);
+ inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
+ List< Handle<Code> > code_targets_;
// push-pop elimination
byte* last_pc_;
__ movq(Operand(kScratchRegister, 0), rdi);
// The actual argument count has already been loaded into register
- // rax, but JumpToBuiltin expects rax to contain the number of
+ // rax, but JumpToRuntime expects rax to contain the number of
// arguments including the receiver.
__ incq(rax);
- __ JumpToBuiltin(ExternalReference(id), 1);
+ __ JumpToRuntime(ExternalReference(id), 1);
}
}
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+ __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ movq(result,
+ Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity >= 0);
+
+ // Load the initial map from the array function.
+ __ movq(scratch1, FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
+ __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
+ Factory::empty_fixed_array());
+ // Field JSArray::kElementsOffset is initialized later.
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+ // If no storage is requested for the elements array just set the empty
+ // fixed array.
+ if (initial_capacity == 0) {
+ __ Move(FieldOperand(result, JSArray::kElementsOffset),
+ Factory::empty_fixed_array());
+ return;
+ }
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array
+ // scratch2: start of next object
+ __ Move(FieldOperand(scratch1, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ movq(FieldOperand(scratch1, Array::kLengthOffset),
+ Immediate(initial_capacity));
+
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+ static const int kLoopUnfoldLimit = 4;
+ ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ __ Move(scratch3, Factory::the_hole_value());
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ // Use a scratch register here to have only one reloc info when unfolding
+ // the loop.
+ for (int i = 0; i < initial_capacity; i++) {
+ __ movq(FieldOperand(scratch1,
+ FixedArray::kHeaderSize + i * kPointerSize),
+ scratch3);
+ }
+ } else {
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(scratch1, 0), scratch3);
+ __ addq(scratch1, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(scratch1, scratch2);
+ __ j(below, &loop);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array and elements_array_end (see
+// below for when that is not the case). If the parameter fill_with_holes is
+// true the allocated elements backing store is filled with the hole values
+// otherwise it is left uninitialized. When the backing store is filled the
+// register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array,
+ Register elements_array_end,
+ Register scratch,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ movq(elements_array,
+ FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ testq(array_size, array_size);
+ __ j(not_zero, ¬_empty);
+
+ // If an empty array is requested allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested elements.
+ __ bind(¬_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ times_half_pointer_size, // array_size is a smi.
+ array_size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array: initial map
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
+ __ Move(elements_array, Factory::empty_fixed_array());
+ __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+ // Field JSArray::kElementsOffset is initialized later.
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ lea(elements_array, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+ // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ SmiToInteger64(array_size, array_size);
+ __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ Label not_empty_2, fill_array;
+ __ testq(array_size, array_size);
+ __ j(not_zero, ¬_empty_2);
+ // Length of the FixedArray is the number of pre-allocated elements even
+ // though the actual JSArray has length 0.
+ __ movq(FieldOperand(elements_array, Array::kLengthOffset),
+ Immediate(kPreallocatedArrayElements));
+ __ jmp(&fill_array);
+ __ bind(¬_empty_2);
+ // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+ // same.
+ __ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ __ bind(&fill_array);
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ Move(scratch, Factory::the_hole_value());
+ __ lea(elements_array, Operand(elements_array,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(elements_array, 0), scratch);
+ __ addq(elements_array, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(elements_array, elements_array_end);
+ __ j(below, &loop);
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// rdi: constructor (built-in Array function)
+// rax: argc
+// rsp[0]: return address
+// rsp[8]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in rdi needs to be preserved for
+// entering the generic code. In both cases argc in rax needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label *call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments.
+ __ testq(rax, rax);
+ __ j(not_zero, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ rdi,
+ rbx,
+ rcx,
+ rdx,
+ r8,
+ kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(kPointerSize);
+
+ // Check for one argument. Bail out if argument is not smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmpq(rax, Immediate(1));
+ __ j(not_equal, &argc_two_or_more);
+ __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
+ Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
+ __ j(not_positive_smi, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is to large to actually allocate an elements array.
+ __ JumpIfSmiGreaterEqualsConstant(rdx,
+ JSObject::kInitialMaxFastElementArray,
+ call_generic_code);
+
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+ // esp[0]: return address
+ // esp[8]: argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(2 * kPointerSize);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ movq(rdx, rax);
+ __ Integer32ToSmi(rdx, rdx); // Convet argc to a smi.
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+ // esp[0] : return address
+ // esp[8] : last argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+
+ // rax: argc
+ // rbx: JSArray
+ // rcx: elements_array
+ // r8: elements_array_end (untagged)
+ // esp[0]: return address
+ // esp[8]: last argument
+
+ // Location of the last argument
+ __ lea(r9, Operand(rsp, kPointerSize));
+
+ // Location of the first array element (Parameter fill_with_holes to
+ // AllocateJSArrayis false, so the FixedArray is returned in rcx).
+ __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // rax: argc
+ // rbx: JSArray
+ // rdx: location of the first array element
+ // r9: location of the last argument
+ // esp[0]: return address
+ // esp[8]: last argument
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+ __ movq(Operand(rdx, 0), kScratchRegister);
+ __ addq(rdx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
+
+ // Remove caller arguments from the stack and return.
+ // rax: argc
+ // rbx: JSArray
+ // esp[0]: return address
+ // esp[8]: last argument
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ push(rcx);
+ __ movq(rax, rbx);
+ __ ret(0);
+}
+
+
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // Just jump to the generic array code.
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, rdi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function shoud be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = __ CheckNotSmi(rbx);
+ __ Assert(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ Jump(array_code, RelocInfo::CODE_TARGET);
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // Just jump to the generic construct code.
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function which
+ // does always have a map.
+ GenerateLoadArrayFunction(masm, rbx);
+ __ cmpq(rdi, rbx);
+ __ Assert(equal, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = __ CheckNotSmi(rbx);
+ __ Assert(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
// rdi: constructor
__ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
+ ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
// rdi: constructor
// rax: initial map (if proven valid below)
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
// rdi: size of new object
- __ AllocateObjectInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
// rbx: JSObject
// rdi: start of next object (will be start of FixedArray)
// rdx: number of elements in properties array
- __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// rbx: JSObject
+++ /dev/null
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-x64.h"
-#include "debug.h"
-#include "macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Label deferred_enter, deferred_exit;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- __ push(rdi);
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(kScratchRegister);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ movq(kScratchRegister, stack_limit);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
- __ j(below, &deferred_enter);
- __ bind(&deferred_exit);
- }
- }
- successor_->Compile(masm);
- if (FLAG_check_stack) {
- Comment cmnt(masm, "[ Deferred Stack Check");
- __ bind(&deferred_enter);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ jmp(&deferred_exit);
- }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
- __ movq(rsp, rbp);
- __ pop(rbp);
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ret((count + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // "movq rsp, rbp; pop rbp" has length 4. "ret k" has length 3.
- const int kPadding = Debug::kX64JSReturnSequenceLength - 4 - 3;
- for (int i = 0; i < kPadding; ++i) {
- __ int3();
- }
-#endif
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
- // A test rax instruction after the call indicates to the IC code that it
- // was inlined. Ensure there is not one after the call below.
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ pop(rbx); // Discard key.
- } else {
- key()->Get(masm, rcx);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- }
- __ pop(rbx); // Discard receiver.
- location()->Set(masm, rax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Push both operands and call the specialized stub.
- if (!left()->is_on_stack()) left()->Push(masm);
- right()->Push(masm);
- GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
- __ CallStub(&stub);
- location()->Set(masm, rax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value()->Get(masm, rax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ Move(reg, handle_);
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ Push(handle_);
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return Operand(rbp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(rbp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return Operand(rax, 0);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ Move(ToOperand(loc), handle_);
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ movq(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ movq(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // We dispatch to the value because in some cases (temp or constant) we
- // can use special instruction sequences.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ movq(kScratchRegister, ToOperand(this));
- __ movq(ToOperand(loc), kScratchRegister);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(rax)) __ movq(reg, rax);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(rax)) __ movq(rax, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(rax);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, rax);
- break;
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ movq(ToOperand(loc), rax);
- break;
- case STACK:
- __ pop(ToOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
- CodeForStatementPosition(node);
-
Load(node->exception());
Result result = frame_->CallRuntime(Runtime::kThrow, 1);
frame_->Push(&result);
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
-
// Check if the function is a variable or a property.
Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ movq(scratch.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ movq(result.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
frame_->RestoreContextRegister();
// Replace the function on the stack with the result.
frame_->SetElementAt(0, &result);
+
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// ----------------------------------
// Call the function.
CallWithArguments(args, node->position());
+
} else if (property != NULL) {
// Check if the key is a literal string.
Literal* literal = property->key()->AsLiteral();
// Call the function.
CallWithArguments(args, node->position());
}
+
} else {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is not global
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
-
- // Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ movq(scratch.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ movq(result.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
- frame_->SetElementAt(arg_count, &result);
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-}
-
-
void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
Register scratch,
Register result) {
// Allocate heap number in new space.
- __ AllocateObjectInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch,
+ no_reg,
+ need_gc,
+ TAG_OBJECT);
// Set the map and tag the result.
__ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, fast, check_string, index_int, index_string;
+ Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
__ bind(&index_int);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &slow);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(below, &fast); // Unsigned comparison rejects negative indices.
+ __ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
+ // Fast case: Do the load.
+ __ movq(rax, Operand(rcx, rax, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
+
+  // Check whether the elements object is a pixel array.
+ // rax: untagged index
+ // rcx: elements array
+ __ bind(&check_pixel_array);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kPixelArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movb(rax, Operand(rcx, rax, times_1, 0));
+ __ Integer32ToSmi(rax, rax);
+ __ ret(0);
+
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
__ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
__ shrl(rax, Immediate(String::kLongLengthShift));
__ jmp(&index_int);
- // Fast case: Do the load.
- __ bind(&fast);
- __ movq(rax, Operand(rcx, rax, times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, &slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
- __ ret(0);
}
// -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, fast, array, extra, check_pixel_array;
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key
// rbx: index (as a smi), zero-extended.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &slow);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ SmiToInteger32(rdx, rbx);
__ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
// rbx: index (as a smi)
__ j(below, &fast);
-
// Slow case: Push extra copies of the arguments (3).
__ bind(&slow);
__ pop(rcx);
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  // Check whether the elements object is a pixel array.
+ // rax: value
+ // rcx: elements array
+ // rbx: index (as a smi), zero-extended.
+ __ bind(&check_pixel_array);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kPixelArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ // Check that the value is a smi. If a conversion is needed call into the
+ // runtime to convert and clamp.
+ __ JumpIfNotSmi(rax, &slow);
+ __ SmiToInteger32(rbx, rbx);
+ __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ movq(rdx, rax); // Save the value.
+ __ SmiToInteger32(rax, rax);
+ { // Clamp the value to [0..255].
+ Label done, is_negative;
+ __ testl(rax, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ j(negative, &is_negative);
+ __ movl(rax, Immediate(255));
+ __ jmp(&done);
+ __ bind(&is_negative);
+ __ xorl(rax, rax); // Clear rax.
+ __ bind(&done);
+ }
+ __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movb(Operand(rcx, rbx, times_1, 0), rax);
+ __ movq(rax, rdx); // Return the original value.
+ __ ret(0);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
__ SmiSubConstant(rbx, rbx, 1, NULL);
__ jmp(&fast);
-
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is the
// length is always a smi.
__ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
__ j(above_equal, &extra);
-
// Fast case: Do the store.
__ bind(&fast);
// rax: value
}
+void MacroAssembler::CompareRoot(Operand with,
+ Heap::RootListIndex index) {
+ LoadRoot(kScratchRegister, index);
+ cmpq(with, kScratchRegister);
+}
+
+
static void RecordWriteHelper(MacroAssembler* masm,
Register object,
Register addr,
// should remove this need and make the runtime routine entry code
// smarter.
movq(rax, Immediate(num_arguments));
- JumpToBuiltin(ext, result_size);
+ JumpToRuntime(ext, result_size);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext,
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
movq(rbx, ext);
CEntryStub ces(result_size);
- movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
- jmp(kScratchRegister);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
unresolved_.Add(entry);
}
+void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
+ int constant,
+ Label* on_greater_equals) {
+ if (Smi::IsValid(constant)) {
+ Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
+ j(are_greater_equal, on_greater_equals);
+ } else if (constant < Smi::kMinValue) {
+ jmp(on_greater_equals);
+ }
+}
+
+
void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
Condition is_valid = CheckInteger32ValidSmiValue(src);
j(ReverseCondition(is_valid), on_invalid);
}
+Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
+ int constant) {
+ if (constant == 0) {
+ testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
+ return positive;
+ }
+ if (Smi::IsValid(constant)) {
+ cmpl(src, Immediate(Smi::FromInt(constant)));
+ return greater_equal;
+ }
+ // Can't be equal.
+ UNREACHABLE();
+ return no_condition;
+}
+
+
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
// A 32-bit integer value can be converted to a smi if it is in the
// range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
- Label target;
- bind(&target);
-#endif
- jmp(kScratchRegister);
-#ifdef DEBUG
- ASSERT_EQ(kCallTargetAddressOffset,
- SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+ // TODO(X64): Inline this
+ jmp(code_object, rmode);
}
void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
WriteRecordedPositions();
- movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
- // Patch target is kPointer size bytes *before* target label.
- Label target;
- bind(&target);
-#endif
- call(kScratchRegister);
-#ifdef DEBUG
- ASSERT_EQ(kCallTargetAddressOffset,
- SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+ call(code_object, rmode);
}
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry =
{ pc_offset() - kCallTargetAddressOffset, flags, name };
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
}
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
// Load address of new object into result.
LoadAllocationTopHelper(result, result_end, scratch, flags);
void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(Operand with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// Check whether a tagged smi is equal to a constant.
Condition CheckSmiEqualsConstant(Register src, int constant);
+ // Check whether a tagged smi is greater than or equal to a constant.
+ Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
+
// Checks whether an 32-bit integer value is a valid for conversion
// to a smi.
Condition CheckInteger32ValidSmiValue(Register src);
// to the constant.
void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
+ // Jump to label if the value is a tagged smi with value greater than or equal
+ // to the constant.
+ void JumpIfSmiGreaterEqualsConstant(Register src,
+ int constant,
+ Label* on_equals);
+
// Jump if either or both register are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
- // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+ // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of arguments.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& ext, int result_size);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& ext, int result_size);
// ---------------------------------------------------------------------------
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in the register rdi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
+#include "allocation.h"
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on x64 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
// Load the initial map and verify that it is in fact a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
+ ASSERT(kSmiTag == 0);
__ JumpIfSmi(rbx, &generic_stub_call);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
__ j(not_equal, &generic_stub_call);
// rbx: initial map
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateObjectInNewSpace(rcx,
- rdx,
- rcx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rcx,
+ rdx,
+ rcx,
+ no_reg,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// rbx: initial map
NewSpace* new_space = Heap::new_space();
static const int kNewSpaceFillerSize = ByteArray::SizeFor(0);
while (new_space->Available() > kNewSpaceFillerSize) {
+ int available_before = new_space->Available();
CHECK(!Heap::AllocateByteArray(0)->IsFailure());
+ if (available_before == new_space->Available()) {
+ // It seems that we are avoiding new space allocations when
+ // allocation is forced, so no need to fill up new space
+ // in order to make the test harder.
+ break;
+ }
}
CHECK(!Heap::AllocateByteArray(100)->IsFailure());
CHECK(!Heap::AllocateFixedArray(100, NOT_TENURED)->IsFailure());
CHECK_EQ(42, result->Int32Value());
env->Exit();
}
+
+
+// CodeRange test.
+// Tests memory management in a CodeRange by allocating and freeing blocks,
+// using a pseudorandom generator to choose block sizes geometrically
+// distributed between Page::kPageSize and 2^5 * Page::kPageSize.
+// Ensure that the freed chunks are collected and reused by allocating (in
+// total) more than the size of the CodeRange.
+
+// This pseudorandom generator does not need to be particularly good.
+// Use the lower half of the V8::Random() generator.
+unsigned int Pseudorandom() {
+ static uint32_t lo = 2345;
+ lo = 18273 * (lo & 0xFFFF) + (lo >> 16); // Provably not 0.
+ return lo & 0xFFFF;
+}
+
+
+// Plain old data class. Represents a block of allocated memory.
+class Block {
+ public:
+ Block(void* base_arg, int size_arg)
+ : base(base_arg), size(size_arg) {}
+
+ void *base;
+ int size;
+};
+
+
+TEST(CodeRange) {
+ const int code_range_size = 16*MB;
+ CodeRange::Setup(code_range_size);
+ int current_allocated = 0;
+ int total_allocated = 0;
+ List<Block> blocks(1000);
+
+ while (total_allocated < 5 * code_range_size) {
+ if (current_allocated < code_range_size / 10) {
+ // Allocate a block.
+ // Geometrically distributed sizes, greater than Page::kPageSize.
+ size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
+ Pseudorandom() % 5000 + 1;
+ size_t allocated = 0;
+ void* base = CodeRange::AllocateRawMemory(requested, &allocated);
+ blocks.Add(Block(base, allocated));
+ current_allocated += allocated;
+ total_allocated += allocated;
+ } else {
+ // Free a block.
+ int index = Pseudorandom() % blocks.length();
+ CodeRange::FreeRawMemory(blocks[index].base, blocks[index].size);
+ current_allocated -= blocks[index].size;
+ if (index < blocks.length() - 1) {
+ blocks[index] = blocks.RemoveLast();
+ } else {
+ blocks.RemoveLast();
+ }
+ }
+ }
+
+ CodeRange::TearDown();
+}
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
#include "api.h"
#include "compilation-cache.h"
+#include "execution.h"
#include "snapshot.h"
#include "platform.h"
#include "top.h"
CHECK_EQ(1503, result->Int32Value());
result = CompileRun("pixels[1]");
CHECK_EQ(1, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = -i;"
+ "}"
+ "sum;");
+ CHECK_EQ(-28, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 0;"
+ "}"
+ "sum;");
+ CHECK_EQ(0, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 255;"
+ "}"
+ "sum;");
+ CHECK_EQ(8 * 255, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 256 + i;"
+ "}"
+ "sum;");
+ CHECK_EQ(2076, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = i;"
+ "}"
+ "sum;");
+ CHECK_EQ(28, result->Int32Value());
+
result = CompileRun("var sum = 0;"
"for (var i = 0; i < 8; i++) {"
" sum += pixels[i];"
CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());
+ result = CompileRun("pixels[1] = 23;");
+ CHECK_EQ(23, result->Int32Value());
+
free(pixel_data);
}
}
-// Test that idle notification can be handled when V8 has not yet been
-// set up.
+// Test that idle notification can be handled and eventually returns true.
THREADED_TEST(IdleNotification) {
- for (int i = 0; i < 100; i++) v8::V8::IdleNotification(true);
- for (int i = 0; i < 100; i++) v8::V8::IdleNotification(false);
+ bool rv = false;
+ for (int i = 0; i < 100; i++) {
+ rv = v8::V8::IdleNotification();
+ if (rv)
+ break;
+ }
+ CHECK(rv == true);
+}
+
+
+static uint32_t* stack_limit;
+
+static v8::Handle<Value> GetStackLimitCallback(const v8::Arguments& args) {
+ stack_limit = reinterpret_cast<uint32_t*>(i::StackGuard::climit());
+ return v8::Undefined();
+}
+
+
+// Uses the address of a local variable to determine the stack top now.
+// Given a size, returns an address that is that far from the current
+// top of stack.
+static uint32_t* ComputeStackLimit(uint32_t size) {
+ uint32_t* answer = &size - (size / sizeof(size));
+ // If the size is very large and the stack is very near the bottom of
+ // memory then the calculation above may wrap around and give an address
+ // that is above the (downwards-growing) stack. In that case we return
+ // a very low address.
+ if (answer > &size) return reinterpret_cast<uint32_t*>(sizeof(size));
+ return answer;
+}
+
+
+TEST(SetResourceConstraints) {
+ static const int K = 1024;
+ uint32_t* set_limit = ComputeStackLimit(128 * K);
+
+ // Set stack limit.
+ v8::ResourceConstraints constraints;
+ constraints.set_stack_limit(set_limit);
+ CHECK(v8::SetResourceConstraints(&constraints));
+
+ // Execute a script.
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(GetStackLimitCallback);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("get_stack_limit"), fun);
+ CompileRun("get_stack_limit();");
+
+ CHECK(stack_limit == set_limit);
+}
+
+
+TEST(SetResourceConstraintsInThread) {
+ uint32_t* set_limit;
+ {
+ v8::Locker locker;
+ static const int K = 1024;
+ set_limit = ComputeStackLimit(128 * K);
+
+ // Set stack limit.
+ v8::ResourceConstraints constraints;
+ constraints.set_stack_limit(set_limit);
+ CHECK(v8::SetResourceConstraints(&constraints));
+
+ // Execute a script.
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(GetStackLimitCallback);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("get_stack_limit"), fun);
+ CompileRun("get_stack_limit();");
+
+ CHECK(stack_limit == set_limit);
+ }
+ {
+ v8::Locker locker;
+ CHECK(stack_limit == set_limit);
+ }
}
// passed it throws an exception.
static const char* debugger_call_with_closure_source =
"var x = 3;"
- "function (exec_state) {"
+ "(function (exec_state) {"
" if (exec_state.y) return x - 1;"
" exec_state.y = x;"
" return exec_state.y"
- "}";
+ "})";
v8::Handle<v8::Function> debugger_call_with_closure;
// Function to retrieve the number of JavaScript frames by calling a JavaScript
// with the client connected.
ok = i::Debugger::StartAgent("test", kPort2);
CHECK(ok);
+ i::Debugger::WaitForAgent();
i::Socket* client = i::OS::CreateSocket();
ok = client->Connect("localhost", port2_str);
CHECK(ok);
}
-static JSObjectsCluster AddHeapObjectToTree(
- JSObjectsRetainerTree* tree,
- i::String* constructor,
- int instance,
- JSObjectsCluster* ref1 = NULL,
- JSObjectsCluster* ref2 = NULL,
- JSObjectsCluster* ref3 = NULL) {
+static JSObjectsCluster AddHeapObjectToTree(JSObjectsRetainerTree* tree,
+ i::String* constructor,
+ int instance,
+ JSObjectsCluster* ref1 = NULL,
+ JSObjectsCluster* ref2 = NULL,
+ JSObjectsCluster* ref3 = NULL) {
JSObjectsCluster o(constructor, reinterpret_cast<i::Object*>(instance));
JSObjectsClusterTree* o_tree = new JSObjectsClusterTree();
JSObjectsClusterTree::Locator o_loc;
}
+static void AddSelfReferenceToTree(JSObjectsRetainerTree* tree,
+ JSObjectsCluster* self_ref) {
+ JSObjectsRetainerTree::Locator loc;
+ CHECK(tree->Find(*self_ref, &loc));
+ JSObjectsClusterTree::Locator o_loc;
+ CHECK_NE(NULL, loc.value());
+ loc.value()->Insert(*self_ref, &o_loc);
+}
+
+
static inline void CheckEqualsHelper(const char* file, int line,
const char* expected_source,
const JSObjectsCluster& expected,
if (JSObjectsCluster::Compare(expected, value) == 0) {
i::HeapStringAllocator allocator;
i::StringStream stream(&allocator);
- stream.Add("# Expected: ");
+ stream.Add("# !Expected: ");
expected.DebugPrint(&stream);
stream.Add("\n# Found: ");
value.DebugPrint(&stream);
coarser.Process(&tree);
CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o11));
CHECK_EQ(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o12));
CHECK_EQ(coarser.GetCoarseEquivalent(o21), coarser.GetCoarseEquivalent(o22));
CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o21));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(p));
}
+TEST(ClustersCoarserSelf) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
+
+ JSObjectsRetainerTree tree;
+
+ // On the following graph:
+ //
+ // p (self-referencing)
+ // <- o1 <-
+ // q (self-referencing) o
+ // <- o2 <-
+ // r (self-referencing)
+ //
+ // we expect that coarser will deduce equivalences: p ~ q ~ r, o1 ~ o2;
+
+ JSObjectsCluster o =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100);
+ JSObjectsCluster o1 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x110, &o);
+ JSObjectsCluster o2 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x120, &o);
+ JSObjectsCluster p =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &o1);
+ AddSelfReferenceToTree(&tree, &p);
+ JSObjectsCluster q =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x310, &o1, &o2);
+ AddSelfReferenceToTree(&tree, &q);
+ JSObjectsCluster r =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x320, &o2);
+ AddSelfReferenceToTree(&tree, &r);
+
+ ClustersCoarser coarser;
+ coarser.Process(&tree);
+
+ CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o1));
+ CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
+ CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
+ CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
+ CHECK_NE(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(p));
+}
+
+
namespace {
class RetainerProfilePrinter : public RetainerHeapProfile::Printer {
}
RetainerProfilePrinter printer;
ret_profile.DebugPrintStats(&printer);
- CHECK_EQ("(global property);1,B;2,C;2", printer.GetRetainers("A"));
+ const char* retainers_of_a = printer.GetRetainers("A");
+ // The order of retainers is unspecified, so we check string length, and
+ // verify each retainer separately.
+ CHECK_EQ(static_cast<int>(strlen("(global property);1,B;2,C;2")),
+ static_cast<int>(strlen(retainers_of_a)));
+ CHECK(strstr(retainers_of_a, "(global property);1") != NULL);
+ CHECK(strstr(retainers_of_a, "B;2") != NULL);
+ CHECK(strstr(retainers_of_a, "C;2") != NULL);
CHECK_EQ("(global property);2", printer.GetRetainers("B"));
CHECK_EQ("(global property);1", printer.GetRetainers("C"));
}
} // namespace
TEST(ProfMultipleThreads) {
- // V8 needs to be initialized before the first Locker
- // instantiation. Otherwise, Top::Initialize will reset
- // thread_id_ in ThreadTopLocal.
- v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
-
LoopingJsThread jsThread;
jsThread.Start();
LoopingNonJsThread nonJsThread;
// Create the server socket and bind it to the requested port.
server_ = OS::CreateSocket();
+ server_->SetReuseAddress(true);
CHECK(server_ != NULL);
ok = server_->Bind(port_);
CHECK(ok);
Boolean: [ Boolean ],
Number: [ Number ],
Date: [ Date ],
- RegExp: [ RegExp ],
+ RegExp: [ RegExp ],
Error: [ Error, TypeError, RangeError, SyntaxError, ReferenceError, EvalError, URIError ]
}
for (f in funs) {
// Compile different sources.
compileSource('a=1');
-compileSource('function(){}');
+compileSource('(function(){})');
compileSource('eval("a=2")');
source_count++; // Using eval causes additional compilation event.
-compileSource('eval("eval(\'function(){return a;}\')")');
+compileSource('eval("eval(\'(function(){return a;})\')")');
source_count += 2; // Using eval causes additional compilation event.
compileSource('JSON.parse("{a:1,b:2}")');
source_count++; // Using JSON.parse causes additional compilation event.
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test that we get exceptions for invalid left-hand sides. Also
-// tests that if the invalid left-hand side is a function call, the
-// exception is delayed until runtime.
+// Test that we get exceptions for invalid left-hand sides. The
+// exceptions are delayed until runtime.
// Normal assignments:
assertThrows("12 = 12");
// Assignments to 'this'.
assertThrows("this = 42");
-assertThrows("function f() { this = 12; }");
-assertThrows("for (this in Array) ;");
+assertDoesNotThrow("function f() { this = 12; }");
+assertThrows("for (this in {x:3, y:4, z:5}) ;");
assertThrows("for (this = 0;;) ;");
assertThrows("this++");
assertThrows("++this");
assertThrows("this--");
assertThrows("--this");
-
-
// Test the script mirror for different functions.
testScriptMirror(function(){}, 'mirror-script.js', 100, 2, 0);
testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
-testScriptMirror(eval('function(){}'), null, 1, 2, 1, 'function(){}', 87);
-testScriptMirror(eval('function(){\n }'), null, 2, 2, 1, 'function(){\n }', 88);
+testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
+testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
testScriptMirror(%CompileString("({a:1,b:2})", true), null, 1, 2, 2, '({a:1,b:2})');
testScriptMirror(%CompileString("({a:1,\n b:2})", true), null, 2, 2, 2, '({a:1,\n b:2})');
// Test taking slices of source.
-var mirror = debug.MakeMirror(eval('function(){\n 1;\n}')).script();
-assertEquals('function(){\n', mirror.sourceSlice(0, 1).sourceText());
+var mirror = debug.MakeMirror(eval('(function(){\n 1;\n})')).script();
+assertEquals('(function(){\n', mirror.sourceSlice(0, 1).sourceText());
assertEquals(' 1;\n', mirror.sourceSlice(1, 2).sourceText());
-assertEquals('}', mirror.sourceSlice(2, 3).sourceText());
-assertEquals('function(){\n 1;\n', mirror.sourceSlice(0, 2).sourceText());
-assertEquals(' 1;\n}', mirror.sourceSlice(1, 3).sourceText());
-assertEquals('function(){\n 1;\n}', mirror.sourceSlice(0, 3).sourceText());
+assertEquals('})', mirror.sourceSlice(2, 3).sourceText());
+assertEquals('(function(){\n 1;\n', mirror.sourceSlice(0, 2).sourceText());
+assertEquals(' 1;\n})', mirror.sourceSlice(1, 3).sourceText());
+assertEquals('(function(){\n 1;\n})', mirror.sourceSlice(0, 3).sourceText());
function foo(f) { eval(f); }
// Ensure that compiling a declaration of a function does not crash.
-foo("function (x) { with ({x: []}) function x(){} }");
+foo("(function (x) { with ({x: []}) function x(){} })");
function makeVeryLong(length) {
- var res = "function() {\n" +
+ var res = "(function () {\n" +
" var res = 0;\n" +
" for (var i = 0; i <= " + length + "; i++) {\n" +
" switch(i) {\n";
" }\n" +
" }\n" +
" return res;\n" +
- "}";
+ "})";
return eval(res);
}
var verylong_size = 1000;
assertEquals(Object.keys(x), []);
assertEquals(Object.keys(function () {}), []);
+assertEquals('string', typeof(Object.keys([1])[0]));
+
function argsTest(a, b, c) {
assertEquals([0, 1, 2], Object.keys(arguments));
}
'-O3',
],
'conditions': [
- [ 'gcc_version=="44"', {
+ [ 'gcc_version==44', {
'cflags': [
# Avoid gcc 4.4 strict aliasing issues in dtoa.c
'-fno-strict-aliasing',
'../../src/builtins.cc',
'../../src/builtins.h',
'../../src/bytecodes-irregexp.h',
- '../../src/cfg.cc',
- '../../src/cfg.h',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
'../../src/checks.cc',
'../../src/arm/assembler-arm.cc',
'../../src/arm/assembler-arm.h',
'../../src/arm/builtins-arm.cc',
- '../../src/arm/cfg-arm.cc',
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
'../../src/ia32/assembler-ia32.cc',
'../../src/ia32/assembler-ia32.h',
'../../src/ia32/builtins-ia32.cc',
- '../../src/ia32/cfg-ia32.cc',
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
'../../src/x64/builtins-x64.cc',
- '../../src/x64/cfg-x64.cc',
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
return lines
-def CompressScript(lines, do_jsmin):
- # If we're not expecting this code to be user visible, we can run it through
- # a more aggressive minifier.
- if do_jsmin:
- return jsmin.jsmin(lines)
-
- # Remove stuff from the source that we don't want to appear when
- # people print the source code using Function.prototype.toString().
- # Note that we could easily compress the scripts mode but don't
- # since we want it to remain readable.
- lines = RemoveCommentsAndTrailingWhitespace(lines)
- return lines
-
-
def ReadFile(filename):
file = open(filename, "rt")
try:
# Build source code lines
source_lines = [ ]
+
+ minifier = jsmin.JavaScriptMinifier()
+
source_lines_empty = []
for module in modules:
filename = str(module)
delay = filename.endswith('-delay.js')
lines = ReadFile(filename)
- do_jsmin = lines.find('// jsminify this file, js2c: jsmin') != -1
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
Validate(lines, filename)
- lines = CompressScript(lines, do_jsmin)
+ lines = minifier.JSMinify(lines)
data = ToCArray(lines)
id = (os.path.split(filename)[1])[:-3]
if delay: id = id[:-6]
-#!/usr/bin/python\r
-\r
-# This code is original from jsmin by Douglas Crockford, it was translated to\r
-# Python by Baruch Even. The original code had the following copyright and\r
-# license.\r
-#\r
-# /* jsmin.c\r
-# 2007-05-22\r
-#\r
-# Copyright (c) 2002 Douglas Crockford (www.crockford.com)\r
-#\r
-# Permission is hereby granted, free of charge, to any person obtaining a copy of\r
-# this software and associated documentation files (the "Software"), to deal in\r
-# the Software without restriction, including without limitation the rights to\r
-# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\r
-# of the Software, and to permit persons to whom the Software is furnished to do\r
-# so, subject to the following conditions:\r
-#\r
-# The above copyright notice and this permission notice shall be included in all\r
-# copies or substantial portions of the Software.\r
-#\r
-# The Software shall be used for Good, not Evil.\r
-#\r
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
-# SOFTWARE.\r
-# */\r
-\r
-from StringIO import StringIO\r
-\r
-def jsmin(js):\r
- ins = StringIO(js)\r
- outs = StringIO()\r
- JavascriptMinify().minify(ins, outs)\r
- str = outs.getvalue()\r
- if len(str) > 0 and str[0] == '\n':\r
- str = str[1:]\r
- return str\r
-\r
-def isAlphanum(c):\r
- """return true if the character is a letter, digit, underscore,\r
- dollar sign, or non-ASCII character.\r
- """\r
- return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or\r
- (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\' or (c is not None and ord(c) > 126));\r
-\r
-class UnterminatedComment(Exception):\r
- pass\r
-\r
-class UnterminatedStringLiteral(Exception):\r
- pass\r
-\r
-class UnterminatedRegularExpression(Exception):\r
- pass\r
-\r
-class JavascriptMinify(object):\r
-\r
- def _outA(self):\r
- self.outstream.write(self.theA)\r
- def _outB(self):\r
- self.outstream.write(self.theB)\r
-\r
- def _get(self):\r
- """return the next character from stdin. Watch out for lookahead. If\r
- the character is a control character, translate it to a space or\r
- linefeed.\r
- """\r
- c = self.theLookahead\r
- self.theLookahead = None\r
- if c == None:\r
- c = self.instream.read(1)\r
- if c >= ' ' or c == '\n':\r
- return c\r
- if c == '': # EOF\r
- return '\000'\r
- if c == '\r':\r
- return '\n'\r
- return ' '\r
-\r
- def _peek(self):\r
- self.theLookahead = self._get()\r
- return self.theLookahead\r
-\r
- def _next(self):\r
- """get the next character, excluding comments. peek() is used to see\r
- if an unescaped '/' is followed by a '/' or '*'.\r
- """\r
- c = self._get()\r
- if c == '/' and self.theA != '\\':\r
- p = self._peek()\r
- if p == '/':\r
- c = self._get()\r
- while c > '\n':\r
- c = self._get()\r
- return c\r
- if p == '*':\r
- c = self._get()\r
- while 1:\r
- c = self._get()\r
- if c == '*':\r
- if self._peek() == '/':\r
- self._get()\r
- return ' '\r
- if c == '\000':\r
- raise UnterminatedComment()\r
-\r
- return c\r
-\r
- def _action(self, action):\r
- """do something! What you do is determined by the argument:\r
- 1 Output A. Copy B to A. Get the next B.\r
- 2 Copy B to A. Get the next B. (Delete A).\r
- 3 Get the next B. (Delete B).\r
- action treats a string as a single character. Wow!\r
- action recognizes a regular expression if it is preceded by ( or , or =.\r
- """\r
- if action <= 1:\r
- self._outA()\r
-\r
- if action <= 2:\r
- self.theA = self.theB\r
- if self.theA == "'" or self.theA == '"':\r
- while 1:\r
- self._outA()\r
- self.theA = self._get()\r
- if self.theA == self.theB:\r
- break\r
- if self.theA <= '\n':\r
- raise UnterminatedStringLiteral()\r
- if self.theA == '\\':\r
- self._outA()\r
- self.theA = self._get()\r
-\r
-\r
- if action <= 3:\r
- self.theB = self._next()\r
- if self.theB == '/' and (self.theA == '(' or self.theA == ',' or\r
- self.theA == '=' or self.theA == ':' or\r
- self.theA == '[' or self.theA == '?' or\r
- self.theA == '!' or self.theA == '&' or\r
- self.theA == '|' or self.theA == ';' or\r
- self.theA == '{' or self.theA == '}' or\r
- self.theA == '\n'):\r
- self._outA()\r
- self._outB()\r
- while 1:\r
- self.theA = self._get()\r
- if self.theA == '/':\r
- break\r
- elif self.theA == '\\':\r
- self._outA()\r
- self.theA = self._get()\r
- elif self.theA <= '\n':\r
- raise UnterminatedRegularExpression()\r
- self._outA()\r
- self.theB = self._next()\r
-\r
-\r
- def _jsmin(self):\r
- """Copy the input to the output, deleting the characters which are\r
- insignificant to JavaScript. Comments will be removed. Tabs will be\r
- replaced with spaces. Carriage returns will be replaced with linefeeds.\r
- Most spaces and linefeeds will be removed.\r
- """\r
- self.theA = '\n'\r
- self._action(3)\r
-\r
- while self.theA != '\000':\r
- if self.theA == ' ':\r
- if isAlphanum(self.theB):\r
- self._action(1)\r
- else:\r
- self._action(2)\r
- elif self.theA == '\n':\r
- if self.theB in ['{', '[', '(', '+', '-']:\r
- self._action(1)\r
- elif self.theB == ' ':\r
- self._action(3)\r
- else:\r
- if isAlphanum(self.theB):\r
- self._action(1)\r
- else:\r
- self._action(2)\r
- else:\r
- if self.theB == ' ':\r
- if isAlphanum(self.theA):\r
- self._action(1)\r
- else:\r
- self._action(3)\r
- elif self.theB == '\n':\r
- if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:\r
- self._action(1)\r
- else:\r
- if isAlphanum(self.theA):\r
- self._action(1)\r
- else:\r
- self._action(3)\r
- else:\r
- self._action(1)\r
-\r
- def minify(self, instream, outstream):\r
- self.instream = instream\r
- self.outstream = outstream\r
- self.theA = '\n'\r
- self.theB = None\r
- self.theLookahead = None\r
-\r
- self._jsmin()\r
- self.instream.close()\r
-\r
-if __name__ == '__main__':\r
- import sys\r
- jsm = JavascriptMinify()\r
- jsm.minify(sys.stdin, sys.stdout)\r
+#!/usr/bin/python2.4
+
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A JavaScript minifier.
+
+It is far from being a complete JS parser, so there are many valid
+JavaScript programs that will be ruined by it. Another strangeness is that
+it accepts $ and % as parts of identifiers. It doesn't merge lines or strip
+out blank lines in order to ease debugging. Variables at the top scope are
+properties of the global object so we can't rename them. It is assumed that
+you introduce variables with var as if JavaScript followed C++ scope rules
+around curly braces, so the declaration must be above the first use.
+
+Use as:
+import jsmin
+minifier = JavaScriptMinifier()
+program1 = minifier.JSMinify(program1)
+program2 = minifier.JSMinify(program2)
+"""
+
+import re
+
+
+class JavaScriptMinifier(object):
+ """An object that you can feed code snippets to to get them minified."""
+
+ def __init__(self):
+ # We prepopulate the list of identifiers that shouldn't be used. These
+ # short language keywords could otherwise be used by the script as variable
+ # names.
+ self.seen_identifiers = {"do": True, "in": True}
+ self.identifier_counter = 0
+ self.in_comment = False
+ self.map = {}
+ self.nesting = 0
+
+ def LookAtIdentifier(self, m):
+ """Records identifiers or keywords that we see in use.
+
+ (So we can avoid renaming variables to these strings.)
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ Nothing.
+ """
+ identifier = m.group(1)
+ self.seen_identifiers[identifier] = True
+
+ def Push(self):
+ """Called when we encounter a '{'."""
+ self.nesting += 1
+
+ def Pop(self):
+ """Called when we encounter a '}'."""
+ self.nesting -= 1
+ # We treat each top-level opening brace as a single scope that can span
+ # several sets of nested braces.
+ if self.nesting == 0:
+ self.map = {}
+ self.identifier_counter = 0
+
+ def Declaration(self, m):
+ """Rewrites bits of the program selected by a regexp.
+
+ These can be curly braces, literal strings, function declarations and var
+ declarations. (These last two must be on one line including the opening
+ curly brace of the function for their variables to be renamed).
+
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ The string that should replace the match in the rewritten program.
+ """
+ matched_text = m.group(0)
+ if matched_text == "{":
+ self.Push()
+ return matched_text
+ if matched_text == "}":
+ self.Pop()
+ return matched_text
+ if re.match("[\"'/]", matched_text):
+ return matched_text
+ m = re.match(r"var ", matched_text)
+ if m:
+ var_names = matched_text[m.end():]
+ var_names = re.split(r",", var_names)
+ return "var " + ",".join(map(self.FindNewName, var_names))
+ m = re.match(r"(function\b[^(]*)\((.*)\)\{$", matched_text)
+ if m:
+ up_to_args = m.group(1)
+ args = m.group(2)
+ args = re.split(r",", args)
+ self.Push()
+ return up_to_args + "(" + ",".join(map(self.FindNewName, args)) + "){"
+
+ if matched_text in self.map:
+ return self.map[matched_text]
+
+ return matched_text
+
+ def CharFromNumber(self, number):
+ """A single-digit base-52 encoding using a-zA-Z."""
+ if number < 26:
+ return chr(number + 97)
+ number -= 26
+ return chr(number + 65)
+
+ def FindNewName(self, var_name):
+ """Finds a new 1-character or 2-character name for a variable.
+
+ Enters it into the mapping table for this scope.
+
+ Args:
+ var_name: The name of the variable before renaming.
+
+ Returns:
+ The new name of the variable.
+ """
+ new_identifier = ""
+ # Variable names that end in _ are member variables of the global object,
+ # so they can be visible from code in a different scope. We leave them
+ # alone.
+ if var_name in self.map:
+ return self.map[var_name]
+ if self.nesting == 0:
+ return var_name
+ while True:
+ identifier_first_char = self.identifier_counter % 52
+ identifier_second_char = self.identifier_counter / 52
+ new_identifier = self.CharFromNumber(identifier_first_char)
+ if identifier_second_char != 0:
+ new_identifier = (
+ self.CharFromNumber(identifier_second_char - 1) + new_identifier)
+ self.identifier_counter += 1
+ if not new_identifier in self.seen_identifiers:
+ break
+
+ self.map[var_name] = new_identifier
+ return new_identifier
+
+ def RemoveSpaces(self, m):
+    """Returns literal strings unchanged, replaces other inputs with group 1.
+
+ Other inputs are replaced with the contents of capture 1. This is either
+ a single space or an empty string.
+
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ The string that should be inserted instead of the matched text.
+ """
+ entire_match = m.group(0)
+ replacement = m.group(1)
+ if re.match(r"'.*'$", entire_match):
+ return entire_match
+ if re.match(r'".*"$', entire_match):
+ return entire_match
+ if re.match(r"/.+/$", entire_match):
+ return entire_match
+ return replacement
+
+ def JSMinify(self, text):
+ """The main entry point. Takes a text and returns a compressed version.
+
+ The compressed version hopefully does the same thing. Line breaks are
+ preserved.
+
+ Args:
+ text: The text of the code snippet as a multiline string.
+
+ Returns:
+ The compressed text of the code snippet as a multiline string.
+ """
+ new_lines = []
+ for line in re.split(r"\n", text):
+ line = line.replace("\t", " ")
+ if self.in_comment:
+ m = re.search(r"\*/", line)
+ if m:
+ line = line[m.end():]
+ self.in_comment = False
+ else:
+ new_lines.append("")
+ continue
+
+ if not self.in_comment:
+ line = re.sub(r"/\*.*?\*/", " ", line)
+ line = re.sub(r"//.*", "", line)
+ m = re.search(r"/\*", line)
+ if m:
+ line = line[:m.start()]
+ self.in_comment = True
+
+ # Strip leading and trailing spaces.
+ line = re.sub(r"^ +", "", line)
+ line = re.sub(r" +$", "", line)
+ # A regexp that matches a literal string surrounded by "double quotes".
+ # This regexp can handle embedded backslash-escaped characters including
+ # embedded backslash-escaped double quotes.
+ double_quoted_string = r'"(?:[^"\\]|\\.)*"'
+      # A regexp that matches a literal string surrounded by 'single quotes'.
+ single_quoted_string = r"'(?:[^'\\]|\\.)*'"
+ # A regexp that matches a regexp literal surrounded by /slashes/.
+ slash_quoted_regexp = r"/(?:[^/\\]|\\.)+/"
+ # Replace multiple spaces with a single space.
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ "( )+"]),
+ self.RemoveSpaces,
+ line)
+ # Strip single spaces unless they have an identifier character both before
+ # and after the space. % and $ are counted as identifier characters.
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ r"(?<![a-zA-Z_0-9$%]) | (?![a-zA-Z_0-9$%])()"]),
+ self.RemoveSpaces,
+ line)
+ # Collect keywords and identifiers that are already in use.
+ if self.nesting == 0:
+ re.sub(r"([a-zA-Z0-9_$%]+)", self.LookAtIdentifier, line)
+ function_declaration_regexp = (
+ r"\bfunction" # Function definition keyword...
+ r"( [\w$%]+)?" # ...optional function name...
+ r"\([\w$%,]+\)\{") # ...argument declarations.
+ # Unfortunately the keyword-value syntax { key:value } makes the key look
+ # like a variable where in fact it is a literal string. We use the
+ # presence or absence of a question mark to try to distinguish between
+ # this case and the ternary operator: "condition ? iftrue : iffalse".
+ if re.search(r"\?", line):
+ block_trailing_colon = r""
+ else:
+ block_trailing_colon = r"(?![:\w$%])"
+      # Variable use. Cannot follow a period or precede a colon.
+ variable_use_regexp = r"(?<![.\w$%])[\w$%]+" + block_trailing_colon
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ r"\{", # Curly braces.
+ r"\}",
+ r"\bvar [\w$%,]+", # var declarations.
+ function_declaration_regexp,
+ variable_use_regexp]),
+ self.Declaration,
+ line)
+ new_lines.append(line)
+
+ return "\n".join(new_lines) + "\n"
>
</File>
<File
- RelativePath="..\..\src\ia32\cfg-ia32.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
>
</File>
<File
- RelativePath="..\..\src\arm\cfg-arm.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
>
</File>
<File
- RelativePath="..\..\src\x64\cfg-x64.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
require.async = requireAsync;
// create wrapper function
- var wrapper = "function (__module, __filename, exports, require) { " + content + "\n};";
+ var wrapper = "var __wrap__ = function (__module, __filename, exports, require) { "
+ + content
+ + "\n}; __wrap__;";
var compiled_wrapper = node.compile(wrapper, self.filename);
compiled_wrapper.apply(self.exports, [self, self.filename, self.exports, require]);
return ", ".join(result)
-def CompressScript(lines, do_jsmin):
- # If we're not expecting this code to be user visible, we can run it through
- # a more aggressive minifier.
- if do_jsmin:
- return jsmin.jsmin(lines)
-
- # Remove stuff from the source that we don't want to appear when
- # people print the source code using Function.prototype.toString().
- # Note that we could easily compress the scripts mode but don't
- # since we want it to remain readable.
- #lines = re.sub('//.*\n', '\n', lines) # end-of-line comments
- #lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
- #lines = re.sub('\s+\n+', '\n', lines) # trailing whitespace
+def RemoveCommentsAndTrailingWhitespace(lines):
+ lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
+ lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
+ lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
return lines
return string
+EVAL_PATTERN = re.compile(r'\beval\s*\(');
+WITH_PATTERN = re.compile(r'\bwith\s*\(');
+
+
+def Validate(lines, file):
+ lines = RemoveCommentsAndTrailingWhitespace(lines)
+ # Because of simplified context setup, eval and with is not
+ # allowed in the natives files.
+ eval_match = EVAL_PATTERN.search(lines)
+ if eval_match:
+ raise ("Eval disallowed in natives: %s" % file)
+ with_match = WITH_PATTERN.search(lines)
+ if with_match:
+ raise ("With statements disallowed in natives: %s" % file)
+
+
def ExpandConstants(lines, constants):
for key, value in constants.items():
lines = lines.replace(key, str(value))
args.append(mapping[arg])
return str(self.fun(*args))
-CONST_PATTERN = re.compile('^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
-MACRO_PATTERN = re.compile('^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
-PYTHON_MACRO_PATTERN = re.compile('^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
+CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
+MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
+PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
def ReadMacros(lines):
constants = { }
SOURCE_DECLARATION = """\
- static const char native_%(id)s[] = { %(data)s };
+ static const char %(id)s[] = { %(data)s };
"""
if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
"""
-def JS2C(source, target):
+def JS2C(source, target, env):
ids = []
delay_ids = []
modules = []
# Build source code lines
source_lines = [ ]
+
+ minifier = jsmin.JavaScriptMinifier()
+
source_lines_empty = []
- for s in modules:
- delay = str(s).endswith('-delay.js')
- lines = ReadFile(str(s))
- do_jsmin = lines.find('// jsminify this file, js2c: jsmin') != -1
+ for module in modules:
+ filename = str(module)
+ delay = filename.endswith('-delay.js')
+ lines = ReadFile(filename)
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
- lines = CompressScript(lines, do_jsmin)
+ Validate(lines, filename)
+ lines = minifier.JSMinify(lines)
data = ToCArray(lines)
- id = (os.path.split(str(s))[1])[:-3]
+ id = (os.path.split(filename)[1])[:-3]
if delay: id = id[:-6]
if delay:
delay_ids.append((id, len(lines)))
ids.append((id, len(lines)))
source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
-
+
# Build delay support functions
get_index_cases = [ ]
get_script_source_cases = [ ]
'source_lines': "\n".join(source_lines),
'get_index_cases': "".join(get_index_cases),
'get_script_source_cases': "".join(get_script_source_cases),
- 'get_script_name_cases': "".join(get_script_name_cases)
+ 'get_script_name_cases': "".join(get_script_name_cases),
+ 'type': env['TYPE']
})
output.close()
'source_lines': "\n".join(source_lines_empty),
'get_index_cases': "".join(get_index_cases),
'get_script_source_cases': "".join(get_script_source_cases),
- 'get_script_name_cases': "".join(get_script_name_cases)
+ 'get_script_name_cases': "".join(get_script_name_cases),
+ 'type': env['TYPE']
})
output.close()
+
+def main():
+ natives = sys.argv[1]
+ natives_empty = sys.argv[2]
+ type = sys.argv[3]
+ source_files = sys.argv[4:]
+ JS2C(source_files, [natives, natives_empty], { 'TYPE': type })
+
+if __name__ == "__main__":
+ main()