+2008-07-30: Version 0.2.0 (129146)
+
+ Changed all text files to have native svn:eol-style.
+
+ Added a few samples and support for building them. The samples
+ include a simple shell that can be used to benchmark and test V8.
+
+ Changed V8::GetVersion to return the version as a string.
+
+ Added source for lazily loaded scripts to snapshots and made
+ serialization non-destructive.
+
+ Improved ARM support by fixing the write barrier code to use
+ aligned loads and stores and by removing a premature locals
+ optimization that relied on broken support for callee-saved
+ registers.
+
+ Refactored the code for marking live objects during garbage
+ collection and the code for allocating objects in paged
+ spaces. Introduced an abstraction for the map word of a heap-
+ allocated object and changed the memory allocator to allocate
+ executable memory only for spaces that may contain code objects.
+
+ Moved StringBuilder to utils.h and ScopedLock to platform.h, where
+ they can be used by debugging and logging modules. Added
+ thread-safe message queues for dealing with debugger events.
+
+ Fixed the source code reported by toString for certain builtin
+ empty functions and made sure that the prototype property of a
+ function is enumerable.
+
+ Improved performance of converting values to condition flags in
+ generated code.
+
+ Merged the disassembler-{arch}.cc files into a single disassembler.cc.
+
+
2008-07-28: Version 0.1.4 (128918)
Added support for storing JavaScript stack traces in a stack
def GuessToolchain(os):
tools = Environment()['TOOLS']
if 'gcc' in tools:
- if os == 'macos' and 'Kernel Version 8' in platform.version():
- return 'gcc-darwin'
- else:
- return 'gcc'
+ return 'gcc'
elif 'msvc' in tools:
return 'msvc'
else:
os_guess = GuessOS()
toolchain_guess = GuessToolchain(os_guess)
processor_guess = GuessProcessor()
- result.Add('mode', 'debug or release', 'release')
- result.Add('toolchain', 'the toolchain to use (gcc, gcc-darwin or msvc)', toolchain_guess)
- result.Add('os', 'the os to build for (linux, macos or win32)', os_guess)
- result.Add('processor', 'the processor to build for (arm or ia32)', processor_guess)
+ result.Add('mode', 'compilation mode (debug, release)', 'release')
+ result.Add('toolchain', 'the toolchain to use (gcc, msvc)', toolchain_guess)
+ result.Add('os', 'the os to build for (linux, macos, win32)', os_guess)
+ result.Add('processor', 'the processor to build for (arm, ia32)', processor_guess)
result.Add('snapshot', 'build using snapshots for faster start-up (on, off)', 'off')
result.Add('library', 'which type of library to produce (static, shared, default)', 'default')
+ result.Add('sample', 'build sample (process, shell)', '')
return result
def VerifyOptions(env):
if not env['mode'] in ['debug', 'release']:
Abort("Unknown build mode '%s'." % env['mode'])
- if not env['toolchain'] in ['gcc', 'gcc-darwin', 'msvc']:
+ if not env['toolchain'] in ['gcc', 'msvc']:
Abort("Unknown toolchain '%s'." % env['toolchain'])
if not env['os'] in ['linux', 'macos', 'win32']:
Abort("Unknown os '%s'." % env['os'])
Abort("Illegal value for option snapshot: '%s'." % env['snapshot'])
if not env['library'] in ['static', 'shared', 'default']:
Abort("Illegal value for option library: '%s'." % env['library'])
+ if not env['sample'] in ['', 'process', 'shell']:
+ Abort("Illegal value for option sample: '%s'." % env['sample'])
-def Start():
+def Build():
opts = GetOptions()
env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
use_snapshot = (env['snapshot'] == 'on')
library_type = env['library']
- env.SConscript(
+ # Build the object files by invoking SCons recursively.
+ object_files = env.SConscript(
join('src', 'SConscript'),
- build_dir=mode,
+ build_dir='build',
exports='toolchain arch os mode use_snapshot library_type',
duplicate=False
)
+ # Link the object files into a library.
+ if library_type == 'static':
+ library = env.StaticLibrary('v8', object_files)
+ elif library_type == 'shared':
+ # There seems to be a glitch in the way scons decides where to put
+ # PDB files when compiling using MSVC so we specify it manually.
+ # This should not affect any other platforms.
+ library = env.SharedLibrary('v8', object_files, PDB='v8.dll.pdb')
+ else:
+ library = env.Library('v8', object_files)
+
+ # Bail out if we're not building any sample.
+ sample = env['sample']
+ if not sample: return
+
+ # Build the sample.
+ env.Replace(CPPPATH='public')
+ object_path = join('build', 'samples', sample)
+ source_path = join('samples', sample + '.cc')
+ object = env.Object(object_path, source_path)
+ if toolchain == 'gcc':
+ env.Program(sample, [object, library], LIBS='pthread')
+ else:
+ env.Program(sample, [object, library], LIBS='WS2_32')
+
-Start()
+Build()
*/
namespace v8 {
-// Debug events which can occour in the V8 JavaScript engine.
+// Debug events which can occur in the V8 JavaScript engine.
enum DebugEvent {
Break = 1,
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
- AfterCompile = 5,
- PendingRequestProcessed = 6
+ AfterCompile = 5
};
*
* \param message the debug message
* \param length length of the message
+ * A DebugMessageHandler does not take possession of the message string,
+ * and must not rely on the data persisting after the handler returns.
*/
typedef void (*DebugMessageHandler)(const uint16_t* message, int length,
void* data);
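
// Illustrative sketch (editorial, not part of the header): since the handler
// does not own 'message', an embedder that needs the text after returning
// must copy it first, e.g.
//
//   void MyHandler(const uint16_t* message, int length, void* data) {
//     std::vector<uint16_t> copy(message, message + length);
//     // ... hand 'copy' to the embedder's own debugger thread ...
//   }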
* be careful to supply the length parameter.
* If it is not given, the function calls
* 'strlen' to determine the buffer length, it might be
- * wrong if '\0' character is in the 'data'.
+ * wrong if 'data' contains a null character.
*/
static Local<String> New(const char* data, int length = -1);
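
// For example (illustrative, not part of the header), a string with an
// embedded null must pass the length explicitly:
//
//   Local<String> with_null = String::New("a\0b", 3);  // three characters
//   Local<String> truncated = String::New("a\0b");     // strlen stops at '\0': "a"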
*/
static Local<String> NewExternal(ExternalAsciiStringResource* resource);
- /** Creates an undetectable string from the supplied character.*/
+ /** Creates an undetectable string from the supplied ascii or utf-8 data.*/
static Local<String> NewUndetectable(const char* data, int length = -1);
- /** Creates an undetectable string from the supplied unsigned integer.*/
+ /** Creates an undetectable string from the supplied utf-16 data.*/
static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
/**
};
-/**
- * Ignore
- */
-struct VersionInfo {
- int major, minor, build_major, build_minor, revision;
-};
-
// --- C o u n t e r s C a l l b a c k s
typedef int* (*CounterLookupCallback)(const wchar_t* name);
*/
static void SetFlagsFromString(const char* str, int length);
- /** Sets the version fields in the given VersionInfo struct.*/
- static void GetVersion(VersionInfo* info);
+ /** Get the version string. */
+ static const char* GetVersion();
/**
* Enables the host application to provide a mechanism for recording
*/
static bool Initialize();
+
+ /**
+ * Adjusts the amount of registered external memory.
+ * Returns the adjusted value.
+ * Used for triggering a global GC earlier than otherwise.
+ */
+ static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
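+
+ // Illustrative usage (editorial note, not part of this change; buffer_size
+ // is hypothetical): report external memory kept alive by a JavaScript
+ // object, and undo the adjustment when it is released:
+ //
+ //   V8::AdjustAmountOfExternalAllocatedMemory(buffer_size);
+ //   ...
+ //   V8::AdjustAmountOfExternalAllocatedMemory(-buffer_size);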
+
private:
V8();
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function Initialize() { }
+
+function Process(request) {
+ if (options.verbose) {
+ log("Processing " + request.host + request.path +
+ " from " + request.referrer + "@" + request.userAgent);
+ }
+ if (!output[request.host]) {
+ output[request.host] = 1;
+ } else {
+ output[request.host]++;
+ }
+}
+
+Initialize();
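+
+// Usage sketch (editorial; the invocation is illustrative): the 'process'
+// sample built above runs this script and passes key=value arguments
+// through the 'options' map, e.g.
+//
+//   ./process count-hosts.js verbose=true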
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <v8.h>
+#include <string>
+#include <map>
+
+using namespace std;
+using namespace v8;
+
+// These interfaces represent an existing request processing interface.
+// The idea is to imagine a real application that uses these interfaces
+// and then add scripting capabilities that allow you to interact with
+// the objects through JavaScript.
+
+/**
+ * A simplified http request.
+ */
+class HttpRequest {
+ public:
+ virtual ~HttpRequest() { }
+ virtual const string& Path() = 0;
+ virtual const string& Referrer() = 0;
+ virtual const string& Host() = 0;
+ virtual const string& UserAgent() = 0;
+};
+
+/**
+ * The abstract superclass of http request processors.
+ */
+class HttpRequestProcessor {
+ public:
+ virtual ~HttpRequestProcessor() { }
+
+ // Initialize this processor. The map contains options that control
+ // how requests should be processed.
+ virtual bool Initialize(map<string, string>* options,
+ map<string, string>* output) = 0;
+
+ // Process a single request.
+ virtual bool Process(HttpRequest* req) = 0;
+
+ static void Log(const char* event);
+};
+
+/**
+ * An http request processor that is scriptable using JavaScript.
+ */
+class JsHttpRequestProcessor : public HttpRequestProcessor {
+ public:
+
+ // Creates a new processor that processes requests by invoking the
+ // Process function of the JavaScript script given as an argument.
+ JsHttpRequestProcessor(Handle<String> script) : script_(script) { }
+ virtual ~JsHttpRequestProcessor();
+
+ virtual bool Initialize(map<string, string>* opts,
+ map<string, string>* output);
+ virtual bool Process(HttpRequest* req);
+
+ private:
+
+ // Execute the script associated with this processor and extract the
+ // Process function. Returns true if this succeeded, otherwise false.
+ bool ExecuteScript(Handle<String> script);
+
+ // Wrap the options and output maps in JavaScript objects and
+ // install them in the global namespace as 'options' and 'output'.
+ bool InstallMaps(map<string, string>* opts, map<string, string>* output);
+
+ // Constructs the template that describes the JavaScript wrapper
+ // type for requests.
+ static Handle<ObjectTemplate> MakeRequestTemplate();
+ static Handle<ObjectTemplate> MakeMapTemplate();
+
+ // Callbacks that access the individual fields of request objects.
+ static Handle<Value> GetPath(Local<String> name, const AccessorInfo& info);
+ static Handle<Value> GetReferrer(Local<String> name, const AccessorInfo& info);
+ static Handle<Value> GetHost(Local<String> name, const AccessorInfo& info);
+ static Handle<Value> GetUserAgent(Local<String> name, const AccessorInfo& info);
+
+ // Callbacks that access maps
+ static Handle<Value> MapGet(Local<String> name, const AccessorInfo& info);
+ static Handle<Value> MapSet(Local<String> name, Local<Value> value,
+ const AccessorInfo& info);
+
+ // Utility methods for wrapping C++ objects as JavaScript objects,
+ // and going back again.
+ static Handle<Object> WrapMap(map<string, string>* obj);
+ static map<string, string>* UnwrapMap(Handle<Object> obj);
+ static Handle<Object> WrapRequest(HttpRequest* obj);
+ static HttpRequest* UnwrapRequest(Handle<Object> obj);
+
+ Handle<String> script_;
+ Persistent<Context> context_;
+ Persistent<Function> process_;
+ static Persistent<ObjectTemplate> request_template_;
+ static Persistent<ObjectTemplate> map_template_;
+};
+
+// -------------------------
+// --- P r o c e s s o r ---
+// -------------------------
+
+
+static Handle<Value> LogCallback(const Arguments& args) {
+ if (args.Length() < 1) return v8::Undefined();
+ HandleScope scope;
+ Handle<Value> arg = args[0];
+ String::AsciiValue value(arg);
+ HttpRequestProcessor::Log(*value);
+ return v8::Undefined();
+}
+
+
+// Execute the script and fetch the Process method.
+bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
+ map<string, string>* output) {
+ // Create a handle scope to hold the temporary references.
+ HandleScope handle_scope;
+
+ // Create a template for the global object where we set the
+ // built-in global functions.
+ Handle<ObjectTemplate> global = ObjectTemplate::New();
+ global->Set(String::New("log"), FunctionTemplate::New(LogCallback));
+
+ // Each processor gets its own context so different processors
+ // don't affect each other (ignore the first three lines).
+ Handle<Context> context = Context::New(NULL, global);
+
+ // Store the context in the processor object in a persistent handle,
+ // since we want the reference to remain after we return from this
+ // method.
+ context_ = Persistent<Context>::New(context);
+
+ // Enter the new context so all the following operations take place
+ // within it.
+ Context::Scope context_scope(context);
+
+ // Make the options mapping available within the context
+ if (!InstallMaps(opts, output))
+ return false;
+
+ // Compile and run the script
+ if (!ExecuteScript(script_))
+ return false;
+
+ // The script compiled and ran correctly. Now we fetch out the
+ // Process function from the global object.
+ Handle<String> process_name = String::New("Process");
+ Handle<Value> process_val = context->Global()->Get(process_name);
+
+ // If there is no Process function, or if it is not a function,
+ // bail out
+ if (!process_val->IsFunction()) return false;
+
+ // It is a function; cast it to a Function
+ Handle<Function> process_fun = Handle<Function>::Cast(process_val);
+
+ // Store the function in a Persistent handle, since we also want
+ // that to remain after this call returns
+ process_ = Persistent<Function>::New(process_fun);
+
+ // All done; all went well
+ return true;
+}
+
+
+bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
+ HandleScope handle_scope;
+
+ // We're just about to compile the script; set up an error handler to
+ // catch any exceptions the script might throw.
+ TryCatch try_catch;
+
+ // Compile the script and check for errors.
+ Handle<Script> compiled_script = Script::Compile(script);
+ if (compiled_script.IsEmpty()) {
+ String::AsciiValue error(try_catch.Exception());
+ Log(*error);
+ // The script failed to compile; bail out.
+ return false;
+ }
+
+ // Run the script!
+ Handle<Value> result = compiled_script->Run();
+ if (result.IsEmpty()) {
+ // The TryCatch above is still in effect and will have caught the error.
+ String::AsciiValue error(try_catch.Exception());
+ Log(*error);
+ // Running the script failed; bail out.
+ return false;
+ }
+ return true;
+}
+
+
+bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
+ map<string, string>* output) {
+ HandleScope handle_scope;
+
+ // Wrap the map object in a JavaScript wrapper
+ Handle<Object> opts_obj = WrapMap(opts);
+
+ // Set the options object as a property on the global object.
+ context_->Global()->Set(String::New("options"), opts_obj);
+
+ Handle<Object> output_obj = WrapMap(output);
+ context_->Global()->Set(String::New("output"), output_obj);
+
+ return true;
+}
+
+
+bool JsHttpRequestProcessor::Process(HttpRequest* request) {
+ // Create a handle scope to keep the temporary object references.
+ HandleScope handle_scope;
+
+ // Enter this processor's context so all the remaining operations
+ // take place there
+ Context::Scope context_scope(context_);
+
+ // Wrap the C++ request object in a JavaScript wrapper
+ Handle<Object> request_obj = WrapRequest(request);
+
+ // Set up an exception handler before calling the Process function
+ TryCatch try_catch;
+
+ // Invoke the process function, giving the global object as 'this'
+ // and one argument, the request.
+ const int argc = 1;
+ Handle<Value> argv[argc] = { request_obj };
+ Handle<Value> result = process_->Call(context_->Global(), argc, argv);
+ if (result.IsEmpty()) {
+ String::AsciiValue error(try_catch.Exception());
+ Log(*error);
+ return false;
+ } else {
+ return true;
+ }
+}
+
+
+JsHttpRequestProcessor::~JsHttpRequestProcessor() {
+ // Dispose the persistent handles. When no one else has any
+ // references to the objects stored in the handles they will be
+ // automatically reclaimed.
+ context_.Dispose();
+ process_.Dispose();
+}
+
+
+Persistent<ObjectTemplate> JsHttpRequestProcessor::request_template_;
+Persistent<ObjectTemplate> JsHttpRequestProcessor::map_template_;
+
+
+// -----------------------------------
+// --- A c c e s s i n g M a p s ---
+// -----------------------------------
+
+// Utility function that wraps a C++ map in a JavaScript object.
+Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
+ // Handle scope for temporary handles.
+ HandleScope handle_scope;
+
+ // Fetch the template for creating JavaScript map wrappers.
+ // It only has to be created once, which we do on demand.
+ if (map_template_.IsEmpty()) {
+ Handle<ObjectTemplate> raw_template = MakeMapTemplate();
+ map_template_ = Persistent<ObjectTemplate>::New(raw_template);
+ }
+ Handle<ObjectTemplate> templ = map_template_;
+
+ // Create an empty map wrapper.
+ Handle<Object> result = templ->NewInstance();
+
+ // Wrap the raw C++ pointer in an External so it can be referenced
+ // from within JavaScript.
+ Handle<External> map_ptr = External::New(obj);
+
+ // Store the map pointer in the JavaScript wrapper.
+ result->SetInternalField(0, map_ptr);
+
+ // Return the result through the current handle scope. Since each
+ // of these handles will go away when the handle scope is deleted
+ // we need to call Close to let one, the result, escape into the
+ // outer handle scope.
+ return handle_scope.Close(result);
+}
+
+
+// Utility function that extracts the C++ map pointer from a wrapper
+// object.
+map<string, string>* JsHttpRequestProcessor::UnwrapMap(Handle<Object> obj) {
+ Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
+ void* ptr = field->Value();
+ return static_cast<map<string, string>*>(ptr);
+}
+
+
+// Convert a JavaScript string to a std::string. To not bother too
+// much with string encodings we just use ascii.
+string ObjectToString(Local<Value> value) {
+ String::AsciiValue ascii_value(value);
+ return string(*ascii_value);
+}
+
+
+Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
+ const AccessorInfo& info) {
+ // Fetch the map wrapped by this object.
+ map<string, string>* obj = UnwrapMap(info.Holder());
+
+ // Convert the JavaScript string to a std::string.
+ string key = ObjectToString(name);
+
+ // Look up the value if it exists using the standard STL idiom.
+ map<string, string>::iterator iter = obj->find(key);
+
+ // If the key is not present return an empty handle as a signal.
+ if (iter == obj->end()) return Handle<Value>();
+
+ // Otherwise fetch the value and wrap it in a JavaScript string
+ const string& value = (*iter).second;
+ return String::New(value.c_str(), value.length());
+}
+
+
+Handle<Value> JsHttpRequestProcessor::MapSet(Local<String> name,
+ Local<Value> value_obj, const AccessorInfo& info) {
+ // Fetch the map wrapped by this object.
+ map<string, string>* obj = UnwrapMap(info.Holder());
+
+ // Convert the key and value to std::strings.
+ string key = ObjectToString(name);
+ string value = ObjectToString(value_obj);
+
+ // Update the map.
+ (*obj)[key] = value;
+
+ // Return the value; any non-empty handle will work.
+ return value_obj;
+}
+
+
+Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate() {
+ HandleScope handle_scope;
+
+ Handle<ObjectTemplate> result = ObjectTemplate::New();
+ result->SetInternalFieldCount(1);
+ result->SetNamedPropertyHandler(MapGet, MapSet);
+
+ // Again, return the result through the current handle scope.
+ return handle_scope.Close(result);
+}
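+
+// Note (editorial, not part of the original sample): with the named property
+// handler installed above, JavaScript reads and writes on a wrapped map,
+// such as output[request.host]++ in the script sample earlier, are routed
+// through MapGet and MapSet and operate on the underlying std::map.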
+
+
+// -------------------------------------------
+// --- A c c e s s i n g R e q u e s t s ---
+// -------------------------------------------
+
+/**
+ * Utility function that wraps a C++ http request object in a
+ * JavaScript object.
+ */
+Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
+ // Handle scope for temporary handles.
+ HandleScope handle_scope;
+
+ // Fetch the template for creating JavaScript http request wrappers.
+ // It only has to be created once, which we do on demand.
+ if (request_template_.IsEmpty()) {
+ Handle<ObjectTemplate> raw_template = MakeRequestTemplate();
+ request_template_ = Persistent<ObjectTemplate>::New(raw_template);
+ }
+ Handle<ObjectTemplate> templ = request_template_;
+
+ // Create an empty http request wrapper.
+ Handle<Object> result = templ->NewInstance();
+
+ // Wrap the raw C++ pointer in an External so it can be referenced
+ // from within JavaScript.
+ Handle<External> request_ptr = External::New(request);
+
+ // Store the request pointer in the JavaScript wrapper.
+ result->SetInternalField(0, request_ptr);
+
+ // Return the result through the current handle scope. Since each
+ // of these handles will go away when the handle scope is deleted
+ // we need to call Close to let one, the result, escape into the
+ // outer handle scope.
+ return handle_scope.Close(result);
+}
+
+
+/**
+ * Utility function that extracts the C++ http request object from a
+ * wrapper object.
+ */
+HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Handle<Object> obj) {
+ Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
+ void* ptr = field->Value();
+ return static_cast<HttpRequest*>(ptr);
+}
+
+
+Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
+ const AccessorInfo& info) {
+ // Extract the C++ request object from the JavaScript wrapper.
+ HttpRequest* request = UnwrapRequest(info.Holder());
+
+ // Fetch the path.
+ const string& path = request->Path();
+
+ // Wrap the result in a JavaScript string and return it.
+ return String::New(path.c_str(), path.length());
+}
+
+
+Handle<Value> JsHttpRequestProcessor::GetReferrer(Local<String> name,
+ const AccessorInfo& info) {
+ HttpRequest* request = UnwrapRequest(info.Holder());
+ const string& path = request->Referrer();
+ return String::New(path.c_str(), path.length());
+}
+
+
+Handle<Value> JsHttpRequestProcessor::GetHost(Local<String> name,
+ const AccessorInfo& info) {
+ HttpRequest* request = UnwrapRequest(info.Holder());
+ const string& path = request->Host();
+ return String::New(path.c_str(), path.length());
+}
+
+
+Handle<Value> JsHttpRequestProcessor::GetUserAgent(Local<String> name,
+ const AccessorInfo& info) {
+ HttpRequest* request = UnwrapRequest(info.Holder());
+ const string& path = request->UserAgent();
+ return String::New(path.c_str(), path.length());
+}
+
+
+Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate() {
+ HandleScope handle_scope;
+
+ Handle<ObjectTemplate> result = ObjectTemplate::New();
+ result->SetInternalFieldCount(1);
+
+ // Add accessors for each of the fields of the request.
+ result->SetAccessor(String::NewSymbol("path"), GetPath);
+ result->SetAccessor(String::NewSymbol("referrer"), GetReferrer);
+ result->SetAccessor(String::NewSymbol("host"), GetHost);
+ result->SetAccessor(String::NewSymbol("userAgent"), GetUserAgent);
+
+ // Again, return the result through the current handle scope.
+ return handle_scope.Close(result);
+}
+
+
+// --- Test ---
+
+
+void HttpRequestProcessor::Log(const char* event) {
+ printf("Logged: %s\n", event);
+}
+
+
+/**
+ * A simplified http request.
+ */
+class StringHttpRequest : public HttpRequest {
+ public:
+ StringHttpRequest(const string& path, const string& referrer,
+ const string& host, const string& user_agent);
+ virtual const string& Path() { return path_; }
+ virtual const string& Referrer() { return referrer_; }
+ virtual const string& Host() { return host_; }
+ virtual const string& UserAgent() { return user_agent_; }
+ private:
+ string path_;
+ string referrer_;
+ string host_;
+ string user_agent_;
+};
+
+
+StringHttpRequest::StringHttpRequest(const string& path,
+ const string& referrer, const string& host, const string& user_agent)
+ : path_(path),
+ referrer_(referrer),
+ host_(host),
+ user_agent_(user_agent) { }
+
+
+void ParseOptions(int argc, char* argv[], map<string, string>& options,
+ string* file) {
+ for (int i = 1; i < argc; i++) {
+ string arg = argv[i];
+ string::size_type index = arg.find('=', 0);
+ if (index == string::npos) {
+ *file = arg;
+ } else {
+ string key = arg.substr(0, index);
+ string value = arg.substr(index+1);
+ options[key] = value;
+ }
+ }
+}
+
+
+// Reads a file into a v8 string.
+Handle<String> ReadFile(const string& name) {
+ FILE* file = fopen(name.c_str(), "rb");
+ if (file == NULL) return Handle<String>();
+
+ fseek(file, 0, SEEK_END);
+ long size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size; ) {
+ int read = fread(&chars[i], 1, size - i, file);
+ i += read;
+ }
+ fclose(file);
+ Handle<String> result = String::New(chars, size);
+ delete[] chars;
+ return result;
+}
+
+
+const int kSampleSize = 6;
+StringHttpRequest kSampleRequests[kSampleSize] = {
+ StringHttpRequest("/process.cc", "localhost", "google.com", "firefox"),
+ StringHttpRequest("/", "localhost", "google.net", "firefox"),
+ StringHttpRequest("/", "localhost", "google.org", "safari"),
+ StringHttpRequest("/", "localhost", "yahoo.com", "ie"),
+ StringHttpRequest("/", "localhost", "yahoo.com", "safari"),
+ StringHttpRequest("/", "localhost", "yahoo.com", "firefox")
+};
+
+
+bool ProcessEntries(HttpRequestProcessor* processor, int count,
+ StringHttpRequest* reqs) {
+ for (int i = 0; i < count; i++) {
+ if (!processor->Process(&reqs[i]))
+ return false;
+ }
+ return true;
+}
+
+
+void PrintMap(map<string, string>& m) {
+ for (map<string, string>::iterator i = m.begin(); i != m.end(); i++) {
+ pair<string, string> entry = *i;
+ printf("%s: %s\n", entry.first.c_str(), entry.second.c_str());
+ }
+}
+
+
+int main(int argc, char* argv[]) {
+ map<string, string> options;
+ string file;
+ ParseOptions(argc, argv, options, &file);
+ if (file.empty()) {
+ fprintf(stderr, "No script was specified.\n");
+ return 1;
+ }
+ HandleScope scope;
+ Handle<String> source = ReadFile(file);
+ if (source.IsEmpty()) {
+ fprintf(stderr, "Error reading '%s'.\n", file.c_str());
+ return 1;
+ }
+ JsHttpRequestProcessor processor(source);
+ map<string, string> output;
+ if (!processor.Initialize(&options, &output)) {
+ fprintf(stderr, "Error initializing processor.\n");
+ return 1;
+ }
+ if (!ProcessEntries(&processor, kSampleSize, kSampleRequests))
+ return 1;
+ PrintMap(output);
+}
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <v8.h>
+#include <cstring>
+#include <cstdio>
+
+
+void RunShell(v8::Handle<v8::Context> context);
+bool ExecuteString(v8::Handle<v8::String> source);
+v8::Handle<v8::Value> Print(const v8::Arguments& args);
+v8::Handle<v8::String> ReadFile(const char* name);
+
+
+int main(int argc, char* argv[]) {
+ v8::HandleScope handle_scope;
+ // Create a template for the global object.
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+ // Bind the global 'print' function to the C++ Print callback.
+ global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+ // Create a new execution environment containing the 'print' function.
+ v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
+ // Enter the newly created execution environment.
+ v8::Context::Scope context_scope(context);
+ bool run_shell = (argc == 1);
+ for (int i = 1; i < argc; i++) {
+ const char* str = argv[i];
+ if (strcmp(str, "--shell") == 0) {
+ run_shell = true;
+ } else {
+ v8::HandleScope handle_scope;
+ v8::Handle<v8::String> source = ReadFile(str);
+ if (source.IsEmpty()) {
+ printf("Error reading '%s'\n", str);
+ return 1;
+ }
+ if (!ExecuteString(source))
+ return 1;
+ }
+ }
+ if (run_shell) RunShell(context);
+ return 0;
+}
+
+
+// The callback that is invoked by v8 whenever the JavaScript 'print'
+// function is called. Prints its arguments on stdout separated by
+// spaces and ending with a newline.
+v8::Handle<v8::Value> Print(const v8::Arguments& args) {
+ bool first = true;
+ for (int i = 0; i < args.Length(); i++) {
+ v8::HandleScope handle_scope;
+ if (first) first = false;
+ else printf(" ");
+ v8::String::AsciiValue str(args[i]);
+ printf("%s", *str);
+ }
+ printf("\n");
+ return v8::Undefined();
+}
+
+
+// Reads a file into a v8 string.
+v8::Handle<v8::String> ReadFile(const char* name) {
+ FILE* file = fopen(name, "rb");
+ if (file == NULL) return v8::Handle<v8::String>();
+
+ fseek(file, 0, SEEK_END);
+ long size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size; ) {
+ int read = fread(&chars[i], 1, size - i, file);
+ i += read;
+ }
+ fclose(file);
+ v8::Handle<v8::String> result = v8::String::New(chars, size);
+ delete[] chars;
+ return result;
+}
+
+
+// The read-eval-execute loop of the shell.
+void RunShell(v8::Handle<v8::Context> context) {
+ printf("V8 version %s\n", v8::V8::GetVersion());
+ static const int kBufferSize = 256;
+ while (true) {
+ char buffer[kBufferSize];
+ printf("> ");
+ char* str = fgets(buffer, kBufferSize, stdin);
+ if (str == NULL) break;
+ v8::HandleScope handle_scope;
+ ExecuteString(v8::String::New(str));
+ }
+ printf("\n");
+}
+
+
+// Executes a string within the current v8 context.
+bool ExecuteString(v8::Handle<v8::String> source) {
+ v8::HandleScope handle_scope;
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Script> script = v8::Script::Compile(source);
+ if (script.IsEmpty()) {
+ // Print errors that happened during compilation.
+ v8::String::AsciiValue error(try_catch.Exception());
+ printf("%s\n", *error);
+ return false;
+ } else {
+ v8::Handle<v8::Value> result = script->Run();
+ if (result.IsEmpty()) {
+ // Print errors that happened during execution.
+ v8::String::AsciiValue error(try_catch.Exception());
+ printf("%s\n", *error);
+ return false;
+ } else {
+ if (!result->IsUndefined()) {
+ // If all went well and the result wasn't undefined then print
+ // the returned value.
+ v8::String::AsciiValue str(result);
+ printf("%s\n", *str);
+ }
+ return true;
+ }
+ }
+}
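+
+
+// An illustrative session with the shell sample (editorial; exact output
+// depends on the build):
+//
+//   $ ./shell
+//   V8 version 0.2.0 (129146)
+//   > print(1 + 2)
+//   3
+//   > "hello" + ", " + "world"
+//   hello, world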
}
}
},
- 'gcc-darwin': {
- 'debug': {
- 'default': {
- 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
- 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
- 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
- 'DIALECTFLAGS': '-ansi',
- 'LIBS': 'pthread',
- 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
- },
- 'dtoa': {
- 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
- 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
- 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
- 'DIALECTFLAGS': '-ansi',
- 'LIBS': 'pthread',
- 'WARNINGFLAGS': '-Werror'
- },
- 'jscre': {
- 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
- 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
- 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
- 'DIALECTFLAGS': '-ansi',
- 'LIBS': 'pthread',
- 'WARNINGFLAGS': '-w'
- }
- },
- 'release': {
- 'default': {
- 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
- 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
- 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
- 'DIALECTFLAGS': '-ansi',
- 'LIBS': 'pthread',
- 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
- },
- 'dtoa': {
- 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
- 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
- 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
- 'DIALECTFLAGS': '-ansi',
- 'LIBS': 'pthread',
- 'WARNINGFLAGS': '-Werror'
- },
- 'jscre': {
- 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
- 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
- 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
- 'DIALECTFLAGS': '-ansi',
- 'LIBS': 'pthread',
- 'WARNINGFLAGS': '-w'
- }
- }
- },
'msvc': {
'debug': {
'default': {
counters.cc
dateparser.cc
debug.cc
+disassembler.cc
execution.cc
factory.cc
flags.cc
PLATFORM_DEPENDENT_SOURCES = {
- 'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc', 'disasm-arm.cc', 'disassembler-arm.cc', 'frames-arm.cc', 'ic-arm.cc', 'macro-assembler-arm.cc', 'simulator-arm.cc', 'stub-cache-arm.cc'],
- 'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc', 'cpu-ia32.cc', 'disasm-ia32.cc', 'disassembler-ia32.cc', 'frames-ia32.cc', 'ic-ia32.cc', 'macro-assembler-ia32.cc', 'simulator-ia32.cc', 'stub-cache-ia32.cc'],
+ 'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc', 'disasm-arm.cc', 'frames-arm.cc', 'ic-arm.cc', 'macro-assembler-arm.cc', 'simulator-arm.cc', 'stub-cache-arm.cc'],
+ 'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc', 'cpu-ia32.cc', 'disasm-ia32.cc', 'frames-ia32.cc', 'ic-ia32.cc', 'macro-assembler-ia32.cc', 'simulator-ia32.cc', 'stub-cache-ia32.cc'],
'os:linux': ['platform-linux.cc'],
'os:macos': ['platform-macos.cc'],
'os:win32': ['platform-win32.cc']
sys.exit(1)
-def BuildObject(env, input, **kw):
+def ConfigureObject(env, input, **kw):
if library_type == 'static':
return env.StaticObject(input, **kw)
elif library_type == 'shared':
return env.Object(input, **kw)
-def ConfigureBuild():
+def ConfigureObjectFiles():
env = Environment()
options = BUILD_OPTIONS_MAP[toolchain][mode]['default']
env.Replace(**options)
source_files += PLATFORM_DEPENDENT_SOURCES["os:%s" % os]
full_source_files = [s for s in source_files]
- # Combine the javascript library files into a single C++ file and
+ # Combine the JavaScript library files into a single C++ file and
# compile it.
library_files = [s for s in LIBRARY_FILES]
library_files.append('macros.py')
libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files)
- libraries_obj = BuildObject(env, libraries_src, CPPPATH=['.'])
+ libraries_obj = ConfigureObject(env, libraries_src, CPPPATH=['.'])
# Build JSCRE.
jscre_env = env.Copy()
jscre_options = BUILD_OPTIONS_MAP[toolchain][mode]['jscre']
jscre_env.Replace(**jscre_options)
jscre_files = [join('third_party', 'jscre', s) for s in JSCRE_FILES]
- jscre_obj = BuildObject(jscre_env, jscre_files)
+ jscre_obj = ConfigureObject(jscre_env, jscre_files)
# Build dtoa.
dtoa_env = env.Copy()
dtoa_options = BUILD_OPTIONS_MAP[toolchain][mode]['dtoa']
dtoa_env.Replace(**dtoa_options)
dtoa_files = ['dtoa-config.c']
- dtoa_obj = BuildObject(dtoa_env, dtoa_files)
+ dtoa_obj = ConfigureObject(dtoa_env, dtoa_files)
- full_source_objs = BuildObject(env, full_source_files)
+ full_source_objs = ConfigureObject(env, full_source_files)
non_snapshot_files = [jscre_obj, dtoa_obj, full_source_objs]
# Create snapshot if necessary.
- empty_snapshot_obj = BuildObject(env, 'snapshot-empty.cc')
+ empty_snapshot_obj = ConfigureObject(env, 'snapshot-empty.cc')
if use_snapshot:
mksnapshot_src = 'mksnapshot.cc'
mksnapshot = env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
- snapshot_obj = BuildObject(env, snapshot_cc, CPPPATH=['.'])
- libraries_obj = BuildObject(env, libraries_empty_src, CPPPATH=['.'])
+ snapshot_obj = ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
+ libraries_obj = ConfigureObject(env, libraries_empty_src, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
- all_files = [non_snapshot_files, libraries_obj, snapshot_obj]
- if library_type == 'static':
- env.StaticLibrary('v8', all_files)
- elif library_type == 'shared':
- # There seems to be a glitch in the way scons decides where to put
- # .pdb files when compiling using msvc so we specify it manually.
- # This should not affect any other platforms.
- env.SharedLibrary('v8', all_files, PDB='v8.dll.pdb')
- else:
- env.Library('v8', all_files)
+ # Return all the object files needed to link the library.
+ return [non_snapshot_files, libraries_obj, snapshot_obj]
-ConfigureBuild()
+object_files = ConfigureObjectFiles()
+Return('object_files')
}
+const char* v8::V8::GetVersion() {
+ return "0.2.0 (129146)";
+}
+
+
Persistent<Context> v8::Context::New(v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
}
+int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+ if (IsDeadCheck("v8::V8::AdjustAmountOfExternalAllocatedMemory()")) return 0;
+ return i::Heap::AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
+}
+
+
void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
if (IsDeadCheck("v8::V8::SetGlobalGCPrologueCallback()")) return;
i::Heap::SetGlobalGCPrologueCallback(callback);
// Exception-generating instructions and debugging support
void Assembler::stop(const char* msg) {
+#if !defined(__arm__)
+ // The simulator handles these special instructions and stops execution.
emit(15 << 28 | ((intptr_t) msg));
+#else
+ // Just issue a simple break instruction for now. Alternatively we could use
+ // the swi(0x9f0001) instruction on Linux.
+ bkpt(0);
+#endif
}
// Exception-generating instructions and debugging support
void stop(const char* msg);
- void untested(const char* msg);
- void unimplemented(const char* msg);
- void unreachable(const char* msg);
void bkpt(uint32_t imm16); // v5 and above
void swi(uint32_t imm24, Condition cond = al);
- // To generate a breakpoint on ARM Linux you can use swi(0x9f0001).
- // For some reason stepi or cont will not work in gdb until you have done:
- // set $pc = $pc + 4
- inline void int3() { swi(0x9f0001); }
// Coprocessor instructions
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
+
void Assembler::set_target_address_at(Address pc, Address target) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
CPU::FlushICache(p, sizeof(int32_t));
}
+
+Displacement Assembler::disp_at(Label* L) {
+ return Displacement(long_at(L->pos()));
+}
+
+
+void Assembler::disp_at_put(Label* L, Displacement disp) {
+ long_at_put(L->pos(), disp.data());
+}
+
+
+void Assembler::emit_disp(Label* L, Displacement::Type type) {
+ Displacement disp(L, type);
+ L->link_to(pc_offset());
+ emit(static_cast<int>(disp.data()));
+}
+
+
void Operand::set_modrm(int mod, // reg == 0
Register rm) {
ASSERT((mod & -4) == 0);
// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 3 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....1|.......0|
-// [ next | type |
-
-class Displacement BASE_EMBEDDED {
- private:
- enum Type {
- UNCONDITIONAL_JUMP,
- OTHER
- };
-
- int data_;
-
- class TypeField: public BitField<Type, 0, 1> {};
- class NextField: public BitField<int, 1, 32-1> {};
-
- void init(Label* L, Type type) {
- ASSERT(!L->is_bound());
- int next = 0;
- if (L->is_linked()) {
- next = L->pos();
- ASSERT(next > 0); // Displacements must be at positions > 0
- }
- // Ensure that we _never_ overflow the next field.
- ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
- data_ = NextField::encode(next) | TypeField::encode(type);
- }
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
-
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- friend class Assembler;
- friend class MacroAssembler;
-};
-
-
-// TODO(1236137): Stop using macros here. The reason for using them is
-// to avoid declaring the Displacement class in the .h file and have
-// functions on the assembler that returns them. Maybe that's not a
-// big issue?
-#define disp_at(L) \
- Displacement(long_at((L)->pos()))
-
-#define disp_at_put(L, disp) \
- long_at_put((L)->pos(), (disp).data())
-
-#define emit_disp(L, type) { \
- Displacement disp((L), (type)); \
- (L)->link_to(pc_offset()); \
- emit(static_cast<int>(disp.data())); \
+// Implementation of Displacement
+
+void Displacement::init(Label* L, Type type) {
+ ASSERT(!L->is_bound());
+ int next = 0;
+ if (L->is_linked()) {
+ next = L->pos();
+ ASSERT(next > 0); // Displacements must be at positions > 0
}
+ // Ensure that we _never_ overflow the next field.
+ ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+ data_ = NextField::encode(next) | TypeField::encode(type);
+}
// -----------------------------------------------------------------------------
};
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32bit immediate field of an instruction which
+// may be used together with a Label in order to refer to a yet unknown code
+// position. Displacements stored in the instruction stream are used to describe
+// the instruction and to chain a list of instructions using the same Label.
+// A Displacement contains 2 different fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement _data field layout
+//
+// |31.....1|.......0|
+// [ next | type |
+
+class Displacement BASE_EMBEDDED {
+ public:
+ enum Type {
+ UNCONDITIONAL_JUMP,
+ OTHER
+ };
+
+ int data() const { return data_; }
+ Type type() const { return TypeField::decode(data_); }
+ void next(Label* L) const {
+ int n = NextField::decode(data_);
+ n > 0 ? L->link_to(n) : L->Unuse();
+ }
+ void link_to(Label* L) { init(L, type()); }
+
+ explicit Displacement(int data) { data_ = data; }
+
+ Displacement(Label* L, Type type) { init(L, type); }
+
+ void print() {
+ PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+ NextField::decode(data_));
+ }
+
+ private:
+ int data_;
+
+ class TypeField: public BitField<Type, 0, 1> {};
+ class NextField: public BitField<int, 1, 32-1> {};
+
+ void init(Label* L, Type type);
+};
+
+
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
// Example:
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
+ // displacements
+ inline Displacement disp_at(Label* L);
+ inline void disp_at_put(Label* L, Displacement disp);
+ inline void emit_disp(Label* L, Displacement::Type type);
+
// record reloc info for current pc_
void RecordRelocInfo(RelocMode rmode, intptr_t data = 0);
enum RelocMode {
- // Please note the order is important (see is_code_target).
+ // Please note the order is important (see is_code_target, is_gc_reloc_mode).
js_construct_call, // code target for a JavaScript construct call.
exit_js_frame, // code target that is an exit JavaScript frame stub.
code_target_context, // code target used for contextual loads.
// Pseudo-types
reloc_mode_count,
- last_code_enum = code_target
+ last_code_enum = code_target,
+ last_gced_enum = embedded_string
};
}
+// Is the relocation mode affected by GC?
+inline bool is_gc_reloc_mode(RelocMode mode) {
+ return mode <= last_gced_enum;
+}
+
+
inline bool is_js_return(RelocMode mode) {
return mode == js_return;
}
{ // --- E m p t y ---
Handle<Code> call_code =
Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
+ Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
empty_function->set_code(*call_code);
+ empty_function->shared()->set_script(*Factory::NewScript(source));
+ empty_function->shared()->set_start_position(0);
+ empty_function->shared()->set_end_position(source->length());
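+ // Editorial note: attaching a script and source range here is what lets
+ // toString report real source (roughly "() {}") for this builtin empty
+ // function, per the ChangeLog entry for this version.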
global_context()->function_map()->set_prototype(*empty_function);
global_context()->function_instance_map()->set_prototype(*empty_function);
void Genesis::MakeFunctionInstancePrototypeWritable() {
// Make a new function map so all future functions
- // will have settable prototype properties.
+ // will have settable and enumerable prototype properties.
HandleScope scope;
Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(false);
+ ComputeFunctionInstanceDescriptor(false, true);
Handle<Map> fm = Factory::CopyMap(Top::function_map());
fm->set_instance_descriptors(*function_map_descriptors);
Top::context()->global_context()->set_function_map(*fm);
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
- __ EnterJSFrame(0, 0);
+ __ EnterJSFrame(0);
// Allocate the new receiver object.
__ push(r0);
// Remove receiver from the stack, remove caller arguments, and
// return.
__ bind(&exit);
- __ ExitJSFrame(RETURN, 0);
+ __ ExitJSFrame(RETURN);
// Compute the offset from the beginning of the JSConstructCall
// builtin code object to the return address after the call.
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// TODO(1233523): Implement. Unused for now.
- __ int3();
+ __ stop("Builtins::Generate_FunctionApply");
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// TODO(1233523): Implement. Unused for now.
- __ int3();
+ __ stop("Builtins::Generate_ArgumentsAdaptorTrampoline");
Label return_site;
__ bind(&return_site);
__ mov(r2, Operand(cp)); // context to be saved
// push in reverse order: context (r2), args_len (r3), caller_pp, caller_fp,
- // sp_on_exit (ip == pp), return address, prolog_pc
+ // sp_on_exit (ip == pp), return address
__ stm(db_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit() |
- ip.bit() | lr.bit() | pc.bit());
+ ip.bit() | lr.bit());
// Setup new frame pointer.
__ add(fp, sp, Operand(-StandardFrameConstants::kContextOffset));
__ mov(pp, Operand(ip)); // setup new parameter pointer
// Generate the new code.
MacroAssembler masm(NULL, 256);
- bool needs_check_for_stub_calls = !AllowsStubCalls();
- if (needs_check_for_stub_calls) {
- // Nested stubs are not allowed for leafs.
- ASSERT(!masm.generating_stub());
- masm.set_generating_stub(true);
- }
+ // Nested stubs are not allowed for leafs.
+ masm.set_allow_stub_calls(AllowsStubCalls());
// Generate the code for the stub.
+ masm.set_generating_stub(true);
Generate(&masm);
- if (needs_check_for_stub_calls) masm.set_generating_stub(false);
-
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
- // Copy the generated code into a heap object.
- // TODO(1238541): Simplify this somewhat complicated encoding.
- CodeStub::Major major = MajorKey();
- // Lower three bits in state field.
- InlineCacheState state = static_cast<InlineCacheState>(major & 0x07);
- // Upper two bits in type field.
- PropertyType type = static_cast<PropertyType>((major >> 3) & 0x03);
- // Compute flags with state and type used to hold majr key.
- Code::Flags flags = Code::ComputeFlags(Code::STUB, state, type);
-
+ // Copy the generated code into a heap object, and store the major key.
+ Code::Flags flags = Code::ComputeFlags(Code::STUB);
Handle<Code> code = Factory::NewCode(desc, NULL, flags);
+ code->set_major_key(MajorKey());
// Add unresolved entries in the code to the fixup list.
Bootstrapper::AddFixup(*code, &masm);
switch (major_key) {
case CallFunction:
return "CallFunction";
- case InlinedGenericOp:
- return "InlinedGenericOp";
+ case GenericBinaryOp:
+ return "GenericBinaryOp";
case SmiOp:
return "SmiOp";
case Compare:
return "Compare";
case RecordWrite:
return "RecordWrite";
- case GenericOp:
- return "GenericOp";
case StackCheck:
return "StackCheck";
case UnarySub:
return "UnarySub";
case RevertToNumber:
return "RevertToNumber";
+ case ToBoolean:
+ return "ToBoolean";
case CounterOp:
return "CounterOp";
case ArgumentsAccess:
public:
enum Major {
CallFunction,
- InlinedGenericOp,
+ GenericBinaryOp,
SmiOp,
Compare,
RecordWrite, // Last stub that allows stub calls inside.
- GenericOp,
StackCheck,
UnarySub,
RevertToNumber,
+ ToBoolean,
CounterOp,
ArgumentsAccess,
Runtime,
namespace v8 { namespace internal {
-DEFINE_bool(optimize_locals, true,
- "optimize locals by allocating them in registers");
DEFINE_bool(trace, false, "trace function calls");
DECLARE_bool(debug_info);
DECLARE_bool(debug_code);
-DECLARE_bool(optimize_locals);
#ifdef DEBUG
DECLARE_bool(gc_greedy);
Scope* scope_;
Condition cc_reg_;
CodeGenState* state_;
- RegList reg_locals_; // the list of registers used to hold locals
- int num_reg_locals_; // the number of registers holding locals
int break_stack_height_;
// Labels
MemOperand FunctionOperand() const { return ParameterOperand(-2); }
- Register SlotRegister(int slot_index);
MemOperand SlotOperand(Slot* slot, Register tmp);
void LoadCondition(Expression* x, CodeGenState::AccessType access,
void AccessReferenceProperty(Expression* key,
CodeGenState::AccessType access);
- void GenericOperation(Token::Value op);
+ void GenericBinaryOperation(Token::Value op);
void Comparison(Condition cc, bool strict = false);
void SmiOperation(Token::Value op, Handle<Object> value, bool reversed);
void RecordStatementPosition(Node* node);
// Activation frames
- void EnterJSFrame(int argc, RegList callee_saved); // preserves r1
- void ExitJSFrame(RegList callee_saved,
- ExitJSFlag flag = RETURN); // preserves r0-r2
+ void EnterJSFrame(int argc); // preserves r1
+ void ExitJSFrame(ExitJSFlag flag = RETURN); // preserves r0-r2
virtual void GenerateShiftDownAndTailCall(ZoneList<Expression*>* args);
virtual void GenerateSetThisFunction(ZoneList<Expression*>* args);
virtual void GenerateValueOf(ZoneList<Expression*>* args);
virtual void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+ virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
};
state_ = &state;
scope_ = scope;
cc_reg_ = al;
- if (FLAG_optimize_locals) {
- num_reg_locals_ = scope->num_stack_slots() < kNumJSCalleeSaved
- ? scope->num_stack_slots()
- : kNumJSCalleeSaved;
- reg_locals_ = JSCalleeSavedList(num_reg_locals_);
- } else {
- num_reg_locals_ = 0;
- reg_locals_ = 0;
- }
// Entry
// stack: function, receiver, arguments, return address
// cp: callee's context
{ Comment cmnt(masm_, "[ enter JS frame");
- EnterJSFrame(scope->num_parameters(), reg_locals_);
+ EnterJSFrame(scope->num_parameters());
}
// tos: code slot
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ bkpt(0); // not supported before v5, but illegal instruction works too
+ __ stop("stop-at");
}
#endif
// Allocate space for locals and initialize them.
- if (scope->num_stack_slots() > num_reg_locals_) {
+ if (scope->num_stack_slots() > 0) {
Comment cmnt(masm_, "[ allocate space for locals");
// Pushing the first local materializes the code slot on the stack
// (formerly stored in tos register r0).
__ Push(Operand(Factory::undefined_value()));
// The remaining locals are pushed using the fact that r0 (tos)
// already contains the undefined value.
- for (int i = scope->num_stack_slots(); i-- > num_reg_locals_ + 1;) {
+ for (int i = 1; i < scope->num_stack_slots(); i++) {
__ push(r0);
}
}
- // Initialize locals allocated in registers
- if (num_reg_locals_ > 0) {
- if (scope->num_stack_slots() > num_reg_locals_) {
- // r0 contains 'undefined'
- __ mov(SlotRegister(0), Operand(r0));
- } else {
- __ mov(SlotRegister(0), Operand(Factory::undefined_value()));
- }
- for (int i = num_reg_locals_ - 1; i > 0; i--) {
- __ mov(SlotRegister(i), Operand(SlotRegister(0)));
- }
- }
if (scope->num_heap_slots() > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
__ Push(FunctionOperand());
- __ CallRuntime(Runtime::kNewContext, 2);
+ __ CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
+
+ if (kDebug) {
+ Label verified_true;
+ __ cmp(r0, Operand(cp));
+ __ b(eq, &verified_true);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ __ bind(&verified_true);
+ }
+ __ pop(r0); // restore TOS
// Update context local.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Push(Operand(Factory::undefined_value()));
__ bind(&function_return_);
if (FLAG_trace) __ CallRuntime(Runtime::kTraceExit, 1);
- ExitJSFrame(reg_locals_);
+ ExitJSFrame();
// Code generation state must be reset.
scope_ = NULL;
}
-Register ArmCodeGenerator::SlotRegister(int slot_index) {
- Register reg;
- reg.code_ = JSCalleeSavedCode(slot_index);
- return reg;
-}
-
-
MemOperand ArmCodeGenerator::SlotOperand(Slot* slot, Register tmp) {
// Currently, this assertion will fail if we try to assign to
// a constant variable that is constant because it is read-only
case Slot::LOCAL: {
ASSERT(0 <= index &&
index < scope_->num_stack_slots() &&
- index >= num_reg_locals_);
+ index >= 0);
int local_offset = JavaScriptFrameConstants::kLocal0Offset -
- (index - num_reg_locals_) * kPointerSize;
+ index * kPointerSize;
return MemOperand(fp, local_offset);
}
}
-void GenericOpStub::Generate(MacroAssembler* masm) {
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ explicit GenericBinaryOpStub(Token::Value op) : op_(op) { }
+
+ private:
+ Token::Value op_;
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() { return static_cast<int>(op_); }
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ default: return "GenericBinaryOpStub";
+ }
+ }
+
+#ifdef DEBUG
+ void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); }
+#endif
+};
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
switch (op_) {
case Token::ADD: {
Label slow, exit;
public:
enum Kind { Inc, Dec, ToNumber };
- JSExitStub(int num_callee_saved, RegList callee_saved, ExitJSFlag flag)
- : num_callee_saved_(num_callee_saved),
- callee_saved_(callee_saved),
- flag_(flag) { }
+ explicit JSExitStub(ExitJSFlag flag) : flag_(flag) { }
private:
- int num_callee_saved_;
- RegList callee_saved_;
ExitJSFlag flag_;
Major MajorKey() { return JSExit; }
- int MinorKey() { return (num_callee_saved_ << 3) | static_cast<int>(flag_); }
+ int MinorKey() { return static_cast<int>(flag_); }
void Generate(MacroAssembler* masm);
const char* GetName() { return "JSExitStub"; }
#ifdef DEBUG
void Print() {
- PrintF("JSExitStub (num_callee_saved %d, flag %d)\n",
- num_callee_saved_,
- static_cast<int>(flag_));
+ PrintF("JSExitStub flag %d)\n", static_cast<int>(flag_));
}
#endif
};
void JSExitStub::Generate(MacroAssembler* masm) {
- __ ExitJSFrame(flag_, callee_saved_);
+ __ ExitJSFrame(flag_);
masm->StubReturn(1);
}
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// r0 holds exception
ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
- if (FLAG_optimize_locals) {
- // Locals are allocated in callee-saved registers, so we need to restore
- // saved callee-saved registers by unwinding the stack
- static JSCalleeSavedBuffer regs;
- intptr_t arg0 = reinterpret_cast<intptr_t>(&regs);
- __ push(r0);
- __ mov(r0, Operand(arg0)); // exception in r0 (TOS) is pushed, r0 == arg0
- // Do not push a second C entry frame, but call directly
- __ Call(FUNCTION_ADDR(StackFrameIterator::RestoreCalleeSavedForTopHandler),
- runtime_entry); // passing r0
- // Frame::RestoreJSCalleeSaved returns arg0 (TOS)
- __ mov(r1, Operand(r0));
- __ pop(r0); // r1 holds arg0, r0 holds exception
- __ ldm(ia, r1, kJSCalleeSaved); // restore callee-saved registers
- }
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
__ pop(r2); // pop next in chain
__ mov(r3, Operand(Top::context_address()));
__ ldr(cp, MemOperand(r3));
__ mov(sp, Operand(fp)); // respect ABI stack constraint
- __ ldm(ia, sp, kJSCalleeSaved | pp.bit() | fp.bit() | sp.bit() | pc.bit());
+ __ ldm(ia, sp, pp.bit() | fp.bit() | sp.bit() | pc.bit());
// check if we should retry or throw exception
Label retry;
// all JS callee-saved are saved and traversed by GC; push in reverse order:
// JS callee-saved, caller_pp, caller_fp, sp_on_exit (ip==pp), caller_pc
- __ stm(db_w, sp, kJSCalleeSaved | pp.bit() | fp.bit() | ip.bit() | lr.bit());
+ __ stm(db_w, sp, pp.bit() | fp.bit() | ip.bit() | lr.bit());
__ mov(fp, Operand(sp)); // setup new frame pointer
// Store the current context in top.
}
-void ArmCodeGenerator::GenericOperation(Token::Value op) {
+void ArmCodeGenerator::GenericBinaryOperation(Token::Value op) {
// Stub is entered with a call: 'return address' is in lr.
switch (op) {
case Token::ADD: // fall through.
case Token::SUB: // fall through.
case Token::MUL: {
- GenericOpStub stub(op);
+ GenericBinaryOpStub stub(op);
__ CallStub(&stub);
break;
}
__ mov(ip, Operand(value));
__ push(ip);
}
- GenericOperation(op);
+ GenericBinaryOperation(op);
break;
}
}
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void ArmCodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- int position) {
- Label fast, slow, exit;
+class CallFunctionStub: public CodeStub {
+ public:
+ explicit CallFunctionStub(int argc) : argc_(argc) {}
- // Push the arguments ("left-to-right") on the stack.
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+
+ const char* GetName() { return "CallFuntionStub"; }
+
+#if defined(DEBUG)
+ void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
+#endif // defined(DEBUG)
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+};
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
// Push the number of arguments.
- __ Push(Operand(args->length()));
+ masm->Push(Operand(argc_));
// Get the function to call from the stack.
- // +1 ~ receiver.
- __ ldr(r1, MemOperand(sp, (args->length() + 1) * kPointerSize));
+ // function, receiver [, arguments], argc_
+ masm->ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+ // Check that the function is really a JavaScript function.
+ masm->tst(r1, Operand(kSmiTagMask));
+ masm->b(eq, &slow);
+ // Get the map of the function object.
+ masm->ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ masm->ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ masm->cmp(r2, Operand(JS_FUNCTION_TYPE));
+ masm->b(ne, &slow);
+
+ // Fast-case: Invoke the function now.
+ masm->ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ masm->ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ masm->ldr(r1,
+ MemOperand(r1, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ masm->add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ masm->Jump(r1); // Callee will return to the original call site directly.
- // Check that the function really is a JavaScript function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &slow);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); // get the map
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_FUNCTION_TYPE));
- __ b(eq, &fast);
+ // Slow-case: Non-function called.
+ masm->bind(&slow);
+ masm->InvokeBuiltin("CALL_NON_FUNCTION", 0, JUMP_JS);
+}
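
// For reference, the fast case above follows the chain
// JSFunction -> SharedFunctionInfo -> Code; adding
// Code::kHeaderSize - kHeapObjectTag turns the tagged Code pointer into the
// address of its first instruction (a sketch of the assumed heap layout).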
- __ RecordPosition(position);
- // Slow-case: Non-function called.
- __ bind(&slow);
- __ InvokeBuiltin("CALL_NON_FUNCTION", 0, CALL_JS);
- __ b(&exit);
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void ArmCodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ for (int i = 0; i < args->length(); i++) Load(args->at(i));
- // Fast-case: Get the code from the function, call the first
- // instruction in it, and pop function.
- __ bind(&fast);
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r1, MemOperand(r1, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
- __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r1);
+ // Record the position for debugging purposes.
+ __ RecordPosition(position);
+
+ // Use the shared code stub to call the function.
+ CallFunctionStub call_function(args->length());
+ __ CallStub(&call_function);
// Restore context and pop function from the stack.
- __ bind(&exit);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ add(sp, sp, Operand(kPointerSize)); // discard
}
Comment cmnt(masm_, "[ WithEnterStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
Load(node->expression());
- __ CallRuntime(Runtime::kPushContext, 2);
+ __ CallRuntime(Runtime::kPushContext, 1);
+ if (kDebug) {
+ Label verified_true;
+ __ cmp(r0, Operand(cp));
+ __ b(eq, &verified_true);
+ __ stop("PushContext: r0 is expected to be the same as cp");
+ __ bind(&verified_true);
+ }
+ __ pop(r0); // restore TOS
// Update context local.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
case CodeGenState::LOAD: // fall through
case CodeGenState::LOAD_TYPEOF_EXPR:
// Special handling for locals allocated in registers.
- if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
- node->index() < num_reg_locals_) {
- __ Push(Operand(SlotRegister(node->index())));
- } else {
- __ Push(SlotOperand(node, r2));
- }
+ __ Push(SlotOperand(node, r2));
if (node->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't
// been initialized yet) which needs to be converted into the
// the code is identical to a normal store (see below).
{ Comment cmnt(masm_, "[ Init const");
Label L;
- if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
- node->index() < num_reg_locals_) {
- __ mov(r2, Operand(SlotRegister(node->index())));
- } else {
- __ ldr(r2, SlotOperand(node, r2));
- }
+ __ ldr(r2, SlotOperand(node, r2));
__ cmp(r2, Operand(Factory::the_hole_value()));
__ b(ne, &L);
// We must execute the store.
- if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
- node->index() < num_reg_locals_) {
- __ mov(SlotRegister(node->index()), Operand(r0));
- } else {
- // r2 may be loaded with context; used below in RecordWrite.
- __ str(r0, SlotOperand(node, r2));
- }
+ // r2 may be loaded with context; used below in RecordWrite.
+ __ str(r0, SlotOperand(node, r2));
if (node->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
Label exit;
// Variable::CONST because of const declarations which will
// initialize consts to 'the hole' value and by doing so, end
// up calling this code.
- if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
- node->index() < num_reg_locals_) {
- __ mov(SlotRegister(node->index()), Operand(r0));
- } else {
- // r2 may be loaded with context; used below in RecordWrite.
- __ str(r0, SlotOperand(node, r2));
- }
+ // r2 may be loaded with context; used below in RecordWrite.
+ __ str(r0, SlotOperand(node, r2));
if (node->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
Label exit;
SmiOperation(node->binary_op(), literal->handle(), false);
} else {
Load(node->value());
- GenericOperation(node->binary_op());
+ GenericBinaryOperation(node->binary_op());
}
}
__ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
// Reset parameter pointer and frame pointer to previous frame
- ExitJSFrame(reg_locals_, DO_NOT_RETURN);
+ ExitJSFrame(DO_NOT_RETURN);
// Jump (tail-call) to the function in register r1.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
__ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
__ add(pc, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- return;
}
}
+// This should generate code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It is not yet implemented on ARM, so it always goes to the slow case.
+void ArmCodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ __ push(r0);
+ __ mov(r0, Operand(Factory::undefined_value()));
+}
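
// A hypothetical caller in the JavaScript natives, sketching how the
// intrinsic is meant to be used (the IS_UNDEFINED helper is illustrative;
// %_FastCharCodeAt and Runtime_StringCharCodeAt are the names used in this
// patch):
//   var code = %_FastCharCodeAt(subject, index);
//   if (IS_UNDEFINED(code)) code = %StringCharCodeAt(subject, index);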
+
+
+
// This is used in the implementation of apply on ia32 but it is not
// used on ARM yet.
void ArmCodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- __ int3();
+ __ stop("ArmCodeGenerator::GenerateIsArray");
cc_reg_ = eq;
}
// Get the 'this' function and exit the frame without returning.
__ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
- ExitJSFrame(reg_locals_, DO_NOT_RETURN);
+ ExitJSFrame(DO_NOT_RETURN);
// return address in lr
// Move arguments one element down the stack.
} else {
Load(node->left());
Load(node->right());
- GenericOperation(node->op());
+ GenericBinaryOperation(node->op());
}
}
}
}
-void ArmCodeGenerator::EnterJSFrame(int argc, RegList callee_saved) {
- __ EnterJSFrame(argc, callee_saved);
+void ArmCodeGenerator::EnterJSFrame(int argc) {
+ __ EnterJSFrame(argc);
}
-void ArmCodeGenerator::ExitJSFrame(RegList callee_saved, ExitJSFlag flag) {
- // The JavaScript debugger expects ExitJSFrame to be implemented as a stub,
- // so that a breakpoint can be inserted at the end of a function.
- int num_callee_saved = NumRegs(callee_saved);
-
- // We support a fixed number of register variable configurations
- ASSERT(num_callee_saved <= 5 &&
- JSCalleeSavedList(num_callee_saved) == callee_saved);
-
- JSExitStub stub(num_callee_saved, callee_saved, flag);
+void ArmCodeGenerator::ExitJSFrame(ExitJSFlag flag) {
+ JSExitStub stub(flag);
__ CallJSExitStub(&stub);
}
void AccessReferenceProperty(Expression* key,
CodeGenState::AccessType access);
- void GenericOperation(Token::Value op,
- OverwriteMode overwrite_mode = NO_OVERWRITE);
-
- bool InlinedGenericOperation(
+ void GenericBinaryOperation(
Token::Value op,
const OverwriteMode overwrite_mode = NO_OVERWRITE,
bool negate_result = false);
virtual void GenerateValueOf(ZoneList<Expression*>* args);
virtual void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+ virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
};
// Allocate local context.
// Get outer context and create a new context based on it.
__ push(FunctionOperand());
- __ CallRuntime(Runtime::kNewContext, 2);
- __ push(eax);
+ __ CallRuntime(Runtime::kNewContext, 1); // eax holds the result
+
+ if (kDebug) {
+ Label verified_true;
+ // Verify eax and esi are the same in debug mode
+ __ cmp(eax, Operand(esi));
+ __ j(equal, &verified_true);
+ __ int3();
+ __ bind(&verified_true);
+ }
+
// Update context local.
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Restore the arguments array pointer, if any.
}
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void Ia32CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
- // Note: The generated code snippet cannot change 'reg'.
- // Only the condition code should be set.
+#undef __
+#define __ masm->
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+
+ Major MajorKey() { return ToBoolean; }
+
+ int MinorKey() { return 0; }
+
+ const char* GetName() { return "ToBooleanStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("ToBooleanStub\n");
+ }
+#endif
+};
+
+
+// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ and_(ebx, 1 << Map::kIsUndetectable);
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ cmp(ecx, JS_OBJECT_TYPE);
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ cmp(ecx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string);
+ __ and_(ecx, kStringSizeMask);
+ __ cmp(ecx, kShortStringTag);
+ __ j(not_equal, &true_result); // Empty string is always short.
+ __ mov(edx, FieldOperand(eax, String::kLengthOffset));
+ __ shr(edx, String::kShortLengthShift);
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &true_result);
+ __ fldz();
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ fucompp();
+ __ push(eax);
+ __ fnstsw_ax();
+ __ sahf();
+ __ pop(eax);
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ // Return 1/0 for true/false in eax.
+ __ bind(&true_result);
+ __ mov(eax, 1);
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ mov(eax, 0);
+ __ ret(1 * kPointerSize);
+}
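
// Sketch of the observable ToBoolean behaviour that the stub, together with
// the inlined fast checks below, is meant to implement (ECMA-262, 9.2):
//   Boolean(null) == false        Boolean({}) == true
//   Boolean("") == false          Boolean("x") == true
//   Boolean(0) == Boolean(-0) == Boolean(NaN) == false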
+#undef __
+#define __ masm_->
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void Ia32CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
Comment cmnt(masm_, "[ ToBoolean");
- // the value to convert should be popped from the stack
+ // The value to convert should be popped from the stack.
__ pop(eax);
- // Fast case checks
+ // Fast case checks.
- // Check if value is 'false'.
+ // 'false' => false.
__ cmp(eax, Factory::false_value());
__ j(equal, false_target);
- // Check if value is 'true'.
+ // 'true' => true.
__ cmp(eax, Factory::true_value());
__ j(equal, true_target);
- // Check if reg is 'undefined'.
+ // 'undefined' => false.
__ cmp(eax, Factory::undefined_value());
__ j(equal, false_target);
- // Check if reg is 'null'.
- __ cmp(eax, Factory::null_value());
- __ j(equal, false_target);
-
- // Check if value is a Smi.
- __ cmp(eax, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
- __ j(equal, false_target);
+ // Smi => false iff zero.
+ ASSERT(kSmiTag == 0);
+ __ test(eax, Operand(eax));
+ __ j(zero, false_target);
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, true_target, taken);
+ __ j(zero, true_target);
- // Slow case: call the runtime.
- __ push(eax); // undo the pop(eax) from above
- __ CallRuntime(Runtime::kToBool, 1);
- // Convert result (eax) to condition code
- __ cmp(eax, Factory::false_value());
+ // Call the stub for all other cases.
+ __ push(eax); // Undo the pop(eax) from above.
+ ToBooleanStub stub;
+ __ CallStub(&stub);
+ // Convert result (eax) to condition code.
+ __ test(eax, Operand(eax));
ASSERT(not_equal == not_zero);
cc_reg_ = not_equal;
class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading floating point values. Input values must
- // be either Smi or heap number objects (fp values). Requirements:
+ // be either smi or heap number objects (fp values). Requirements:
// operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
// floating point numbers on FPU stack.
static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
- // Test if operands are Smi or number objects (fp). Requirements:
+ // Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
static void CheckFloatOperands(MacroAssembler* masm,
};
-class InlinedGenericOpStub: public CodeStub {
+class GenericBinaryOpStub: public CodeStub {
public:
- InlinedGenericOpStub(Token::Value op, OverwriteMode mode, bool negate_result)
+ GenericBinaryOpStub(Token::Value op, OverwriteMode mode, bool negate_result)
: op_(op), mode_(mode), negate_result_(negate_result) { }
private:
#ifdef DEBUG
void Print() {
- PrintF("InlinedGenericOpStub (op %s), (mode %d), (negate_result %s)\n",
+ PrintF("GenericBinaryOpStub (op %s), (mode %d), (negate_result %s)\n",
Token::String(op_),
static_cast<int>(mode_),
negate_result_ ? "true" : "false");
class ModeBits: public BitField<OverwriteMode, 1, 2> {};
class OpBits: public BitField<Token::Value, 3, 13> {};
- Major MajorKey() { return InlinedGenericOp; }
+ Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the three parameters in a unique 16 bit value.
return NegateBits::encode(negate_result_) |
};
-const char* InlinedGenericOpStub::GetName() {
+const char* GenericBinaryOpStub::GetName() {
switch (op_) {
- case Token::ADD: return "InlinedGenericOpStub_ADD";
- case Token::SUB: return "InlinedGenericOpStub_SUB";
- case Token::MUL: return "InlinedGenericOpStub_MUL";
- case Token::DIV: return "InlinedGenericOpStub_DIV";
- case Token::BIT_OR: return "InlinedGenericOpStub_BIT_OR";
- case Token::BIT_AND: return "InlinedGenericOpStub_BIT_AND";
- case Token::BIT_XOR: return "InlinedGenericOpStub_BIT_XOR";
- case Token::SAR: return "InlinedGenericOpStub_SAR";
- case Token::SHL: return "InlinedGenericOpStub_SHL";
- case Token::SHR: return "InlinedGenericOpStub_SHR";
- default: return "InlinedGenericOpStub";
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
}
}
-void InlinedGenericOpStub::Generate(MacroAssembler* masm) {
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
-
+ if (negate_result_ && op_ != Token::MUL) UNIMPLEMENTED();
__ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y.
__ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x.
+
+ // 1. Smi case.
switch (op_) {
case Token::ADD: {
// eax: y.
// edx: x.
- if (negate_result_) UNIMPLEMENTED();
Label revert;
__ mov(ecx, Operand(eax));
__ or_(ecx, Operand(edx)); // ecx = x | y.
__ add(eax, Operand(edx)); // Add y optimistically.
// Go slow-path in case of overflow.
__ j(overflow, &revert, not_taken);
- // Go slow-path in case of non-Smi operands.
+ // Go slow-path in case of non-smi operands.
ASSERT(kSmiTag == 0); // adjust code below
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &revert, not_taken);
__ sub(eax, Operand(edx));
break;
}
-
case Token::SUB: {
// eax: y.
// edx: x.
- if (negate_result_) UNIMPLEMENTED();
Label revert;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax)); // ecx = x | y.
__ sub(edx, Operand(eax)); // Subtract y optimistically.
// Go slow-path in case of overflow.
__ j(overflow, &revert, not_taken);
- // Go slow-path in case of non-Smi operands.
+ // Go slow-path in case of non-smi operands.
ASSERT(kSmiTag == 0); // adjust code below
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &revert, not_taken);
__ add(edx, Operand(eax));
break;
}
-
case Token::MUL: {
// eax: y
// edx: x
- // a) both operands SMI and result fits into a SMI -> return.
- // b) at least one of operans non-SMI -> non_smi_operands.
- // c) result does not fit in a SMI -> non_smi_result.
+ // a) both operands smi and result fits into a smi -> return.
+ // b) at least one of operands non-smi -> non_smi_operands.
+ // c) result does not fit in a smi -> non_smi_result.
Label non_smi_operands, non_smi_result;
// Tag check.
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax)); // ecx = x | y.
ASSERT(kSmiTag == 0); // Adjust code below.
__ test(ecx, Immediate(kSmiTagMask));
- // Jump if not both Smi; check if float numbers.
+ // Jump if not both smi; check if float numbers.
__ j(not_zero, &non_smi_operands, not_taken);
// Get copies of operands.
__ mov(edx, Operand(esp, 2 * kPointerSize));
break;
}
-
case Token::DIV: {
// eax: y
// edx: x
- if (negate_result_) UNIMPLEMENTED();
Label non_smi_operands, non_smi_result, division_by_zero;
__ mov(ebx, Operand(eax)); // Get y
__ mov(eax, Operand(edx)); // Get x
__ or_(ecx, Operand(eax)); // ecx = x | y.
ASSERT(kSmiTag == 0); // Adjust code below.
__ test(ecx, Immediate(kSmiTagMask));
- // Jump if not both Smi; check if float numbers.
+ // Jump if not both smi; check if float numbers.
__ j(not_zero, &non_smi_operands, not_taken);
__ test(ebx, Operand(ebx)); // Check for 0 divisor.
__ j(zero, &division_by_zero, not_taken);
__ mov(edx, Operand(esp, 2 * kPointerSize));
break;
}
+ case Token::MOD: {
+ Label slow;
+ __ mov(ebx, Operand(eax)); // get y
+ __ mov(eax, Operand(edx)); // get x
+ __ cdq(); // sign extend eax into edx:eax
+ // tag check
+ __ mov(ecx, Operand(ebx));
+ __ or_(ecx, Operand(eax)); // ecx = x | y;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+ __ test(ebx, Operand(ebx)); // test for y == 0
+ __ j(zero, &slow);
+ // Fast case: Do integer division and use remainder.
+ __ idiv(ebx);
+ __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y
+ __ mov(eax, Operand(edx));
+ __ ret(2 * kPointerSize);
+
+ // Slow case: Call runtime operator implementation.
+ __ bind(&slow);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ jmp(&call_runtime);
+ break;
+ }
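    // Note on the NegativeZeroTest in the fast case above: JavaScript '%'
    // takes the sign of the dividend, so e.g. -4 % 2 must evaluate to -0,
    // which a smi cannot represent. The fast case therefore bails out to the
    // runtime whenever the remainder is zero and either operand is negative
    // (checked conservatively via ecx = x | y).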
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
// Smi-case for bitops should already have been inlined.
break;
}
-
default: {
UNREACHABLE();
}
}
- // eax: y
- // edx: x
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
-
- // Fast-case: Both operands are numbers.
-
- // Allocate a heap number, if needed.
- // Bitops allocate _after_ computation to allow for smi results.
- if (!Token::IsBitOp(op_)) {
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime, ecx, edx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- }
-
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
+ // 2. Floating point case.
switch (op_) {
- case Token::ADD: {
- __ faddp(1);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::SUB: {
- __ fsubp(1);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
- break;
- }
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ // eax: y
+ // edx: x
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ // Fast-case: Both operands are numbers.
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ mov(eax, Operand(edx));
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm,
+ &call_runtime,
+ ecx,
+ edx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
- case Token::MUL: {
- __ fmulp(1);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
if (negate_result_) __ fchs();
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(2 * kPointerSize);
- break;
}
-
- case Token::DIV: {
- __ fdivp(1);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
break;
}
-
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SAR:
case Token::SHL:
case Token::SHR: {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
Label non_int32_operands, non_smi_result, skip_allocation;
// Reserve space for converted numbers.
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
break;
}
-
default: UNREACHABLE(); break;
}
- // Slow-case: Use the runtime system to get the right result.
+ // 3. If all else fails, use the runtime system to get the correct result.
__ bind(&call_runtime);
- if (negate_result_) {
- switch (op_) {
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MULNEG, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(negate_result_ ? Builtins::MULNEG
+ : Builtins::MUL,
+ JUMP_FUNCTION);
break;
-
- default:
- UNREACHABLE();
- }
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
}
}
Label* non_float,
Register scratch) {
Label test_other, done;
- // test if both operands are floats or Smi -> scratch=k_is_float;
- // otherwise scratch=k_not_float
+ // Test if both operands are floats or smi -> scratch=k_is_float;
+ // Otherwise scratch = k_not_float.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &test_other, not_taken); // argument in edx is OK
__ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
}
-// TODO(1217800): Implement MOD like ADD/SUB/MUL/DIV
-// and get rid of GenericOpStub.
-void GenericOpStub::Generate(MacroAssembler* masm) {
- switch (op_) {
- case Token::MOD: {
- Label fast, slow;
- __ mov(ebx, Operand(eax)); // get y
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // get x
- __ cdq(); // sign extend eax into edx:eax
- // tag check
- __ mov(ecx, Operand(ebx));
- __ or_(ecx, Operand(eax)); // ecx = x | y;
- ASSERT(kSmiTag == 0); // adjust code below
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
- __ test(ebx, Operand(ebx)); // test for y == 0
- __ j(not_zero, &fast, taken);
-
- // Slow case: Call native operator implementation.
- __ bind(&slow);
- __ pop(ecx); // pop return address
- __ push(ebx);
- __ push(ecx); // push return address
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-
- // Fast case: Do integer division and use remainder.
- __ bind(&fast);
- __ idiv(ebx);
- __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y
- __ mov(eax, Operand(edx));
- break;
- }
-
- default: UNREACHABLE();
- }
- masm->StubReturn(2);
-}
-
-
class ArgumentsAccessStub: public CodeStub {
public:
explicit ArgumentsAccessStub(bool is_length) : is_length_(is_length) { }
#define __ masm_->
-// Return true if code was generated for operation 'type'.
-// NOTE: The code below assumes that the slow cases (calls to runtime)
-// never return a constant/immutable object.
-// TODO(1217800): MOD is not yet implemented.
-bool Ia32CodeGenerator::InlinedGenericOperation(
- Token::Value op,
- const OverwriteMode overwrite_mode,
- bool negate_result) {
- const char* comment = NULL;
- if (negate_result) {
- switch (op) {
- case Token::ADD: comment = "[ GenericOpCode Token::ADDNEG"; break;
- case Token::SUB: comment = "[ GenericOpCode Token::SUBNEG"; break;
- case Token::MUL: comment = "[ GenericOpCode Token::MULNEG"; break;
- case Token::DIV: comment = "[ GenericOpCode Token::DIVNEG"; break;
- default: return false;
- }
- } else {
- switch (op) {
- case Token::ADD: comment = "[ GenericOpCode Token::ADD"; break;
- case Token::SUB: comment = "[ GenericOpCode Token::SUB"; break;
- case Token::MUL: comment = "[ GenericOpCode Token::MUL"; break;
- case Token::DIV: comment = "[ GenericOpCode Token::DIV"; break;
- default: return false;
- }
- }
- Comment cmnt(masm_, comment);
- InlinedGenericOpStub stub(op, overwrite_mode, negate_result);
- __ CallStub(&stub);
- __ push(eax);
- return true;
-}
-
-
-void Ia32CodeGenerator::GenericOperation(Token::Value op,
- OverwriteMode overwrite_mode) {
- // Stub is entered with a call: 'return address' is on stack.
+void Ia32CodeGenerator::GenericBinaryOperation(Token::Value op,
+ OverwriteMode overwrite_mode,
+ bool negate_result) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Comment cmnt_token(masm_, Token::String(op));
+ if (negate_result && op != Token::MUL) UNIMPLEMENTED();
switch (op) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
case Token::MOD: {
- GenericOpStub stub(op);
- __ pop(eax);
+ GenericBinaryOpStub stub(op, overwrite_mode, negate_result);
__ CallStub(&stub);
__ push(eax);
break;
}
-
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR: {
Label slow, exit;
__ pop(eax); // get y
__ pop(edx); // get x
- __ mov(ecx, Operand(edx)); // prepare smi check
+ __ mov(ecx, Operand(edx)); // Prepare smi check.
// tag check
__ or_(ecx, Operand(eax)); // ecx = x | y;
ASSERT(kSmiTag == 0); // adjust code below
__ bind(&slow);
__ push(edx); // restore stack slots
__ push(eax);
- InlinedGenericOpStub stub(op, overwrite_mode, false);
+ GenericBinaryOpStub stub(op, overwrite_mode, false);
__ CallStub(&stub);
__ bind(&exit);
__ push(eax); // push the result to the stack
break;
}
-
case Token::SHL:
case Token::SHR:
case Token::SAR: {
__ sar(ebx);
// no checks of result necessary
break;
-
case Token::SHR:
__ shr(ebx);
- // check that the *unsigned* result fits in a smi
+ // Check that the *unsigned* result fits in a smi.
// neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
+ // - 0x80000000: high bit would be lost when smi tagging.
// - 0x40000000: this number would convert to negative when
// smi tagging. These two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi
+ // by 0 or 1 when handed a valid smi.
__ test(ebx, Immediate(0xc0000000));
__ j(not_zero, &slow, not_taken);
break;
-
case Token::SHL:
__ shl(ebx);
- // check that the *signed* result fits in a smi
+ // Check that the *signed* result fits in a smi.
__ lea(ecx, Operand(ebx, 0x40000000));
__ test(ecx, Immediate(0x80000000));
__ j(not_zero, &slow, not_taken);
break;
-
default: UNREACHABLE();
}
// tag result and store it in TOS (eax)
__ bind(&slow);
__ push(eax); // restore stack
__ push(edx);
- InlinedGenericOpStub stub(op, overwrite_mode, false);
+ GenericBinaryOpStub stub(op, overwrite_mode, false);
__ CallStub(&stub);
__ bind(&exit);
__ push(eax);
break;
}
-
case Token::COMMA: {
// simply discard left value
__ pop(eax);
__ push(eax);
break;
}
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
+ default: UNREACHABLE();
}
}
virtual void Generate() {
__ push(eax);
__ push(Immediate(Smi::FromInt(value_)));
- InlinedGenericOpStub igostub(op_, overwrite_mode_, false);
+ GenericBinaryOpStub igostub(op_, overwrite_mode_, false);
__ CallStub(&igostub);
}
virtual void Generate() {
__ push(Immediate(Smi::FromInt(value_)));
__ push(eax);
- InlinedGenericOpStub igostub(op_, overwrite_mode_, false);
+ GenericBinaryOpStub igostub(op_, overwrite_mode_, false);
__ CallStub(&igostub);
}
__ sub(Operand(eax), immediate);
__ push(eax);
__ push(immediate);
- InlinedGenericOpStub igostub(Token::ADD, overwrite_mode_, false);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, false);
__ CallStub(&igostub);
}
__ sub(Operand(eax), immediate);
__ push(immediate);
__ push(eax);
- InlinedGenericOpStub igostub(Token::ADD, overwrite_mode_, false);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, false);
__ CallStub(&igostub);
}
__ add(Operand(eax), immediate);
__ push(eax);
__ push(immediate);
- InlinedGenericOpStub igostub(Token::SUB, overwrite_mode_, false);
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, false);
__ CallStub(&igostub);
}
__ add(eax, Operand(tos_reg_));
__ push(eax);
__ push(Operand(tos_reg_));
- InlinedGenericOpStub igostub(Token::SUB, overwrite_mode_, false);
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, false);
__ CallStub(&igostub);
}
__ pop(eax);
__ push(Immediate(value));
__ push(eax);
- GenericOperation(op);
+ GenericBinaryOperation(op, overwrite_mode);
} else {
int shift_value = int_value & 0x1f; // only least significant 5 bits
DeferredCode* deferred =
__ pop(eax);
__ push(Immediate(value));
__ push(eax);
- GenericOperation(op);
+ GenericBinaryOperation(op, overwrite_mode);
} else {
int shift_value = int_value & 0x1f; // only least significant 5 bits
DeferredCode* deferred =
__ pop(eax);
__ push(Immediate(value));
__ push(eax);
- GenericOperation(op);
+ GenericBinaryOperation(op, overwrite_mode);
} else {
int shift_value = int_value & 0x1f; // only least significant 5 bits
DeferredCode* deferred =
__ push(Immediate(value));
__ push(eax);
}
- bool done = InlinedGenericOperation(op, overwrite_mode,
- false /*negate_result*/);
- if (!done) GenericOperation(op);
+ GenericBinaryOperation(op, overwrite_mode);
break;
}
}
__ push(ecx);
// Inlined floating point compare.
- // Call builtin if operands are not floating point or SMI.
+ // Call builtin if operands are not floating point or smi.
FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
FloatingPointHelper::LoadFloatOperands(masm, ecx);
__ FCmp();
void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow, fast;
+ Label slow;
// Get the function to call from the stack.
// +2 ~ receiver, return address
Comment cmnt(masm_, "[ WithEnterStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
Load(node->expression());
- __ CallRuntime(Runtime::kPushContext, 2);
- __ push(eax);
+ __ CallRuntime(Runtime::kPushContext, 1);
+
+ if (kDebug) {
+ Label verified_true;
+ // Verify eax and esi are the same in debug mode
+ __ cmp(eax, Operand(esi));
+ __ j(equal, &verified_true);
+ __ int3();
+ __ bind(&verified_true);
+ }
+
// Update context local.
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
}
__ j(equal, &exit);
// Stack layout in body:
- // [iteration counter (Smi)] <- slot 0
+ // [iteration counter (smi)] <- slot 0
// [length of array] <- slot 1
// [FixedArray] <- slot 2
// [Map or 0] <- slot 3
if (shadows[i]->is_linked()) nof_unlinks++;
}
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // Make sure that there's nothing left on the stack above the
+ // handler structure.
+ if (FLAG_debug_code) {
+ __ mov(eax, Operand::StaticVariable(handler_address));
+ __ lea(eax, Operand(eax, StackHandlerConstants::kAddressDisplacement));
+ __ cmp(esp, Operand(eax));
+ __ Assert(equal, "stack pointer should point to top handler");
+ }
+
// Unlink from try chain.
__ pop(eax);
- ExternalReference handler_address(Top::k_handler_address);
__ mov(Operand::StaticVariable(handler_address), eax); // TOS == next_sp
__ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
// next_sp popped.
SmiOperation(node->binary_op(), literal->handle(), false, NO_OVERWRITE);
} else {
Load(node->value());
- bool done = InlinedGenericOperation(node->binary_op(), NO_OVERWRITE,
- false /*negate_result*/);
- if (!done) {
- GenericOperation(node->binary_op());
- }
+ GenericBinaryOperation(node->binary_op());
}
}
}
+// This generates code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It can handle flat and sliced strings, 8- and 16-bit characters, and
+// cons strings where the answer is found in the left-hand branch of the
+// cons. The slow case will flatten the string, which ensures that the
+// answer is in the left-hand branch the next time around.
+void Ia32CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ Label slow_case;
+ Label end;
+ Label not_a_flat_string;
+ Label not_a_cons_string_either;
+ Label try_again_with_new_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // Load the string into eax.
+ Load(args->at(0));
+ __ pop(eax);
+ // If the receiver is a smi return undefined.
+ ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &slow_case, not_taken);
+
+ // Load the index into ebx.
+ Load(args->at(1));
+ __ pop(ebx);
+
+ // Check for negative or non-smi index.
+ ASSERT(kSmiTag == 0);
+ __ test(ebx, Immediate(kSmiTagMask | 0x80000000));
+ __ j(not_zero, &slow_case, not_taken);
+ // Get rid of the smi tag on the index.
+ __ sar(ebx, kSmiTagSize);
+
+ __ bind(&try_again_with_new_string);
+ // Get the type of the heap object into ecx.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ // We don't handle non-strings.
+ __ test(ecx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &slow_case, not_taken);
+
+ // Get the length field.
+ __ mov(edx, FieldOperand(eax, String::kLengthOffset));
+ Label long_string;
+ Label medium_string;
+ Label string_length_shifted;
+ // The code assumes the tags are disjoint.
+ ASSERT((kLongStringTag & kMediumStringTag) == 0);
+ ASSERT(kShortStringTag == 0);
+ __ test(ecx, Immediate(kLongStringTag));
+ __ j(not_zero, &long_string, not_taken);
+ __ test(ecx, Immediate(kMediumStringTag));
+ __ j(not_zero, &medium_string, taken);
+ // Short string.
+ __ shr(edx, String::kShortLengthShift);
+ __ jmp(&string_length_shifted);
+
+ // Medium string.
+ __ bind(&medium_string);
+ __ shr(edx, String::kMediumLengthShift - String::kLongLengthShift);
+ // Fall through to long string.
+ __ bind(&long_string);
+ __ shr(edx, String::kLongLengthShift);
+
+ __ bind(&string_length_shifted);
+ ASSERT(kSmiTag == 0);
+ // edx is now the length of the string.
+
+ // Check for index out of range.
+ __ cmp(ebx, Operand(edx));
+ __ j(greater_equal, &slow_case, not_taken);
+
+ // We need special handling for non-flat strings.
+ ASSERT(kSeqStringTag == 0);
+ __ test(ecx, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &not_a_flat_string, not_taken);
+
+ // Check for 1-byte or 2-byte string.
+ __ test(ecx, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string, taken);
+
+ // 2-byte string.
+ // Load the 2-byte character code.
+ __ movzx_w(eax, FieldOperand(eax, ebx, times_2, TwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ __ bind(&ascii_string);
+ // Load the byte.
+ __ movzx_b(eax, FieldOperand(eax, ebx, times_1, AsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
+ ASSERT(kSmiTag == 0);
+ __ shl(eax, kSmiTagSize);
+ __ push(eax);
+ __ jmp(&end);
+
+
+ // Handle non-flat strings.
+ __ bind(&not_a_flat_string);
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kConsStringTag);
+ __ j(not_equal, &not_a_cons_string_either, not_taken);
+
+ // ConsString.
+ // Get the first of the two strings.
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&not_a_cons_string_either);
+ __ cmp(ecx, kSlicedStringTag);
+ __ j(not_equal, &slow_case, not_taken);
+
+ // SlicedString.
+ // Add the offset to the index.
+ __ add(ebx, FieldOperand(eax, SlicedString::kStartOffset));
+ __ j(overflow, &slow_case);
+ // Get the underlying string.
+ __ mov(eax, FieldOperand(eax, SlicedString::kBufferOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&slow_case);
+ __ push(Immediate(Factory::undefined_value()));
+
+ __ bind(&end);
+}
+
+
void Ia32CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Label answer;
// We need the CC bits to come out as not_equal in the case where the
- // object is a Smi. This can't be done with the usual test opcode so
+ // object is a smi. This can't be done with the usual test opcode so
// we copy the object to ecx and do some destructive ops on it that
// result in the right CC bits.
__ pop(eax);
}
case Token::BIT_NOT: {
- // smi check
+ // Smi check.
Label smi_label;
Label continue_label;
__ pop(eax);
__ jmp(&continue_label);
__ bind(&smi_label);
__ not_(eax);
- __ and_(eax, ~kSmiTagMask); // remove inverted smi-tag
+ __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
__ bind(&continue_label);
__ push(eax);
break;
}
} else {
+ // NOTE: The code below assumes that the slow cases (calls to runtime)
+ // never return a constant/immutable object.
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (node->left()->AsBinaryOperation() != NULL &&
node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
Load(node->left());
Load(node->right());
}
- const bool done = InlinedGenericOperation(node->op(), overwrite_mode,
- negate_result);
- if (!done) {
- // Defer negation implemented only for inlined generic ops.
- ASSERT(!negate_result);
- GenericOperation(node->op(), overwrite_mode);
- }
+ GenericBinaryOperation(node->op(), overwrite_mode, negate_result);
}
}
}
{&v8::internal::CodeGenerator::GenerateValueOf,
"_ValueOf"},
{&v8::internal::CodeGenerator::GenerateSetValueOf,
- "_SetValueOf"}
+ "_SetValueOf"},
+ {&v8::internal::CodeGenerator::GenerateFastCharCodeAt,
+ "_FastCharCodeAt"}
};
if (node->name()->length() > 0 && node->name()->Get(0) == '_') {
for (unsigned i = 0;
}
-const char* GenericOpStub::GetName() {
- switch (op_) {
- case Token::ADD: return "GenericOpStub_ADD";
- case Token::SUB: return "GenericOpStub_SUB";
- case Token::MUL: return "GenericOpStub_MUL";
- case Token::DIV: return "GenericOpStub_DIV";
- default: return "GenericOpStub";
- }
-}
-
-
} } // namespace v8::internal
virtual void GenerateValueOf(ZoneList<Expression*>* args) = 0;
virtual void GenerateSetValueOf(ZoneList<Expression*>* args) = 0;
+ // Fast support for charCodeAt(n).
+ virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args) = 0;
+
private:
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
};
-class GenericOpStub : public CodeStub {
- public:
- explicit GenericOpStub(Token::Value op) : op_(op) { }
-
- private:
- Token::Value op_;
-
- Major MajorKey() { return GenericOp; }
- int MinorKey() { return static_cast<int>(op_); }
- void Generate(MacroAssembler* masm);
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() { PrintF("GenericOpStub (token %s)\n", Token::String(op_)); }
-#endif
-};
-
-
class StackCheckStub : public CodeStub {
public:
StackCheckStub() { }
namespace v8 { namespace internal {
-
-// Helper class for building result strings in a character buffer. The
-// purpose of the class is to use safe operations that checks the
-// buffer bounds on all operations in debug mode.
-class StringBuilder {
- public:
- // Create a string builder with a buffer of the given size. The
- // buffer is allocated through NewArray<char> and must be
- // deallocated by the caller of Finalize().
- explicit StringBuilder(int size);
-
- StringBuilder(char* buffer, int size)
- : buffer_(buffer), size_(size), position_(0) { }
-
- ~StringBuilder() { if (!is_finalized()) Finalize(); }
-
- // Get the current position in the builder.
- inline int position() const;
-
- // Add a single character to the builder. It is not allowed to add
- // 0-characters; use the Finalize() method to terminate the string
- // instead.
- inline void AddCharacter(char c);
-
- // Add an entire string to the builder. Uses strlen() internally to
- // compute the length of the input string.
- void AddString(const char* s);
-
- // Add the first 'n' characters of the given string 's' to the
- // builder. The input string must have enough characters.
- void AddSubstring(const char* s, int n);
-
- // Add formatted contents to the builder just like printf().
- void AddFormatted(const char* format, ...);
-
- // Add character padding to the builder. If count is non-positive,
- // nothing is added to the builder.
- void AddPadding(char c, int count);
-
- // Finalize the string by 0-terminating it and returning the buffer.
- char* Finalize();
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
-
- char* buffer_;
- int size_;
- int position_;
-
- bool is_finalized() const { return position_ < 0; }
-};
-
-
-StringBuilder::StringBuilder(int size) {
- buffer_ = NewArray<char>(size);
- size_ = size;
- position_ = 0;
-}
-
-
-inline int StringBuilder::position() const {
- ASSERT(!is_finalized());
- return position_;
-}
-
-
-inline void StringBuilder::AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(!is_finalized() && position_ < size_);
- buffer_[position_++] = c;
-}
-
-
-void StringBuilder::AddString(const char* s) {
- AddSubstring(s, strlen(s));
-}
-
-
-void StringBuilder::AddSubstring(const char* s, int n) {
- ASSERT(!is_finalized() && position_ + n < size_);
- ASSERT(static_cast<size_t>(n) <= strlen(s));
- memcpy(&buffer_[position_], s, n * kCharSize);
- position_ += n;
-}
-
-
-void StringBuilder::AddFormatted(const char* format, ...) {
- ASSERT(!is_finalized() && position_ < size_);
- va_list args;
- va_start(args, format);
- int remaining = size_ - position_;
- int n = OS::VSNPrintF(&buffer_[position_], remaining, format, args);
- va_end(args);
- if (n < 0 || n >= remaining) {
- position_ = size_;
- } else {
- position_ += n;
- }
-}
-
-
-void StringBuilder::AddPadding(char c, int count) {
- for (int i = 0; i < count; i++) {
- AddCharacter(c);
- }
-}
-
-
-char* StringBuilder::Finalize() {
- ASSERT(!is_finalized() && position_ < size_);
- buffer_[position_] = '\0';
- // Make sure nobody managed to add a 0-character to the
- // buffer while building the string.
- ASSERT(strlen(buffer_) == static_cast<size_t>(position_));
- position_ = -1;
- ASSERT(is_finalized());
- return buffer_;
-}
-
-
int HexValue(uc32 c) {
if ('0' <= c && c <= '9')
return c - '0';
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for arm independent of OS goes here.
-
+#if defined(__arm__)
#include <sys/syscall.h> // for cache flushing.
+#endif
#include "v8.h"
#include "global-handles.h"
#include "natives.h"
#include "stub-cache.h"
+#include "log.h"
namespace v8 { namespace internal {
DEFINE_int(debug_port, 5858, "port for remote debugging");
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response");
DECLARE_bool(allow_natives_syntax);
+DECLARE_bool(log_debugger);
static void PrintLn(v8::Local<v8::Value> value) {
}
-PendingRequest::PendingRequest(const uint16_t* json_request, int length)
- : json_request_(Vector<uint16_t>::empty()),
- next_(NULL) {
- // Copy the request.
- json_request_ =
- Vector<uint16_t>(const_cast<uint16_t *>(json_request), length).Clone();
-}
-
-
-PendingRequest::~PendingRequest() {
- // Deallocate what was allocated.
- if (!json_request_.is_empty()) {
- json_request_.Dispose();
- }
-}
-
-Handle<String> PendingRequest::request() {
- // Create a string in the heap from the pending request.
- if (!json_request_.is_empty()) {
- return Factory::NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(json_request_.start()),
- json_request_.length()));
- } else {
- return Handle<String>();
- }
-}
-
-
static Handle<Code> ComputeCallDebugBreak(int argc) {
CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc), Code);
}
// object.
bool Debug::IsDebugBreak(Address addr) {
Code* code = GetCodeTarget(addr);
- return code->state() == DEBUG_BREAK;
+ return code->ic_state() == DEBUG_BREAK;
}
v8::DebugMessageHandler Debugger::debug_message_handler_ = NULL;
void* Debugger::debug_message_handler_data_ = NULL;
-Mutex* Debugger::pending_requests_access_ = OS::CreateMutex();
-PendingRequest* Debugger::pending_requests_head_ = NULL;
-PendingRequest* Debugger::pending_requests_tail_ = NULL;
-
-
-void Debugger::DebugRequest(const uint16_t* json_request, int length) {
- // Create a pending request.
- PendingRequest* pending_request = new PendingRequest(json_request, length);
-
- // Add the pending request to list.
- Guard with(pending_requests_access_);
- if (pending_requests_head_ == NULL) {
- ASSERT(pending_requests_tail_ == NULL);
- pending_requests_head_ = pending_request;
- pending_requests_tail_ = pending_request;
- } else {
- ASSERT(pending_requests_tail_ != NULL);
- pending_requests_tail_->set_next(pending_request);
- pending_requests_tail_ = pending_request;
- }
-
- // Set the pending request flag to force the VM to stop soon.
- v8::Debug::DebugBreak();
-}
-
-
-bool Debugger::ProcessPendingRequests() {
- HandleScope scope;
-
- // Lock access to pending requests list while processing them. Typically
- // there will be either zero or one pending request.
- Guard with(pending_requests_access_);
-
- EnterDebuggerContext enter;
-
- // Get the current execution state.
- bool caught_exception;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- if (caught_exception) {
- return false;
- }
-
- // Process the list of pending requests.
- bool plain_break = false;
- PendingRequest* pending_request = pending_requests_head_;
- if (pending_request == NULL) {
- // If no pending commands plain break issued some other way (e.g. debugger
- // statement).
- plain_break = true;
- }
- while (pending_request != NULL) {
- Handle<String> response = ProcessRequest(exec_state,
- pending_request->request(),
- false);
- OnPendingRequestProcessed(response);
-
- // Check whether one of the commands is a plain break request.
- if (!plain_break) {
- plain_break = IsPlainBreakRequest(pending_request->request());
- }
-
- // Move to the next item in the list.
- PendingRequest* next = pending_request->next();
- delete pending_request;
- pending_request = next;
- }
-
- // List processed.
- pending_requests_head_ = NULL;
- pending_requests_tail_ = NULL;
-
- return plain_break;
-}
-
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
if (caught_exception) {
return false;
}
-
return *result == Heap::true_value();
}
if (caught_exception) {
return;
}
-
// Bail out based on state or if there is no listener for this event
if (Debug::InDebugger()) return;
if (!Debugger::EventActive(v8::AfterCompile)) return;
if (caught_exception) {
return;
}
-
// Process debug event
ProcessDebugEvent(v8::AfterCompile, event_data);
}
if (caught_exception) {
return;
}
-
// Process debug event.
ProcessDebugEvent(v8::NewFunction, event_data);
}
-void Debugger::OnPendingRequestProcessed(Handle<Object> event_data) {
- // Process debug event.
- ProcessDebugEvent(v8::PendingRequestProcessed, event_data);
-}
-
-
void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data) {
// Create the execution state.
if (caught_exception) {
return;
}
-
// First notify the builtin debugger.
if (message_thread_ != NULL) {
message_thread_->DebugEvent(event, exec_state, event_data);
}
-
// Notify registered debug event listeners. The list can contain both C and
// JavaScript functions.
v8::NeanderArray listeners(Factory::debug_event_listeners());
}
+// Posts an output message from the debugger to the debug_message_handler
+// callback. This callback is part of the public API. Messages are
+// kept internally as Vector<uint16_t> strings, which are allocated in various
+// places and deallocated by the calling function sometime after this call.
void Debugger::SendMessage(Vector< uint16_t> message) {
if (debug_message_handler_ != NULL) {
debug_message_handler_(message.start(), message.length(),
for (int i = 0; i < length && !active_listener; i++) {
active_listener = !listeners.get(i)->IsUndefined();
}
-
set_debugger_active((Debugger::message_thread_ != NULL &&
Debugger::debug_message_handler_ != NULL) ||
active_listener);
DebugMessageThread::DebugMessageThread()
: host_running_(true),
- event_json_(Vector<uint16_t>::empty()),
- command_(Vector<uint16_t>::empty()),
- result_(Vector<uint16_t>::empty()) {
+ command_queue_(kQueueInitialSize),
+ message_queue_(kQueueInitialSize) {
command_received_ = OS::CreateSemaphore(0);
- debug_event_ = OS::CreateSemaphore(0);
- debug_command_ = OS::CreateSemaphore(0);
- debug_result_ = OS::CreateSemaphore(0);
+ message_received_ = OS::CreateSemaphore(0);
}
-
+// Does not free resources held by DebugMessageThread
+// because this cannot be done thread-safely.
DebugMessageThread::~DebugMessageThread() {
}
-void DebugMessageThread::SetEventJSON(Vector<uint16_t> event_json) {
- SetVector(&event_json_, event_json);
+// Puts an event coming from V8 on the queue. Creates
+// a copy of the JSON formatted event string managed by V8.
+// Called by the V8 thread.
+// The new copy of the event string is destroyed in Run().
+void DebugMessageThread::SendMessage(Vector<uint16_t> message) {
+ Vector<uint16_t> message_copy = message.Clone();
+ if (FLAG_log_debugger) {
+ Logger::StringEvent("Put message on event message_queue.", "");
+ }
+ message_queue_.Put(message_copy);
+ message_received_->Signal();
}
v8::Local<v8::Function> fun =
v8::Function::Cast(*api_event_data->Get(fun_name));
v8::TryCatch try_catch;
- v8::Local<v8::Value> json_result = *fun->Call(api_event_data, 0, NULL);
- v8::Local<v8::String> json_result_string;
+ v8::Local<v8::Value> json_event = *fun->Call(api_event_data, 0, NULL);
+ v8::Local<v8::String> json_event_string;
if (!try_catch.HasCaught()) {
- if (!json_result->IsUndefined()) {
- json_result_string = json_result->ToString();
+ if (!json_event->IsUndefined()) {
+ json_event_string = json_event->ToString();
if (FLAG_trace_debug_json) {
- PrintLn(json_result_string);
+ PrintLn(json_event_string);
}
- v8::String::Value val(json_result_string);
+ v8::String::Value val(json_event_string);
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
- json_result_string->Length());
- SetEventJSON(str);
+ json_event_string->Length());
+ SendMessage(str);
} else {
- SetEventJSON(Vector<uint16_t>::empty());
+ SendMessage(Vector<uint16_t>::empty());
}
} else {
PrintLn(try_catch.Exception());
- SetEventJSON(Vector<uint16_t>::empty());
- }
-}
-
-
-void DebugMessageThread::SetCommand(Vector<uint16_t> command) {
- SetVector(&command_, command);
-}
-
-
-void DebugMessageThread::SetResult(const char* result) {
- int len = strlen(result);
- uint16_t* tmp = NewArray<uint16_t>(len);
- for (int i = 0; i < len; i++) {
- tmp[i] = result[i];
- }
- SetResult(Vector<uint16_t>(tmp, len));
- DeleteArray(tmp);
-}
-
-
-void DebugMessageThread::SetResult(Vector<uint16_t> result) {
- SetVector(&result_, result);
-}
-
-
-void DebugMessageThread::SetVector(Vector<uint16_t>* vector,
- Vector<uint16_t> value) {
- // Deallocate current result.
- if (!vector->is_empty()) {
- vector->Dispose();
- *vector = Vector<uint16_t>::empty();
- }
-
- // Allocate a copy of the new result.
- if (!value.is_empty()) {
- *vector = value.Clone();
+ SendMessage(Vector<uint16_t>::empty());
}
}
}
-void DebugMessageThread::CommandResult(Vector<uint16_t> result) {
- SetResult(result);
- debug_result_->Signal();
-}
-
-
void DebugMessageThread::Run() {
- // Process commands and debug events.
+ // Sends debug events to an installed debugger message callback.
while (true) {
- // Set the current command prompt
- Semaphore* sems[2];
- sems[0] = command_received_;
- sems[1] = debug_event_;
- int signal = Select(2, sems).WaitSingle();
- if (signal == 0) {
- if (command_.length() > 0) {
- HandleCommand();
- if (result_.length() > 0) {
- Debugger::SendMessage(result_);
- SetResult(Vector<uint16_t>::empty());
- }
- }
- } else {
- // Send the the current event as JSON to the debugger.
- Debugger::SendMessage(event_json_);
+ // Wait and Get are paired so that semaphore count equals queue length.
+ message_received_->Wait();
+ if (FLAG_log_debugger) {
+ Logger::StringEvent("Get message from event message_queue.", "");
+ }
+ Vector<uint16_t> message = message_queue_.Get();
+ if (message.length() > 0) {
+ Debugger::SendMessage(message);
}
}
}
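The Run() loop above and the SendMessage producer before it form a simple queue-plus-semaphore handoff: the producer clones the caller-owned buffer, puts the copy on the queue, and signals; the consumer waits once per Get, so the semaphore count always matches the queue length. The sketch below is not part of the patch; it restates that pattern with C++20 standard-library primitives (std::mutex, std::counting_semaphore) standing in for V8's Mutex, Semaphore and LockingMessageQueue, and all names in it are illustrative.

// Minimal sketch (not V8 code) of the Put/Signal and Wait/Get pairing.
#include <cstdio>
#include <deque>
#include <mutex>
#include <semaphore>
#include <string>
#include <thread>

class Channel {
 public:
  // Producer side: enqueue a copy of the message, then signal.
  void Send(const std::u16string& message) {
    {
      std::lock_guard<std::mutex> guard(lock_);
      queue_.push_back(message);   // analogous to message_queue_.Put()
    }
    pending_.release();            // analogous to message_received_->Signal()
  }

  // Consumer side: wait, then dequeue. Because every release() is paired
  // with exactly one acquire(), the semaphore count equals the queue length.
  std::u16string Receive() {
    pending_.acquire();            // analogous to message_received_->Wait()
    std::lock_guard<std::mutex> guard(lock_);
    std::u16string message = queue_.front();
    queue_.pop_front();
    return message;
  }

 private:
  std::mutex lock_;
  std::deque<std::u16string> queue_;
  std::counting_semaphore<> pending_{0};
};

int main() {
  Channel channel;
  std::thread consumer([&] {
    std::u16string m = channel.Receive();
    std::printf("received %zu code units\n", m.size());
  });
  channel.Send(u"{\"event\":\"break\"}");
  consumer.join();
  return 0;
}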
break;
case v8::NewFunction:
break;
- case v8::PendingRequestProcessed: {
- // For a processed pending request the event_data is the JSON response
- // string.
- v8::Handle<v8::String> str =
- v8::Handle<v8::String>(
- Utils::ToLocal(Handle<String>::cast(event_data)));
- v8::String::Value val(str);
- SetEventJSON(Vector<uint16_t>(reinterpret_cast<uint16_t*>(*val),
- str->Length()));
- debug_event_->Signal();
- break;
- }
default:
UNREACHABLE();
}
return;
}
- // Notify the debug session thread that a debug event has occoured.
+  // First process all pending commands in the queue. During this processing
+  // each message is checked to see if it is a plain break request. If there
+  // is a plain break request in the queue, or if the queue is empty, a break
+  // event is sent to the debugger.
+ bool plain_break = false;
+ if (command_queue_.IsEmpty()) {
+ plain_break = true;
+ } else {
+ // Drain queue.
+ while (!command_queue_.IsEmpty()) {
+ command_received_->Wait();
+ if (FLAG_log_debugger) {
+ Logger::StringEvent(
+ "Get command from command_queue, in drain queue loop.",
+ "");
+ }
+ Vector<uint16_t> command = command_queue_.Get();
+ // Support for sending a break command as just "break" instead of an
+ // actual JSON break command.
+      // If break is made into a separate API call, the function
+      // TwoByteEqualsAscii can be removed.
+ if (TwoByteEqualsAscii(command, "break")) {
+ plain_break = true;
+ continue;
+ }
+
+ // Get the command as a string object.
+ Handle<String> command_string;
+ if (!command.is_empty()) {
+ command_string = Factory::NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(
+ command.start()),
+ command.length()));
+ } else {
+ command_string = Handle<String>();
+ }
+
+ // Process the request.
+ Handle<String> message_string = Debugger::ProcessRequest(exec_state,
+ command_string,
+ false);
+ // Convert text result to UTF-16 string and send it.
+ v8::String::Value val(Utils::ToLocal(message_string));
+ Vector<uint16_t> message(reinterpret_cast<uint16_t*>(*val),
+ message_string->length());
+ SendMessage(message);
+
+ // Check whether one of the commands is a plain break request.
+ if (!plain_break) {
+ plain_break = Debugger::IsPlainBreakRequest(message_string);
+ }
+ }
+ }
+
+ // If this break event is not to go to the debugger just return.
+ if (!plain_break) return;
+
+  // Notify the debugger that a debug event has occurred.
host_running_ = false;
- event_ = event;
SetEventJSONFromEvent(event_data);
- debug_event_->Signal();
- // Wait for commands from the debug session.
+ // Wait for commands from the debugger.
while (true) {
- debug_command_->Wait();
+ command_received_->Wait();
+ if (FLAG_log_debugger) {
+ Logger::StringEvent(
+ "Get command from command queue, in interactive loop.",
+ "");
+ }
+ Vector<uint16_t> command = command_queue_.Get();
ASSERT(!host_running_);
if (!Debugger::debugger_active()) {
host_running_ = true;
}
// Invoke the JavaScript to convert the debug command line to a JSON
- // request, invoke the JSON request and convert the JSON respose to a text
+ // request, invoke the JSON request and convert the JSON response to a text
// representation.
v8::Local<v8::String> fun_name;
v8::Local<v8::Function> fun;
v8::TryCatch try_catch;
fun_name = v8::String::New("processDebugCommand");
fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
- args[0] = v8::String::New(reinterpret_cast<uint16_t*>(command_.start()),
- command_.length());
+ args[0] = v8::String::New(reinterpret_cast<uint16_t*>(command.start()),
+ command.length());
v8::Local<v8::Value> result_val = fun->Call(cmd_processor, 1, args);
// Get the result of the command.
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
result_string->Length());
- // Change the prompt if VM is running after this command.
- if (running) {
- host_running_ = true;
- }
+ // Set host_running_ correctly for nested debugger evaluations.
+ host_running_ = running;
// Return the result.
- CommandResult(str);
+ SendMessage(str);
  // Return from debug event processing if VM should be running.
if (running) {
}
-void DebugMessageThread::HandleCommand() {
- // Handle the command.
- if (TwoByteEqualsAscii(command_, "b") ||
- TwoByteEqualsAscii(command_, "break")) {
+// Puts a command coming from the public API on the queue. Creates
+// a copy of the command string managed by the debugger. Up to this
+// point, the command data was managed by the API client. Called
+// by the API client thread. This is where the API client hands off
+// processing of the command to the DebugMessageThread thread.
+// Once queued, the copy is owned by the DebugMessageThread.
+void DebugMessageThread::ProcessCommand(Vector<uint16_t> command) {
+ Vector<uint16_t> command_copy = command.Clone();
+ if (FLAG_log_debugger) {
+ Logger::StringEvent("Put command on command_queue.", "");
+ }
+ command_queue_.Put(command_copy);
+  // If not in a break, schedule a break and send the "request queued"
+  // response.
+ if (host_running_) {
v8::Debug::DebugBreak();
- SetResult("request queued");
- } else if (host_running_) {
- // Send the JSON command to the running VM.
- Debugger::DebugRequest(command_.start(), command_.length());
- SetResult("request queued");
- } else {
- debug_command_->Signal();
- debug_result_->Wait();
+ uint16_t buffer[14] = {'r', 'e', 'q', 'u', 'e', 's', 't', ' ',
+ 'q', 'u', 'e', 'u', 'e', 'd'};
+ SendMessage(Vector<uint16_t>(buffer, 14));
}
-}
-
-
-void DebugMessageThread::ProcessCommand(Vector<uint16_t> command) {
- SetCommand(command);
command_received_->Signal();
}
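ProcessCommand above accepts a plain ASCII "break" in place of a full JSON break request (compared via TwoByteEqualsAscii) and answers with a hand-built two-byte "request queued" buffer. The following standalone sketch only illustrates those two small operations; the helper shapes are assumptions, and the real helpers in debug.cc may differ.

// Illustrative sketch: a plausible ASCII-vs-two-byte compare and a widening
// helper for replies such as "request queued". Not the debug.cc code.
#include <cstdint>
#include <cstring>
#include <vector>

static bool TwoByteEqualsAsciiSketch(const uint16_t* two_byte, int length,
                                     const char* ascii) {
  if (length != static_cast<int>(std::strlen(ascii))) return false;
  for (int i = 0; i < length; i++) {
    // ASCII characters map 1:1 onto the low byte of a UTF-16 code unit.
    if (two_byte[i] != static_cast<uint16_t>(ascii[i])) return false;
  }
  return true;
}

static std::vector<uint16_t> WidenAscii(const char* ascii) {
  std::vector<uint16_t> result(std::strlen(ascii));
  for (size_t i = 0; i < result.size(); i++) {
    result[i] = static_cast<uint16_t>(ascii[i]);
  }
  return result;
}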
void DebugMessageThread::OnDebuggerInactive() {
+  // If V8 is stopped in a break, queue an empty command so that JavaScript
+  // runs again when the debugger is closed.
if (!host_running_) {
- debug_command_->Signal();
- SetResult("");
+ ProcessCommand(Vector<uint16_t>::empty());
+ }
+}
+
+
+MessageQueue::MessageQueue(int size) : start_(0), end_(0), size_(size) {
+ messages_ = NewArray<Vector<uint16_t> >(size);
+}
+
+
+MessageQueue::~MessageQueue() {
+ DeleteArray(messages_);
+}
+
+
+Vector<uint16_t> MessageQueue::Get() {
+ ASSERT(!IsEmpty());
+ int result = start_;
+ start_ = (start_ + 1) % size_;
+ return messages_[result];
+}
+
+
+void MessageQueue::Put(const Vector<uint16_t>& message) {
+ if ((end_ + 1) % size_ == start_) {
+ Expand();
+ }
+ messages_[end_] = message;
+ end_ = (end_ + 1) % size_;
+}
+
+
+void MessageQueue::Expand() {
+ MessageQueue new_queue(size_ * 2);
+ while (!IsEmpty()) {
+ new_queue.Put(Get());
}
+ Vector<uint16_t>* array_to_free = messages_;
+ *this = new_queue;
+ new_queue.messages_ = array_to_free;
+ // Automatic destructor called on new_queue, freeing array_to_free.
+}
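MessageQueue uses the classic ring-buffer convention: the buffer counts as full when (end_ + 1) % size_ == start_, so one slot always stays free and a buffer of size N holds at most N - 1 messages; Put doubles the backing array when that condition is hit. The self-contained sketch below (not V8 code) shows the same convention with plain ints instead of Vector<uint16_t>.

// Ring buffer with one slot kept free and grow-by-doubling, as above.
#include <cstdio>
#include <vector>

class RingQueue {
 public:
  explicit RingQueue(int size) : buffer_(size), start_(0), end_(0) {}

  bool IsEmpty() const { return start_ == end_; }

  int Get() {
    int value = buffer_[start_];
    start_ = (start_ + 1) % static_cast<int>(buffer_.size());
    return value;
  }

  void Put(int value) {
    int size = static_cast<int>(buffer_.size());
    if ((end_ + 1) % size == start_) Expand();  // keep one slot free
    buffer_[end_] = value;
    end_ = (end_ + 1) % static_cast<int>(buffer_.size());
  }

 private:
  // Doubles the capacity, copying the elements into a fresh, linear buffer.
  void Expand() {
    std::vector<int> bigger(buffer_.size() * 2);
    int count = 0;
    while (!IsEmpty()) bigger[count++] = Get();
    buffer_.swap(bigger);
    start_ = 0;
    end_ = count;
  }

  std::vector<int> buffer_;
  int start_;
  int end_;
};

int main() {
  RingQueue q(4);                   // holds at most 3 elements before growing
  for (int i = 0; i < 10; i++) q.Put(i);
  while (!q.IsEmpty()) std::printf("%d ", q.Get());
  std::printf("\n");                // prints 0 1 2 3 4 5 6 7 8 9
  return 0;
}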
+
+
+LockingMessageQueue::LockingMessageQueue(int size) : queue_(size) {
+ lock_ = OS::CreateMutex();
+}
+
+
+LockingMessageQueue::~LockingMessageQueue() {
+ delete lock_;
+}
+
+
+bool LockingMessageQueue::IsEmpty() const {
+ ScopedLock sl(lock_);
+ return queue_.IsEmpty();
}
+
+Vector<uint16_t> LockingMessageQueue::Get() {
+ ScopedLock sl(lock_);
+ Vector<uint16_t> result = queue_.Get();
+  // Logging code for debugging the debugger.
+ if (FLAG_log_debugger) {
+ LogQueueOperation("Get", result);
+ }
+
+ return result;
+}
+
+
+void LockingMessageQueue::Put(const Vector<uint16_t>& message) {
+ ScopedLock sl(lock_);
+ queue_.Put(message);
+  // Logging code for debugging the debugger.
+ if (FLAG_log_debugger) {
+ LogQueueOperation("Put", message);
+ }
+}
+
+
+void LockingMessageQueue::Clear() {
+ ScopedLock sl(lock_);
+ queue_.Clear();
+}
+
+
+void LockingMessageQueue::LogQueueOperation(const char* operation_name,
+ Vector<uint16_t> parameter) {
+  StringBuilder s(23 + parameter.length() + strlen(operation_name) + 1);
+  s.AddFormatted("Time: %15.3f %s ", OS::TimeCurrentMillis(), operation_name);
+ for (int i = 0; i < parameter.length(); ++i) {
+ s.AddCharacter(static_cast<char>(parameter[i]));
+ }
+ char* result_string = s.Finalize();
+ Logger::StringEvent(result_string, "");
+ DeleteArray(result_string);
+}
+
+
} } // namespace v8::internal
return reinterpret_cast<Address *>(®isters_[r]);
}
- // Addres of the debug break return entry code.
+ // Address of the debug break return entry code.
static Code* debug_break_return_entry() { return debug_break_return_entry_; }
// Support for getting the address of the debug break on return code.
};
-class PendingRequest;
class DebugMessageThread;
-
class Debugger {
public:
static void DebugRequest(const uint16_t* json_request, int length);
- static bool ProcessPendingRequests();
static Handle<Object> MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
static void OnAfterCompile(Handle<Script> script,
Handle<JSFunction> fun);
static void OnNewFunction(Handle<JSFunction> fun);
- static void OnPendingRequestProcessed(Handle<Object> event_data);
static void ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data);
static void SetMessageHandler(v8::DebugMessageHandler handler, void* data);
static DebugMessageThread* message_thread_;
static v8::DebugMessageHandler debug_message_handler_;
static void* debug_message_handler_data_;
-
- // Head and tail of linked list of pending commands. The list is protected
- // by a mutex as it can be updated/read from different threads.
- static Mutex* pending_requests_access_;
- static PendingRequest* pending_requests_head_;
- static PendingRequest* pending_requests_tail_;
};
-// Linked list of pending requests issued by debugger while V8 was running.
-class PendingRequest {
+// A queue of Vector<uint16_t> objects. A thread-safe version,
+// LockingMessageQueue, is built on this class.
+class MessageQueue BASE_EMBEDDED {
public:
- PendingRequest(const uint16_t* json_request, int length);
- ~PendingRequest();
+ explicit MessageQueue(int size);
+ ~MessageQueue();
+ bool IsEmpty() const { return start_ == end_; }
+ Vector<uint16_t> Get();
+ void Put(const Vector<uint16_t>& message);
+ void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
+ private:
+ // Doubles the size of the message queue, and copies the messages.
+ void Expand();
+
+ Vector<uint16_t>* messages_;
+ int start_;
+ int end_;
+ int size_; // The size of the queue buffer. Queue can hold size-1 messages.
+};
- PendingRequest* next() { return next_; }
- void set_next(PendingRequest* next) { next_ = next; }
- Handle<String> request();
+// LockingMessageQueue is a thread-safe circular buffer of Vector<uint16_t>
+// messages. The message data is not managed by LockingMessageQueue.
+// Pointers to the data are passed in and out. Implemented by adding a
+// Mutex to MessageQueue.
+class LockingMessageQueue BASE_EMBEDDED {
+ public:
+ explicit LockingMessageQueue(int size);
+ ~LockingMessageQueue();
+ bool IsEmpty() const;
+ Vector<uint16_t> Get();
+ void Put(const Vector<uint16_t>& message);
+ void Clear();
private:
- Vector<uint16_t> json_request_; // Request string.
- PendingRequest* next_; // Next pointer for linked list.
+  // Logs a timestamp, operation name, and operation argument.
+ void LogQueueOperation(const char* operation_name,
+ Vector<uint16_t> parameter);
+ MessageQueue queue_;
+ Mutex* lock_;
+ DISALLOW_EVIL_CONSTRUCTORS(LockingMessageQueue);
};
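As the comment above says, LockingMessageQueue is just MessageQueue plus a Mutex: every public operation takes the lock through a ScopedLock and delegates. A minimal sketch of that delegation pattern, using std::mutex and std::lock_guard as stand-ins for V8's Mutex and ScopedLock.

// Thread-safe wrapper that only locks and delegates to an unsynchronized queue.
#include <mutex>
#include <queue>

template <typename T>
class LockingQueue {
 public:
  bool IsEmpty() const {
    std::lock_guard<std::mutex> guard(lock_);  // plays the role of ScopedLock
    return queue_.empty();
  }
  void Put(const T& value) {
    std::lock_guard<std::mutex> guard(lock_);
    queue_.push(value);
  }
  T Get() {
    std::lock_guard<std::mutex> guard(lock_);
    T value = queue_.front();
    queue_.pop();
    return value;
  }

 private:
  mutable std::mutex lock_;  // mutable so IsEmpty() can stay const
  std::queue<T> queue_;
};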
+/* This class encapsulates the thread that serializes event messages
+ * and command processing for the debugger.
+ * Methods without an explicit caller note are called only from this
+ * message thread.
+ */
class DebugMessageThread: public Thread {
public:
- DebugMessageThread();
- virtual ~DebugMessageThread();
-
+ DebugMessageThread(); // Called from API thread.
+ virtual ~DebugMessageThread(); // Never called.
+  // Called by the V8 thread. Reports events from the V8 VM and also handles
+  // command processing while V8 is stopped (host_running_ is false).
void DebugEvent(v8::DebugEvent,
Handle<Object> exec_state,
Handle<Object> event_data);
- void SetEventJSON(Vector<uint16_t> event_json);
+  // Puts an event on the output queue. Called by V8. This is where V8 hands
+  // off processing of the event to the DebugMessageThread, which forwards it
+  // to the debug_message_handler set by the API.
+ void SendMessage(Vector<uint16_t> event_json);
+ // Formats an event into JSON, and calls SendMessage.
void SetEventJSONFromEvent(Handle<Object> event_data);
- void SetCommand(Vector<uint16_t> command);
- void SetResult(const char* result);
- void SetResult(Vector<uint16_t> result);
- void CommandResult(Vector<uint16_t> result);
-
+  // Puts a command coming from the public API on the queue. Called by the
+  // API client thread. This is where the API client hands off processing of
+  // the command to the DebugMessageThread.
void ProcessCommand(Vector<uint16_t> command);
-
void OnDebuggerInactive();
- protected:
+  // Main function of the DebugMessageThread.
void Run();
- void HandleCommand();
-
- bool host_running_; // Is the debugging host running or stopped
- v8::DebugEvent event_; // Active event
- Semaphore* command_received_; // Signal from the telnet connection
- Semaphore* debug_event_; // Signal from the V8 thread
- Semaphore* debug_command_; // Signal to the V8 thread
- Semaphore* debug_result_; // Signal from the V8 thread
+ bool host_running_; // Is the debugging host running or stopped?
+ Semaphore* command_received_; // Non-zero when command queue is non-empty.
+ Semaphore* message_received_; // Exactly equal to message queue length.
private:
- void SetVector(Vector<uint16_t>* vector, Vector<uint16_t> value);
bool TwoByteEqualsAscii(Vector<uint16_t> two_byte, const char* ascii);
- Vector<uint16_t> event_json_; // Active event JSON.
- Vector<uint16_t> command_; // Current command.
- Vector<uint16_t> result_; // Result of processing command.
+ static const int kQueueInitialSize = 4;
+ LockingMessageQueue command_queue_;
+ LockingMessageQueue message_queue_;
DISALLOW_EVIL_CONSTRUCTORS(DebugMessageThread);
};
int rm = instr->RmField();
PrintRegister(rm);
- if ((shift != LSL) || (shift_amount != 0)) {
- if (instr->RegShiftField() == 0) {
- // by immediate
- if ((shift == ROR) && (shift_amount == 0)) {
- Print(", RRX");
- return;
- } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
- shift_amount = 32;
- }
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- out_buffer_size_ - out_buffer_pos_,
- ", %s #%d",
- shift_names[shift], shift_amount);
- } else {
- // by register
- int rs = instr->RsField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- out_buffer_size_ - out_buffer_pos_,
- ", %s ", shift_names[shift]);
- PrintRegister(rs);
+
+ if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
+ // Special case for using rm only.
+ return;
+ }
+ if (instr->RegShiftField() == 0) {
+ // by immediate
+ if ((shift == ROR) && (shift_amount == 0)) {
+ Print(", RRX");
+ return;
+ } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+ shift_amount = 32;
}
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ out_buffer_size_ - out_buffer_pos_,
+ ", %s #%d",
+ shift_names[shift], shift_amount);
+ } else {
+ // by register
+ int rs = instr->RsField();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ out_buffer_size_ - out_buffer_pos_,
+ ", %s ", shift_names[shift]);
+ PrintRegister(rs);
}
}
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ out_buffer_size_ - out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
if (instr->ConditionField() == special_condition) {
Format(instr, "break 'msg");
return Instr::kInstrSize;
}
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+ int instruction_bits = *(reinterpret_cast<int*>(instruction));
+ if ((instruction_bits & 0xfff00000) == 0x03000000) {
+ return instruction_bits & 0x0000ffff;
+ } else {
+ return -1;
+ }
+}
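ConstantPoolSizeAt treats a word whose top twelve bits are 0x030 as a constant pool marker and reads the number of pool entries from its low sixteen bits. A small standalone example of that decoding follows; it is not part of the patch and the sample values are made up.

// Decoding a constant pool marker word the same way ConstantPoolSizeAt does.
#include <cstdint>
#include <cstdio>

static int ConstantPoolSize(uint32_t instruction_bits) {
  if ((instruction_bits & 0xfff00000) == 0x03000000) {
    return static_cast<int>(instruction_bits & 0x0000ffff);
  }
  return -1;  // not a constant pool marker
}

int main() {
  std::printf("%d\n", ConstantPoolSize(0x03000005));  // marker word: 5 entries
  std::printf("%d\n", ConstantPoolSize(0xe1a00000));  // ordinary instruction: -1
  return 0;
}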
+
+
void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
Disassembler d;
for (byte* pc = begin; pc < end;) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, sizeof buffer, pc);
- fprintf(f, "%p", prev_pc);
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer);
+ fprintf(f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
}
}
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
+
+#include "v8.h"
#include "disasm.h"
namespace disasm {
-// Windows is missing the stdint.h header file
-#ifdef WIN32
-typedef signed char int8_t;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef int int32_t;
-#endif
-
-
-#define UNIMPLEMENTED() \
- assert(false)
-
-#define UNREACHABLE() \
- assert(false)
-
enum OperandOrder {
UNSET_OP_ORDER = 0,
REG_OPER_OP_ORDER,
const char* mnem = "?";
switch (regop) {
case eax: mnem = "fild_s"; break;
+ case edx: mnem = "fist_s"; break;
case ebx: mnem = "fistp_s"; break;
default: UnimplementedInstruction();
}
}
AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
return 2;
+ } else if (b1 == 0xDA && b2 == 0xE9) {
+ const char* mnem = "fucompp";
+ AppendToBuffer("%s", mnem);
+ return 2;
}
AppendToBuffer("Unknown FP instruction");
return 2;
break;
case 0xD9: // fall through
+ case 0xDA: // fall through
case 0xDB: // fall through
case 0xDC: // fall through
case 0xDD: // fall through
}
int instr_len = data - instr;
- if (instr_len == 0) instr_len = 1; // parse at least a byte
-#ifdef WIN32
- _snprintf(out_buffer, out_buffer_size, "%s", tmp_buffer_);
-#else
- snprintf(out_buffer, out_buffer_size, "%s", tmp_buffer_);
-#endif
+ ASSERT(instr_len > 0); // Ensure progress.
+
+ int outp = 0;
+ // Instruction bytes.
+ for (byte* bp = instr; bp < data; bp++) {
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+ out_buffer_size - outp,
+ "%02x",
+ *bp);
+ }
+ for (int i = 6 - instr_len; i >= 0; i--) {
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+ out_buffer_size - outp,
+ " ");
+ }
+
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+ out_buffer_size - outp,
+ " %s",
+ tmp_buffer_);
return instr_len;
}
}
+// The IA-32 assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+
/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
Disassembler d;
for (byte* pc = begin; pc < end;) {
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(char* buffer, const int buffer_size, byte* instruction);
+ // Returns -1 if instruction does not mark the beginning of a constant pool,
+ // or the number of entries in the constant pool beginning here.
+ int ConstantPoolSizeAt(byte* instruction);
+
// Write disassembly into specified file 'f' using specified NameConverter
// (see constructor).
static void Disassemble(FILE* f, byte* begin, byte* end);
+++ /dev/null
-// Copyright 2006-2008 Google Inc. All Rights Reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "debug.h"
-#include "disasm.h"
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-#include "string-stream.h"
-
-namespace v8 { namespace internal {
-
-#ifdef ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
- for (byte* pc = begin; pc < end; pc++) {
- if (f == NULL) {
- PrintF("%p %4d %02x\n", pc, pc - begin, *pc);
- } else {
- fprintf(f, "%p %4d %02x\n", pc, pc - begin, *pc);
- }
- }
-}
-
-
-class V8NameConverter: public disasm::NameConverter {
- public:
- explicit V8NameConverter(Code* code) : code_(code) {}
- virtual const char* NameOfAddress(byte* pc) const;
- virtual const char* NameInCode(byte* addr) const;
- Code* code() const { return code_; }
- private:
- Code* code_;
-};
-
-
-const char* V8NameConverter::NameOfAddress(byte* pc) const {
- static char buffer[128];
-
- const char* name = Builtins::Lookup(pc);
- if (name != NULL) {
- OS::SNPrintF(buffer, sizeof buffer, "%s (%p)", name, pc);
- return buffer;
- }
-
- if (code_ != NULL) {
- int offs = pc - code_->instruction_start();
- // print as code offset, if it seems reasonable
- if (0 <= offs && offs < code_->instruction_size()) {
- OS::SNPrintF(buffer, sizeof buffer, "%d (%p)", offs, pc);
- return buffer;
- }
- }
-
- return disasm::NameConverter::NameOfAddress(pc);
-}
-
-
-const char* V8NameConverter::NameInCode(byte* addr) const {
- // If the V8NameConverter is used for well known code, so we can "safely"
- // dereference pointers in generated code.
- return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
-}
-
-
-static void DumpBuffer(FILE* f, char* buff) {
- if (f == NULL) {
- PrintF("%s", buff);
- } else {
- fprintf(f, "%s", buff);
- }
-}
-
-static const int kOutBufferSize = 1024;
-static const int kRelocInfoPosition = 57;
-
-static int DecodeIt(FILE* f,
- const V8NameConverter& converter,
- byte* begin,
- byte* end) {
- ExternalReferenceEncoder ref_encoder;
- char decode_buffer[128];
- char out_buffer[kOutBufferSize];
- const int sob = sizeof out_buffer;
- byte* pc = begin;
- disasm::Disassembler d(converter);
- RelocIterator* it = NULL;
- if (converter.code() != NULL) {
- it = new RelocIterator(converter.code());
- } else {
- // No relocation information when printing code stubs.
- }
- int constants = -1; // no constants being decoded at the start
-
- while (pc < end) {
- // First decode instruction so that we know its length.
- byte* prev_pc = pc;
- if (constants > 0) {
- OS::SNPrintF(decode_buffer, sizeof(decode_buffer), "%s", "constant");
- constants--;
- pc += 4;
- } else {
- int instruction_bits = *(reinterpret_cast<int*>(pc));
- if ((instruction_bits & 0xfff00000) == 0x03000000) {
- OS::SNPrintF(decode_buffer, sizeof(decode_buffer),
- "%s", "constant pool begin");
- constants = instruction_bits & 0x0000ffff;
- pc += 4;
- } else {
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);
- }
- }
-
- // Collect RelocInfo for this instruction (prev_pc .. pc-1)
- List<const char*> comments(4);
- List<byte*> pcs(1);
- List<RelocMode> rmodes(1);
- List<intptr_t> datas(1);
- if (it != NULL) {
- while (!it->done() && it->rinfo()->pc() < pc) {
- if (is_comment(it->rinfo()->rmode())) {
- // For comments just collect the text.
- comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
- } else {
- // For other reloc info collect all data.
- pcs.Add(it->rinfo()->pc());
- rmodes.Add(it->rinfo()->rmode());
- datas.Add(it->rinfo()->data());
- }
- it->next();
- }
- }
-
- int outp = 0; // pointer into out_buffer, implements append operation.
-
- // Comments.
- for (int i = 0; i < comments.length(); i++) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " %s\n", comments[i]);
- }
-
- // Write out comments, resets outp so that we can format the next line.
- if (outp > 0) {
- DumpBuffer(f, out_buffer);
- outp = 0;
- }
-
- // Instruction address and instruction offset.
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- "%p %4d ", prev_pc, prev_pc - begin);
-
- // Instruction bytes.
- ASSERT(pc - prev_pc == 4);
- outp += OS::SNPrintF(out_buffer + outp,
- sob - outp,
- "%08x",
- *reinterpret_cast<intptr_t*>(prev_pc));
-
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
- }
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " %s", decode_buffer);
-
- // Print all the reloc info for this instruction which are not comments.
- for (int i = 0; i < pcs.length(); i++) {
- // Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
-
- // Indent the printing of the reloc info.
- if (i == 0) {
- // The first reloc info is printed after the disassembled instruction.
- for (int p = outp; p < kRelocInfoPosition; p++) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
- }
- } else {
- // Additional reloc infos are printed on separate lines.
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
- for (int p = 0; p < kRelocInfoPosition; p++) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
- }
- }
-
- if (is_position(relocinfo.rmode())) {
- outp += OS::SNPrintF(out_buffer + outp,
- sob - outp,
- " ;; debug: statement %d",
- relocinfo.data());
- } else if (relocinfo.rmode() == embedded_object) {
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- relocinfo.target_object()->ShortPrint(&accumulator);
- SmartPointer<char> obj_name = accumulator.ToCString();
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " ;; object: %s",
- *obj_name);
- } else if (relocinfo.rmode() == external_reference) {
- const char* reference_name =
- ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " ;; external reference (%s)",
- reference_name);
- } else if (relocinfo.rmode() == code_target) {
- outp +=
- OS::SNPrintF(out_buffer + outp, sob - outp,
- " ;; code target (%s)",
- converter.NameOfAddress(relocinfo.target_address()));
- } else {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " ;; %s%s",
-#if defined(DEBUG)
- RelocInfo::RelocModeName(relocinfo.rmode()),
-#else
- "reloc_info",
-#endif
- "");
- }
- }
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
-
- if (outp > 0) {
- ASSERT(outp < kOutBufferSize);
- DumpBuffer(f, out_buffer);
- outp = 0;
- }
- }
-
- delete it;
- return pc - begin;
-}
-
-
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
- V8NameConverter defaultConverter(NULL);
- return DecodeIt(f, defaultConverter, begin, end);
-}
-
-
-void Disassembler::Decode(FILE* f, Code* code) {
- byte* begin = Code::cast(code)->instruction_start();
- byte* end = begin + Code::cast(code)->instruction_size();
- V8NameConverter v8NameConverter(code);
- DecodeIt(f, v8NameConverter, begin, end);
-}
-
-#else // ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
-void Disassembler::Decode(FILE* f, Code* code) {}
-
-#endif // ENABLE_DISASSEMBLER
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2006-2008 Google Inc. All Rights Reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "debug.h"
-#include "disasm.h"
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-#include "string-stream.h"
-
-namespace v8 { namespace internal {
-
-#ifdef ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
- for (byte* pc = begin; pc < end; pc++) {
- if (f == NULL) {
- PrintF("%p %4d %02x\n", pc, pc - begin, *pc);
- } else {
- fprintf(f, "%p %4d %02x\n", pc, pc - begin, *pc);
- }
- }
-}
-
-
-class V8NameConverter: public disasm::NameConverter {
- public:
- explicit V8NameConverter(Code* code) : code_(code) {}
- virtual const char* NameOfAddress(byte* pc) const;
- Code* code() const { return code_; }
- private:
- Code* code_;
-};
-
-
-const char* V8NameConverter::NameOfAddress(byte* pc) const {
- static char buffer[128];
-
- const char* name = Builtins::Lookup(pc);
- if (name != NULL) {
- OS::SNPrintF(buffer, sizeof buffer, "%s (%p)", name, pc);
- return buffer;
- }
-
- if (code_ != NULL) {
- int offs = pc - code_->instruction_start();
- // print as code offset, if it seems reasonable
- if (0 <= offs && offs < code_->instruction_size()) {
- OS::SNPrintF(buffer, sizeof buffer, "%d (%p)", offs, pc);
- return buffer;
- }
- }
-
- return disasm::NameConverter::NameOfAddress(pc);
-}
-
-
-static void DumpBuffer(FILE* f, char* buff) {
- if (f == NULL) PrintF("%s", buff);
- else fprintf(f, "%s", buff);
-}
-
-static const int kOutBufferSize = 1024;
-static const int kRelocInfoPosition = 57;
-
-static int DecodeIt(FILE* f,
- const V8NameConverter& converter,
- byte* begin,
- byte* end) {
- NoHandleAllocation ha;
- AssertNoAllocation no_alloc;
- ExternalReferenceEncoder ref_encoder;
-
- char decode_buffer[128];
- char out_buffer[kOutBufferSize];
- const int sob = sizeof out_buffer;
- byte* pc = begin;
- disasm::Disassembler d(converter);
- RelocIterator* it = NULL;
- if (converter.code() != NULL) {
- it = new RelocIterator(converter.code());
- } else {
- // No relocation information when printing code stubs.
- }
-
- while (pc < end) {
- // First decode instruction so that we know its length.
- byte* prev_pc = pc;
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);
-
- // Collect RelocInfo for this instruction (prev_pc .. pc-1)
- List<const char*> comments(4);
- List<byte*> pcs(1);
- List<RelocMode> rmodes(1);
- List<intptr_t> datas(1);
- if (it != NULL) {
- while (!it->done() && it->rinfo()->pc() < pc) {
- if (is_comment(it->rinfo()->rmode())) {
- // For comments just collect the text.
- comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
- } else {
- // For other reloc info collect all data.
- pcs.Add(it->rinfo()->pc());
- rmodes.Add(it->rinfo()->rmode());
- datas.Add(it->rinfo()->data());
- }
- it->next();
- }
- }
-
- int outp = 0; // pointer into out_buffer, implements append operation.
-
- // Comments.
- for (int i = 0; i < comments.length(); i++) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " %s\n", comments[i]);
- }
-
- // Write out comments, resets outp so that we can format the next line.
- if (outp > 0) {
- DumpBuffer(f, out_buffer);
- outp = 0;
- }
-
- // Instruction address and instruction offset.
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- "%p %4d ", prev_pc, prev_pc - begin);
-
- // Instruction bytes.
- for (byte* bp = prev_pc; bp < pc; bp++) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, "%02x", *bp);
- }
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
- }
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " %s", decode_buffer);
-
- // Print all the reloc info for this instruction which are not comments.
- for (int i = 0; i < pcs.length(); i++) {
- // Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
-
- // Indent the printing of the reloc info.
- if (i == 0) {
- // The first reloc info is printed after the disassembled instruction.
- for (int p = outp; p < kRelocInfoPosition; p++) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
- }
- } else {
- // Additional reloc infos are printed on separate lines.
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
- for (int p = 0; p < kRelocInfoPosition; p++) {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
- }
- }
-
- if (is_position(relocinfo.rmode())) {
- outp += OS::SNPrintF(out_buffer + outp,
- sob - outp,
- " ;; debug: statement %d",
- relocinfo.data());
- } else if (relocinfo.rmode() == embedded_object) {
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- relocinfo.target_object()->ShortPrint(&accumulator);
- SmartPointer<char> obj_name = accumulator.ToCString();
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " ;; object: %s",
- *obj_name);
- } else if (relocinfo.rmode() == external_reference) {
- const char* reference_name =
- ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " ;; external reference (%s)",
- reference_name);
- } else {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " ;; %s",
- RelocInfo::RelocModeName(relocinfo.rmode()));
- if (is_code_target(relocinfo.rmode())) {
- Code* code = Debug::GetCodeTarget(relocinfo.target_address());
- Code::Kind kind = code->kind();
- if (kind == Code::STUB) {
- // Reverse lookup required as the minor key cannot be retrieved
- // from the code object.
- Object* obj = Heap::code_stubs()->SlowReverseLookup(code);
- if (obj != Heap::undefined_value()) {
- ASSERT(obj->IsSmi());
- // Get the STUB key and extract major and minor key.
- uint32_t key = Smi::cast(obj)->value();
- CodeStub::Major major_key = code->major_key();
- uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " (%s, %s, ",
- Code::Kind2String(kind),
- CodeStub::MajorName(code->major_key()));
- switch (code->major_key()) {
- case CodeStub::CallFunction:
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- "argc = %d)",
- minor_key);
- break;
- case CodeStub::Runtime: {
- Runtime::FunctionId id =
- static_cast<Runtime::FunctionId>(minor_key);
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- "%s)",
- Runtime::FunctionForId(id)->name);
- break;
- }
- default:
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- "minor: %d)",
- minor_key);
- }
- }
- } else {
- outp += OS::SNPrintF(out_buffer + outp, sob - outp,
- " (%s)",
- Code::Kind2String(kind));
- }
- }
- }
- }
- outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
-
- if (outp > 0) {
- ASSERT(outp < kOutBufferSize);
- DumpBuffer(f, out_buffer);
- outp = 0;
- }
- }
-
- delete it;
- return pc - begin;
-}
-
-
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
- V8NameConverter defaultConverter(NULL);
- return DecodeIt(f, defaultConverter, begin, end);
-}
-
-
-// Called by Code::CodePrint
-void Disassembler::Decode(FILE* f, Code* code) {
- byte* begin = Code::cast(code)->instruction_start();
- byte* end = begin + Code::cast(code)->instruction_size();
- V8NameConverter v8NameConverter(code);
- DecodeIt(f, v8NameConverter, begin, end);
-}
-#else // ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
-void Disassembler::Decode(FILE* f, Code* code) {}
-
-#endif // ENABLE_DISASSEMBLER
-
-} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "string-stream.h"
+
+namespace v8 { namespace internal {
+
+#ifdef ENABLE_DISASSEMBLER
+
+void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
+ for (byte* pc = begin; pc < end; pc++) {
+ if (f == NULL) {
+ PrintF("%p %4d %02x\n", pc, pc - begin, *pc);
+ } else {
+ fprintf(f, "%p %4d %02x\n", pc, pc - begin, *pc);
+ }
+ }
+}
+
+
+class V8NameConverter: public disasm::NameConverter {
+ public:
+ explicit V8NameConverter(Code* code) : code_(code) {}
+ virtual const char* NameOfAddress(byte* pc) const;
+ virtual const char* NameInCode(byte* addr) const;
+ Code* code() const { return code_; }
+ private:
+ Code* code_;
+};
+
+
+const char* V8NameConverter::NameOfAddress(byte* pc) const {
+ static char buffer[128];
+
+ const char* name = Builtins::Lookup(pc);
+ if (name != NULL) {
+ OS::SNPrintF(buffer, sizeof buffer, "%s (%p)", name, pc);
+ return buffer;
+ }
+
+ if (code_ != NULL) {
+ int offs = pc - code_->instruction_start();
+ // print as code offset, if it seems reasonable
+ if (0 <= offs && offs < code_->instruction_size()) {
+ OS::SNPrintF(buffer, sizeof buffer, "%d (%p)", offs, pc);
+ return buffer;
+ }
+ }
+
+ return disasm::NameConverter::NameOfAddress(pc);
+}
+
+
+const char* V8NameConverter::NameInCode(byte* addr) const {
+ // The V8NameConverter is used for well known code, so we can "safely"
+ // dereference pointers in generated code.
+ return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
+}
+
+
+static void DumpBuffer(FILE* f, char* buff) {
+ if (f == NULL) {
+ PrintF("%s", buff);
+ } else {
+ fprintf(f, "%s", buff);
+ }
+}
+
+static const int kOutBufferSize = 256 + String::kMaxShortPrintLength;
+static const int kRelocInfoPosition = 57;
+
+static int DecodeIt(FILE* f,
+ const V8NameConverter& converter,
+ byte* begin,
+ byte* end) {
+ NoHandleAllocation ha;
+ AssertNoAllocation no_alloc;
+ ExternalReferenceEncoder ref_encoder;
+
+ char decode_buffer[128];
+ char out_buffer[kOutBufferSize];
+ byte* pc = begin;
+ disasm::Disassembler d(converter);
+ RelocIterator* it = NULL;
+ if (converter.code() != NULL) {
+ it = new RelocIterator(converter.code());
+ } else {
+ // No relocation information when printing code stubs.
+ }
+ int constants = -1; // no constants being decoded at the start
+
+ while (pc < end) {
+ // First decode instruction so that we know its length.
+ byte* prev_pc = pc;
+ if (constants > 0) {
+ OS::SNPrintF(decode_buffer,
+ sizeof(decode_buffer),
+ "%08x constant",
+ *reinterpret_cast<int32_t*>(pc));
+ constants--;
+ pc += 4;
+ } else {
+ int num_const = d.ConstantPoolSizeAt(pc);
+ if (num_const >= 0) {
+ OS::SNPrintF(decode_buffer,
+ sizeof(decode_buffer),
+ "%08x constant pool begin",
+ *reinterpret_cast<int32_t*>(pc));
+ constants = num_const;
+ pc += 4;
+ } else {
+ decode_buffer[0] = '\0';
+ pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);
+ }
+ }
+
+ // Collect RelocInfo for this instruction (prev_pc .. pc-1)
+ List<const char*> comments(4);
+ List<byte*> pcs(1);
+ List<RelocMode> rmodes(1);
+ List<intptr_t> datas(1);
+ if (it != NULL) {
+ while (!it->done() && it->rinfo()->pc() < pc) {
+ if (is_comment(it->rinfo()->rmode())) {
+ // For comments just collect the text.
+ comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
+ } else {
+ // For other reloc info collect all data.
+ pcs.Add(it->rinfo()->pc());
+ rmodes.Add(it->rinfo()->rmode());
+ datas.Add(it->rinfo()->data());
+ }
+ it->next();
+ }
+ }
+
+ StringBuilder out(out_buffer, sizeof(out_buffer));
+
+ // Comments.
+ for (int i = 0; i < comments.length(); i++) {
+ out.AddFormatted(" %s\n", comments[i]);
+ }
+
+    // Write out the comments, then reset the builder for the next line.
+ DumpBuffer(f, out.Finalize());
+ out.Reset();
+
+ // Instruction address and instruction offset.
+ out.AddFormatted("%p %4d ", prev_pc, prev_pc - begin);
+
+ // Instruction.
+ out.AddFormatted("%s", decode_buffer);
+
+ // Print all the reloc info for this instruction which are not comments.
+ for (int i = 0; i < pcs.length(); i++) {
+ // Put together the reloc info
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
+
+ // Indent the printing of the reloc info.
+ if (i == 0) {
+ // The first reloc info is printed after the disassembled instruction.
+ out.AddPadding(' ', kRelocInfoPosition - out.position());
+ } else {
+ // Additional reloc infos are printed on separate lines.
+ out.AddFormatted("\n");
+ out.AddPadding(' ', kRelocInfoPosition);
+ }
+
+ if (is_position(relocinfo.rmode())) {
+ out.AddFormatted(" ;; debug: statement %d", relocinfo.data());
+ } else if (relocinfo.rmode() == embedded_object) {
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ relocinfo.target_object()->ShortPrint(&accumulator);
+ SmartPointer<char> obj_name = accumulator.ToCString();
+ out.AddFormatted(" ;; object: %s", *obj_name);
+ } else if (relocinfo.rmode() == external_reference) {
+ const char* reference_name =
+ ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
+ out.AddFormatted(" ;; external reference (%s)", reference_name);
+ } else {
+ out.AddFormatted(" ;; %s",
+ RelocInfo::RelocModeName(relocinfo.rmode()));
+ if (is_code_target(relocinfo.rmode())) {
+ Code* code = Debug::GetCodeTarget(relocinfo.target_address());
+ Code::Kind kind = code->kind();
+ if (kind == Code::STUB) {
+ // Reverse lookup required as the minor key cannot be retrieved
+ // from the code object.
+ Object* obj = Heap::code_stubs()->SlowReverseLookup(code);
+ if (obj != Heap::undefined_value()) {
+ ASSERT(obj->IsSmi());
+ // Get the STUB key and extract major and minor key.
+ uint32_t key = Smi::cast(obj)->value();
+ CodeStub::Major major_key = code->major_key();
+ uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
+ ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
+ out.AddFormatted(" (%s, %s, ",
+ Code::Kind2String(kind),
+ CodeStub::MajorName(code->major_key()));
+ switch (code->major_key()) {
+ case CodeStub::CallFunction:
+ out.AddFormatted("argc = %d)", minor_key);
+ break;
+ case CodeStub::Runtime: {
+ Runtime::FunctionId id =
+ static_cast<Runtime::FunctionId>(minor_key);
+ out.AddFormatted("%s)", Runtime::FunctionForId(id)->name);
+ break;
+ }
+ default:
+ out.AddFormatted("minor: %d)", minor_key);
+ }
+ }
+ } else {
+ out.AddFormatted(" (%s)", Code::Kind2String(kind));
+ }
+ }
+ }
+ }
+ out.AddString("\n");
+ DumpBuffer(f, out.Finalize());
+ out.Reset();
+ }
+
+ delete it;
+ return pc - begin;
+}
+
+
+int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
+ V8NameConverter defaultConverter(NULL);
+ return DecodeIt(f, defaultConverter, begin, end);
+}
+
+
+// Called by Code::CodePrint.
+void Disassembler::Decode(FILE* f, Code* code) {
+ byte* begin = Code::cast(code)->instruction_start();
+ byte* end = begin + Code::cast(code)->instruction_size();
+ V8NameConverter v8NameConverter(code);
+ DecodeIt(f, v8NameConverter, begin, end);
+}
+
+#else // ENABLE_DISASSEMBLER
+
+void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
+int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
+void Disassembler::Decode(FILE* f, Code* code) {}
+
+#endif // ENABLE_DISASSEMBLER
+
+} } // namespace v8::internal
ASSERT(thread_local_.climit_ == kIllegalLimit);
thread_local_.initial_jslimit_ = thread_local_.jslimit_ =
- GENERATED_CODE_STACK_LIMIT(kLimitSize);
+ GENERATED_CODE_STACK_LIMIT(kLimitSize);
+  // NOTE: The check for overflow is not safe as there is no guarantee that
+ // the running thread has its stack in all memory up to address 0x00000000.
thread_local_.initial_climit_ = thread_local_.climit_ =
- reinterpret_cast<uintptr_t>(this) - kLimitSize;
+ reinterpret_cast<uintptr_t>(this) >= kLimitSize ?
+ reinterpret_cast<uintptr_t>(this) - kLimitSize : 0;
if (thread_local_.interrupt_flags_ != 0) {
set_limits(kInterruptLimit, access);
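The climit computation above clamps the result to 0 because the subtraction is done in unsigned arithmetic: for a stack object whose address is below kLimitSize, this - kLimitSize would wrap around to a huge value instead of going negative. A tiny standalone illustration of the wraparound and the clamp (not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kLimitSize = 512 * 1024;       // same 512 KB margin
  uintptr_t low_address = 0x10000;               // below kLimitSize

  uintptr_t wrapped = low_address - kLimitSize;  // wraps to a huge value
  uintptr_t clamped =
      low_address >= kLimitSize ? low_address - kLimitSize : 0;

  std::printf("wrapped: %p\n", reinterpret_cast<void*>(wrapped));
  std::printf("clamped: %p\n", reinterpret_cast<void*>(clamped));
  return 0;
}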
void StackGuard::Interrupt() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= INTERRUPT;
- if (!Top::is_break_no_lock()) {
- set_limits(kInterruptLimit, access);
- }
+ set_limits(kInterruptLimit, access);
}
void StackGuard::Preempt() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= PREEMPT;
- if (!Top::is_break_no_lock()) {
- set_limits(kInterruptLimit, access);
- }
+ set_limits(kInterruptLimit, access);
}
void StackGuard::DebugBreak() {
ExecutionAccess access;
- if (!Top::is_break_no_lock()) {
- thread_local_.interrupt_flags_ |= DEBUGBREAK;
- set_limits(kInterruptLimit, access);
- }
+ thread_local_.interrupt_flags_ |= DEBUGBREAK;
+ set_limits(kInterruptLimit, access);
}
static void EnableInterrupts();
static void DisableInterrupts();
- static const int kLimitSize = 512 * KB;
+ static const uintptr_t kLimitSize = 512 * KB;
static const uintptr_t kInterruptLimit = 0xfffffffe;
static const uintptr_t kIllegalLimit = 0xffffffff;
void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Traverse pointers in the callee-saved registers.
- const int offset = ExitFrameConstants::kSavedRegistersOffset;
- Object** base = &Memory::Object_at(fp() + offset);
- Object** limit = base + kNumJSCalleeSaved;
- v->VisitPointers(base, limit);
-}
-
-
-void ExitFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
- // The callee-saved registers in an exit frame are pointed to by the
- // frame pointer. See the implementations of C entry runtime stubs.
- const int offset = ExitFrameConstants::kSavedRegistersOffset;
- memcpy(buffer, fp() + offset, kNumJSCalleeSaved * kPointerSize);
+ // Do nothing
}
}
-RegList JavaScriptFrame::FindCalleeSavedRegisters() const {
- const unsigned kRegListTag = 1; // pc values have bit 0 cleared (no thumb)
- const unsigned kRegListTagSize = 1;
- const unsigned kRegListTagMask = (1 << kRegListTagSize) - 1;
-
- // The prologue pc (or the cached register list) is available as a
- // slot in the fixed part of the stack frame.
- const int offset = +4 * kPointerSize;
-
- // Once the register list has been calculated for a frame, it is
- // cached in the prologue pc stack slot. Check the cache before
- // doing the more expensive instruction decoding.
- uint32_t cache = Memory::int_at(fp() + offset);
- if ((cache & kRegListTagMask) == kRegListTag) {
- return static_cast<RegList>(cache >> kRegListTagSize);
- }
-
- // If we can't find the register list in the instruction stream, we
- // assume it's the empty list. [NOTE: Is this really a smart thing
- // to do? Don't all JavaScript frames have the instruction?]
- RegList result = 0;
-
- // Compute the address of the stm (store multiple) instruction.
- Address stm_address = AddressFrom<Address>(cache - PcStoreOffset());
- ASSERT((Memory::int32_at(stm_address) & 0xffffcc00) == 0xe92dcc00);
-
- // Fetch the instruction preceeding the stm - if it is also a stm
- // instruction we read the register list from there.
- uint32_t instruction = Memory::int32_at(stm_address - 4);
- if ((instruction & 0xfffffc00) == 0xe92d0000) {
- // The register list shouldn't be empty and must consist only of JS
- // callee-saved registers.
- result = instruction & 0xffff;
- ASSERT(result != 0 && (result & ~kJSCalleeSaved) == 0);
- }
-
- // Cache the result in the prologue pc stack slot before returning
- // it. This way future access to the register list is a bit faster.
- Memory::int_at(fp() + offset) = (result << kRegListTagSize) | kRegListTag;
- return result;
-}
-
-
-void JavaScriptFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
- // The callee-saved registers in java script frames are in the fixed
- // part of the frame below the frame pointer.
- const int n = NumRegs(FindCalleeSavedRegisters());
- const int offset = 5 * kPointerSize;
- memcpy(buffer, fp() + offset, n * kPointerSize);
-}
-
-
Code* JavaScriptFrame::FindCode() const {
const int offset = StandardFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp() + offset);
int JSCallerSavedCode(int n);
-// Callee-saved registers available for variable allocation in JavaScript code
-static const RegList kJSCalleeSaved =
- 1 << 4 | // r4 v1
- 1 << 5 | // r5 v2
- 1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
- kR9Available << 9 ; // r9 v6
-
-static const int kNumJSCalleeSaved = 4 + kR9Available;
-
-
-typedef Object* JSCalleeSavedBuffer[kNumJSCalleeSaved];
-
-
// Callee-saved registers preserved when switching from C to JavaScript
-static const RegList kCalleeSaved = kJSCalleeSaved |
+static const RegList kCalleeSaved =
+ 1 << 4 | // r4 v1
+ 1 << 5 | // r5 v2
+ 1 << 6 | // r6 v3
+ 1 << 7 | // r7 v4
1 << 8 | // r8 v5 (cp in JavaScript code)
+  kR9Available << 9 |  //  r9 v6
1 << 10 | // r10 v7 (pp in JavaScript code)
1 << 11 ; // r11 v8 (fp in JavaScript code)
-static const int kNumCalleeSaved = kNumJSCalleeSaved + 3;
+static const int kNumCalleeSaved = 7 + kR9Available;
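With the separate JS callee-saved list gone, kCalleeSaved now carries all of r4-r11 (r9 only when kR9Available is set), and kNumCalleeSaved is simply the number of bits set in that mask, 7 + kR9Available. Below is a small sanity-check sketch, assuming kR9Available is 0 or 1 as in the V8 sources.

// Sanity check: kNumCalleeSaved should equal the popcount of kCalleeSaved.
#include <cstdio>

static const int kR9Available = 1;  // assumption for this example

static const unsigned kCalleeSaved =
    1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 |  // r4-r7
    1 << 8 |                             // r8
    kR9Available << 9 |                  // r9 (only if available)
    1 << 10 | 1 << 11;                   // r10, r11

static int CountBits(unsigned mask) {
  int count = 0;
  for (; mask != 0; mask &= mask - 1) count++;  // clear the lowest set bit
  return count;
}

int main() {
  std::printf("bits in kCalleeSaved: %d, expected: %d\n",
              CountBits(kCalleeSaved), 7 + kR9Available);
  return 0;
}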
// ----------------------------------------------------
static const int kSavedRegistersOffset = 0 * kPointerSize;
// Let the parameters pointer for exit frames point just below the
- // frame structure on the stack (includes callee saved registers).
- static const int kPPDisplacement = (4 + kNumJSCalleeSaved) * kPointerSize;
-
- // The frame pointer for exit frames points to the JavaScript callee
- // saved registers. The caller fields are below those on the stack.
- static const int kCallerPPOffset = (0 + kNumJSCalleeSaved) * kPointerSize;
- static const int kCallerFPOffset = (1 + kNumJSCalleeSaved) * kPointerSize;
- static const int kCallerPCOffset = (3 + kNumJSCalleeSaved) * kPointerSize;
+ // frame structure on the stack.
+ static const int kPPDisplacement = 4 * kPointerSize;
+
+ // The caller fields are below the frame pointer on the stack.
+ static const int kCallerPPOffset = +0 * kPointerSize;
+ static const int kCallerFPOffset = +1 * kPointerSize;
+ static const int kCallerPCOffset = +3 * kPointerSize;
};
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kArgsLengthOffset = -1 * kPointerSize;
+ // 0 * kPointerSize : StandardFrameConstants::kCallerPPOffset
+ // 1 * kPointerSize : StandardFrameConstants::kCallerFPOffset
static const int kSPOnExitOffset = +2 * kPointerSize;
- static const int kSavedRegistersOffset = +5 * kPointerSize;
+ // 3 * kPointerSize : StandardFrameConstants::kCallerPCOffset
+ static const int kSavedRegistersOffset = +4 * kPointerSize;
// PP-relative.
static const int kParam0Offset = -2 * kPointerSize;
}
-inline Object** StackFrameIterator::register_buffer() const {
- static Object* buffer[kNumJSCalleeSaved];
- return buffer;
-}
-
-
// ----------------------------------------------------
// ----------- +=============+ <--- sp (stack pointer)
// | function |
// +-------------+
+ // +-------------+
// | |
// | expressions |
// | |
// m 2 | sp_on_exit | (pp if return, caller_sp if no return)
// e +-------------+
// 3 | caller_pc |
- // +-------------+
- // 4 | prolog_pc | (used to find list of callee-saved regs)
- // +-------------+
- // 5 | |
- // |callee-saved | (only saved if clobbered by this function,
- // | regs | must be traversed during GC)
- // | |
// +-------------+ <--- caller_sp (incl. parameters)
// | |
// | parameters |
// | parameters | (first 4 args are passed in r0-r3)
// | |
// +-------------+ <--- fp (frame pointer)
- // C 0 | r4 | r4-r7, r9 are potentially holding JS locals
- // +-------------+
- // 1 | r5 | and must be traversed by the GC for proper
- // e +-------------+
- // n 2 | r6 | relocation
- // t +-------------+
- // r 3 | r7 |
- // y +-------------+
- // [ 4 | r9 | ] only if r9 available
- // +-------------+
// f 4/5 | caller_fp |
// r +-------------+
// a 5/6 | sp_on_exit | (pp)
}
-void ExitFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
- // Do nothing.
-}
-
-
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
}
-RegList JavaScriptFrame::FindCalleeSavedRegisters() const {
- return 0;
-}
-
-
-void JavaScriptFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
- // Do nothing.
-}
-
-
Code* JavaScriptFrame::FindCode() const {
JSFunction* function = JSFunction::cast(this->function());
return function->shared()->code();
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-// Callee-saved registers available for variable allocation in JavaScript code
-static const RegList kJSCalleeSaved = 0;
-
-static const int kNumJSCalleeSaved = 0;
-
-
// ----------------------------------------------------
}
-Object** StackFrameIterator::register_buffer() const {
- ASSERT(kNumJSCalleeSaved == 0);
- return NULL;
-}
-
// ----------------------------------------------------
}
-inline Object** StackFrame::top_register_buffer() const {
- return iterator_->register_buffer();
-}
-
-
inline Object* StandardFrame::GetExpression(int index) const {
return Memory::Object_at(GetExpressionAddress(index));
}
#define INITIALIZE_SINGLETON(type, field) field##_(this),
StackFrameIterator::StackFrameIterator()
: STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread(Top::GetCurrentThread()) {
+ frame_(NULL), handler_(NULL), thread_(Top::GetCurrentThread()) {
Reset();
}
StackFrameIterator::StackFrameIterator(ThreadLocalTop* t)
: STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread(t) {
+ frame_(NULL), handler_(NULL), thread_(t) {
Reset();
}
#undef INITIALIZE_SINGLETON
StackFrame::State state;
StackFrame::Type type = frame_->GetCallerState(&state);
- // Restore any callee-saved registers to the register buffer. Avoid
- // the virtual call if the platform doesn't have any callee-saved
- // registers.
- if (kNumJSCalleeSaved > 0) {
- frame_->RestoreCalleeSavedRegisters(register_buffer());
- }
-
// Unwind handlers corresponding to the current frame.
StackHandlerIterator it(frame_, handler_);
while (!it.done()) it.Advance();
void StackFrameIterator::Reset() {
- Address fp = Top::c_entry_fp(thread);
+ Address fp = Top::c_entry_fp(thread_);
StackFrame::State state;
StackFrame::Type type = ExitFrame::GetStateForFramePointer(fp, &state);
frame_ = SingletonFor(type, &state);
- handler_ = StackHandler::FromAddress(Top::handler(thread));
- // Zap the register buffer in debug mode.
- if (kDebug) {
- Object** buffer = register_buffer();
- for (int i = 0; i < kNumJSCalleeSaved; i++) {
- buffer[i] = reinterpret_cast<Object*>(kZapValue);
- }
- }
-}
-
-
-Object** StackFrameIterator::RestoreCalleeSavedForTopHandler(Object** buffer) {
- ASSERT(kNumJSCalleeSaved > 0);
- // Traverse the frames until we find the frame containing the top
- // handler. Such a frame is guaranteed to always exists by the
- // callers of this function.
- for (StackFrameIterator it; true; it.Advance()) {
- StackHandlerIterator handlers(it.frame(), it.handler());
- if (!handlers.done()) {
- memcpy(buffer, it.register_buffer(), kNumJSCalleeSaved * kPointerSize);
- return buffer;
- }
- }
+ handler_ = StackHandler::FromAddress(Top::handler(thread_));
}
}
-RegList ExitFrame::FindCalleeSavedRegisters() const {
- // Exit frames save all - if any - callee-saved registers.
- return kJSCalleeSaved;
-}
-
-
Address StandardFrame::GetExpressionAddress(int n) const {
- ASSERT(0 <= n && n < ComputeExpressionsCount());
- if (kNumJSCalleeSaved > 0 && n < kNumJSCalleeSaved) {
- return reinterpret_cast<Address>(top_register_buffer() + n);
- } else {
- const int offset = StandardFrameConstants::kExpressionsOffset;
- return fp() + offset - (n - kNumJSCalleeSaved) * kPointerSize;
- }
+ const int offset = StandardFrameConstants::kExpressionsOffset;
+ return fp() + offset - n * kPointerSize;
}
Address limit = sp();
ASSERT(base >= limit); // stack grows downwards
// Include register-allocated locals in number of expressions.
- return (base - limit) / kPointerSize + kNumJSCalleeSaved;
+ return (base - limit) / kPointerSize;
}
int JavaScriptFrame::ComputeParametersCount() const {
Address base = pp() + JavaScriptFrameConstants::kReceiverOffset;
Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
- int result = (base - limit) / kPointerSize;
- if (kNumJSCalleeSaved > 0) {
- return result - NumRegs(FindCalleeSavedRegisters());
- } else {
- return result;
- }
+ return (base - limit) / kPointerSize;
}
}
// Print the expression stack.
- int expressions_start = Max(stack_locals_count, kNumJSCalleeSaved);
+ int expressions_start = stack_locals_count;
if (expressions_start < expressions_count) {
accumulator->Add(" // expression stack (top to bottom)\n");
}
}
-int JSCalleeSavedCode(int n) {
- static int reg_code[kNumJSCalleeSaved + 1]; // avoid zero-size array error
- static bool initialized = false;
- if (!initialized) {
- initialized = true;
- int i = 0;
- for (int r = 0; r < kNumRegs; r++)
- if ((kJSCalleeSaved & (1 << r)) != 0)
- reg_code[i++] = r;
-
- ASSERT(i == kNumJSCalleeSaved);
- }
- ASSERT(0 <= n && n < kNumJSCalleeSaved);
- return reg_code[n];
-}
-
-
-RegList JSCalleeSavedList(int n) {
- // avoid zero-size array error
- static RegList reg_list[kNumJSCalleeSaved + 1];
- static bool initialized = false;
- if (!initialized) {
- initialized = true;
- reg_list[0] = 0;
- for (int i = 0; i < kNumJSCalleeSaved; i++)
- reg_list[i+1] = reg_list[i] + (1 << JSCalleeSavedCode(i));
- }
- ASSERT(0 <= n && n <= kNumJSCalleeSaved);
- return reg_list[n];
-}
-
-
} } // namespace v8::internal
// Return the code of the n-th saved register available to JavaScript.
int JSCallerSavedCode(int n);
-int JSCalleeSavedCode(int n);
-
-// Return the list of the first n callee-saved registers available to
-// JavaScript.
-RegList JSCalleeSavedList(int n);
// Forward declarations.
PrintMode mode,
int index);
- // Find callee-saved registers for this frame.
- virtual RegList FindCalleeSavedRegisters() const { return 0; }
-
- // Restore state of callee-saved registers to the provided buffer.
- virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const { }
-
// Get the top handler from the current stack iterator.
inline StackHandler* top_handler() const;
- inline Object** top_register_buffer() const;
// Compute the stack frame type for the given state.
static Type ComputeType(State* state);
virtual Address GetCallerStackPointer() const;
- virtual RegList FindCalleeSavedRegisters() const;
- virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const;
-
private:
virtual Type GetCallerState(State* state) const;
virtual Address GetCallerStackPointer() const;
- // Find the callee-saved registers for this JavaScript frame. This
- // may require traversing the instruction stream and decoding
- // certain instructions.
- virtual RegList FindCalleeSavedRegisters() const;
-
- // Restore callee-saved registers.
- virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const;
-
private:
friend class StackFrameIterator;
};
// Go back to the first frame.
void Reset();
- // Computes the state of the callee-saved registers for the top
- // stack handler structure. Used for restoring register state when
- // unwinding due to thrown exceptions.
- static Object** RestoreCalleeSavedForTopHandler(Object** buffer);
-
private:
#define DECLARE_SINGLETON(ignore, type) type type##_;
STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
#undef DECLARE_SINGLETON
StackFrame* frame_;
StackHandler* handler_;
- ThreadLocalTop* thread;
+ ThreadLocalTop* thread_;
StackHandler* handler() const {
ASSERT(!done());
// Get the type-specific frame singleton in a given state.
StackFrame* SingletonFor(StackFrame::Type type, StackFrame::State* state);
- // The register buffer contains the state of callee-saved registers
- // for the current frame. It is computed as the stack frame
- // iterators advances through stack frames.
- inline Object** register_buffer() const;
-
friend class StackFrame;
DISALLOW_EVIL_CONSTRUCTORS(StackFrameIterator);
};
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;
-// Bits used by the mark-compact collector, PLEASE READ.
-//
-// The first word of a heap object is a map pointer. The last two bits are
-// tagged as '01' (kHeapObjectTag). We reuse the last two bits to mark an
-// object as live and/or overflowed:
-// last bit = 0, marked as alive
-// second bit = 1, overflowed
-// An object is only marked as overflowed when it is marked as live while
-// the marking stack is overflowed.
-
-const int kMarkingBit = 0; // marking bit
-const int kMarkingMask = (1 << kMarkingBit); // marking mask
-const int kOverflowBit = 1; // overflow bit
-const int kOverflowMask = (1 << kOverflowBit); // overflow mask
-
// Zap-value: The value used for zapping dead objects. Should be a recognizable
// illegal heap object pointer.
// Miscellaneous
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
-// consecutive and that NEW_SPACE is the first.
+// consecutive.
enum AllocationSpace {
NEW_SPACE,
OLD_SPACE,
CODE_SPACE,
MAP_SPACE,
LO_SPACE,
+ FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE
};
const int kSpaceTagSize = 3;
}
+Object* Heap::AllocateForDeserialization(int size_in_bytes,
+ AllocationSpace space) {
+ ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ PagedSpace* where;
+
+ switch (space) {
+ case NEW_SPACE:
+ return new_space_->AllocateRaw(size_in_bytes);
+ case LO_SPACE:
+ return lo_space_->AllocateRaw(size_in_bytes);
+ case OLD_SPACE:
+ where = old_space_;
+ break;
+ case CODE_SPACE:
+ where = code_space_;
+ break;
+ case MAP_SPACE:
+ where = map_space_;
+ break;
+ }
+
+ // Only paged spaces fall through.
+ return where->AllocateForDeserialization(size_in_bytes);
+}
+
+
Object* Heap::NumberFromInt32(int32_t value) {
if (Smi::IsValid(value)) return Smi::FromInt(value);
// Bypass NumberFromDouble to avoid various redundant checks.
}
+AllocationSpace Heap::TargetSpace(HeapObject* object) {
+ // Heap numbers and sequential strings are promoted to code space, all
+ // other object types are promoted to old space. We do not use
+ // object->IsHeapNumber() and object->IsSeqString() because we already
+ // know that object has the heap object tag.
+ InstanceType type = object->map()->instance_type();
+ ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
+ bool has_pointers =
+ type != HEAP_NUMBER_TYPE &&
+ (type >= FIRST_NONSTRING_TYPE ||
+ String::cast(object)->representation_tag() != kSeqStringTag);
+ return has_pointers ? OLD_SPACE : CODE_SPACE;
+}
+
+
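// Sketch (annotation, not part of the original patch): the mapping that
// Heap::TargetSpace implements, restated for hypothetical objects of the
// kinds named below.
//
//   Heap::TargetSpace(a_heap_number)       -> CODE_SPACE  (carries no pointers)
//   Heap::TargetSpace(a_sequential_string) -> CODE_SPACE  (carries no pointers)
//   Heap::TargetSpace(a_js_object)         -> OLD_SPACE   (may carry pointers)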
#define GC_GREEDY_CHECK() \
ASSERT(!FLAG_gc_greedy \
|| v8::internal::Heap::disallow_allocation_failure() \
int Heap::promoted_space_limit_ = 0;
int Heap::old_gen_exhausted_ = false;
+int Heap::amount_of_external_allocated_memory_ = 0;
+int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
+
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
int Heap::semispace_size_ = 1*MB;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
-#ifdef DEBUG
-bool Heap::allocation_allowed_ = true;
int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;
+#ifdef DEBUG
+bool Heap::allocation_allowed_ = true;
+
int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif // DEBUG
}
// Is enough data promoted to justify a global GC?
- if (PromotedSpaceSize() > promoted_space_limit_) {
+ if (PromotedSpaceSize() + PromotedExternalMemorySize()
+ > promoted_space_limit_) {
Counters::gc_compactor_caused_by_promoted_data.Increment();
return MARK_COMPACTOR;
}
void Heap::GarbageCollectionPrologue() {
RegExpImpl::NewSpaceCollectionPrologue();
+ gc_count_++;
#ifdef DEBUG
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
allow_allocation(false);
- gc_count_++;
if (FLAG_verify_heap) {
Verify();
}
-// GCTracer collects and prints ONE line after each garbage collector
-// invocation IFF --trace_gc is used.
-
-class GCTracer BASE_EMBEDDED {
- public:
- GCTracer() : start_time_(0.0), start_size_(0.0) {
- if (!FLAG_trace_gc) return;
- start_time_ = OS::TimeCurrentMillis();
- start_size_ = SizeOfHeapObjects();
- }
-
- ~GCTracer() {
- if (!FLAG_trace_gc) return;
- // Printf ONE line iff flag is set.
- PrintF("%s %.1f -> %.1f MB, %d ms.\n",
- CollectorString(),
- start_size_, SizeOfHeapObjects(),
- static_cast<int>(OS::TimeCurrentMillis() - start_time_));
- }
-
- // Sets the collector.
- void set_collector(GarbageCollector collector) {
- collector_ = collector;
- }
-
- private:
-
- // Returns a string matching the collector.
- const char* CollectorString() {
- switch (collector_) {
- case SCAVENGER:
- return "Scavenge";
- case MARK_COMPACTOR:
- return MarkCompactCollector::HasCompacted() ? "Mark-compact"
- : "Mark-sweep";
- }
- return "Unknown GC";
- }
-
- // Returns size of object in heap (in MB).
- double SizeOfHeapObjects() {
- return (static_cast<double>(Heap::SizeOfObjects())) / MB;
- }
-
- double start_time_; // Timestamp set in the constructor.
- double start_size_; // Size of objects in heap set in constructor.
- GarbageCollector collector_; // Type of collector.
-};
-
-
-
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
{ GCTracer tracer;
GarbageCollectionPrologue();
+ // The GC count was incremented in the prologue. Tell the tracer about
+ // it.
+ tracer.set_gc_count(gc_count_);
GarbageCollector collector = SelectGarbageCollector(space);
+ // Tell the tracer which collector we've selected.
tracer.set_collector(collector);
StatsRate* rate = (collector == SCAVENGER)
? &Counters::gc_scavenger
: &Counters::gc_compactor;
rate->Start();
- PerformGarbageCollection(space, collector);
+ PerformGarbageCollection(space, collector, &tracer);
rate->Stop();
GarbageCollectionEpilogue();
}
+void Heap::PerformScavenge() {
+ GCTracer tracer;
+ PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
+}
+
+
void Heap::PerformGarbageCollection(AllocationSpace space,
- GarbageCollector collector) {
+ GarbageCollector collector,
+ GCTracer* tracer) {
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
global_gc_prologue_callback_();
}
if (collector == MARK_COMPACTOR) {
- MarkCompact();
+ MarkCompact(tracer);
int promoted_space_size = PromotedSpaceSize();
promoted_space_limit_ =
// Process weak handles post gc.
GlobalHandles::PostGarbageCollectionProcessing();
+ if (collector == MARK_COMPACTOR) {
+ // Register the amount of external allocated memory.
+ amount_of_external_allocated_memory_at_last_global_gc_ =
+ amount_of_external_allocated_memory_;
+ }
+
if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
ASSERT(!allocation_allowed_);
global_gc_epilogue_callback_();
}
-void Heap::MarkCompact() {
+void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
-#ifdef DEBUG
mc_count_++;
-#endif
+ tracer->set_full_gc_count(mc_count_);
LOG(ResourceEvent("markcompact", "begin"));
MarkCompactPrologue();
- MarkCompactCollector::CollectGarbage();
+ MarkCompactCollector::CollectGarbage(tracer);
MarkCompactEpilogue();
if (obj->IsFailure()) {
obj = lo_space_->FindObject(a);
}
+ ASSERT(!obj->IsFailure());
return obj;
}
*dst++ = *src++;
} while (counter-- > 0);
- // Set forwarding pointers, cannot use Map::cast because it asserts
- // the value type to be Map.
- (*source_p)->set_map(reinterpret_cast<Map*>(target));
+ // Set the forwarding address.
+ (*source_p)->set_map_word(MapWord::FromForwardingAddress(target));
// Update NewSpace stats if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
HeapObject* object = *p;
- // We use the first word (where the map pointer usually is) of a
- // HeapObject to record the forwarding pointer. A forwarding pointer can
+ // We use the first word (where the map pointer usually is) of a heap
+ // object to record the forwarding pointer. A forwarding pointer can
// point to the old space, the code space, or the to space of the new
// generation.
- HeapObject* first_word = object->map();
+ MapWord first_word = object->map_word();
- // If the first word (where the map pointer is) is not a map pointer, the
- // object has already been copied. We do not use first_word->IsMap()
- // because we know that first_word always has the heap object tag.
- if (first_word->map()->instance_type() != MAP_TYPE) {
- *p = first_word;
+ // If the first word is a forwarding address, the object has already been
+ // copied.
+ if (first_word.IsForwardingAddress()) {
+ *p = first_word.ToForwardingAddress();
return;
}
// Optimization: Bypass ConsString objects where the right-hand side is
// Heap::empty_string(). We do not use object->IsConsString because we
// already know that object has the heap object tag.
- InstanceType type = Map::cast(first_word)->instance_type();
+ InstanceType type = first_word.ToMap()->instance_type();
if (type < FIRST_NONSTRING_TYPE &&
String::cast(object)->representation_tag() == kConsStringTag &&
ConsString::cast(object)->second() == Heap::empty_string()) {
// After patching *p we have to repeat the checks that object is in the
// active semispace of the young generation and not already copied.
if (!InFromSpace(object)) return;
- first_word = object->map();
- if (first_word->map()->instance_type() != MAP_TYPE) {
- *p = first_word;
+ first_word = object->map_word();
+ if (first_word.IsForwardingAddress()) {
+ *p = first_word.ToForwardingAddress();
return;
}
- type = Map::cast(first_word)->instance_type();
+ type = first_word.ToMap()->instance_type();
}
- int object_size = object->SizeFromMap(Map::cast(first_word));
+ int object_size = object->SizeFromMap(first_word.ToMap());
Object* result;
// If the object should be promoted, we try to copy it to old space.
if (ShouldBePromoted(object->address(), object_size)) {
- // Heap numbers and sequential strings are promoted to code space, all
- // other object types are promoted to old space. We do not use
- // object->IsHeapNumber() and object->IsSeqString() because we already
- // know that object has the heap object tag.
- bool has_pointers =
- type != HEAP_NUMBER_TYPE &&
- (type >= FIRST_NONSTRING_TYPE ||
- String::cast(object)->representation_tag() != kSeqStringTag);
- if (has_pointers) {
+ AllocationSpace target_space = Heap::TargetSpace(object);
+ if (target_space == OLD_SPACE) {
result = old_space_->AllocateRaw(object_size);
} else {
+ ASSERT(target_space == CODE_SPACE);
result = code_space_->AllocateRaw(object_size);
}
if (!result->IsFailure()) {
*p = MigrateObject(p, HeapObject::cast(result), object_size);
- if (has_pointers) {
+ if (target_space == OLD_SPACE) {
// Record the object's address at the top of the to space, to allow
// it to be swept by the scavenger.
promoted_top -= kPointerSize;
}
+bool Heap::ConfigureHeapDefault() {
+ return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
+}
+
+
int Heap::PromotedSpaceSize() {
return old_space_->Size()
+ code_space_->Size()
}
+int Heap::PromotedExternalMemorySize() {
+ if (amount_of_external_allocated_memory_
+ <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
+ return amount_of_external_allocated_memory_
+ - amount_of_external_allocated_memory_at_last_global_gc_;
+}
+
+
bool Heap::Setup(bool create_heap_objects) {
// Initialize heap spaces and initial maps and objects. Whenever something
// goes wrong, just return false. The caller should check the results and
// size) and old-space-size if set or the initial values of semispace_size_
// and old_generation_size_ otherwise.
if (!heap_configured) {
- if (!ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size)) return false;
+ if (!ConfigureHeapDefault()) return false;
}
// Setup memory allocator and allocate an initial chunk of memory. The
int old_space_size = new_space_start - old_space_start;
int code_space_size = young_generation_size_ - old_space_size;
- // Initialize new space.
- new_space_ = new NewSpace(initial_semispace_size_, semispace_size_);
+ // Initialize new space. It will not contain code.
+ new_space_ = new NewSpace(initial_semispace_size_,
+ semispace_size_,
+ NEW_SPACE,
+ false);
if (new_space_ == NULL) return false;
if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
// Initialize old space, set the maximum capacity to the old generation
- // size.
- old_space_ = new OldSpace(old_generation_size_, OLD_SPACE);
+ // size. It will not contain code.
+ old_space_ = new OldSpace(old_generation_size_, OLD_SPACE, false);
if (old_space_ == NULL) return false;
if (!old_space_->Setup(old_space_start, old_space_size)) return false;
// Initialize the code space, set its maximum capacity to the old
- // generation size.
- code_space_ = new OldSpace(old_generation_size_, CODE_SPACE);
+ // generation size. It needs executable memory.
+ code_space_ = new OldSpace(old_generation_size_, CODE_SPACE, true);
if (code_space_ == NULL) return false;
if (!code_space_->Setup(code_space_start, code_space_size)) return false;
// Initialize map space.
- map_space_ = new MapSpace(kMaxMapSpaceSize);
+ map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
if (map_space_ == NULL) return false;
// Setting up a paged space without giving it a virtual memory range big
// enough to hold at least a page will cause it to allocate.
if (!map_space_->Setup(NULL, 0)) return false;
- lo_space_ = new LargeObjectSpace();
+ // The large object space may contain code, so it needs executable memory.
+ lo_space_ = new LargeObjectSpace(LO_SPACE, true);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
#endif
+SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
+}
+
+
+SpaceIterator::~SpaceIterator() {
+ // Delete active iterator if any.
+ delete iterator_;
+}
+
+
+bool SpaceIterator::has_next() {
+ // Iterate until no more spaces.
+ return current_space_ != LAST_SPACE;
+}
+
+
+ObjectIterator* SpaceIterator::next() {
+ if (iterator_ != NULL) {
+ delete iterator_;
+ iterator_ = NULL;
+ // Move to the next space
+ current_space_++;
+ if (current_space_ > LAST_SPACE) {
+ return NULL;
+ }
+ }
+
+ // Return iterator for the new current space.
+ return CreateIterator();
+}
+
+
+// Create an iterator for the space to iterate.
+ObjectIterator* SpaceIterator::CreateIterator() {
+ ASSERT(iterator_ == NULL);
+
+ switch (current_space_) {
+ case NEW_SPACE:
+ iterator_ = new SemiSpaceIterator(Heap::new_space());
+ break;
+ case OLD_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::old_space());
+ break;
+ case CODE_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::code_space());
+ break;
+ case MAP_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::map_space());
+ break;
+ case LO_SPACE:
+ iterator_ = new LargeObjectIterator(Heap::lo_space());
+ break;
+ }
+
+ // Return the newly allocated iterator.
+ ASSERT(iterator_ != NULL);
+ return iterator_;
+}
+
+
HeapIterator::HeapIterator() {
Init();
}
#endif
+GCTracer::GCTracer()
+ : start_time_(0.0),
+ start_size_(0.0),
+ gc_count_(0),
+ full_gc_count_(0),
+ is_compacting_(false),
+ marked_count_(0) {
+ // These two fields reflect the state of the previous full collection.
+ // Set them before they are changed by the collector.
+ previous_has_compacted_ = MarkCompactCollector::HasCompacted();
+ previous_marked_count_ = MarkCompactCollector::previous_marked_count();
+ if (!FLAG_trace_gc) return;
+ start_time_ = OS::TimeCurrentMillis();
+ start_size_ = SizeOfHeapObjects();
+}
+
+
+GCTracer::~GCTracer() {
+ if (!FLAG_trace_gc) return;
+ // Printf ONE line iff flag is set.
+ PrintF("%s %.1f -> %.1f MB, %d ms.\n",
+ CollectorString(),
+ start_size_, SizeOfHeapObjects(),
+ static_cast<int>(OS::TimeCurrentMillis() - start_time_));
+}
+
+
+const char* GCTracer::CollectorString() {
+ switch (collector_) {
+ case SCAVENGER:
+ return "Scavenge";
+ case MARK_COMPACTOR:
+ return MarkCompactCollector::HasCompacted() ? "Mark-compact"
+ : "Mark-sweep";
+ }
+ return "Unknown GC";
+}
+
+
} } // namespace v8::internal
V(zero_symbol, "0")
+// Forward declaration of the GCTracer class.
+class GCTracer;
+
+
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
// Configure heap size before setup. Return false if the heap has been
// setup already.
static bool ConfigureHeap(int semispace_size, int old_gen_size);
+ static bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
// also creates the basic non-mutable objects.
// Please note this function does not perform a garbage collection.
static inline Object* AllocateRaw(int size_in_bytes, AllocationSpace space);
+
+ // Allocate an uninitialized object during deserialization. Performs linear
+ // allocation (i.e., guaranteed no free list allocation) and assumes the
+ // spaces are all preexpanded so allocation should not fail.
+ static inline Object* AllocateForDeserialization(int size_in_bytes,
+ AllocationSpace space);
+
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
- static void PerformScavenge() {
- PerformGarbageCollection(NEW_SPACE, SCAVENGER);
- }
+ static void PerformScavenge();
static void SetGlobalGCPrologueCallback(GCCallback callback) {
global_gc_prologue_callback_ = callback;
static bool InSpace(Address addr, AllocationSpace space);
static bool InSpace(HeapObject* value, AllocationSpace space);
+ // Finds out which space an object should get promoted to based on its type.
+ static inline AllocationSpace TargetSpace(HeapObject* object);
+
// Sets the stub_cache_ (only used when expanding the dictionary).
static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }
// Write barrier support for address[offset] = o.
inline static void RecordWrite(Address address, int offset);
- // Given an address in the heap, returns a pointer to the object which
- // body contains the address. Returns Failure::Exception() if the
- // operation fails.
+ // Given an address occupied by a live code object, return that object.
static Object* FindCodeObject(Address a);
// Invoke Shrink on shrinkable spaces.
static void TracePathToGlobal();
#endif
- // Helper for Serialization/Deserialization that restricts memory allocation
- // to the predictable LINEAR_ONLY policy
- static void SetLinearAllocationOnly(bool linear_only) {
- old_space_->SetLinearAllocationOnly(linear_only);
- code_space_->SetLinearAllocationOnly(linear_only);
- map_space_->SetLinearAllocationOnly(linear_only);
- }
-
// Callback function passed to Heap::Iterate etc. Copies an object if
// necessary, the object might be promoted to an old space. The caller must
// ensure the precondition that the object is (a) a heap object and (b) in
// Entries in the cache. Must be a power of 2.
static const int kNumberStringCacheSize = 64;
+ // Adjusts the amount of registered external memory.
+ // Returns the adjusted value.
+ static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+ int amount = amount_of_external_allocated_memory_ + change_in_bytes;
+ if (change_in_bytes >= 0) {
+ // Avoid overflow.
+ if (amount > amount_of_external_allocated_memory_) {
+ amount_of_external_allocated_memory_ = amount;
+ }
+ } else {
+ // Avoid underflow.
+ if (amount >= 0) {
+ amount_of_external_allocated_memory_ = amount;
+ }
+ }
+ ASSERT(amount_of_external_allocated_memory_ >= 0);
+ return amount_of_external_allocated_memory_;
+ }
+
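// Worked example (annotation, not part of the original patch): if an embedder
// registers 10 MB of external data, the running total above becomes 10 MB.
// After the next mark-compact collection that total is cached in
// amount_of_external_allocated_memory_at_last_global_gc_, so a further 5 MB
// registration makes PromotedExternalMemorySize() report 5 MB, the delta that
// SelectGarbageCollector adds to PromotedSpaceSize() when deciding whether a
// full collection is warranted.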
private:
static int semispace_size_;
static int initial_semispace_size_;
// Returns the size of object residing in non new spaces.
static int PromotedSpaceSize();
-#ifdef DEBUG
- static bool allocation_allowed_;
+ // Returns the amount of external memory registered since last global gc.
+ static int PromotedExternalMemorySize();
+
static int mc_count_; // how many mark-compact collections happened
static int gc_count_; // how many gc happened
+#ifdef DEBUG
+ static bool allocation_allowed_;
+
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
// Promotion limit that trigger a global GC
static int promoted_space_limit_;
+ // The amount of external memory registered through the API kept alive
+ // by global handles.
+ static int amount_of_external_allocated_memory_;
+
+ // Caches the amount of external memory registered at the last global gc.
+ static int amount_of_external_allocated_memory_at_last_global_gc_;
+
// Indicates that an allocation has failed in the old generation since the
// last GC.
static int old_gen_exhausted_;
// Performs garbage collection
static void PerformGarbageCollection(AllocationSpace space,
- GarbageCollector collector);
+ GarbageCollector collector,
+ GCTracer* tracer);
// Returns either a Smi or a Number object from 'value'. If 'new_object'
// is false, it may return a preallocated immutable object.
static void Scavenge();
// Performs a major collection in the whole heap.
- static void MarkCompact();
+ static void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
static void MarkCompactPrologue();
#endif
+// Space iterator for iterating over all spaces of the heap.
+// For each space an object iterator is provided. The deallocation of the
+// returned object iterators is handled by the space iterator.
+
+class SpaceIterator : public Malloced {
+ public:
+ SpaceIterator();
+ virtual ~SpaceIterator();
+
+ bool has_next();
+ ObjectIterator* next();
+
+ private:
+ ObjectIterator* CreateIterator();
+
+ int current_space_; // from enum AllocationSpace.
+ ObjectIterator* iterator_; // object iterator for the current space.
+};
+
+
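// Usage sketch (annotation, not part of the original patch), based only on
// the interface declared above; the ObjectIterator protocol itself is not
// shown in this excerpt:
//
//   SpaceIterator spaces;
//   while (spaces.has_next()) {
//     ObjectIterator* it = spaces.next();
//     // ... walk the objects of the current space through 'it' ...
//     // (each per-space iterator is owned and deleted by the SpaceIterator)
//   }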
// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as these can only iterate over
// one space each.
};
-// ----------------------------------------------------------------------------
-// Functions and constants used for marking live objects.
-//
-
-// Many operations (eg, Object::Size()) are based on an object's map. When
-// objects are marked as live or overflowed, their map pointer is changed.
-// Use clear_mark_bit and/or clear_overflow_bit to recover the original map
-// word.
-static inline intptr_t clear_mark_bit(intptr_t map_word) {
- return map_word | kMarkingMask;
-}
-
-
-static inline intptr_t clear_overflow_bit(intptr_t map_word) {
- return map_word & ~kOverflowMask;
-}
-
-
-// True if the object is marked live.
-static inline bool is_marked(HeapObject* obj) {
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- return (map_word & kMarkingMask) == 0;
-}
-
-
-// Mutate an object's map pointer to indicate that the object is live.
-static inline void set_mark(HeapObject* obj) {
- ASSERT(!is_marked(obj));
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- obj->set_map(reinterpret_cast<Map*>(map_word & ~kMarkingMask));
-}
-
-
-// Mutate an object's map pointer to remove the indication that the object
-// is live, ie, (partially) restore the map pointer.
-static inline void clear_mark(HeapObject* obj) {
- ASSERT(is_marked(obj));
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- obj->set_map(reinterpret_cast<Map*>(clear_mark_bit(map_word)));
-}
-
-
-// True if the object is marked overflowed.
-static inline bool is_overflowed(HeapObject* obj) {
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- return (map_word & kOverflowMask) != 0;
-}
-
-
-// Mutate an object's map pointer to indicate that the object is overflowed.
-// Overflowed objects have been reached during marking of the heap but not
-// pushed on the marking stack (and thus their children have not necessarily
-// been marked).
-static inline void set_overflow(HeapObject* obj) {
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- obj->set_map(reinterpret_cast<Map*>(map_word | kOverflowMask));
-}
-
-
-// Mutate an object's map pointer to remove the indication that the object
-// is overflowed, ie, (partially) restore the map pointer.
-static inline void clear_overflow(HeapObject* obj) {
- ASSERT(is_overflowed(obj));
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- obj->set_map(reinterpret_cast<Map*>(clear_overflow_bit(map_word)));
-}
-
-
// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
};
#endif
+// GCTracer collects and prints ONE line after each garbage collector
+// invocation IFF --trace_gc is used.
+
+class GCTracer BASE_EMBEDDED {
+ public:
+ GCTracer();
+
+ ~GCTracer();
+
+ // Sets the collector.
+ void set_collector(GarbageCollector collector) { collector_ = collector; }
+
+ // Sets the GC count.
+ void set_gc_count(int count) { gc_count_ = count; }
+
+ // Sets the full GC count.
+ void set_full_gc_count(int count) { full_gc_count_ = count; }
+
+ // Sets the flag that this is a compacting full GC.
+ void set_is_compacting() { is_compacting_ = true; }
+
+ // Increment and decrement the count of marked objects.
+ void increment_marked_count() { ++marked_count_; }
+ void decrement_marked_count() { --marked_count_; }
+
+ int marked_count() { return marked_count_; }
+
+ private:
+ // Returns a string matching the collector.
+ const char* CollectorString();
+
+ // Returns size of object in heap (in MB).
+ double SizeOfHeapObjects() {
+ return (static_cast<double>(Heap::SizeOfObjects())) / MB;
+ }
+
+ double start_time_; // Timestamp set in the constructor.
+ double start_size_; // Size of objects in heap set in constructor.
+ GarbageCollector collector_; // Type of collector.
+
+ // A count (including this one, eg, the first collection is 1) of the
+ // number of garbage collections.
+ int gc_count_;
+
+ // A count (including this one) of the number of full garbage collections.
+ int full_gc_count_;
+
+ // True if the current GC is a compacting full collection, false
+ // otherwise.
+ bool is_compacting_;
+
+ // True if the *previous* full GC was a compacting collection (will be
+ // false if there has not been a previous full GC).
+ bool previous_has_compacted_;
+
+ // On a full GC, a count of the number of marked objects. Incremented
+ // when an object is marked and decremented when an object's mark bit is
+ // cleared. Will be zero on a scavenge collection.
+ int marked_count_;
+
+ // The count from the end of the previous full GC. Will be zero if there
+ // was no previous full GC.
+ int previous_marked_count_;
+};
+
+
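// Note (annotation, not part of the original patch): these fields are filled
// in by the collector rather than by the tracer itself. Heap::CollectGarbage
// sets the collector and the GC count, Heap::MarkCompact supplies the full-GC
// count, and the mark-compact collector flags compacting collections and moves
// marked_count_ up and down as mark bits are set and cleared; see the
// corresponding .cc changes earlier in this patch.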
} } // namespace v8::internal
#endif // V8_HEAP_H_
__ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r1, MemOperand(ip, 0 * kPointerSize));
- __ EnterJSFrame(0, 0);
+ __ EnterJSFrame(0);
// Push the receiver and the name of the function.
__ ldr(r0, MemOperand(pp, 0));
__ ldr(r0, MemOperand(v8::internal::fp, // fp is shadowed by IC::fp
JavaScriptFrameConstants::kArgsLengthOffset));
- __ ExitJSFrame(DO_NOT_RETURN, 0);
+ __ ExitJSFrame(DO_NOT_RETURN);
// Patch the function on the stack; 1 ~ receiver.
__ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
IC::State IC::StateFrom(Code* target, Object* receiver) {
- IC::State state = target->state();
+ IC::State state = target->ic_state();
if (state != MONOMORPHIC) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
Code* target = GetTargetAtAddress(address);
// Don't clear debug break inline cache as it will remove the break point.
- if (target->state() == DEBUG_BREAK) return;
+ if (target->ic_state() == DEBUG_BREAK) return;
switch (target->kind()) {
case Code::LOAD_IC: return LoadIC::Clear(address, target);
void CallIC::Clear(Address address, Code* target) {
- if (target->state() == UNINITIALIZED) return;
+ if (target->ic_state() == UNINITIALIZED) return;
Code* code = StubCache::FindCallInitialize(target->arguments_count());
SetTargetAtAddress(address, code);
}
void KeyedLoadIC::Clear(Address address, Code* target) {
- if (target->state() == UNINITIALIZED) return;
+ if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}
void LoadIC::Clear(Address address, Code* target) {
- if (target->state() == UNINITIALIZED) return;
+ if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}
void StoreIC::Clear(Address address, Code* target) {
- if (target->state() == UNINITIALIZED) return;
+ if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}
void KeyedStoreIC::Clear(Address address, Code* target) {
- if (target->state() == UNINITIALIZED) return;
+ if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}
DEFINE_bool(log_api, false, "Log API events to the log file.");
DEFINE_bool(log_code, false,
"Log code events to the log file without profiling.");
+DEFINE_bool(log_debugger, false, "Log debugger internal messages.");
DEFINE_bool(log_gc, false,
"Log heap samples on garbage collection for the hp2ps tool.");
-DEFINE_bool(log_suspect, false, "Log suspect operations.");
DEFINE_bool(log_handles, false, "Log global handle events.");
DEFINE_bool(log_state_changes, false, "Log state changes.");
+DEFINE_bool(log_suspect, false, "Log suspect operations.");
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).");
DEFINE_bool(sliding_state_window, false,
}
-//
-// Synchronize class used for ensuring block structured
-// locking for the Logger::*Event functions.
-//
-
-class Synchronize {
- public:
- explicit Synchronize(Mutex* mutex) {
- mutex_ = mutex;
- mutex_->Lock();
- }
- ~Synchronize() {
- mutex_->Unlock();
- }
- private:
- // Mutex used for enforcing block structured access.
- Mutex* mutex_;
-};
-
-
//
// Logger class implementation.
//
void Logger::Preamble(const char* content) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "%s", content);
#endif
}
void Logger::StringEvent(const char* name, const char* value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "%s,\"%s\"\n", name, value);
#endif
}
void Logger::IntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "%s,%d\n", name, value);
#endif
}
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_handles) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "%s,0x%x\n", name,
reinterpret_cast<unsigned int>(location));
#endif
// FLAG_log_api is true.
void Logger::ApiEvent(const char* format, ...) {
ASSERT(logfile_ != NULL && FLAG_log_api);
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
va_list ap;
va_start(ap, format);
vfprintf(logfile_, format, ap);
unsigned end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_prof) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
start, end);
#endif
unsigned end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_prof) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
start, end);
#endif
void Logger::NewEvent(const char* name, void* object, size_t size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "new,%s,0x%x,%u\n", name,
reinterpret_cast<unsigned int>(object),
static_cast<unsigned int>(size));
void Logger::DeleteEvent(const char* name, void* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "delete,%s,0x%x\n", name,
reinterpret_cast<unsigned int>(object));
#endif
void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "code-creation,%s,0x%x,%d,\"", tag,
reinterpret_cast<unsigned int>(code->address()),
void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
fprintf(logfile_, "code-creation,%s,0x%x,%d,\"%s\"\n", tag,
void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "code-creation,%s,0x%x,%d,\"args_count: %d\"\n", tag,
reinterpret_cast<unsigned int>(code->address()),
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "code-move,0x%x,0x%x\n",
reinterpret_cast<unsigned int>(from),
reinterpret_cast<unsigned int>(to));
void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "code-delete,0x%x\n", reinterpret_cast<unsigned int>(from));
#endif
}
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "%s,%s,", name, tag);
uint32_t sec, usec;
void Logger::SuspectReadEvent(String* name, String* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_suspect) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "suspect-read,");
obj->PrintOn(logfile_);
fprintf(logfile_, ",\"");
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_gc) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
#endif
}
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_gc) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "heap-sample-end,\"%s\",\"%s\"\n", space, kind);
#endif
}
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_gc) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "heap-sample-item,%s,%d,%d\n", type, number, bytes);
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (logfile_ == NULL) return;
- Synchronize s(mutex_);
+ ScopedLock sl(mutex_);
fprintf(logfile_, "tick,0x%x,0x%x,%d", sample->pc, sample->sp,
static_cast<int>(sample->state));
if (overflow) fprintf(logfile_, ",overflow");
// Each of the individual log flags implies --log. Check after
// checking --log-all and --prof in case they set --log-code.
- if (FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
+ if (FLAG_log_api || FLAG_log_code || FLAG_log_debugger || FLAG_log_gc ||
FLAG_log_handles || FLAG_log_suspect) {
FLAG_log = true;
}
// Log code (create, move, and delete) events to the logfile, default is off.
// --log-code implies --log.
//
+// --log-debugger
+// Log the internal activity of the debugger, to aid in debugging the debugger.
+//
// --log-gc
// Log GC heap samples after each GC that can be processed by hp2ps, default
// is off. --log-gc implies --log.
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
- generating_stub_(false) {
+ generating_stub_(false),
+ allow_stub_calls_(true) {
}
Label fast, done;
- // First, test that the start address is not in the new space. We cannot
- // set remembered set bits in the new space.
+ // First, test that the object is not in the new space. We cannot set
+ // remembered set bits in the new space.
+ // object: heap object pointer (with tag)
+ // offset: offset to store location from the object
and_(scratch, object, Operand(Heap::NewSpaceMask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
b(eq, &done);
- mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
// Compute the bit offset in the remembered set.
- and_(scratch, object, Operand(ip));
- add(offset, scratch, Operand(offset));
+ // object: heap object pointer (with tag)
+ // offset: offset to store location from the object
+ mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
+ and_(scratch, object, Operand(ip)); // offset into page of the object
+ add(offset, scratch, Operand(offset)); // add offset into the object
mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
// Compute the page address from the heap object pointer.
+ // object: heap object pointer (with tag)
+ // offset: bit offset of store position in the remembered set
bic(object, object, Operand(ip));
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
+ // object: page start
+ // offset: bit offset of store position in the remembered set
cmp(offset, Operand(Page::kPageSize / kPointerSize));
b(lt, &fast);
add(object, object, Operand(scratch));
bind(&fast);
- // Now object is the address of the start of the remembered set and offset
- // is the bit offset from that start.
// Get address of the rset word.
- add(object, object, Operand(offset, LSR, kRSetWordShift));
- // Get bit offset in the word.
+ // object: start of the remembered set (page start for the fast case)
+ // offset: bit offset of store position in the remembered set
+ bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
+ add(object, object, Operand(scratch, LSR, kRSetWordShift));
+ // Get bit offset in the rset word.
+ // object: address of remembered set word
+ // offset: bit offset of store position
and_(offset, offset, Operand(kBitsPerInt - 1));
ldr(scratch, MemOperand(object));
}
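// Worked example (annotation, not part of the original patch), assuming
// 4-byte pointers (kObjectAlignmentBits == 2) and 32-bit remembered-set words
// (kBitsPerInt == 32): a store 0x100 bytes into its page maps to bit
// 0x100 >> 2 == 64 of that page's remembered set, i.e. bit 64 & 31 == 0 of
// remembered-set word 64 / 32 == 2. The code above computes exactly those two
// quantities: 'object' ends up addressing the remembered-set word and
// 'offset' holds the bit position within it.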
-void MacroAssembler::EnterJSFrame(int argc, RegList callee_saved) {
+void MacroAssembler::EnterJSFrame(int argc) {
// Generate code entering a JS function called from a JS function
// stack: receiver, arguments
// r0: number of arguments (not including function, nor receiver)
mov(r3, Operand(r0)); // args_len to be saved
mov(r2, Operand(cp)); // context to be saved
- // Make sure there are no instructions between both stm instructions, because
- // the callee_saved list is obtained during stack unwinding by decoding the
- // first stmdb instruction, which is found (or not) at a constant offset from
- // the pc saved by the second stmdb instruction.
- if (callee_saved != 0) {
- stm(db_w, sp, callee_saved);
- }
-
// push in reverse order: context (r2), args_len (r3), caller_pp, caller_fp,
- // sp_on_exit (ip == pp, may be patched on exit), return address, prolog_pc
+ // sp_on_exit (ip == pp, may be patched on exit), return address
stm(db_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit() |
- ip.bit() | lr.bit() | pc.bit());
+ ip.bit() | lr.bit());
// Setup new frame pointer.
add(fp, sp, Operand(-StandardFrameConstants::kContextOffset));
}
-void MacroAssembler::ExitJSFrame(ExitJSFlag flag, RegList callee_saved) {
+void MacroAssembler::ExitJSFrame(ExitJSFlag flag) {
// r0: result
// sp: stack pointer
// fp: frame pointer
// pp: parameter pointer
- if (callee_saved != 0 || flag == DO_NOT_RETURN) {
+ if (flag == DO_NOT_RETURN) {
add(r3, fp, Operand(JavaScriptFrameConstants::kSavedRegistersOffset));
}
- if (callee_saved != 0) {
- ldm(ia_w, r3, callee_saved);
- }
-
if (flag == DO_NOT_RETURN) {
// restore sp as caller_sp (not as pp)
str(r3, MemOperand(fp, JavaScriptFrameConstants::kSPOnExitOffset));
void MacroAssembler::CallStub(CodeStub* stub) {
- ASSERT(!generating_stub()); // stub calls are not allowed in stubs
+ ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
Call(stub->GetCode(), code_target);
}
void MacroAssembler::CallJSExitStub(CodeStub* stub) {
- ASSERT(!generating_stub()); // stub calls are not allowed in stubs
+ ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
Call(stub->GetCode(), exit_js_frame);
}
mov(r0, Operand(num_arguments - 1));
} else {
ASSERT(f->nargs == num_arguments);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+
+ // The number of arguments is fixed for this call.
+ // Set r0 correspondingly.
+ push(r0);
+ mov(r0, Operand(f->nargs - 1)); // receiver does not count as an argument
}
RuntimeStub stub((Runtime::FunctionId) f->stub_id);
void MacroAssembler::TailCallRuntime(Runtime::Function* f) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- if (f->nargs >= 0) {
- // The number of arguments is fixed for this call.
- // Set r0 correspondingly.
- push(r0);
- mov(r0, Operand(f->nargs - 1)); // receiver does not count as an argument
- }
JumpToBuiltin(ExternalReference(f)); // tail call to runtime routine
}
// ---------------------------------------------------------------------------
// Activation frames
- void EnterJSFrame(int argc, RegList callee_saved);
- void ExitJSFrame(ExitJSFlag flag, RegList callee_saved);
+ void EnterJSFrame(int argc);
+ void ExitJSFrame(ExitJSFlag flag);
// Support functions.
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
+ void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+ bool allow_stub_calls() { return allow_stub_calls_; }
private:
List<Unresolved> unresolved_;
bool generating_stub_;
+ bool allow_stub_calls_;
};
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
- generating_stub_(false) {
+ generating_stub_(false),
+ allow_stub_calls_(true) {
}
void MacroAssembler::CallStub(CodeStub* stub) {
- ASSERT(!generating_stub()); // calls are not allowed in stubs
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
call(stub->GetCode(), code_target);
}
bool resolved;
Handle<Code> code = ResolveBuiltin(id, &resolved);
- // Calls are not allowed in stubs.
- ASSERT(flag == JUMP_FUNCTION || !generating_stub());
+ // Calls are not allowed in some stubs.
+ ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
+ void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+ bool allow_stub_calls() { return allow_stub_calls_; }
private:
List<Unresolved> unresolved_;
bool generating_stub_;
+ bool allow_stub_calls_;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
}
+// Generate an Operand for loading an indexed field from an object.
+static inline Operand FieldOperand(Register object,
+ Register index,
+ ScaleFactor scale,
+ int offset) {
+ return Operand(object, index, scale, offset - kHeapObjectTag);
+}
+
+
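// Usage sketch (annotation, not part of the original patch; register names and
// layout constants are assumptions): with a tagged FixedArray pointer in
// 'object' and a Smi index in 'index', the Smi encoding (value << 1) means
// that scaling by times_2 yields the byte offset of a 4-byte element, so an
// element load could be written as
//
//   __ mov(eax, FieldOperand(object, index, times_2, FixedArray::kHeaderSize));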
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_IA32_H_
bool MarkCompactCollector::compacting_collection_ = false;
+int MarkCompactCollector::previous_marked_count_ = 0;
+GCTracer* MarkCompactCollector::tracer_ = NULL;
+
+
#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif
-void MarkCompactCollector::CollectGarbage() {
+void MarkCompactCollector::CollectGarbage(GCTracer* tracer) {
+ // Rather than passing the tracer around we stash it in a static member
+ // variable.
+ tracer_ = tracer;
Prepare();
+ // Prepare has selected whether to compact the old generation or not.
+ // Tell the tracer.
+ if (IsCompacting()) tracer_->set_is_compacting();
MarkLiveObjects();
}
Finish();
+
+ // Save the count of marked objects remaining after the collection and
+ // null out the GC tracer.
+ previous_marked_count_ = tracer_->marked_count();
+ ASSERT(previous_marked_count_ == 0);
+ tracer_ = NULL;
}
}
-// ---------------------------------------------------------------------------
-// Forwarding pointers and map pointer encoding
-// | 11 bits | offset to the live object in the page
-// | 11 bits | offset in a map page
-// | 10 bits | map table index
-
-static const int kMapPageIndexBits = 10;
-static const int kMapPageOffsetBits = 11;
-static const int kForwardingOffsetBits = 11;
-static const int kAlignmentBits = 1;
-
-static const int kMapPageIndexShift = 0;
-static const int kMapPageOffsetShift =
- kMapPageIndexShift + kMapPageIndexBits;
-static const int kForwardingOffsetShift =
- kMapPageOffsetShift + kMapPageOffsetBits;
-
-// 0x000003FF
-static const uint32_t kMapPageIndexMask =
- (1 << kMapPageOffsetShift) - 1;
-
-// 0x001FFC00
-static const uint32_t kMapPageOffsetMask =
- ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
-
-// 0xFFE00000
-static const uint32_t kForwardingOffsetMask =
- ~(kMapPageIndexMask | kMapPageOffsetMask);
-
-
-static uint32_t EncodePointers(Address map_addr, int offset) {
- // Offset is the distance to the first alive object in the same
- // page. The offset between two objects in the same page should not
- // exceed the object area size of a page.
- ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
-
- int compact_offset = offset >> kObjectAlignmentBits;
- ASSERT(compact_offset < (1 << kForwardingOffsetBits));
-
- Page* map_page = Page::FromAddress(map_addr);
- int map_page_index = map_page->mc_page_index;
- ASSERT_MAP_PAGE_INDEX(map_page_index);
-
- int map_page_offset = map_page->Offset(map_addr) >> kObjectAlignmentBits;
-
- return (compact_offset << kForwardingOffsetShift)
- | (map_page_offset << kMapPageOffsetShift)
- | (map_page_index << kMapPageIndexShift);
-}
-
-
-static int DecodeOffset(uint32_t encoded) {
- // The offset field is represented in the MSB.
- int offset = (encoded >> kForwardingOffsetShift) << kObjectAlignmentBits;
- ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
- return offset;
-}
-
-
-static Address DecodeMapPointer(uint32_t encoded, MapSpace* map_space) {
- int map_page_index = (encoded & kMapPageIndexMask) >> kMapPageIndexShift;
- ASSERT_MAP_PAGE_INDEX(map_page_index);
-
- int map_page_offset = ((encoded & kMapPageOffsetMask) >> kMapPageOffsetShift)
- << kObjectAlignmentBits;
-
- return (map_space->PageAddress(map_page_index) + map_page_offset);
-}
-
-
// ----------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
// before: all objects are in normal state.
// object->IsConsString() &&
// (ConsString::cast(object)->second() == Heap::empty_string())
// except the map for the object might be marked.
- intptr_t map_word =
- reinterpret_cast<intptr_t>(HeapObject::cast(obj)->map());
- uint32_t tag =
- (reinterpret_cast<Map*>(clear_mark_bit(map_word)))->instance_type();
- if ((tag < FIRST_NONSTRING_TYPE) &&
- (kConsStringTag ==
- static_cast<StringRepresentationTag>(tag &
- kStringRepresentationMask)) &&
- (Heap::empty_string() ==
- reinterpret_cast<String*>(
- reinterpret_cast<ConsString*>(obj)->second()))) {
+ MapWord map_word = HeapObject::cast(obj)->map_word();
+ map_word.ClearMark();
+ InstanceType type = map_word.ToMap()->instance_type();
+ if ((type < FIRST_NONSTRING_TYPE) &&
+ (static_cast<StringRepresentationTag>(
+ type & kStringRepresentationMask) == kConsStringTag) &&
+ (reinterpret_cast<String*>(
+ reinterpret_cast<ConsString*>(obj)->second()) ==
+ Heap::empty_string())) {
      // Since we don't have the object start it is impossible to update the
      // remembered set quickly. Therefore this optimization only takes
      // place when we can avoid changing.
MarkCompactCollector::UpdateLiveObjectCount(obj);
#endif
Map* map = obj->map();
- set_mark(obj);
+ obj->SetMark();
+ MarkCompactCollector::tracer()->increment_marked_count();
// Mark the map pointer and the body.
MarkCompactCollector::MarkObject(map);
obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- if (is_marked(obj)) continue;
+ if (obj->IsMarked()) continue;
VisitUnmarkedObject(obj);
}
return true;
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject() && !is_marked(HeapObject::cast(*p))) {
+ if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
// Set the entry to null_value (as deleted).
*p = Heap::null_value();
pointers_removed_++;
};
-static void MarkObjectGroups(MarkingVisitor* marker) {
- List<ObjectGroup*>& object_groups = GlobalHandles::ObjectGroups();
-
- for (int i = 0; i < object_groups.length(); i++) {
- ObjectGroup* entry = object_groups[i];
- bool group_marked = false;
- List<Object**>& objects = entry->objects_;
- for (int j = 0; j < objects.length(); j++) {
- Object* obj = *objects[j];
- if (obj->IsHeapObject() && is_marked(HeapObject::cast(obj))) {
- group_marked = true;
- break;
- }
- }
-
- if (!group_marked) continue;
-
- for (int j = 0; j < objects.length(); j++) {
- marker->VisitPointer(objects[j]);
- }
- }
-}
-
-
void MarkCompactCollector::MarkUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
- if (!is_marked(obj)) UpdateLiveObjectCount(obj);
+ UpdateLiveObjectCount(obj);
#endif
- ASSERT(!is_marked(obj));
+ ASSERT(!obj->IsMarked());
if (obj->IsJSGlobalObject()) Counters::global_objects.Increment();
if (FLAG_cleanup_caches_in_maps_at_gc && obj->IsMap()) {
Map::cast(obj)->ClearCodeCache();
}
- set_mark(obj);
+ obj->SetMark();
+ tracer_->increment_marked_count();
if (!marking_stack.overflowed()) {
ASSERT(Heap::Contains(obj));
marking_stack.Push(obj);
} else {
// Set object's stack overflow bit, wait for rescan.
- set_overflow(obj);
+ obj->SetOverflow();
}
}
-void MarkCompactCollector::MarkObjectsReachableFromTopFrame() {
- MarkingVisitor marking_visitor;
- do {
- while (!marking_stack.is_empty()) {
- HeapObject* obj = marking_stack.Pop();
- ASSERT(Heap::Contains(obj));
- ASSERT(is_marked(obj) && !is_overflowed(obj));
-
- // Because the object is marked, the map pointer is not tagged as a
- // normal HeapObject pointer, we need to recover the map pointer,
- // then use the map pointer to mark the object body.
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- Map* map = reinterpret_cast<Map*>(clear_mark_bit(map_word));
- MarkObject(map);
- obj->IterateBody(map->instance_type(), obj->SizeFromMap(map),
- &marking_visitor);
- };
- // Check objects in object groups.
- MarkObjectGroups(&marking_visitor);
- } while (!marking_stack.is_empty());
-}
-
-
static int OverflowObjectSize(HeapObject* obj) {
// Recover the normal map pointer, it might be marked as live and
// overflowed.
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- map_word = clear_mark_bit(map_word);
- map_word = clear_overflow_bit(map_word);
- return obj->SizeFromMap(reinterpret_cast<Map*>(map_word));
+ MapWord map_word = obj->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return obj->SizeFromMap(map_word.ToMap());
}
static bool VisitOverflowedObject(HeapObject* obj) {
- if (!is_overflowed(obj)) return true;
- ASSERT(is_marked(obj));
+ if (!obj->IsOverflowed()) return true;
+ ASSERT(obj->IsMarked());
if (marking_stack.overflowed()) return false;
- clear_overflow(obj); // clear overflow bit
+ obj->ClearOverflow();
ASSERT(Heap::Contains(obj));
marking_stack.Push(obj);
return true;
bool MarkCompactCollector::MustBeMarked(Object** p) {
// Check whether *p is a HeapObject pointer.
if (!(*p)->IsHeapObject()) return false;
- return !is_marked(HeapObject::cast(*p));
+ return !HeapObject::cast(*p)->IsMarked();
}
-void MarkCompactCollector::MarkLiveObjects() {
-#ifdef DEBUG
- ASSERT(state_ == PREPARE_GC);
- state_ = MARK_LIVE_OBJECTS;
-#endif
- // The to space contains live objects, the from space is used as a marking
- // stack.
- marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
- Heap::new_space()->FromSpaceHigh());
-
- ASSERT(!marking_stack.overflowed());
-
- // Mark the heap roots, including global variables, stack variables, etc.
- MarkingVisitor marking_visitor;
-
- Heap::IterateStrongRoots(&marking_visitor);
+void MarkCompactCollector::MarkStrongRoots(MarkingVisitor* marking_visitor) {
+ // Mark the heap roots gray, including global variables, stack variables,
+ // etc.
+ Heap::IterateStrongRoots(marking_visitor);
// Take care of the symbol table specially.
SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
+ // 1. Mark the prefix of the symbol table gray.
+ symbol_table->IteratePrefix(marking_visitor);
#ifdef DEBUG
UpdateLiveObjectCount(symbol_table);
#endif
+ // 2. Mark the symbol table black (ie, do not push it on the marking stack
+ // or mark it overflowed).
+ symbol_table->SetMark();
+ tracer_->increment_marked_count();
+}
+
+
+void MarkCompactCollector::MarkObjectGroups() {
+ List<ObjectGroup*>& object_groups = GlobalHandles::ObjectGroups();
+
+ for (int i = 0; i < object_groups.length(); i++) {
+ ObjectGroup* entry = object_groups[i];
+ if (entry == NULL) continue;
+
+ List<Object**>& objects = entry->objects_;
+ bool group_marked = false;
+ for (int j = 0; j < objects.length(); j++) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
+ group_marked = true;
+ break;
+ }
+ }
- // 1. mark the prefix of the symbol table and push the objects on
- // the stack.
- symbol_table->IteratePrefix(&marking_visitor);
- // 2. mark the symbol table without pushing it on the stack.
- set_mark(symbol_table); // map word is changed.
+ if (!group_marked) continue;
- bool has_processed_weak_pointers = false;
+ // An object in the group is marked, so mark as gray all white heap
+ // objects in the group.
+ for (int j = 0; j < objects.length(); ++j) {
+ if ((*objects[j])->IsHeapObject()) {
+ MarkObject(HeapObject::cast(*objects[j]));
+ }
+ }
+ // Once the entire group has been colored gray, set the object group
+ // to NULL so it won't be processed again.
+ delete object_groups[i];
+ object_groups[i] = NULL;
+ }
+}
+
+
+// Mark as black all objects reachable starting from gray objects.  (Gray
+// objects are marked and on the marking stack, or marked and flagged as
+// overflowed but not on the marking stack.)
+//
+// Before: the heap contains a mixture of white, gray, and black objects.
+// After: the heap contains a mixture of white and black objects.
+void MarkCompactCollector::ProcessMarkingStack(
+ MarkingVisitor* marking_visitor) {
- // Mark objects reachable from the roots.
while (true) {
- MarkObjectsReachableFromTopFrame();
-
- if (!marking_stack.overflowed()) {
- if (has_processed_weak_pointers) break;
- // First we mark weak pointers not yet reachable.
- GlobalHandles::MarkWeakRoots(&MustBeMarked);
- // Then we process weak pointers and process the transitive closure.
- GlobalHandles::IterateWeakRoots(&marking_visitor);
- has_processed_weak_pointers = true;
- continue;
+ while (!marking_stack.is_empty()) {
+ HeapObject* object = marking_stack.Pop();
+ ASSERT(object->IsHeapObject());
+ ASSERT(Heap::Contains(object));
+ // Removing a (gray) object from the marking stack turns it black.
+ ASSERT(object->IsMarked() && !object->IsOverflowed());
+
+ // Because the object is marked, we have to recover the original map
+ // pointer and use it to mark the object's body.
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ Map* map = map_word.ToMap();
+ MarkObject(map);
+ object->IterateBody(map->instance_type(), object->SizeFromMap(map),
+ marking_visitor);
}
- // The marking stack overflowed, we need to rebuild it by scanning the
- // whole heap.
- marking_stack.clear_overflowed();
+ // The only gray objects are marked overflowed in the heap. If there
+ // are any, refill the marking stack and continue.
+ if (!marking_stack.overflowed()) return;
- // We have early stops if the stack overflowed again while scanning
- // overflowed objects in a space.
+ marking_stack.clear_overflowed();
+    // We stop early if the marking stack overflows again while refilling it
+    // with gray objects, to avoid pointlessly scanning extra spaces.
SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
ScanOverflowedObjects(&new_it);
if (marking_stack.overflowed()) continue;
LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
ScanOverflowedObjects(&lo_it);
}
+}
+
+
+void MarkCompactCollector::ProcessObjectGroups(
+ MarkingVisitor* marking_visitor) {
+ bool work_to_do = true;
+ ASSERT(marking_stack.is_empty());
+ while (work_to_do) {
+ MarkObjectGroups();
+ work_to_do = !marking_stack.is_empty();
+ ProcessMarkingStack(marking_visitor);
+ }
+}
+
+
+void MarkCompactCollector::MarkLiveObjects() {
+#ifdef DEBUG
+ ASSERT(state_ == PREPARE_GC);
+ state_ = MARK_LIVE_OBJECTS;
+#endif
+ // The to space contains live objects, the from space is used as a marking
+ // stack.
+ marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
+ Heap::new_space()->FromSpaceHigh());
+
+ ASSERT(!marking_stack.overflowed());
- // Prune the symbol table removing all symbols only pointed to by
- // the symbol table.
+ MarkingVisitor marking_visitor;
+ MarkStrongRoots(&marking_visitor);
+ ProcessMarkingStack(&marking_visitor);
+
+  // The objects reachable from the roots are marked black; unreachable
+ // objects are white. Mark objects reachable from object groups with at
+ // least one marked object, and continue until no new objects are
+ // reachable from the object groups.
+ ProcessObjectGroups(&marking_visitor);
+
+  // The objects reachable from the roots or object groups are marked black;
+ // unreachable objects are white. Process objects reachable only from
+ // weak global handles.
+ //
+ // First we mark weak pointers not yet reachable.
+ GlobalHandles::MarkWeakRoots(&MustBeMarked);
+ // Then we process weak pointers and process the transitive closure.
+ GlobalHandles::IterateWeakRoots(&marking_visitor);
+ ProcessMarkingStack(&marking_visitor);
+
+ // Repeat the object groups to mark unmarked groups reachable from the
+ // weak roots.
+ ProcessObjectGroups(&marking_visitor);
+
+ // Prune the symbol table removing all symbols only pointed to by the
+ // symbol table. Cannot use SymbolTable::cast here because the symbol
+ // table is marked.
+ SymbolTable* symbol_table =
+ reinterpret_cast<SymbolTable*>(Heap::symbol_table());
SymbolTableCleaner v;
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
// Remove object groups after marking phase.
GlobalHandles::RemoveObjectGroups();
-
- // Objects in the active semispace of the young generation will be relocated
- // to the inactive semispace. Set the relocation info to the beginning of
- // the inactive semispace.
- Heap::new_space()->MCResetRelocationInfo();
}
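The comments in MarkLiveObjects and ProcessMarkingStack describe a white/gray/black discipline: white objects are unmarked, gray objects are marked but their children may not be, and black objects are marked with all children at least gray. A minimal standalone sketch of the same worklist idea, in plain C++ rather than the V8 types above:

  #include <vector>

  struct Node {
    bool marked = false;            // white while false
    std::vector<Node*> children;
  };

  // Mark every node reachable from 'root'.  Marking and pushing a node makes
  // it gray; popping it and pushing its children makes it black.
  void MarkReachable(Node* root) {
    std::vector<Node*> gray;
    if (root != nullptr && !root->marked) {
      root->marked = true;
      gray.push_back(root);
    }
    while (!gray.empty()) {
      Node* node = gray.back();
      gray.pop_back();
      for (Node* child : node->children) {
        if (child != nullptr && !child->marked) {
          child->marked = true;
          gray.push_back(child);
        }
      }
    }
  }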
static int CountMarkedCallback(HeapObject* obj) {
- if (!is_marked(obj)) return obj->Size();
- clear_mark(obj);
- int obj_size = obj->Size();
- set_mark(obj);
- return obj_size;
+ MapWord map_word = obj->map_word();
+ map_word.ClearMark();
+ return obj->SizeFromMap(map_word.ToMap());
}
live_objects = 0; \
while (it.has_next()) { \
HeapObject* obj = HeapObject::cast(it.next()); \
- if (is_marked(obj)) live_objects++; \
+ if (obj->IsMarked()) live_objects++; \
} \
ASSERT(live_objects == expected);
// Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the code space, all others to the old space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
- bool has_pointers = !object->IsHeapNumber() && !object->IsSeqString();
- Object* forwarded = has_pointers ?
- Heap::old_space()->MCAllocateRaw(object_size) :
- Heap::code_space()->MCAllocateRaw(object_size);
+ AllocationSpace target_space = Heap::TargetSpace(object);
+ Object* forwarded;
+ if (target_space == OLD_SPACE) {
+ forwarded = Heap::old_space()->MCAllocateRaw(object_size);
+ } else {
+ ASSERT(target_space == CODE_SPACE);
+ forwarded = Heap::code_space()->MCAllocateRaw(object_size);
+ }
if (forwarded->IsFailure()) {
forwarded = Heap::new_space()->MCAllocateRaw(object_size);
HeapObject::cast(new_object)->address();
}
- uint32_t encoded = EncodePointers(old_object->map()->address(), *offset);
- old_object->set_map(reinterpret_cast<Map*>(encoded));
+ MapWord encoding =
+ MapWord::EncodeAddress(old_object->map()->address(), *offset);
+ old_object->set_map_word(encoding);
*offset += object_size;
ASSERT(*offset <= Page::kObjectAreaSize);
}
int object_size; // Will be set on each iteration of the loop.
for (Address current = start; current < end; current += object_size) {
HeapObject* object = HeapObject::FromAddress(current);
- if (is_marked(object)) {
- clear_mark(object);
+ if (object->IsMarked()) {
+ object->ClearMark();
+ MarkCompactCollector::tracer()->decrement_marked_count();
object_size = object->Size();
Object* forwarded = Alloc(object, object_size);
current < space->top();
current += object->Size()) {
object = HeapObject::FromAddress(current);
- if (is_marked(object)) {
- clear_mark(object);
+ if (object->IsMarked()) {
+ object->ClearMark();
+ MarkCompactCollector::tracer()->decrement_marked_count();
} else {
// We give non-live objects a map that will correctly give their size,
// since their existing map might not be live after the collection.
current < p->AllocationTop();
current += object->Size()) {
object = HeapObject::FromAddress(current);
- if (is_marked(object)) {
- clear_mark(object);
+ if (object->IsMarked()) {
+ object->ClearMark();
+ MarkCompactCollector::tracer()->decrement_marked_count();
if (MarkCompactCollector::IsCompacting() && object->IsCode()) {
// If this is compacting collection marked code objects have had
// their IC targets converted to objects.
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+ // Objects in the active semispace of the young generation may be
+ // relocated to the inactive semispace (if not promoted). Set the
+ // relocation info to the beginning of the inactive semispace.
+ Heap::new_space()->MCResetRelocationInfo();
+
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldSpace,
IgnoreNonLiveObject>(
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
// Decode the map pointer.
- uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
- Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
// At this point, the first word of map_addr is also encoded, cannot
// Update map pointer.
Address new_map_addr = GetForwardingAddressInOldSpace(map);
- int offset = DecodeOffset(encoded);
- encoded = EncodePointers(new_map_addr, offset);
- obj->set_map(reinterpret_cast<Map*>(encoded));
+ int offset = encoding.DecodeOffset();
+ obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
#ifdef DEBUG
if (FLAG_gc_verbose) {
Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // Object should be in either old or map space.
- uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
+ MapWord encoding = obj->map_word();
// Offset to the first live object's forwarding address.
- int offset = DecodeOffset(encoded);
+ int offset = encoding.DecodeOffset();
Address obj_addr = obj->address();
// Find the first live object's forwarding address.
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// decode map pointer (forwarded address)
- uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
- Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
int MarkCompactCollector::RelocateOldObject(HeapObject* obj) {
// decode map pointer (forwarded address)
- uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
- Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
+ ASSERT(Heap::map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// decode map pointer (forwarded address)
- uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
- Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
- bool has_pointers = !obj->IsHeapNumber() && !obj->IsSeqString();
- if (has_pointers) {
+ AllocationSpace target_space = Heap::TargetSpace(obj);
+ if (target_space == OLD_SPACE) {
Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);
} else {
+ ASSERT(target_space == CODE_SPACE);
Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
}
}
typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
+// Forward declaration of visitor.
+class MarkingVisitor;
+
// ----------------------------------------------------------------------------
// Mark-Compact collector
//
typedef void (*ProcessNonLiveFunction)(HeapObject* object);
// Performs a global garbage collection.
- static void CollectGarbage();
+ static void CollectGarbage(GCTracer* tracer);
// True if the last full GC performed heap compaction.
static bool HasCompacted() { return compacting_collection_; }
// True after the Prepare phase if the compaction is taking place.
static bool IsCompacting() { return compacting_collection_; }
+ // The count of the number of objects left marked at the end of the last
+ // completed full GC (expected to be zero).
+ static int previous_marked_count() { return previous_marked_count_; }
+
+ // During a full GC, there is a stack-allocated GCTracer that is used for
+ // bookkeeping information. Return a pointer to that tracer.
+ static GCTracer* tracer() { return tracer_; }
+
#ifdef DEBUG
// Checks whether performing mark-compact collection.
static bool in_use() { return state_ > PREPARE_GC; }
// Global flag indicating whether spaces were compacted on the last GC.
static bool compacting_collection_;
+ // The number of objects left marked at the end of the last completed full
+ // GC (expected to be zero).
+ static int previous_marked_count_;
+
+ // A pointer to the current stack-allocated GC tracer object during a full
+ // collection (NULL before and after).
+ static GCTracer* tracer_;
+
// Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact.
static void Prepare();
static void MarkUnmarkedObject(HeapObject* obj);
static inline void MarkObject(HeapObject* obj) {
- if (!is_marked(obj)) MarkUnmarkedObject(obj);
+ if (!obj->IsMarked()) MarkUnmarkedObject(obj);
}
- static void MarkObjectsReachableFromTopFrame();
+ // Mark the heap roots.
+ static void MarkStrongRoots(MarkingVisitor* marking_visitor);
+
+ // Mark objects in object groups that have at least one object in the
+ // group marked.
+ static void MarkObjectGroups();
+
+ // Mark all objects in an object group with at least one marked
+ // object, then all objects reachable from marked objects in object
+ // groups, and repeat.
+ static void ProcessObjectGroups(MarkingVisitor* marking_visitor);
+
+ // Mark all objects reachable (transitively) from objects in the
+ // marking stack or marked as overflowed in the heap.
+ static void ProcessMarkingStack(MarkingVisitor* marking_visitor);
// Callback function for telling whether the object *p must be marked.
static bool MustBeMarked(Object** p);
v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
v8::Context::New(&extensions);
- // TODO(1247464): Cache delayed scripts.
+ // Make sure all builtin scripts are cached.
+ { HandleScope scope;
+ for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
+ i::Bootstrapper::NativesSourceLookup(i);
+ }
+ }
// Get rid of unreferenced scripts.
i::Heap::CollectGarbage(0, i::OLD_SPACE);
i::Serializer ser;
void Code::CodeVerify() {
- ASSERT(ic_flag() == IC_TARGET_IS_ADDRESS);
+ CHECK(ic_flag() == IC_TARGET_IS_ADDRESS);
+ Address last_gc_pc = NULL;
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Verify();
+ // Ensure that GC will not iterate twice over the same pointer.
+ if (is_gc_reloc_mode(it.rinfo()->rmode())) {
+ CHECK(it.rinfo()->pc() != last_gc_pc);
+ last_gc_pc = it.rinfo()->pc();
+ }
}
}
}
+MapWord MapWord::FromMap(Map* map) {
+ return MapWord(reinterpret_cast<uintptr_t>(map));
+}
+
+
+Map* MapWord::ToMap() {
+ return reinterpret_cast<Map*>(value_);
+}
+
+
+bool MapWord::IsForwardingAddress() {
+ // This function only works for map words that are heap object pointers.
+ // Since it is a heap object, it has a map. We use that map's instance
+ // type to detect if this map word is not actually a map (ie, it is a
+ // forwarding address during a scavenge collection).
+ return reinterpret_cast<HeapObject*>(value_)->map()->instance_type() !=
+ MAP_TYPE;
+}
+
+
+MapWord MapWord::FromForwardingAddress(HeapObject* object) {
+ return MapWord(reinterpret_cast<uintptr_t>(object));
+}
+
+
+HeapObject* MapWord::ToForwardingAddress() {
+ ASSERT(IsForwardingAddress());
+ return reinterpret_cast<HeapObject*>(value_);
+}
+
+
+bool MapWord::IsMarked() {
+ return (value_ & kMarkingMask) == 0;
+}
+
+
+void MapWord::SetMark() {
+ value_ &= ~kMarkingMask;
+}
+
+
+void MapWord::ClearMark() {
+ value_ |= kMarkingMask;
+}
+
+
+bool MapWord::IsOverflowed() {
+ return (value_ & kOverflowMask) != 0;
+}
+
+
+void MapWord::SetOverflow() {
+ value_ |= kOverflowMask;
+}
+
+
+void MapWord::ClearOverflow() {
+ value_ &= ~kOverflowMask;
+}
+
+
+MapWord MapWord::EncodeAddress(Address map_address, int offset) {
+ // Offset is the distance in live bytes from the first live object in the
+ // same page. The offset between two objects in the same page should not
+ // exceed the object area size of a page.
+ ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+
+ int compact_offset = offset >> kObjectAlignmentBits;
+ ASSERT(compact_offset < (1 << kForwardingOffsetBits));
+
+ Page* map_page = Page::FromAddress(map_address);
+ ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
+
+ int map_page_offset =
+ map_page->Offset(map_address) >> kObjectAlignmentBits;
+
+ uintptr_t encoding =
+ (compact_offset << kForwardingOffsetShift) |
+ (map_page_offset << kMapPageOffsetShift) |
+ (map_page->mc_page_index << kMapPageIndexShift);
+ return MapWord(encoding);
+}
+
+
+Address MapWord::DecodeMapAddress(MapSpace* map_space) {
+ int map_page_index = (value_ & kMapPageIndexMask) >> kMapPageIndexShift;
+ ASSERT_MAP_PAGE_INDEX(map_page_index);
+
+ int map_page_offset =
+ ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
+ << kObjectAlignmentBits;
+
+ return (map_space->PageAddress(map_page_index) + map_page_offset);
+}
+
+
+int MapWord::DecodeOffset() {
+ // The offset field is represented in the kForwardingOffsetBits
+ // most-significant bits.
+ int offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
+ ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+ return offset;
+}
+
+
+MapWord MapWord::FromEncodedAddress(Address address) {
+ return MapWord(reinterpret_cast<uintptr_t>(address));
+}
+
+
+Address MapWord::ToEncodedAddress() {
+ return reinterpret_cast<Address>(value_);
+}
+
+
#ifdef DEBUG
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
Map* HeapObject::map() {
- return reinterpret_cast<Map*> READ_FIELD(this, kMapOffset);
+ return map_word().ToMap();
}
void HeapObject::set_map(Map* value) {
- WRITE_FIELD(this, kMapOffset, value);
+ set_map_word(MapWord::FromMap(value));
+}
+
+
+MapWord HeapObject::map_word() {
+ return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset)));
}
+void HeapObject::set_map_word(MapWord map_word) {
+ // WRITE_FIELD does not update the remembered set, but there is no need
+ // here.
+ WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+}
HeapObject* HeapObject::FromAddress(Address address) {
}
+bool HeapObject::IsMarked() {
+ return map_word().IsMarked();
+}
+
+
+void HeapObject::SetMark() {
+ ASSERT(!IsMarked());
+ MapWord first_word = map_word();
+ first_word.SetMark();
+ set_map_word(first_word);
+}
+
+
+void HeapObject::ClearMark() {
+ ASSERT(IsMarked());
+ MapWord first_word = map_word();
+ first_word.ClearMark();
+ set_map_word(first_word);
+}
+
+
+bool HeapObject::IsOverflowed() {
+ return map_word().IsOverflowed();
+}
+
+
+void HeapObject::SetOverflow() {
+ MapWord first_word = map_word();
+ first_word.SetOverflow();
+ set_map_word(first_word);
+}
+
+
+void HeapObject::ClearOverflow() {
+ ASSERT(IsOverflowed());
+ MapWord first_word = map_word();
+ first_word.ClearOverflow();
+ set_map_word(first_word);
+}
+
+
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
}
-InlineCacheState Code::state() {
- InlineCacheState result = ExtractStateFromFlags(flags());
+InlineCacheState Code::ic_state() {
+ InlineCacheState result = ExtractICStateFromFlags(flags());
// Only allow uninitialized or debugger states for non-IC code
// objects. This is used in the debugger to determine whether or not
// a call to code object has been replaced with a debug break call.
PropertyType Code::type() {
- ASSERT(state() == MONOMORPHIC);
+ ASSERT(ic_state() == MONOMORPHIC);
return ExtractTypeFromFlags(flags());
}
CodeStub::Major Code::major_key() {
- // TODO(1238541): Simplify this somewhat complicated encoding.
ASSERT(kind() == STUB);
- int low = ExtractStateFromFlags(flags());
- int high = ExtractTypeFromFlags(flags());
- return static_cast<CodeStub::Major>(high << 3 | low);
+ return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
+ kStubMajorKeyOffset));
+}
+
+
+void Code::set_major_key(CodeStub::Major major) {
+ ASSERT(kind() == STUB);
+ ASSERT(0 <= major && major < 256);
+ WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
}
Code::Flags Code::ComputeFlags(Kind kind,
- InlineCacheState state,
+ InlineCacheState ic_state,
PropertyType type,
int argc) {
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
- bits |= state << kFlagsStateShift;
+ bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
bits |= argc << kFlagsArgumentsCountShift;
// Cast to flags and validate result before returning it.
Flags result = static_cast<Flags>(bits);
ASSERT(ExtractKindFromFlags(result) == kind);
- ASSERT(ExtractStateFromFlags(result) == state);
+ ASSERT(ExtractICStateFromFlags(result) == ic_state);
ASSERT(ExtractTypeFromFlags(result) == type);
ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
return result;
}
-InlineCacheState Code::ExtractStateFromFlags(Flags flags) {
- int bits = (flags & kFlagsStateMask) >> kFlagsStateShift;
+InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
+ int bits = (flags & kFlagsICStateMask) >> kFlagsICStateShift;
return static_cast<InlineCacheState>(bits);
}
Code::ICTargetState Code::ic_flag() {
- return static_cast<ICTargetState>(READ_INT_FIELD(this, kICFlagOffset));
+ return static_cast<ICTargetState>(READ_BYTE_FIELD(this, kICFlagOffset));
}
void Code::set_ic_flag(ICTargetState value) {
- WRITE_INT_FIELD(this, kICFlagOffset, value);
+ WRITE_BYTE_FIELD(this, kICFlagOffset, value);
}
StringInputBuffer buf(this);
bool truncated = false;
- if (len > 1024) {
- len = 1024;
+ if (len > kMaxShortPrintLength) {
+ len = kMaxShortPrintLength;
truncated = true;
}
bool ascii = true;
Object* Map::UpdateCodeCache(String* name, Code* code) {
- ASSERT(code->state() == MONOMORPHIC);
+ ASSERT(code->ic_state() == MONOMORPHIC);
FixedArray* cache = code_cache();
// When updating the code cache we disregard the type encoded in the
};
+// Heap objects typically have a map pointer in their first word. However,
+// during GC other data (eg, mark bits, forwarding addresses) is sometimes
+// encoded in the first word. The class MapWord is an abstraction of the
+// value in a heap object's first word.
+class MapWord BASE_EMBEDDED {
+ public:
+ // Normal state: the map word contains a map pointer.
+
+ // Create a map word from a map pointer.
+ static inline MapWord FromMap(Map* map);
+
+ // View this map word as a map pointer.
+ inline Map* ToMap();
+
+
+ // Scavenge collection: the map word of live objects in the from space
+ // contains a forwarding address (a heap object pointer in the to space).
+
+ // True if this map word is a forwarding address for a scavenge
+ // collection. Only valid during a scavenge collection (specifically,
+ // when all map words are heap object pointers, ie. not during a full GC).
+ inline bool IsForwardingAddress();
+
+ // Create a map word from a forwarding address.
+ static inline MapWord FromForwardingAddress(HeapObject* object);
+
+ // View this map word as a forwarding address.
+ inline HeapObject* ToForwardingAddress();
+
+
+ // Marking phase of full collection: the map word of live objects is
+ // marked, and may be marked as overflowed (eg, the object is live, its
+ // children have not been visited, and it does not fit in the marking
+ // stack).
+
+ // True if this map word's mark bit is set.
+ inline bool IsMarked();
+
+  // Set this map word's mark bit.
+ inline void SetMark();
+
+  // Clear this map word's mark bit.
+ inline void ClearMark();
+
+ // True if this map word's overflow bit is set.
+ inline bool IsOverflowed();
+
+  // Set this map word's overflow bit.
+ inline void SetOverflow();
+
+  // Clear this map word's overflow bit.
+ inline void ClearOverflow();
+
+
+ // Compacting phase of a full compacting collection: the map word of live
+ // objects contains an encoding of the original map address along with the
+ // forwarding address (represented as an offset from the first live object
+ // in the same page as the (old) object address).
+
+ // Create a map word from a map address and a forwarding address offset.
+ static inline MapWord EncodeAddress(Address map_address, int offset);
+
+ // Return the map address encoded in this map word.
+ inline Address DecodeMapAddress(MapSpace* map_space);
+
+ // Return the forwarding offset encoded in this map word.
+ inline int DecodeOffset();
+
+
+ // During serialization: the map word is used to hold an encoded
+ // address, and possibly a mark bit (set and cleared with SetMark
+ // and ClearMark).
+
+ // Create a map word from an encoded address.
+ static inline MapWord FromEncodedAddress(Address address);
+
+ inline Address ToEncodedAddress();
+
+ private:
+ // HeapObject calls the private constructor and directly reads the value.
+ friend class HeapObject;
+
+ explicit MapWord(uintptr_t value) : value_(value) {}
+
+ uintptr_t value_;
+
+ // Bits used by the marking phase of the garbage collector.
+ //
+  // The first word of a heap object is normally a map pointer. The last two
+ // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
+ // mark an object as live and/or overflowed:
+ // last bit = 0, marked as alive
+ // second bit = 1, overflowed
+ // An object is only marked as overflowed when it is marked as live while
+ // the marking stack is overflowed.
+ static const int kMarkingBit = 0; // marking bit
+ static const int kMarkingMask = (1 << kMarkingBit); // marking mask
+ static const int kOverflowBit = 1; // overflow bit
+ static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
+
+ // Forwarding pointers and map pointer encoding
+ // 31 21 20 10 9 0
+ // +-----------------+------------------+-----------------+
+ // |forwarding offset|page offset of map|page index of map|
+ // +-----------------+------------------+-----------------+
+ // 11 bits 11 bits 10 bits
+ static const int kMapPageIndexBits = 10;
+ static const int kMapPageOffsetBits = 11;
+ static const int kForwardingOffsetBits = 11;
+
+ static const int kMapPageIndexShift = 0;
+ static const int kMapPageOffsetShift =
+ kMapPageIndexShift + kMapPageIndexBits;
+ static const int kForwardingOffsetShift =
+ kMapPageOffsetShift + kMapPageOffsetBits;
+
+ // 0x000003FF
+ static const uint32_t kMapPageIndexMask =
+ (1 << kMapPageOffsetShift) - 1;
+
+ // 0x001FFC00
+ static const uint32_t kMapPageOffsetMask =
+ ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
+
+ // 0xFFE00000
+ static const uint32_t kForwardingOffsetMask =
+ ~(kMapPageIndexMask | kMapPageOffsetMask);
+};
+
+
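The compaction encoding above packs three fields into one word. A standalone round trip with the same shifts and masks, using hypothetical values and assuming kObjectAlignmentBits is 2 (4-byte alignment on a 32-bit build):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kObjectAlignmentBits = 2;     // assumed: 32-bit, 4-byte alignment
    const int kMapPageIndexShift = 0;
    const int kMapPageOffsetShift = 10;     // kMapPageIndexBits
    const int kForwardingOffsetShift = 21;  // 10 + 11 bits

    int map_page_index = 3;        // hypothetical index of the map's page
    int map_page_offset = 0x100;   // hypothetical byte offset of the map in its page
    int forwarding_offset = 64;    // hypothetical live bytes preceding the object

    uint32_t encoded =
        ((forwarding_offset >> kObjectAlignmentBits) << kForwardingOffsetShift) |
        ((map_page_offset >> kObjectAlignmentBits) << kMapPageOffsetShift) |
        (map_page_index << kMapPageIndexShift);   // 0x02010003

    // Decoding with the masks from the class recovers the original values.
    assert(static_cast<int>(encoded & 0x3FF) == map_page_index);
    assert(static_cast<int>(((encoded >> kMapPageOffsetShift) & 0x7FF)
                            << kObjectAlignmentBits) == map_page_offset);
    assert(static_cast<int>((encoded >> kForwardingOffsetShift)
                            << kObjectAlignmentBits) == forwarding_offset);
    return 0;
  }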
// HeapObject is the superclass for all classes describing heap allocated
// objects.
class HeapObject: public Object {
public:
- // [map]: contains a Map which contains the objects reflective information.
+ // [map]: Contains a map which contains the object's reflective
+ // information.
inline Map* map();
inline void set_map(Map* value);
+ // During garbage collection, the map word of a heap object does not
+ // necessarily contain a map pointer.
+ inline MapWord map_word();
+ inline void set_map_word(MapWord map_word);
+
// Converts an address to a HeapObject pointer.
static inline HeapObject* FromAddress(Address address);
// GC internal.
inline int SizeFromMap(Map* map);
+  // Support for marking heap objects during the marking phase of GC.
+ // True if the object is marked live.
+ inline bool IsMarked();
+
+ // Mutate this object's map pointer to indicate that the object is live.
+ inline void SetMark();
+
+ // Mutate this object's map pointer to remove the indication that the
+ // object is live (ie, partially restore the map pointer).
+ inline void ClearMark();
+
+ // True if this object is marked as overflowed. Overflowed objects have
+ // been reached and marked during marking of the heap, but their children
+ // have not necessarily been marked and they have not been pushed on the
+ // marking stack.
+ inline bool IsOverflowed();
+
+ // Mutate this object's map pointer to indicate that the object is
+ // overflowed.
+ inline void SetOverflow();
+
+ // Mutate this object's map pointer to remove the indication that the
+ // object is overflowed (ie, partially restore the map pointer).
+ inline void ClearOverflow();
+
static inline Object* GetHeapObjectField(HeapObject* obj, int index);
// Casting.
// [flags]: Access to specific code flags.
inline Kind kind();
- inline InlineCacheState state(); // only valid for IC stubs
+ inline InlineCacheState ic_state(); // only valid for IC stubs
inline PropertyType type(); // only valid for monomorphic IC stubs
inline int arguments_count(); // only valid for call IC stubs
- inline CodeStub::Major major_key(); // only valid for kind STUB
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
inline ICTargetState ic_flag();
inline void set_ic_flag(ICTargetState value);
+ // [major_key]: For kind STUB, the major key.
+ inline CodeStub::Major major_key();
+ inline void set_major_key(CodeStub::Major major);
+
// Flags operations.
static inline Flags ComputeFlags(Kind kind,
- InlineCacheState state = UNINITIALIZED,
+ InlineCacheState ic_state = UNINITIALIZED,
PropertyType type = NORMAL,
int argc = -1);
int argc = -1);
static inline Kind ExtractKindFromFlags(Flags flags);
- static inline InlineCacheState ExtractStateFromFlags(Flags flags);
+ static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
static const int kRelocationSizeOffset = kInstructionSizeOffset + kIntSize;
static const int kSInfoSizeOffset = kRelocationSizeOffset + kIntSize;
static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
- static const int kICFlagOffset = kFlagsOffset + kIntSize;
- static const int kHeaderSize = kICFlagOffset + kIntSize;
+ static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
+ static const int kHeaderSize = kKindSpecificFlagsOffset + kIntSize;
+
+ // Byte offsets within kKindSpecificFlagsOffset.
+ static const int kICFlagOffset = kKindSpecificFlagsOffset + 0;
+ static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
// Flags layout.
- static const int kFlagsStateShift = 0;
+ static const int kFlagsICStateShift = 0;
static const int kFlagsKindShift = 3;
static const int kFlagsTypeShift = 6;
static const int kFlagsArgumentsCountShift = 9;
- static const int kFlagsStateMask = 0x00000007; // 000000111
+ static const int kFlagsICStateMask = 0x00000007; // 000000111
static const int kFlagsKindMask = 0x00000038; // 000111000
static const int kFlagsTypeMask = 0x000001C0; // 111000000
static const int kFlagsArgumentsCountMask = 0xFFFFFE00;
static const int kHashComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
+ // Limit for truncation in short printing.
+ static const int kMaxShortPrintLength = 1024;
+
// Support for regular expressions.
const uc16* GetTwoByteData();
const uc16* GetTwoByteData(unsigned start);
double OS::DaylightSavingsOffset(double time) {
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
- return t->tm_isdst ? 3600 * msPerSecond : 0;
+ return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
double OS::LocalTimeOffset() {
- // 1199174400 = Jan 1 2008 (UTC).
- // Random date where daylight savings time is not in effect.
- static const int kJan1st2008 = 1199174400;
- time_t tv = static_cast<time_t>(kJan1st2008);
+ time_t tv = time(NULL);
struct tm* t = localtime(&tv);
- ASSERT(t->tm_isdst <= 0);
- return static_cast<double>(t->tm_gmtoff * msPerSecond);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
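A worked example of the new calculation, with hypothetical values for a US Pacific timestamp taken in summer: localtime reports tm_gmtoff as -25200 seconds (UTC-7 because daylight savings is in effect) and tm_isdst > 0, so the function returns -25200 * 1000 - 3600000 = -28800000 ms, which is the standard UTC-8 offset with the daylight savings hour removed.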
int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
- return vsnprintf(str, size, format, args); // forward to linux.
+ int n = vsnprintf(str, size, format, args); // forward to linux.
+ if (n < 0 || static_cast<size_t>(n) >= size) {
+ str[size - 1] = '\0';
+ return -1;
+ } else {
+ return n;
+ }
}
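Under the new contract a negative return unambiguously means the output was truncated and the buffer is still a valid C string. An illustrative caller (the buffer size and message are hypothetical):

  char buffer[32];
  int written = OS::SNPrintF(buffer, sizeof(buffer), "counter = %d", 42);
  if (written < 0) {
    // The formatted output did not fit; 'buffer' is still null-terminated.
  }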
}
-void* OS::Allocate(const size_t requested, size_t* allocated) {
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable) {
const size_t msize = RoundUp(requested, getpagesize());
- void* mbase = mmap(NULL, msize, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
+void OS::DebugBreak() {
+#if defined (__arm__) || defined(__thumb__)
+ asm("bkpt 0");
+#else
+ asm("int $3");
+#endif
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
}
-bool VirtualMemory::Commit(void* address, size_t size) {
- if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE | PROT_EXEC,
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {
return false;
explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
- virtual void Wait() { sem_wait(&sem_); }
-
+ virtual void Wait();
virtual void Signal() { sem_post(&sem_); }
-
private:
sem_t sem_;
};
+void LinuxSemaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&sem_);
+ if (result == 0) return; // Successfully got semaphore.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
Semaphore* OS::CreateSemaphore(int count) {
return new LinuxSemaphore(count);
}
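For context, a minimal handshake between two threads using this interface; the retry loop above makes Wait safe against EINTR on Linux (illustrative only, error handling omitted):

  Semaphore* ready = OS::CreateSemaphore(0);
  // Thread A, after producing an item:
  //   ready->Signal();
  // Thread B, before consuming the item:
  //   ready->Wait();   // returns only once the item has been signalled
  delete ready;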
-// TODO(1233584): Implement Linux support.
-Select::Select(int len, Semaphore** sems) {
- FATAL("Not implemented");
-}
-
-
-Select::~Select() {
- FATAL("Not implemented");
-}
-
-
-int Select::WaitSingle() {
- FATAL("Not implemented");
- return 0;
-}
-
-
-void Select::WaitAll() {
- FATAL("Not implemented");
-}
-
#ifdef ENABLE_LOGGING_AND_PROFILING
static ProfileSampler* active_sampler_ = NULL;
double OS::DaylightSavingsOffset(double time) {
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
- return t->tm_isdst ? 3600 * msPerSecond : 0;
+ return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
double OS::LocalTimeOffset() {
- // 1199174400 = Jan 1 2008 (UTC).
- // Random date where daylight savings time is not in effect.
- static const int kJan1st2008 = 1199174400;
- time_t tv = static_cast<time_t>(kJan1st2008);
+ time_t tv = time(NULL);
struct tm* t = localtime(&tv);
- ASSERT(t->tm_isdst <= 0);
- return static_cast<double>(t->tm_gmtoff * msPerSecond);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
- return vsnprintf(str, size, format, args); // forward to Mac OS X.
+ int n = vsnprintf(str, size, format, args); // forward to Mac OS X.
+ if (n < 0 || static_cast<size_t>(n) >= size) {
+ str[size - 1] = '\0';
+ return -1;
+ } else {
+ return n;
+ }
}
size_t OS::AllocateAlignment() {
- return kPointerSize;
+ return getpagesize();
}
-void* OS::Allocate(const size_t requested, size_t* allocated) {
- *allocated = requested;
- void* mbase = malloc(requested);
- UpdateAllocatedSpaceLimits(mbase, requested);
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (mbase == MAP_FAILED) {
+ LOG(StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
void OS::Free(void* buf, const size_t length) {
- free(buf);
- USE(length);
+ // TODO(1240712): munmap has a return value which is ignored here.
+ munmap(buf, length);
}
}
+void OS::DebugBreak() {
+ asm("int $3");
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
}
-bool VirtualMemory::Commit(void* address, size_t size) {
- if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE | PROT_EXEC,
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {
return false;
semaphore_destroy(mach_task_self(), semaphore_);
}
+  // The MacOS mach semaphore documentation claims it does not have spurious
+  // wakeups the way pthreads semaphores do, so the code from the Linux
+  // platform is not needed here.
void Wait() { semaphore_wait(semaphore_); }
void Signal() { semaphore_signal(semaphore_); }
return new MacOSSemaphore(count);
}
-
-// TODO(1233584): Implement MacOS support.
-Select::Select(int len, Semaphore** sems) {
- FATAL("Not implemented");
-}
-
-
-Select::~Select() {
- FATAL("Not implemented");
-}
-
-
-int Select::WaitSingle() {
- FATAL("Not implemented");
- return 0;
-}
-
-
-void Select::WaitAll() {
- FATAL("Not implemented");
-}
-
#ifdef ENABLE_LOGGING_AND_PROFILING
static ProfileSampler* active_sampler_ = NULL;
}
-// Returns the local time offset in milliseconds east of UTC.
+// Returns the local time offset in milliseconds east of UTC without
+// taking daylight savings time into account.
double OS::LocalTimeOffset() {
- // 1199174400 = Jan 1 2008 (UTC).
- // Random date where daylight savings time is not in effect.
- int64_t offset = Time(1199174400).LocalOffset();
- return static_cast<double>(offset);
+ // Use current time, rounded to the millisecond.
+ Time t(TimeCurrentMillis());
+  // Time::LocalOffset includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
}
int n = _vsnprintf(str, size, format, args);
// Make sure to zero-terminate the string if the output was
// truncated or if there was an error.
- if (n < 0 || static_cast<size_t>(n) >= size) str[size - 1] = '\0';
- return n;
+ if (n < 0 || static_cast<size_t>(n) >= size) {
+ str[size - 1] = '\0';
+ return -1;
+ } else {
+ return n;
+ }
}
}
-void* OS::Allocate(const size_t requested, size_t* allocated) {
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, GetPageSize());
  // Windows XP SP2 allows Data Execution Prevention (DEP).
- LPVOID mbase = VirtualAlloc(NULL, requested, MEM_COMMIT | MEM_RESERVE,
- PAGE_EXECUTE_READWRITE);
+ int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ LPVOID mbase = VirtualAlloc(NULL, requested, MEM_COMMIT | MEM_RESERVE, prot);
if (mbase == NULL) {
LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
}
+void OS::DebugBreak() {
+ __debugbreak();
+}
+
+
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory)
VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
address_ =
- VirtualAlloc(address_hint, size, MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+ VirtualAlloc(address_hint, size, MEM_RESERVE, PAGE_NOACCESS);
size_ = size;
}
}
-bool VirtualMemory::Commit(void* address, size_t size) {
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE)) {
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
}
-// ----------------------------------------------------------------------------
-// Win32 select support.
-//
-// On Win32 the function WaitForMultipleObjects can be used to wait
-// for all kind of synchronization handles. Currently the
-// implementation only suports the fixed Select::MaxSelectSize maximum
-// number of handles
-
-
-class Select::PlatformData : public Malloced {
- public:
- PlatformData(int len, Semaphore** sems);
- int len_;
- HANDLE objs_[Select::MaxSelectSize];
-};
-
-
-Select::Select(int len, Semaphore** sems) {
- data_ = new PlatformData(len, sems);
-}
-
-
-Select::~Select() {
- delete data_;
-}
-
-
-int Select::WaitSingle() {
- return WaitForMultipleObjects(data_->len_,
- data_->objs_,
- FALSE,
- INFINITE) - WAIT_OBJECT_0;
-}
-
-
-void Select::WaitAll() {
- WaitForMultipleObjects(data_->len_, data_->objs_, TRUE, INFINITE);
-}
-
-
// ----------------------------------------------------------------------------
// Win32 semaphore support.
//
private:
HANDLE sem;
- friend class Select::PlatformData;
};
return new Win32Semaphore(count);
}
-
-
-Select::PlatformData::PlatformData(int len, Semaphore** sems) : len_(len) {
- ASSERT(len_ < Select::MaxSelectSize);
- for (int i = 0; i < len_; i++) {
- objs_[i] = reinterpret_cast<Win32Semaphore*>(sems[i])->sem;
- }
-}
-
-
#ifdef ENABLE_LOGGING_AND_PROFILING
// ----------------------------------------------------------------------------
static void PrintError(const char* format, ...);
static void VPrintError(const char* format, va_list args);
- // Allocate/Free memory used by JS heap.
- // Pages are readable/writeable/executable by default.
+ // Allocate/Free memory used by JS heap. Pages are readable/writeable, but
+ // they are not guaranteed to be executable unless 'executable' is true.
// Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested, size_t* allocated);
+ static void* Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable);
static void Free(void* buf, const size_t length);
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
// Abort the current process.
static void Abort();
+ // Debug break.
+ static void DebugBreak();
+
// Walk the stack.
static const int kStackWalkError = -1;
static const int kStackWalkMaxNameLen = 256;
virtual void* memory() = 0;
};
- // Safe formatting print.
+ // Safe formatting print. Ensures that str is always null-terminated.
+ // Returns the number of chars written, or -1 if output was truncated.
static int SNPrintF(char* str, size_t size, const char* format, ...);
static int VSNPrintF(char* str,
size_t size,
size_t size() { return size_; }
// Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address, size_t size);
+ bool Commit(void* address, size_t size, bool executable);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// ----------------------------------------------------------------------------
-// Guard
+// ScopedLock
//
-// Stack-allocated Guards provide block-scoped locking and unlocking
+// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
// of a mutex.
-
-class Guard {
+class ScopedLock {
public:
- explicit Guard(Mutex* mux): mux_(mux) { mux_->Lock(); }
- ~Guard() { mux_->Unlock(); }
+ explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
+ mutex_->Lock();
+ }
+ ~ScopedLock() {
+ mutex_->Unlock();
+ }
private:
- Mutex* mux_;
- DISALLOW_EVIL_CONSTRUCTORS(Guard);
+ Mutex* mutex_;
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedLock);
};
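A typical use of the renamed guard; the OS::CreateMutex factory and the surrounding function are assumptions for illustration:

  static Mutex* counter_mutex = OS::CreateMutex();  // assumed platform factory
  static int counter = 0;

  void IncrementCounter() {
    ScopedLock lock(counter_mutex);  // Lock() in the constructor ...
    counter++;
  }                                  // ... Unlock() when 'lock' leaves scope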
};
-// ----------------------------------------------------------------------------
-// Select
-//
-// A selector makes it possible to wait for several synchronization objects
-
-class Select {
- public:
- Select(int len, Semaphore** sems);
- ~Select();
- int WaitSingle();
- void WaitAll();
- static const int MaxSelectSize = 32;
-
- class PlatformData;
- private:
- PlatformData* data_; // Platform specific data.
- DISALLOW_EVIL_CONSTRUCTORS(Select);
-};
-
-
#ifdef ENABLE_LOGGING_AND_PROFILING
// ----------------------------------------------------------------------------
// ProfileSampler
target->shared()->set_length(fun->shared()->length());
target->shared()->set_formal_parameter_count(
fun->shared()->formal_parameter_count());
+ // Set the source code of the target function.
+ target->shared()->set_script(fun->shared()->script());
+ target->shared()->set_start_position(fun->shared()->start_position());
+ target->shared()->set_end_position(fun->shared()->end_position());
context = Handle<Context>(fun->context());
// Make sure we get a fresh copy of the literal vector to avoid
static Object* Runtime_NewContext(Arguments args) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 1);
- CONVERT_CHECKED(JSFunction, function, args[1]);
+ CONVERT_CHECKED(JSFunction, function, args[0]);
int length = ScopeInfo<>::NumberOfContextSlots(function->code());
Object* result = Heap::AllocateFunctionContext(length, function);
if (result->IsFailure()) return result;
Top::set_context(Context::cast(result));
- return args[0]; // return TOS
+ return result; // non-failure
}
static Object* Runtime_PushContext(Arguments args) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 1);
// Convert the object to a proper JavaScript object.
- Object* object = args[1];
+ Object* object = args[0];
if (!object->IsJSObject()) {
object = object->ToObject();
if (object->IsFailure()) {
if (!Failure::cast(object)->IsInternalError()) return object;
HandleScope scope;
- Handle<Object> handle(args[1]);
+ Handle<Object> handle(args[0]);
Handle<Object> result =
Factory::NewTypeError("with_expression", HandleVector(&handle, 1));
return Top::Throw(*result);
Top::set_context(Context::cast(result));
- return args[0]; // return TOS
+ return result;
}
return args[0];
}
- // Don't break in system functions. If the current function is either in the
- // builtins object of some context or is in the debug context just return with
- // the debug break stack guard active.
+ // Don't break in system functions. If the current function is
+ // either in the builtins object of some context or is in the debug
+ // context just return with the debug break stack guard active.
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
Object* fun = frame->function();
SaveBreakFrame save;
EnterDebuggerContext enter;
- // Process debug requests. Returns true if break request.
- bool break_request = Debugger::ProcessPendingRequests();
-
- // Notify the debug event listeners if break request.
- if (break_request) {
- Debugger::OnDebugBreak(Factory::undefined_value());
- }
+ // Notify the debug event listeners.
+ Debugger::OnDebugBreak(Factory::undefined_value());
// Return to continue execution.
return args[0];
F(StackGuard, 1) \
\
/* Contexts */ \
- F(NewContext, 2) \
- F(PushContext, 2) \
+ F(NewContext, 1) \
+ F(PushContext, 1) \
F(LookupContext, 2) \
F(LoadContextSlot, 2) \
F(LoadContextSlotNoReferenceError, 2) \
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
- Add(FUNCTION_ADDR(StackFrameIterator::RestoreCalleeSavedForTopHandler),
- RUNTIME_ENTRY,
- 2,
- "StackFrameIterator::RestoreCalleeSavedForTopHandler");
// Miscellaneous
Add(ExternalReference::builtin_passed_function().address(),
int length() { return len_; }
+ Address position() { return reinterpret_cast<Address>(&str_[len_]); }
+
private:
char* str_; // the snapshot
  int len_; // the current length of str_
}
+class ReferenceUpdater: public ObjectVisitor {
+ public:
+ ReferenceUpdater(HeapObject* obj, Serializer* serializer)
+ : obj_address_(obj->address()),
+ serializer_(serializer),
+ reference_encoder_(serializer->reference_encoder_),
+ offsets_(8),
+ addresses_(8) {
+ }
+
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; ++p) {
+ if ((*p)->IsHeapObject()) {
+ offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
+ Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
+ addresses_.Add(a);
+ }
+ }
+ }
+
+ virtual void VisitExternalReferences(Address* start, Address* end) {
+ for (Address* p = start; p < end; ++p) {
+ uint32_t code = reference_encoder_->Encode(*p);
+ CHECK(*p == NULL ? code == 0 : code != 0);
+ offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
+ addresses_.Add(reinterpret_cast<Address>(code));
+ }
+ }
+
+ virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
+ Address target = rinfo->target_address();
+ uint32_t encoding = reference_encoder_->Encode(target);
+ CHECK(target == NULL ? encoding == 0 : encoding != 0);
+ offsets_.Add(reinterpret_cast<Address>(rinfo->pc()) - obj_address_);
+ addresses_.Add(reinterpret_cast<Address>(encoding));
+ }
+
+ void Update(Address start_address) {
+ for (int i = 0; i < offsets_.length(); i++) {
+ Address* p = reinterpret_cast<Address*>(start_address + offsets_[i]);
+ *p = addresses_[i];
+ }
+ }
+
+ private:
+ Address obj_address_;
+ Serializer* serializer_;
+ ExternalReferenceEncoder* reference_encoder_;
+ List<int> offsets_;
+ List<Address> addresses_;
+};
+
+
+// Helper functions for a map of encoded heap object addresses.
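+// The serializer uses this map to remember the encoded address assigned to
+// every object already written to the snapshot, so later references to the
+// same object reuse that address. The low two bits of a heap object pointer
+// are the same for every object, so they are shifted out of the hash.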
+static uint32_t HeapObjectHash(HeapObject* key) {
+ return reinterpret_cast<uint32_t>(key) >> 2;
+}
+
+
+static bool MatchHeapObject(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
Serializer::Serializer()
- : global_handles_(4) {
+ : global_handles_(4),
+ saved_addresses_(MatchHeapObject) {
root_ = true;
roots_ = 0;
objects_ = 0;
}
+bool Serializer::IsVisited(HeapObject* obj) {
+ HashMap::Entry* entry =
+ saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
+ return entry != NULL;
+}
+
+
+Address Serializer::GetSavedAddress(HeapObject* obj) {
+ HashMap::Entry* entry =
+ saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
+ ASSERT(entry != NULL);
+ return reinterpret_cast<Address>(entry->value);
+}
+
+
+void Serializer::SaveAddress(HeapObject* obj, Address addr) {
+ HashMap::Entry* entry =
+ saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
+ entry->value = addr;
+}
+
+
void Serializer::Serialize() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
}
-// Serialize roots by writing them into the stream. Serialize pointers
-// in HeapObjects by changing them to the encoded address where the
-// object will be allocated on deserialization
+// Serialize objects by writing them into the stream.
void Serializer::VisitPointers(Object** start, Object** end) {
bool root = root_;
root_ = false;
for (Object** p = start; p < end; ++p) {
bool serialized;
+ Address a = Encode(*p, &serialized);
if (root) {
roots_++;
- Address a = Encode(*p, &serialized);
// If the object was not just serialized,
// write its encoded address instead.
if (!serialized) PutEncodedAddress(a);
- } else {
- // Rewrite the pointer in the HeapObject.
- *p = reinterpret_cast<Object*>(Encode(*p, &serialized));
}
}
root_ = root;
}
-void Serializer::VisitExternalReferences(Address* start, Address* end) {
- for (Address* p = start; p < end; ++p) {
- uint32_t code = reference_encoder_->Encode(*p);
- CHECK(*p == NULL ? code == 0 : code != 0);
- *p = reinterpret_cast<Address>(code);
- }
-}
-
-
-void Serializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- Address target = rinfo->target_address();
- uint32_t encoding = reference_encoder_->Encode(target);
- CHECK(target == NULL ? encoding == 0 : encoding != 0);
- uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->pc());
- *pc = encoding;
-}
-
-
class GlobalHandlesRetriever: public ObjectVisitor {
public:
explicit GlobalHandlesRetriever(List<Object**>* handles)
#else
writer_->PutC('0');
#endif
- // Write sizes of paged memory spaces.
+ // Write sizes of paged memory spaces. Allocate extra space for the old
+ // and code spaces, because objects in new space will be promoted to them.
writer_->PutC('S');
writer_->PutC('[');
- writer_->PutInt(Heap::old_space()->Size());
+ writer_->PutInt(Heap::old_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
- writer_->PutInt(Heap::code_space()->Size());
+ writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::map_space()->Size());
writer_->PutC(']');
HandleScopeImplementer::instance()->RestoreContext();
contexts.Add(context);
}
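+ // Push the contexts back (in reverse pop order) so that writing the
+ // snapshot leaves the handle scope implementer's stack as it was found.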
+ for (int i = contexts.length() - 1; i >= 0; i--) {
+ HandleScopeImplementer::instance()->SaveContext(contexts[i]);
+ }
PutGlobalHandleStack(contexts);
List<Handle<Object> > security_contexts(2);
HandleScopeImplementer::instance()->RestoreSecurityContext();
security_contexts.Add(context);
}
+ for (int i = security_contexts.length() - 1; i >= 0; i--) {
+ Handle<Object> context = security_contexts[i];
+ HandleScopeImplementer::instance()->SaveSecurityContext(context);
+ }
PutGlobalHandleStack(security_contexts);
}
return reinterpret_cast<Address>(o);
} else {
HeapObject* obj = HeapObject::cast(o);
- if (is_marked(obj)) {
- // Already serialized: encoded address is in map.
- intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
- return reinterpret_cast<Address>(clear_mark_bit(map_word));
+ if (IsVisited(obj)) {
+ return GetSavedAddress(obj);
} else {
// First visit: serialize the object.
*serialized = true;
// allocated during deserialization.
Address addr = Allocate(obj).Encode();
+ SaveAddress(obj, addr);
+
if (type == CODE_TYPE) {
Code* code = Code::cast(obj);
// Ensure Code objects contain Object pointers, not Addresses.
LOG(CodeMoveEvent(code->address(), addr));
}
- // Put the encoded address in the map() of the object, and mark the
- // object. Do this to break recursion before visiting any pointers
- // in the object.
- obj->set_map(reinterpret_cast<Map*>(addr));
- set_mark(obj);
-
// Write out the object prologue: type, size, and simulated address of obj.
writer_->PutC('[');
CHECK_EQ(0, size & kObjectAlignmentMask);
Address map_addr = Encode(map, &serialized);
// Visit all the pointers in the object other than the map. This
- // will rewrite these pointers in place in the body of the object
- // with their encoded RelativeAddresses, and recursively serialize
- // any as-yet-unvisited objects.
+ // will recursively serialize any as-yet-unvisited objects.
obj->IterateBody(type, size, this);
// Mark end of recursively embedded objects, start of object body.
  // Write out the raw contents of the object following the map
  // pointer; the pointers in the snapshot copy are then patched with
  // their encoded values by the ReferenceUpdater. No compression, but
  // fast to deserialize.
+ ReferenceUpdater updater(obj, this);
+ obj->IterateBody(type, size, &updater);
writer_->PutBytes(obj->address() + HeapObject::kSize,
size - HeapObject::kSize);
+ updater.Update(writer_->position() - size);
#ifdef DEBUG
if (FLAG_debug_serialization) {
}
#endif
+ if (type == CODE_TYPE) {
+ Code* code = Code::cast(obj);
+ // Convert relocations from Object* to Address in Code objects
+ code->ConvertICTargetsFromObjectToAddress();
+ }
+
objects_++;
return addr;
}
found = Heap::InSpace(obj, s);
}
CHECK(found);
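+ // Objects currently in new space are allocated in the space they would be
+ // promoted to (see the extra room reserved for this in the snapshot
+ // header).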
+ if (s == NEW_SPACE) {
+ s = Heap::TargetSpace(obj);
+ }
int size = obj->Size();
return allocator_[s]->Allocate(size);
}
reference_decoder_ = new ExternalReferenceDecoder();
// By setting linear allocation only, we forbid the use of free list
// allocation which is not predicted by SimulatedAddress.
- Heap::SetLinearAllocationOnly(true);
GetHeader();
Heap::IterateRoots(this);
GetContextStack();
- Heap::SetLinearAllocationOnly(false);
Heap::RebuildRSets();
}
int page_offset,
PagedSpace* space,
List<Page*>* page_list) {
-#ifdef DEBUG
- space->CheckLinearAllocationOnly();
-#endif
-
ASSERT(page_index < page_list->length());
Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
return HeapObject::FromAddress(address);
class RelativeAddress;
class SimulatedHeapSpace;
class SnapshotWriter;
+class ReferenceUpdater;
class Serializer: public ObjectVisitor {
static void disable() { serialization_enabled_ = false; }
private:
+ friend class ReferenceUpdater;
+
virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitExternalReferences(Address* start, Address* end);
- virtual void VisitRuntimeEntry(RelocInfo* rinfo);
+
+ bool IsVisited(HeapObject* obj);
+
+ Address GetSavedAddress(HeapObject* obj);
+
+ void SaveAddress(HeapObject* obj, Address addr);
void PutEncodedAddress(Address addr);
// Write the global flags into the file.
ExternalReferenceEncoder* reference_encoder_;
+ HashMap saved_addresses_;
+
DISALLOW_EVIL_CONSTRUCTORS(Serializer);
};
class Deserializer: public ObjectVisitor {
public:
// Create a deserializer. The snapshot is held in str and has size len.
- // Ownership of str is not assumed by the Deserializer.
Deserializer(const char* str, int len);
virtual ~Deserializer();
}
-static char* reg_names[] = { "r0", "r1", "r2", "r3",
- "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11",
- "r12", "r13", "r14", "r15",
- "pc", "lr", "sp", "ip",
- "fp", "sl", ""};
+static const char* reg_names[] = { "r0", "r1", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+ "pc", "lr", "sp", "ip",
+ "fp", "sl", ""};
-static int reg_nums[] = { 0, 1, 2, 3,
- 4, 5, 6, 7,
- 8, 9, 10, 11,
- 12, 13, 14, 15,
- 15, 14, 13, 12,
- 11, 10};
+static int reg_nums[] = { 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11,
+ 12, 13, 14, 15,
+ 15, 14, 13, 12,
+ 11, 10};
static int RegNameToRegNum(char* name) {
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
- asm("int $3");
+ v8::internal::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
if (args == 2) {
static int count_bits(int bit_vector) {
int count = 0;
while (bit_vector != 0) {
- if (bit_vector & 1 != 0) {
+ if ((bit_vector & 1) != 0) {
count++;
}
bit_vector >>= 1;
// the LR the simulation stops when returning to this call point.
set_register(lr, end_sim_pc);
+ // Remember the values of callee-saved registers.
+ // The code below assumes that r9 is not used as sb (static base) in
+ // simulator code and therefore is regarded as a callee-saved register.
+ int32_t r4_val = get_register(r4);
+ int32_t r5_val = get_register(r5);
+ int32_t r6_val = get_register(r6);
+ int32_t r7_val = get_register(r7);
+ int32_t r8_val = get_register(r8);
+ int32_t r9_val = get_register(r9);
+ int32_t r10_val = get_register(r10);
+ int32_t r11_val = get_register(r11);
+
+ // Set up the callee-saved registers with a known value, to be able to
+ // check that they are preserved properly across JS execution.
+ int32_t callee_saved_value = icount_;
+ set_register(r4, callee_saved_value);
+ set_register(r5, callee_saved_value);
+ set_register(r6, callee_saved_value);
+ set_register(r7, callee_saved_value);
+ set_register(r8, callee_saved_value);
+ set_register(r9, callee_saved_value);
+ set_register(r10, callee_saved_value);
+ set_register(r11, callee_saved_value);
+
// Start the simulation
execute();
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(get_register(r4), callee_saved_value);
+ CHECK_EQ(get_register(r5), callee_saved_value);
+ CHECK_EQ(get_register(r6), callee_saved_value);
+ CHECK_EQ(get_register(r7), callee_saved_value);
+ CHECK_EQ(get_register(r8), callee_saved_value);
+ CHECK_EQ(get_register(r9), callee_saved_value);
+ CHECK_EQ(get_register(r10), callee_saved_value);
+ CHECK_EQ(get_register(r11), callee_saved_value);
+
+ // Restore callee-saved registers with the original value.
+ set_register(r4, r4_val);
+ set_register(r5, r5_val);
+ set_register(r6, r6_val);
+ set_register(r7, r7_val);
+ set_register(r8, r8_val);
+ set_register(r9, r9_val);
+ set_register(r10, r10_val);
+ set_register(r11, r11_val);
+
int result = get_register(r0);
return reinterpret_cast<Object*>(result);
}
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) - limit)
+ (reinterpret_cast<uintptr_t>(this) >= limit ? \
+ reinterpret_cast<uintptr_t>(this) - limit : 0)
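+// Illustration (hypothetical values): if 'this' is at 0x00010000 and limit
+// is 256K, the unclamped subtraction would wrap around to a value near the
+// top of the address space and every stack check against the computed limit
+// would report that the stack had been exhausted; clamping to 0 avoids the
+// wrap-around, subject to the caveat in the note above.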
#endif // V8_SIMULATOR_IA32_H_
}
-// -----------------------------------------------------------------------------
-// Space
+// --------------------------------------------------------------------------
+// PagedSpace
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
}
+// Try linear allocation in the page of alloc_info's allocation top. Does
+// not contain slow case logic (e.g., move to the next page or try free list
+// allocation) so it can be used by all the allocation functions and for all
+// the paged spaces.
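+// Returns NULL when the current page is full; callers then fall back to
+// their slow path or construct a retry-after-GC failure.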
+HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
+ int size_in_bytes) {
+ Address current_top = alloc_info->top;
+ Address new_top = current_top + size_in_bytes;
+ if (new_top > alloc_info->limit) return NULL;
+
+ alloc_info->top = new_top;
+ ASSERT(alloc_info->VerifyPagedAllocation());
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ return HeapObject::FromAddress(current_top);
+}
+
+
+// Raw allocation.
+Object* PagedSpace::AllocateRaw(int size_in_bytes) {
+ ASSERT(HasBeenSetup());
+ ASSERT_OBJECT_SIZE(size_in_bytes);
+ HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+ if (object != NULL) return object;
+
+ object = SlowAllocateRaw(size_in_bytes);
+ if (object != NULL) return object;
+
+ return Failure::RetryAfterGC(size_in_bytes, identity());
+}
+
+
+// Reallocating (and promoting) objects during a compacting collection.
+Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
+ ASSERT(HasBeenSetup());
+ ASSERT_OBJECT_SIZE(size_in_bytes);
+ HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+ if (object != NULL) return object;
+
+ object = SlowMCAllocateRaw(size_in_bytes);
+ if (object != NULL) return object;
+
+ return Failure::RetryAfterGC(size_in_bytes, identity());
+}
+
+
+// Allocating during deserialization. Always roll to the next page in the
+// space, which should be suitably expanded.
+Object* PagedSpace::AllocateForDeserialization(int size_in_bytes) {
+ ASSERT(HasBeenSetup());
+ ASSERT_OBJECT_SIZE(size_in_bytes);
+ HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+ if (object != NULL) return object;
+
+ // The space should be pre-expanded.
+ Page* current_page = Page::FromAllocationTop(allocation_info_.top);
+ ASSERT(current_page->next_page()->is_valid());
+ object = AllocateInNextPage(current_page, size_in_bytes);
+
+ ASSERT(object != NULL);
+ return object;
+}
+
+
// -----------------------------------------------------------------------------
// LargeObjectChunk
AllocationInfo* alloc_info) {
Address new_top = alloc_info->top + size_in_bytes;
if (new_top > alloc_info->limit) {
- return Failure::RetryAfterGC(size_in_bytes, NEW_SPACE);
+ return Failure::RetryAfterGC(size_in_bytes, identity());
}
Object* obj = HeapObject::FromAddress(alloc_info->top);
DECLARE_bool(log_gc);
#endif
-// For paged spaces, top and limit should always be in the same page and top
-// should not be greater than limit.
-#define ASSERT_PAGED_ALLOCATION_INFO(info) \
- ASSERT((Page::FromAllocationTop((info).top) == \
- Page::FromAllocationTop((info).limit)) \
- &&((info).top <= (info).limit))
-
-
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
&& (info).top <= (space)->high() \
&& (info).limit == (space)->high())
-// ----------------------------------------------------------------------------
-// SpaceIterator
-
-SpaceIterator::SpaceIterator() : current_space_(NEW_SPACE), iterator_(NULL) {
- // SpaceIterator depends on AllocationSpace enumeration starts with NEW_SPACE.
- ASSERT(NEW_SPACE == 0);
-}
-
-
-SpaceIterator::~SpaceIterator() {
- // Delete active iterator if any.
- if (iterator_ != NULL) delete iterator_;
-}
-
-
-bool SpaceIterator::has_next() {
- // Iterate until no more spaces.
- return current_space_ != LAST_SPACE;
-}
-
-
-ObjectIterator* SpaceIterator::next() {
- if (iterator_ != NULL) {
- delete iterator_;
- iterator_ = NULL;
- // Move to the next space
- current_space_++;
- if (current_space_ > LAST_SPACE) {
- return NULL;
- }
- }
-
- // Return iterator for the new current space.
- return CreateIterator();
-}
-
-
-// Create an iterator for the space to iterate.
-ObjectIterator* SpaceIterator::CreateIterator() {
- ASSERT(iterator_ == NULL);
-
- switch (current_space_) {
- case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(Heap::new_space());
- break;
- case OLD_SPACE:
- iterator_ = new HeapObjectIterator(Heap::old_space());
- break;
- case CODE_SPACE:
- iterator_ = new HeapObjectIterator(Heap::code_space());
- break;
- case MAP_SPACE:
- iterator_ = new HeapObjectIterator(Heap::map_space());
- break;
- case LO_SPACE:
- iterator_ = new LargeObjectIterator(Heap::lo_space());
- break;
- }
-
- // Return the newly allocated iterator;
- ASSERT(iterator_ != NULL);
- return iterator_;
-}
-
// ----------------------------------------------------------------------------
// HeapObjectIterator
// Due to alignment, allocated space might be one page less than required
// number (kPagesPerChunk) of pages for old spaces.
//
- // Reserve two chunk ids for semispaces, one for map space and one for old
- // space.
- max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 4;
+ // Reserve two chunk ids for semispaces, one for map space, one for old
+ // space, and one for code space.
+ max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
if (max_nof_chunks_ > kMaxNofChunks) return false;
size_ = 0;
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
- size_t* allocated) {
+ size_t* allocated,
+ bool executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
- void* mem = OS::Allocate(requested, allocated);
+ void* mem = OS::Allocate(requested, allocated, executable);
int alloced = *allocated;
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
if (requested_pages <= 0) return Page::FromAddress(NULL);
}
-
- void* chunk = AllocateRawMemory(chunk_size, &chunk_size);
+ void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
if (chunk == NULL) return Page::FromAddress(NULL);
LOG(NewEvent("PagedChunk", chunk, chunk_size));
ASSERT(initial_chunk_->address() <= start);
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
-
- if (!initial_chunk_->Commit(start, size)) {
+ if (!initial_chunk_->Commit(start, size, owner->executable())) {
return Page::FromAddress(NULL);
}
Counters::memory_allocated.Increment(size);
}
-bool MemoryAllocator::CommitBlock(Address start, size_t size) {
+bool MemoryAllocator::CommitBlock(Address start,
+ size_t size,
+ bool executable) {
ASSERT(start != NULL);
ASSERT(size > 0);
ASSERT(initial_chunk_ != NULL);
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
- if (!initial_chunk_->Commit(start, size)) return false;
+ if (!initial_chunk_->Commit(start, size, executable)) return false;
Counters::memory_allocated.Increment(size);
return true;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(int max_capacity, AllocationSpace id) {
- ASSERT(id == OLD_SPACE || id == CODE_SPACE || id == MAP_SPACE);
+PagedSpace::PagedSpace(int max_capacity, AllocationSpace id, bool executable)
+ : Space(id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
- identity_ = id;
accounting_stats_.Clear();
- allocation_mode_ = LINEAR;
-
allocation_info_.top = NULL;
allocation_info_.limit = NULL;
if (!Contains(addr)) return Failure::Exception();
Page* p = Page::FromAddress(addr);
+ ASSERT(IsUsed(p));
Address cur = p->ObjectAreaStart();
Address end = p->AllocationTop();
while (cur < end) {
cur = next;
}
+ UNREACHABLE();
return Failure::Exception();
}
+bool PagedSpace::IsUsed(Page* page) {
+ PageIterator it(this, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ if (page == it.next()) return true;
+ }
+ return false;
+}
+
+
void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
alloc_info->top = p->ObjectAreaStart();
alloc_info->limit = p->ObjectAreaEnd();
- ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
+ ASSERT(alloc_info->VerifyPagedAllocation());
}
}
-void PagedSpace::SetLinearAllocationOnly(bool linear_only) {
- if (linear_only) {
- // Note that the free_list is not cleared. If we switch back to
- // FREE_LIST mode it will be available for use. Resetting it
- // requires correct accounting for the wasted bytes.
- allocation_mode_ = LINEAR_ONLY;
- } else {
- ASSERT(allocation_mode_ == LINEAR_ONLY);
- allocation_mode_ = LINEAR;
- }
-}
-
-
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
// The Contains function considers the address at the beginning of a
}
+// Slow case for reallocating and promoting objects during a compacting
+// collection. This function is not space-specific.
+HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
+ Page* current_page = TopPageOf(mc_forwarding_info_);
+ if (!current_page->next_page()->is_valid()) {
+ if (!Expand(current_page)) {
+ return NULL;
+ }
+ }
+
+ // There are surely more pages in the space now.
+ ASSERT(current_page->next_page()->is_valid());
+ // We do not add the top of page block for current page to the space's
+ // free list---the block may contain live objects so we cannot write
+ // bookkeeping information to it. Instead, we will recover top of page
+ // blocks when we move objects to their new locations.
+ //
+ // We do however write the allocation pointer to the page. The encoding
+ // of forwarding addresses is as an offset in terms of live bytes, so we
+ // need quick access to the allocation top of each page to decode
+ // forwarding addresses.
+ current_page->mc_relocation_top = mc_forwarding_info_.top;
+ SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
+ return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+}
+
+
bool PagedSpace::Expand(Page* last_page) {
ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
ASSERT(Capacity() % Page::kObjectAreaSize == 0);
Page* current_page = top_page->next_page();
// Loop over the pages to the end of the space.
while (current_page->is_valid()) {
- // Keep every odd-numbered page, one page for every two in the space.
+ // Advance last_page_to_keep every other step to end up at the midpoint.
if ((free_pages & 0x1) == 1) {
pages_to_keep++;
last_page_to_keep = last_page_to_keep->next_page();
// NewSpace implementation
NewSpace::NewSpace(int initial_semispace_capacity,
- int maximum_semispace_capacity) {
+ int maximum_semispace_capacity,
+ AllocationSpace id,
+ bool executable)
+ : Space(id, executable) {
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
maximum_capacity_ = maximum_semispace_capacity;
capacity_ = initial_semispace_capacity;
- to_space_ = new SemiSpace(capacity_, maximum_capacity_);
- from_space_ = new SemiSpace(capacity_, maximum_capacity_);
+ to_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
+ from_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
// Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// -----------------------------------------------------------------------------
// SemiSpace implementation
-SemiSpace::SemiSpace(int initial_capacity, int maximum_capacity)
- : capacity_(initial_capacity), maximum_capacity_(maximum_capacity),
- start_(NULL), age_mark_(NULL) {
+SemiSpace::SemiSpace(int initial_capacity,
+ int maximum_capacity,
+ AllocationSpace id,
+ bool executable)
+ : Space(id, executable), capacity_(initial_capacity),
+ maximum_capacity_(maximum_capacity), start_(NULL), age_mark_(NULL) {
}
bool SemiSpace::Setup(Address start, int size) {
ASSERT(size == maximum_capacity_);
- if (!MemoryAllocator::CommitBlock(start, capacity_)) return false;
+ if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
+ return false;
+ }
start_ = start;
address_mask_ = ~(size - 1);
bool SemiSpace::Double() {
- if (!MemoryAllocator::CommitBlock(high(), capacity_)) return false;
+ if (!MemoryAllocator::CommitBlock(high(), capacity_, executable())) {
+ return false;
+ }
capacity_ *= 2;
return true;
}
} else {
UNREACHABLE();
}
+ ASSERT(Size() == size_in_bytes);
}
Address FreeListNode::next() {
ASSERT(map() == Heap::byte_array_map());
+ ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
}
void FreeListNode::set_next(Address next) {
ASSERT(map() == Heap::byte_array_map());
+ ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
}
int rem = cur - index;
int rem_bytes = rem << kPointerSizeLog2;
FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
+ ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
size_in_bytes);
// Distinguish the cases prev < rem < cur and rem <= prev < cur
}
-MapSpaceFreeList::MapSpaceFreeList() {
+#ifdef DEBUG
+bool OldSpaceFreeList::Contains(FreeListNode* node) {
+ for (int i = 0; i < kFreeListsLength; i++) {
+ Address cur_addr = free_[i].head_node_;
+ while (cur_addr != NULL) {
+ FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+ if (cur_node == node) return true;
+ cur_addr = cur_node->next();
+ }
+ }
+ return false;
+}
+#endif
+
+
+MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
+ owner_ = owner;
Reset();
}
Object* MapSpaceFreeList::Allocate() {
if (head_ == NULL) {
- return Failure::RetryAfterGC(Map::kSize, MAP_SPACE);
+ return Failure::RetryAfterGC(Map::kSize, owner_);
}
FreeListNode* node = FreeListNode::FromAddress(head_);
accounting_stats_.FillWastedBytes(Waste());
}
- // Clear the free list and switch to linear allocation if we are in FREE_LIST
+ // Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
- if (allocation_mode_ == FREE_LIST) allocation_mode_ = LINEAR;
}
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = mc_forwarding_info_.limit;
- ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+ ASSERT(allocation_info_.VerifyPagedAllocation());
// The space is compacted and we haven't yet built free lists or
// wasted any space.
}
-Object* OldSpace::AllocateRawInternal(int size_in_bytes,
- AllocationInfo* alloc_info) {
- ASSERT(HasBeenSetup());
+// Slow case for normal allocation. Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
+ // Linear allocation in this space has failed. If there is another page
+ // in the space, move to that page and allocate there. This allocation
+ // should succeed (size_in_bytes should not be greater than a page's
+ // object area size).
+ Page* current_page = TopPageOf(allocation_info_);
+ if (current_page->next_page()->is_valid()) {
+ return AllocateInNextPage(current_page, size_in_bytes);
+ }
- if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
- // Try linear allocation in the current page.
- Address cur_top = alloc_info->top;
- Address new_top = cur_top + size_in_bytes;
- if (new_top <= alloc_info->limit) {
- Object* obj = HeapObject::FromAddress(cur_top);
- alloc_info->top = new_top;
- ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
+ // There is no next page in this space. Try free list allocation.
+ int wasted_bytes;
+ Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ if (!result->IsFailure()) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ return HeapObject::cast(result);
+ }
- accounting_stats_.AllocateBytes(size_in_bytes);
- ASSERT(Size() <= Capacity());
- return obj;
- }
- } else {
- // For now we should not try free list allocation during m-c relocation.
- ASSERT(alloc_info == &allocation_info_);
- int wasted_bytes;
- Object* object = free_list_.Allocate(size_in_bytes, &wasted_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- if (!object->IsFailure()) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- return object;
- }
+ // Free list allocation failed and there is no next page. Try to expand
+ // the space and allocate in the new next page.
+ ASSERT(!current_page->next_page()->is_valid());
+ if (Expand(current_page)) {
+ return AllocateInNextPage(current_page, size_in_bytes);
}
- // Fast allocation failed.
- return SlowAllocateRaw(size_in_bytes, alloc_info);
-}
-
-
-// Slow cases for AllocateRawInternal. In linear allocation mode, try
-// to allocate in the next page in the space. If there are no more
-// pages, switch to free-list allocation if permitted, otherwise try
-// to grow the space. In free-list allocation mode, try to grow the
-// space and switch to linear allocation.
-Object* OldSpace::SlowAllocateRaw(int size_in_bytes,
- AllocationInfo* alloc_info) {
- if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
- Page* top_page = TopPageOf(*alloc_info);
- // Until we implement free-list allocation during global gc, we have two
- // cases: one for normal allocation and one for m-c relocation allocation.
- if (alloc_info == &allocation_info_) { // Normal allocation.
- int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
- // Add the extra space at the top of this page to the free list.
- if (free_size > 0) {
- int wasted_bytes = free_list_.Free(alloc_info->top, free_size);
- accounting_stats_.WasteBytes(wasted_bytes);
- alloc_info->top += free_size;
- ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
- }
- // Move to the next page in this space if there is one; switch
- // to free-list allocation, if we can; try to expand the space otherwise
- if (top_page->next_page()->is_valid()) {
- SetAllocationInfo(alloc_info, top_page->next_page());
- } else if (allocation_mode_ == LINEAR) {
- allocation_mode_ = FREE_LIST;
- } else if (Expand(top_page)) {
- ASSERT(top_page->next_page()->is_valid());
- SetAllocationInfo(alloc_info, top_page->next_page());
- } else {
- return Failure::RetryAfterGC(size_in_bytes, identity());
- }
- } else { // Allocation during m-c relocation.
- // During m-c 'allocation' while computing forwarding addresses, we do
- // not yet add blocks to the free list because they still contain live
- // objects. We also cache the m-c forwarding allocation pointer in the
- // current page.
-
- // If there are no more pages try to expand the space. This can only
- // happen when promoting objects from the new space.
- if (!top_page->next_page()->is_valid()) {
- if (!Expand(top_page)) {
- return Failure::RetryAfterGC(size_in_bytes, identity());
- }
- }
+ // Finally, fail.
+ return NULL;
+}
- // Move to the next page.
- ASSERT(top_page->next_page()->is_valid());
- top_page->mc_relocation_top = alloc_info->top;
- SetAllocationInfo(alloc_info, top_page->next_page());
- }
- } else { // Free-list allocation.
- // We failed to allocate from the free list; try to expand the space and
- // switch back to linear allocation.
- ASSERT(alloc_info == &allocation_info_);
- Page* top_page = TopPageOf(*alloc_info);
- if (!top_page->next_page()->is_valid()) {
- if (!Expand(top_page)) {
- return Failure::RetryAfterGC(size_in_bytes, identity());
- }
- }
- // We surely have more pages, move to the next page and switch to linear
- // allocation.
- ASSERT(top_page->next_page()->is_valid());
- SetAllocationInfo(alloc_info, top_page->next_page());
- ASSERT(allocation_mode_ == FREE_LIST);
- allocation_mode_ = LINEAR;
+// Add the block at the top of the page to the space's free list, set the
+// allocation info to the next page (there is assumed to be one), and allocate
+// linearly there.
+HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
+ int size_in_bytes) {
+ ASSERT(current_page->next_page()->is_valid());
+ // Add the block at the top of this page to the free list.
+ int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
+ if (free_size > 0) {
+ int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
+ accounting_stats_.WasteBytes(wasted_bytes);
}
-
- // Perform the allocation.
- return AllocateRawInternal(size_in_bytes, alloc_info);
+ SetAllocationInfo(&allocation_info_, current_page->next_page());
+ return AllocateLinearly(&allocation_info_, size_in_bytes);
}
void OldSpace::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
- ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+ ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
accounting_stats_.AllocateBytes(free_list_.available());
}
- // Clear the free list and switch to linear allocation if not already
- // required.
+ // Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
- if (allocation_mode_ != LINEAR_ONLY) allocation_mode_ = LINEAR;
}
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = mc_forwarding_info_.limit;
- ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+ ASSERT(allocation_info_.VerifyPagedAllocation());
// The space is compacted and we haven't yet wasted any space.
ASSERT(Waste() == 0);
}
-Object* MapSpace::AllocateRawInternal(int size_in_bytes,
- AllocationInfo* alloc_info) {
- ASSERT(HasBeenSetup());
- // When doing free-list allocation, we implicitly assume that we always
- // allocate a map-sized block.
- ASSERT(size_in_bytes == Map::kSize);
-
- if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
- // Try linear allocation in the current page.
- Address cur_top = alloc_info->top;
- Address new_top = cur_top + size_in_bytes;
- if (new_top <= alloc_info->limit) {
- Object* obj = HeapObject::FromAddress(cur_top);
- alloc_info->top = new_top;
- ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
+// Slow case for normal allocation. Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
+ // Linear allocation in this space has failed. If there is another page
+ // in the space, move to that page and allocate there. This allocation
+ // should succeed.
+ Page* current_page = TopPageOf(allocation_info_);
+ if (current_page->next_page()->is_valid()) {
+ return AllocateInNextPage(current_page, size_in_bytes);
+ }
+ // There is no next page in this space. Try free list allocation. The
+ // map space free list implicitly assumes that all free blocks are map
+ // sized.
+ if (size_in_bytes == Map::kSize) {
+ Object* result = free_list_.Allocate();
+ if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
- return obj;
- }
- } else {
- // We should not do free list allocation during m-c compaction.
- ASSERT(alloc_info == &allocation_info_);
- Object* object = free_list_.Allocate();
- if (!object->IsFailure()) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- return object;
+ return HeapObject::cast(result);
}
}
- // Fast allocation failed.
- return SlowAllocateRaw(size_in_bytes, alloc_info);
-}
-
-
-// Slow case for AllocateRawInternal. In linear allocation mode, try to
-// allocate in the next page in the space. If there are no more pages, switch
-// to free-list allocation. In free-list allocation mode, try to grow the
-// space and switch to linear allocation.
-Object* MapSpace::SlowAllocateRaw(int size_in_bytes,
- AllocationInfo* alloc_info) {
- if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
- Page* top_page = TopPageOf(*alloc_info);
-
- // We do not do free-list allocation during compacting GCs.
- if (alloc_info == &mc_forwarding_info_) {
- // We expect to always have more pages, because the map space cannot
- // grow during GC. Move to the next page.
- CHECK(top_page->next_page()->is_valid());
- top_page->mc_relocation_top = alloc_info->top;
- SetAllocationInfo(alloc_info, top_page->next_page());
- } else { // Normal allocation.
- // Move to the next page in this space (counting the top-of-page block
- // as waste) if there is one, otherwise switch to free-list allocation if
- // permitted, otherwise try to expand the heap
- if (top_page->next_page()->is_valid() ||
- (allocation_mode_ == LINEAR_ONLY && Expand(top_page))) {
- int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
- ASSERT(free_size == kPageExtra);
- accounting_stats_.WasteBytes(free_size);
- SetAllocationInfo(alloc_info, top_page->next_page());
- } else if (allocation_mode_ == LINEAR) {
- allocation_mode_ = FREE_LIST;
- } else {
- return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
- }
- }
- } else { // Free-list allocation.
- ASSERT(alloc_info == &allocation_info_);
- // We failed to allocate from the free list (ie, it must be empty) so try
- // to expand the space and switch back to linear allocation.
- Page* top_page = TopPageOf(*alloc_info);
- if (!top_page->next_page()->is_valid()) {
- if (!Expand(top_page)) {
- return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
- }
- }
- // We have more pages now so we can move to the next and switch to linear
- // allocation.
- ASSERT(top_page->next_page()->is_valid());
- int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
- ASSERT(free_size == kPageExtra);
- accounting_stats_.WasteBytes(free_size);
- SetAllocationInfo(alloc_info, top_page->next_page());
- ASSERT(allocation_mode_ == FREE_LIST);
- allocation_mode_ = LINEAR;
+ // Free list allocation failed and there is no next page. Try to expand
+ // the space and allocate in the new next page.
+ ASSERT(!current_page->next_page()->is_valid());
+ if (Expand(current_page)) {
+ return AllocateInNextPage(current_page, size_in_bytes);
}
- // Perform the allocation.
- return AllocateRawInternal(size_in_bytes, alloc_info);
+ // Finally, fail.
+ return NULL;
+}
+
+
+// Move to the next page (there is assumed to be one) and allocate there.
+// The top of page block is always wasted, because it is too small to hold a
+// map.
+HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
+ int size_in_bytes) {
+ ASSERT(current_page->next_page()->is_valid());
+ ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
+ accounting_stats_.WasteBytes(kPageExtra);
+ SetAllocationInfo(&allocation_info_, current_page->next_page());
+ return AllocateLinearly(&allocation_info_, size_in_bytes);
}
void MapSpace::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
- ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+ ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
// LargeObjectChunk
LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
- size_t* chunk_size) {
+ size_t* chunk_size,
+ bool executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
- void* mem = MemoryAllocator::AllocateRawMemory(requested, chunk_size);
+ void* mem = MemoryAllocator::AllocateRawMemory(requested,
+ chunk_size,
+ executable);
if (mem == NULL) return NULL;
LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
if (*chunk_size < requested) {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace()
- : first_chunk_(NULL),
+LargeObjectSpace::LargeObjectSpace(AllocationSpace id, bool executable)
+ : Space(id, executable),
+ first_chunk_(NULL),
size_(0),
page_count_(0) {}
ASSERT(0 < object_size && object_size <= requested_size);
size_t chunk_size;
LargeObjectChunk* chunk =
- LargeObjectChunk::New(requested_size, &chunk_size);
+ LargeObjectChunk::New(requested_size, &chunk_size, executable());
if (chunk == NULL) {
- return Failure::RetryAfterGC(requested_size, LO_SPACE);
+ return Failure::RetryAfterGC(requested_size, identity());
}
size_ += chunk_size;
LargeObjectChunk* current = first_chunk_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- if (is_marked(object)) {
- clear_mark(object);
+ if (object->IsMarked()) {
+ object->ClearMark();
+ MarkCompactCollector::tracer()->decrement_marked_count();
previous = current;
current = current->next();
} else {
class PagedSpace;
class MemoryAllocator;
-struct AllocationInfo;
+class AllocationInfo;
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// Returns the next page of this page.
inline Page* next_page();
- // Return the end of allocation in this page.
+ // Return the end of allocation in this page. Undefined for unused pages.
inline Address AllocationTop();
// Returns the start address of the object area in this page.
};
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces.
+class Space : public Malloced {
+ public:
+ Space(AllocationSpace id, bool executable)
+ : id_(id), executable_(executable) {}
+ // Does the space need executable memory?
+ bool executable() { return executable_; }
+ // Identity used in error reporting.
+ AllocationSpace identity() { return id_; }
+ private:
+ AllocationSpace id_;
+ bool executable_;
+};
+
+
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
- static bool CommitBlock(Address start, size_t size);
+ static bool CommitBlock(Address start, size_t size, bool executable);
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of heap.
- static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+ static void* AllocateRawMemory(const size_t requested,
+ size_t* allocated,
+ bool executable);
static void FreeRawMemory(void* buf, size_t length);
// Returns the maximum available bytes of heaps.
};
-// -----------------------------------------------------------------------------
-// Space iterator for iterating over all spaces.
-//
-// For each space an object iterator is provided. The deallocation of the
-// returned object iterators is handled by the space iterator.
-
-class SpaceIterator : public Malloced {
- public:
- SpaceIterator();
- virtual ~SpaceIterator();
-
- bool has_next();
- ObjectIterator* next();
-
- private:
- ObjectIterator* CreateIterator();
-
- int current_space_; // from enum AllocationSpace.
- ObjectIterator* iterator_; // object iterator for the current space.
-};
-
-
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// An abstraction of allocation and relocation pointers in a page-structured
// space.
-struct AllocationInfo {
+class AllocationInfo {
+ public:
Address top; // current allocation top
Address limit; // current allocation limit
+
+#ifdef DEBUG
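+ // For paged spaces, top and limit should always be in the same page and
+ // top should not be greater than limit.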
+ bool VerifyPagedAllocation() {
+ return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
+ && (top <= limit);
+ }
+#endif
};
};
-class PagedSpace : public Malloced {
+class PagedSpace : public Space {
friend class PageIterator;
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(int max_capacity, AllocationSpace id);
+ PagedSpace(int max_capacity, AllocationSpace id, bool executable);
+
+ virtual ~PagedSpace() {}
// Set up the space using the given address range of virtual memory (from
// the memory allocator's initial chunk) if possible. If the block of
inline bool Contains(Address a);
bool Contains(HeapObject* o) { return Contains(o->address()); }
- // Finds an object that the given address falls in its body. Returns
- // Failure::Exception() if the operation failed. The implementation
- // iterates objects in the page containing the address, the cost is
- // linear to the number of objects in the page. It may be slow.
+ // Given an address occupied by a live object, return that object if it is
+ // in this space, or Failure::Exception() if it is not. The implementation
+ // iterates over objects in the page containing the address, the cost is
+ // linear in the number of objects in the page. It may be slow.
Object* FindObject(Address addr);
+ // Checks whether page is currently in use by this space.
+ bool IsUsed(Page* page);
+
// Clears remembered sets of pages in this space.
void ClearRSet();
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top; }
- AllocationSpace identity() { return identity_; }
+ // Allocate the requested number of bytes in the space if possible, return a
+ // failure object if not.
+ inline Object* AllocateRaw(int size_in_bytes);
+
+ // Allocate the requested number of bytes for relocation during mark-compact
+ // collection.
+ inline Object* MCAllocateRaw(int size_in_bytes);
+
- // If 'linear_only' is true, force allocation_mode_ to
- // LINEAR_ONLY. If 'linear_only' is false, allocation_mode_ is
- // checked to be LINEAR_ONLY and changed to LINEAR, allowing it to
- // alternate between LINEAR and FREE_LIST automatically.
- void SetLinearAllocationOnly(bool linear_only);
+ // Allocate the requested number of bytes during deserialization.
+ inline Object* AllocateForDeserialization(int size_in_bytes);
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
bool EnsureCapacity(int capacity);
#ifdef DEBUG
- void CheckLinearAllocationOnly() { CHECK(allocation_mode_ == LINEAR_ONLY); }
-
// Print meta info and objects in this space.
void Print();
#endif
protected:
- // In LINEAR and LINEAR_ONLY mode, allocation is from the end of the last
- // page. In FREE_LIST mode, allocation is from a fragment list of free
- // space at the end of recent pages. LINEAR and FREE_LIST mode alternate
- // automatically. LINEAR_ONLY mode is sticky until converted to LINEAR by
- // an API call.
- enum AllocationMode { LINEAR_ONLY, LINEAR, FREE_LIST };
-
// Maximum capacity of this space.
int max_capacity_;
// The first page in this space.
Page* first_page_;
- // The allocation mode.
- AllocationMode allocation_mode_;
-
// Normal allocation information.
AllocationInfo allocation_info_;
// pages are appended to the last_page;
bool Expand(Page* last_page);
+ // Generic fast case allocation function that tries linear allocation in
+ // the top page of 'alloc_info'. Returns NULL on failure.
+ inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
+ int size_in_bytes);
+
+ // During normal allocation or deserialization, roll to the next page in
+ // the space (there is assumed to be one) and allocate there. This
+ // function is space-dependent.
+ virtual HeapObject* AllocateInNextPage(Page* current_page,
+ int size_in_bytes) = 0;
+
+ // Slow path of AllocateRaw. This function is space-dependent.
+ virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
+
+ // Slow path of MCAllocateRaw.
+ HeapObject* SlowMCAllocateRaw(int size_in_bytes);
+
#ifdef DEBUG
void DoPrintRSet(const char* space_name);
#endif
private:
- // Identity of this space.
- AllocationSpace identity_;
-
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
// uses the memory in the from space as a marking stack when tracing live
// objects.
-class SemiSpace BASE_EMBEDDED {
+class SemiSpace : public Space {
public:
// Creates a space in the young generation. The constructor does not
// allocate memory from the OS. A SemiSpace is given a contiguous chunk of
// otherwise. In the mark-compact collector, the memory region of the from
// space is used as the marking stack. It requires contiguous memory
// addresses.
- SemiSpace(int initial_capacity, int maximum_capacity);
+ SemiSpace(int initial_capacity,
+ int maximum_capacity,
+ AllocationSpace id,
+ bool executable);
// Sets up the semispace using the given chunk.
bool Setup(Address start, int size);
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
-class NewSpace : public Malloced {
+class NewSpace : public Space {
public:
// Create a new space with a given allocation capacity (ie, the capacity of
// *one* of the semispaces). The constructor does not allocate heap memory
// memory of size 2 * semispace_capacity. To support fast containment
// testing in the new space, the size of this chunk must be a power of two
// and it must be aligned to its size.
- NewSpace(int initial_semispace_capacity, int maximum_semispace_capacity);
+ NewSpace(int initial_semispace_capacity,
+ int maximum_semispace_capacity,
+ AllocationSpace id,
+ bool executable);
// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
- // Allocate the requested number of bytes in the space if possible, return a
- // failure object if not.
Object* AllocateRaw(int size_in_bytes) {
return AllocateRawInternal(size_in_bytes, &allocation_info_);
}
void RebuildSizeList();
bool needs_rebuild_;
+#ifdef DEBUG
+ // Does this free list contain a free block located at the address of 'node'?
+ bool Contains(FreeListNode* node);
+#endif
+
DISALLOW_EVIL_CONSTRUCTORS(OldSpaceFreeList);
};
// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
public:
- MapSpaceFreeList();
+ explicit MapSpaceFreeList(AllocationSpace owner);
// Clear the free list.
void Reset();
// The head of the free list.
Address head_;
+ // The identity of the owning space, for building allocation Failure
+ // objects.
+ AllocationSpace owner_;
+
DISALLOW_EVIL_CONSTRUCTORS(MapSpaceFreeList);
};
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- explicit OldSpace(int max_capacity, AllocationSpace id)
- : PagedSpace(max_capacity, id), free_list_(id) {
+ explicit OldSpace(int max_capacity, AllocationSpace id, bool executable)
+ : PagedSpace(max_capacity, id, executable), free_list_(id) {
}
// Returns maximum available bytes that the old space can have.
// pointer).
int AvailableFree() { return free_list_.available(); }
- // The top of allocation in a page in this space.
+ // The top of allocation in a page in this space. Undefined if page is unused.
Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
}
- // Allocates requested bytes. May return Failure if the space is full.
- Object* AllocateRaw(int size_in_bytes) {
- ASSERT_OBJECT_SIZE(size_in_bytes);
- return AllocateRawInternal(size_in_bytes, &allocation_info_);
- }
-
- // Allocates requested bytes for object relocation.
- Object* MCAllocateRaw(int size_in_bytes) {
- ASSERT_OBJECT_SIZE(size_in_bytes);
- return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
- }
-
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
void Free(Address start, int size_in_bytes) {
void PrintRSet();
#endif
+ protected:
+ // Virtual function in the superclass. Slow path of AllocateRaw.
+ HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+ // Virtual function in the superclass. Allocate linearly at the start of
+ // the page after current_page (there is assumed to be one).
+ HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
private:
// The space's free list.
OldSpaceFreeList free_list_;
// object in order to know when to move to the next page.
Address mc_end_of_relocation_;
- // Implementation of AllocateRaw. Allocates requested number of bytes using
- // the given allocation information according to the space's current
- // allocation mode.
- Object* AllocateRawInternal(int size_in_bytes, AllocationInfo* alloc_info);
-
- // Slow path of AllocateRaw functions.
- Object* SlowAllocateRaw(int size_in_bytes, AllocationInfo* alloc_info);
-
public:
TRACK_MEMORY("OldSpace")
};
class MapSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
- explicit MapSpace(int max_capacity) : PagedSpace(max_capacity, MAP_SPACE) { }
+ explicit MapSpace(int max_capacity, AllocationSpace id)
+ : PagedSpace(max_capacity, id, false), free_list_(id) { }
// The bytes available on the free list (ie, not above the linear allocation
// pointer).
int AvailableFree() { return free_list_.available(); }
- // The top of allocation in a page in this space.
+ // The top of allocation in a page in this space. Undefined if page is unused.
Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - kPageExtra;
}
- // Allocates requested bytes. May return Failure if the space is full.
- Object* AllocateRaw(int size_in_bytes) {
- ASSERT_OBJECT_SIZE(size_in_bytes);
- return AllocateRawInternal(size_in_bytes, &allocation_info_);
- }
-
- // Allocates requested bytes for object relocation.
- Object* MCAllocateRaw(int size_in_bytes) {
- ASSERT_OBJECT_SIZE(size_in_bytes);
- return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
- }
-
// Give a map-sized block of memory to the space's free list.
void Free(Address start) {
free_list_.Free(start);
static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
+ protected:
+ // Virtual function in the superclass. Slow path of AllocateRaw.
+ HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+ // Virtual function in the superclass. Allocate linearly at the start of
+ // the page after current_page (there is assumed to be one).
+ HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
private:
// The space's free list.
MapSpaceFreeList free_list_;
// An array of page start address in a map space.
Address page_addresses_[kMaxMapPageIndex];
- // Implementation of AllocateRaw. Allocates requested bytes using
- // the given allocation information.
- Object* AllocateRawInternal(int size_in_bytes, AllocationInfo* alloc_info);
-
- // Slow path of AllocateRaw functions.
- Object* SlowAllocateRaw(int size_int_bytes, AllocationInfo* alloc_info);
-
public:
TRACK_MEMORY("MapSpace")
};
// object and possibly extra remembered set words) bytes after the object
// area start of that page. The allocated chunk size is set in the output
// parameter chunk_size.
- static LargeObjectChunk* New(int size_in_bytes, size_t* chunk_size);
+ static LargeObjectChunk* New(int size_in_bytes,
+ size_t* chunk_size,
+ bool executable);
// Interpret a raw address as a large object chunk.
static LargeObjectChunk* FromAddress(Address address) {
};
-class LargeObjectSpace {
+class LargeObjectSpace : public Space {
friend class LargeObjectIterator;
public:
- LargeObjectSpace();
+ explicit LargeObjectSpace(AllocationSpace id, bool executable);
// Initializes internal data structures.
bool Setup();
// ECMA-262 section 15.5.4.5
%AddProperty($String.prototype, "charCodeAt", function(pos) {
+ var fast_answer = %_FastCharCodeAt(this, pos);
+ if (%_IsSmi(fast_answer)) {
+ return fast_answer;
+ }
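+ // The %_FastCharCodeAt intrinsic could not handle this case; fall through
+ // to the general implementation below.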
var subject = ToString(this);
var index = TO_INTEGER(pos);
return %StringCharCodeAt(subject, index);
HandleScope scope;
// Enter the JS frame but don't add additional arguments.
- __ EnterJSFrame(0, 0);
+ __ EnterJSFrame(0);
// Push the function on the stack and call the runtime function.
__ Push(MemOperand(pp, 0));
__ mov(r1, Operand(r0));
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
- __ ExitJSFrame(DO_NOT_RETURN, 0);
+ __ ExitJSFrame(DO_NOT_RETURN);
// Do a tail-call of the compiled function.
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
// the stub cache only contains monomorphic stubs. Make sure that
// the bits are the least significant so they will be the ones
// masked out.
- ASSERT(Code::ExtractStateFromFlags(flags) == MONOMORPHIC);
- ASSERT(Code::kFlagsStateShift == 0);
+ ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC);
+ ASSERT(Code::kFlagsICStateShift == 0);
// Make sure that the code type is not included in the hash.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
bool Top::is_break() {
ExecutionAccess access;
- return is_break_no_lock();
-}
-
-
-bool Top::is_break_no_lock() {
return break_id_ != 0;
}
+
StackFrame::Id Top::break_frame_id() {
ExecutionAccess access;
return break_frame_id_;
static void set_break(StackFrame::Id break_frame_id, int break_id);
static bool check_break(int break_id);
static bool is_break();
- static bool is_break_no_lock();
static StackFrame::Id break_frame_id();
static int break_id();
}
+StringBuilder::StringBuilder(int size) {
+ buffer_ = NewArray<char>(size);
+ size_ = size;
+ position_ = 0;
+}
+
+
+void StringBuilder::AddString(const char* s) {
+ AddSubstring(s, strlen(s));
+}
+
+
+void StringBuilder::AddSubstring(const char* s, int n) {
+ ASSERT(!is_finalized() && position_ + n < size_);
+ ASSERT(static_cast<size_t>(n) <= strlen(s));
+ memcpy(&buffer_[position_], s, n * kCharSize);
+ position_ += n;
+}
+
+
+void StringBuilder::AddFormatted(const char* format, ...) {
+ ASSERT(!is_finalized() && position_ < size_);
+ va_list args;
+ va_start(args, format);
+ int remaining = size_ - position_;
+ int n = OS::VSNPrintF(&buffer_[position_], remaining, format, args);
+ va_end(args);
+ if (n < 0 || n >= remaining) {
+ position_ = size_;
+ } else {
+ position_ += n;
+ }
+}
+
+
+void StringBuilder::AddPadding(char c, int count) {
+ for (int i = 0; i < count; i++) {
+ AddCharacter(c);
+ }
+}
+
+
+char* StringBuilder::Finalize() {
+ ASSERT(!is_finalized() && position_ < size_);
+ buffer_[position_] = '\0';
+ // Make sure nobody managed to add a 0-character to the
+ // buffer while building the string.
+ ASSERT(strlen(buffer_) == static_cast<size_t>(position_));
+ position_ = -1;
+ ASSERT(is_finalized());
+ return buffer_;
+}
+
} } // namespace v8::internal
template <typename T>
class Vector {
public:
+ Vector() : start_(NULL), length_(0) {}
Vector(T* data, int length) : start_(data), length_(length) {
ASSERT(length == 0 || (length > 0 && data != NULL));
}
// Releases the array underlying this vector. Once disposed the
// vector is empty.
void Dispose() {
+ if (is_empty()) return;
DeleteArray(start_);
start_ = NULL;
length_ = 0;
};
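
A brief usage sketch of the new default constructor and the guarded Dispose()
(illustrative only, not part of the patch; it assumes the Vector declaration
above and the NewArray/DeleteArray helpers referenced elsewhere in this patch):

  Vector<char> buffer;                            // default-constructed: no backing array
  buffer.Dispose();                               // returns early because the vector is empty
  buffer = Vector<char>(NewArray<char>(16), 16);  // attach a heap-allocated array
  buffer.Dispose();                               // releases the array and empties the vector
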
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that check the
+// buffer bounds on all operations in debug mode.
+class StringBuilder {
+ public:
+ // Create a string builder with a buffer of the given size. The
+ // buffer is allocated through NewArray<char> and must be
+ // deallocated by the caller of Finalize().
+ explicit StringBuilder(int size);
+
+ StringBuilder(char* buffer, int size)
+ : buffer_(buffer), size_(size), position_(0) { }
+
+ ~StringBuilder() { if (!is_finalized()) Finalize(); }
+
+ int size() const { return size_; }
+
+ // Get the current position in the builder.
+ int position() const {
+ ASSERT(!is_finalized());
+ return position_;
+ }
+
+ // Reset the position.
+ void Reset() { position_ = 0; }
+
+ // Add a single character to the builder. It is not allowed to add
+ // 0-characters; use the Finalize() method to terminate the string
+ // instead.
+ void AddCharacter(char c) {
+ ASSERT(c != '\0');
+ ASSERT(!is_finalized() && position_ < size_);
+ buffer_[position_++] = c;
+ }
+
+ // Add an entire string to the builder. Uses strlen() internally to
+ // compute the length of the input string.
+ void AddString(const char* s);
+
+ // Add the first 'n' characters of the given string 's' to the
+ // builder. The input string must have enough characters.
+ void AddSubstring(const char* s, int n);
+
+ // Add formatted contents to the builder just like printf().
+ void AddFormatted(const char* format, ...);
+
+ // Add character padding to the builder. If count is non-positive,
+ // nothing is added to the builder.
+ void AddPadding(char c, int count);
+
+ // Finalize the string by 0-terminating it and returning the buffer.
+ char* Finalize();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+
+ char* buffer_;
+ int size_;
+ int position_;
+
+ bool is_finalized() const { return position_ < 0; }
+};
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
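
The new StringBuilder is typically used as follows (a minimal illustrative
sketch, not part of the patch; it assumes the utils.h declarations above and
the DeleteArray helper referenced in Vector::Dispose):

  // Build a short, bounds-checked message in a heap-allocated buffer.
  StringBuilder builder(64);
  builder.AddString("result: ");
  builder.AddFormatted("%d of %d", 3, 7);
  char* text = builder.Finalize();  // 0-terminates and returns the buffer
  // The buffer came from NewArray<char>, so the caller releases it.
  DeleteArray(text);
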
-# Copyright 2006-2008 Google Inc. All Rights Reserved.
+# Copyright 2006 Google Inc. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Note that we could easily compress the scripts further but don't
# since we want them to remain readable.
lines = re.sub('//.*\n', '\n', lines) # end-of-line comments
- lines = re.sub('\s+\n+', '\n', lines) # trailing whitespace
+ lines = re.sub('\s+\n+', '\n', lines) # trailing whitespace
return lines
return string
-def MakeVersion(source, target):
- TEMPLATE = """
- #include "v8.h"
-
- void v8::V8::GetVersion(v8::VersionInfo *info) {
- info->major = %(major)s;
- info->minor = %(minor)s;
- info->build_major = %(build_major)s;
- info->build_minor = %(build_minor)s;
- info->revision = %(revision)s;
- }
-"""
- PATTERN = re.compile('\$[a-zA-Z]+:\s*([0-9]+)\s*\$')
- def VersionToInt(str):
- match = PATTERN.match(str)
- if match: return match.group(1)
- else: return str
- config = LoadConfigFrom(source)
- map = { }
- for key, value in config.items('VERSION'):
- map[key] = VersionToInt(value)
- output = TEMPLATE % map
- file = open(target, "w")
- file.write(output)
- file.close()
-
-
def ExpandConstants(lines, constants):
for key, value in constants.items():
lines = lines.replace(key, str(value))
return lines
+
def ExpandMacros(lines, macros):
for name, macro in macros.items():
start = lines.find(name, 0)
if delay: id = id[:-6]
if delay:
delay_ids.append((id, len(lines)))
- source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
else:
ids.append((id, len(lines)))
- source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
+ source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
# Build delay support functions
get_index_cases = [ ]