--- /dev/null
+2008-07-03: Version 0.1.0 (125876)
+
+ Initial export.
+
--- /dev/null
+This license applies to all parts of V8 that are not externally
+maintained libraries. The externally maintained libraries used by V8
+are:
+
+ - Jscre, located under third_party/jscre. This code is copyrighted
+ by the University of Cambridge and Apple Inc. and released under a
+ 2-clause BSD license.
+
+ - Dtoa, located under third_party/dtoa. This code is copyrighted by
+ David M. Gay and released under an MIT license.
+
+ - Strongtalk assembler, the basis of the files assembler-arm-inl.h,
+ assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h,
+ assembler-ia32.cc, assembler-ia32.h, assembler.cc and assembler.h.
+ This code is copyrighted by Sun Microsystems Inc. and released
+ under a 3-clause BSD license.
+
+These libraries have their own licenses; we recommend you read them,
+as their terms may differ from the terms below.
+
+Copyright 2006-2008, Google Inc. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+# Copyright 2008 Google Inc. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import platform
+import sys
+from os.path import join, dirname, abspath
+root_dir = dirname(File('SConstruct').rfile().abspath)
+sys.path.append(join(root_dir, 'tools'))
+import js2c
+
+
+def Abort(message):
+ print message
+ sys.exit(1)
+
+
+def GuessOS():
+ id = platform.system()
+ if id == 'Linux':
+ return 'linux'
+ elif id == 'Darwin':
+ return 'macos'
+ elif id == 'Windows':
+ return 'win32'
+ else:
+ Abort("Don't know how to build v8 for OS '%s'." % id)
+
+
+def GuessProcessor():
+ id = platform.machine()
+ if id.startswith('arm'):
+ return 'arm'
+ elif (not id) or id.startswith('x86'):
+ return 'ia32'
+ else:
+ Abort("Don't know how to build v8 for processor '%s'." % id)
+
+
+def GuessToolchain(os):
+ tools = Environment()['TOOLS']
+ if 'gcc' in tools:
+ if os == 'macos' and 'Kernel Version 8' in platform.version():
+ return 'gcc-darwin'
+ else:
+ return 'gcc'
+ elif 'msvc' in tools:
+ return 'msvc'
+ else:
+ tools = ', '.join(tools)
+ Abort("Don't know how to build v8 using these tools: %s" % tools)
+
+
+def GetOptions():
+ result = Options()
+ os_guess = GuessOS()
+ toolchain_guess = GuessToolchain(os_guess)
+ processor_guess = GuessProcessor()
+ result.Add('mode', 'debug or release', 'release')
+ result.Add('toolchain', 'the toolchain to use (gcc, gcc-darwin or msvc)', toolchain_guess)
+ result.Add('os', 'the os to build for (linux, macos or win32)', os_guess)
+ result.Add('processor', 'the processor to build for (arm or ia32)', processor_guess)
+ result.Add('snapshot', 'build using snapshots for faster start-up (on, off)', 'off')
+ result.Add('library', 'which type of library to produce (static, shared, default)', 'default')
+ return result
+
+
+def VerifyOptions(env):
+ if not env['mode'] in ['debug', 'release']:
+ Abort("Unknown build mode '%s'." % env['mode'])
+ if not env['toolchain'] in ['gcc', 'gcc-darwin', 'msvc']:
+ Abort("Unknown toolchain '%s'." % env['toolchain'])
+ if not env['os'] in ['linux', 'macos', 'win32']:
+ Abort("Unknown os '%s'." % env['os'])
+ if not env['processor'] in ['arm', 'ia32']:
+ Abort("Unknown processor '%s'." % env['processor'])
+ if not env['snapshot'] in ['on', 'off']:
+ Abort("Illegal value for option snapshot: '%s'." % env['snapshot'])
+ if not env['library'] in ['static', 'shared', 'default']:
+ Abort("Illegal value for option library: '%s'." % env['library'])
+
+
+def Start():
+  """Entry point of the build: parse and validate the build options,
+  then hand control to src/SConscript."""
+  opts = GetOptions()
+  env = Environment(options=opts)
+  Help(opts.GenerateHelpText(env))
+  VerifyOptions(env)
+
+  os = env['os']
+  arch = env['processor']
+  toolchain = env['toolchain']
+  mode = env['mode']
+  use_snapshot = (env['snapshot'] == 'on')
+  library_type = env['library']
+
+  # The build itself lives in src/SConscript; the validated option
+  # values are passed on via SCons exports, building into a directory
+  # named after the mode.
+  env.SConscript(
+    join('src', 'SConscript'),
+    build_dir=mode,
+    exports='toolchain arch os mode use_snapshot library_type',
+    duplicate=False
+  )
+
+
+Start()
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef _V8_DEBUG
+#define _V8_DEBUG
+
+#include "v8.h"
+
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define EXPORT __attribute__((visibility("default")))
+#else
+#define EXPORT
+#endif
+
+/**
+ * Debugger support for the V8 JavaScript engine.
+ */
+namespace v8 {
+
+// Debug events which can occur in the V8 JavaScript engine.
+enum DebugEvent {
+  Break = 1,
+  Exception = 2,
+  NewFunction = 3,
+  BeforeCompile = 4,
+  AfterCompile = 5,
+  PendingRequestProcessed = 6
+};
+
+
+/**
+ * Debug event callback function.
+ *
+ * \param event the debug event that occurred (a value from the
+ *   DebugEvent enumeration)
+ * \param exec_state execution state (JavaScript object)
+ * \param event_data event specific data (JavaScript object)
+ * \param data value passed by the user to AddDebugEventListener
+ */
+typedef void (*DebugEventCallback)(DebugEvent event,
+                                   Handle<Object> exec_state,
+                                   Handle<Object> event_data,
+                                   Handle<Value> data);
+
+
+/**
+ * Debug message callback function.
+ *
+ * \param message the debug message (two-byte characters)
+ * \param length length of the message
+ * \param data user-defined data (see Debug::SetMessageHandler)
+ */
+typedef void (*DebugMessageHandler)(const uint16_t* message, int length,
+                                    void* data);
+
+
+/**
+ * Debugger interface: event listeners, execution control and a
+ * JSON-based message protocol.
+ */
+class EXPORT Debug {
+ public:
+  // Add a C debug event listener.
+  static bool AddDebugEventListener(DebugEventCallback that,
+                                    Handle<Value> data = Handle<Value>());
+
+  // Add a JavaScript debug event listener.
+  static bool AddDebugEventListener(v8::Handle<v8::Function> that,
+                                    Handle<Value> data = Handle<Value>());
+
+  // Remove a C debug event listener.
+  static void RemoveDebugEventListener(DebugEventCallback that);
+
+  // Remove a JavaScript debug event listener.
+  static void RemoveDebugEventListener(v8::Handle<v8::Function> that);
+
+  // Generate a stack dump.
+  static void StackDump();
+
+  // Break execution of JavaScript.
+  static void DebugBreak();
+
+  // Message based interface. The message protocol is JSON.
+  static void SetMessageHandler(DebugMessageHandler handler, void* data = NULL);
+  static void SendCommand(const uint16_t* command, int length);
+};
+
+
+} // namespace v8
+
+
+#undef EXPORT
+
+
+#endif // _V8_DEBUG
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/** \mainpage V8 API Reference Guide
+ *
+ * TODO: Add introductory text and point back to
+ * http://code.google.com/apis/v8/index.html.
+ */
+#ifndef _V8
+#define _V8
+
+#include <stdio.h>
+
+#ifdef _WIN32
+typedef int int32_t;
+typedef unsigned int uint32_t;
+typedef unsigned short uint16_t;
+typedef long long int64_t;
+#else
+#include <stdint.h>
+#endif
+
+/**
+ * The v8 javascript engine.
+ */
+namespace v8 {
+
+class Context;
+class String;
+class Value;
+class Utils;
+class Number;
+class Object;
+class Array;
+class Int32;
+class Uint32;
+class External;
+class Primitive;
+class Boolean;
+class Integer;
+class Function;
+class Date;
+class ImplementationUtilities;
+class Signature;
+template <class T> class Handle;
+template <class T> class Local;
+template <class T> class Persistent;
+class FunctionTemplate;
+class ObjectTemplate;
+class Data;
+
+
+// --- W e a k H a n d l e s
+
+
+/**
+ * A weak reference callback function.
+ *
+ * \param object the weak global object to be reclaimed by the garbage collector
+ * \param parameter the value passed in when making the weak global object
+ */
+typedef void (*WeakReferenceCallback)(Persistent<Object> object,
+                                      void* parameter);
+
+
+// --- H a n d l e s ---
+
+// Compile-time check that S* is assignable to T*. The while (false)
+// body is never executed; it exists only so the compiler type-checks
+// the pointer assignment.
+#define TYPE_CHECK(T, S)                              \
+  while (false) {                                     \
+    *(static_cast<T**>(0)) = static_cast<S*>(0);      \
+  }
+
+/**
+ * An object reference managed by the v8 garbage collector.
+ *
+ * All objects returned from v8 have to be tracked by the garbage
+ * collector so that it knows that the objects are still alive. Also,
+ * because the garbage collector may move objects, it is unsafe to
+ * point directly to an object. Instead, all objects are stored in
+ * handles which are known by the garbage collector and updated
+ * whenever an object moves. Handles should always be passed by value
+ * (except in cases like out-parameters) and they should never be
+ * allocated on the heap.
+ *
+ * There are two types of handles: local and persistent handles.
+ * Local handles are light-weight and transient and typically used in
+ * local operations. They are managed by HandleScopes. Persistent
+ * handles can be used when storing objects across several independent
+ * operations and have to be explicitly deallocated when they're no
+ * longer used.
+ *
+ * It is safe to extract the object stored in the handle by
+ * dereferencing the handle (for instance, to extract the Object* from
+ * a Handle<Object>); the value will still be governed by a handle
+ * behind the scenes and the same rules apply to these values as to
+ * their handles.
+ */
+template <class T> class Handle {
+ public:
+
+  /**
+   * Creates an empty handle.
+   */
+  Handle();
+
+  /**
+   * Creates a new handle for the specified value.
+   */
+  explicit Handle(T* val) : val_(val) { }
+
+  /**
+   * Creates a handle for the contents of the specified handle. This
+   * constructor allows you to pass handles as arguments by value and
+   * assign between handles. However, if you try to assign between
+   * incompatible handles, for instance from a Handle<String> to a
+   * Handle<Number> it will cause a compile-time error. Assigning
+   * between compatible handles, for instance assigning a
+   * Handle<String> to a variable declared as Handle<Value>, is legal
+   * because String is a subclass of Value.
+   */
+  template <class S> inline Handle(Handle<S> that)
+      : val_(reinterpret_cast<T*>(*that)) {
+    /**
+     * This check fails when trying to convert between incompatible
+     * handles. For example, converting from a Handle<String> to a
+     * Handle<Number>.
+     */
+    TYPE_CHECK(T, S);
+  }
+
+  /**
+   * Returns true if the handle is empty.
+   */
+  bool IsEmpty() { return val_ == 0; }
+
+  /** Member access on the wrapped object. */
+  T* operator->();
+
+  /** Returns the wrapped object pointer. */
+  T* operator*();
+
+  /**
+   * Sets the handle to be empty. IsEmpty() will then return true.
+   */
+  void Clear() { this->val_ = 0; }
+
+  /**
+   * Checks whether two handles are the same.
+   * Returns true if both are empty, or if the objects
+   * to which they refer are identical.
+   * The handles' references are not checked.
+   */
+  template <class S> bool operator==(Handle<S> that) {
+    void** a = reinterpret_cast<void**>(**this);
+    void** b = reinterpret_cast<void**>(*that);
+    if (a == 0) return b == 0;
+    if (b == 0) return false;
+    return *a == *b;
+  }
+
+  /**
+   * Checks whether two handles are different.
+   * Returns true if only one of the handles is empty, or if
+   * the objects to which they refer are different.
+   * The handles' references are not checked.
+   */
+  template <class S> bool operator!=(Handle<S> that) {
+    return !operator==(that);
+  }
+
+  /** Downcasts a handle; an empty handle stays empty. */
+  template <class S> static inline Handle<T> Cast(Handle<S> that) {
+    if (that.IsEmpty()) return Handle<T>();
+    return Handle<T>(T::Cast(*that));
+  }
+
+ private:
+  T* val_;  // The wrapped pointer; 0 means the handle is empty.
+};
+
+
+/**
+ * A light-weight stack-allocated object handle. All operations
+ * that return objects from within v8 return them in local handles. They
+ * are created within HandleScopes, and all local handles allocated within a
+ * handle scope are destroyed when the handle scope is destroyed. Hence it
+ * is not necessary to explicitly deallocate local handles.
+ */
+template <class T> class Local : public Handle<T> {
+ public:
+  /** Creates an empty local handle. */
+  Local();
+  /** Creates a local handle for the contents of another (compatible) handle. */
+  template <class S> inline Local(Local<S> that)
+      : Handle<T>(reinterpret_cast<T*>(*that)) {
+    /**
+     * This check fails when trying to convert between incompatible
+     * handles. For example, converting from a Handle<String> to a
+     * Handle<Number>.
+     */
+    TYPE_CHECK(T, S);
+  }
+  template <class S> inline Local(S* that) : Handle<T>(that) { }
+  /** Downcasts a local handle; an empty handle stays empty. */
+  template <class S> static inline Local<T> Cast(Local<S> that) {
+    if (that.IsEmpty()) return Local<T>();
+    return Local<T>(T::Cast(*that));
+  }
+
+  /** Create a local handle for the content of another handle.
+   * The referee is kept alive by the local handle even when
+   * the original handle is destroyed/disposed.
+   */
+  static Local<T> New(Handle<T> that);
+};
+
+
+/**
+ * An object reference that is independent of any handle scope. Where
+ * a Local handle only lives as long as the HandleScope where it was
+ * allocated, a Persistent handle remains valid until it is explicitly
+ * disposed.
+ *
+ * A persistent handle contains a reference to a storage cell within
+ * the v8 engine which holds an object value and which is updated by
+ * the garbage collector whenever the object is moved. A new storage
+ * cell can be created using Persistent::New and existing handles can
+ * be disposed using Persistent::Dispose. Since persistent handles
+ * are passed by value you may have many persistent handle objects
+ * that point to the same storage cell. For instance, if you pass a
+ * persistent handle as an argument to a function you will not get two
+ * different storage cells but rather two references to the same
+ * storage cell.
+ */
+template <class T> class Persistent : public Handle<T> {
+ public:
+
+  /**
+   * Creates an empty persistent handle that doesn't point to any
+   * storage cell.
+   */
+  Persistent();
+
+  /**
+   * Creates a persistent handle for the same storage cell as the
+   * specified handle. This constructor allows you to pass persistent
+   * handles as arguments by value and to assign between persistent
+   * handles. However, if you try to assign between incompatible
+   * persistent handles, for instance from a Persistent<String> to a
+   * Persistent<Number> it will cause a compile-time error. Assigning
+   * between compatible persistent handles, for instance assigning a
+   * Persistent<String> to a variable declared as Persistent<Value>,
+   * is legal because String is a subclass of Value.
+   */
+  template <class S> inline Persistent(Persistent<S> that)
+      : Handle<T>(reinterpret_cast<T*>(*that)) {
+    /**
+     * This check fails when trying to convert between incompatible
+     * handles. For example, converting from a Handle<String> to a
+     * Handle<Number>.
+     */
+    TYPE_CHECK(T, S);
+  }
+
+  template <class S> inline Persistent(S* that) : Handle<T>(that) { }
+
+  /** Creates a persistent handle referring to the same object as 'that'. */
+  template <class S> explicit inline Persistent(Handle<S> that)
+      : Handle<T>(*that) { }
+
+  /** Downcasts a persistent handle; an empty handle stays empty. */
+  template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
+    if (that.IsEmpty()) return Persistent<T>();
+    return Persistent<T>(T::Cast(*that));
+  }
+
+  /**
+   * Creates a new persistent handle for an existing (local or
+   * persistent) handle.
+   */
+  static Persistent<T> New(Handle<T> that);
+
+  /**
+   * Releases the storage cell referenced by this persistent handle.
+   * Does not remove the reference to the cell from any handles.
+   * This handle's reference, and any other references to the storage
+   * cell remain and IsEmpty will still return false.
+   */
+  void Dispose();
+
+  /**
+   * Make the reference to this object weak. When only weak handles
+   * refer to the object, the garbage collector will perform a
+   * callback to the given V8::WeakReferenceCallback function, passing
+   * it the object reference and the given parameters.
+   */
+  void MakeWeak(void* parameters, WeakReferenceCallback callback);
+
+  /** Clears the weak reference to this object. */
+  void ClearWeak();
+
+  /**
+   * Checks if the handle holds the only reference to an object.
+   */
+  bool IsNearDeath();
+
+  /**
+   * Returns true if the handle's reference is weak.
+   */
+  bool IsWeak();
+
+ private:
+  friend class ImplementationUtilities;
+  friend class ObjectTemplate;
+};
+
+
+/**
+ * A stack-allocated class that governs a number of local handles.
+ * After a handle scope has been created, all local handles will be
+ * allocated within that handle scope until either the handle scope is
+ * deleted or another handle scope is created. If there is already a
+ * handle scope and a new one is created, all allocations will take
+ * place in the new handle scope until that is deleted. After that,
+ * new handles will again be allocated in the original handle scope.
+ *
+ * After the handle scope of a local handle has been deleted the
+ * garbage collector will no longer track the object stored in the
+ * handle and may deallocate it. The behavior of accessing a handle
+ * for which the handle scope has been deleted is undefined.
+ */
+class HandleScope {
+ public:
+  HandleScope() : previous_(current_), is_closed_(false) {
+    current_.extensions = 0;
+  }
+
+  ~HandleScope() {
+    // TODO(1245391): In a perfect world, there would be a way of not
+    // having to check for explicitly closed scopes maybe through
+    // subclassing HandleScope?
+    if (!is_closed_) RestorePreviousState();
+  }
+
+  /**
+   * TODO(1245391): Consider introducing a subclass for this.
+   * Closes the handle scope and returns the value as a handle in the
+   * previous scope, which is the new current scope after the call.
+   */
+  template <class T> Local<T> Close(Handle<T> value);
+
+  /**
+   * Counts the number of allocated handles.
+   */
+  static int NumberOfHandles();
+
+  /**
+   * Creates a new handle with the given value.
+   */
+  static void** CreateHandle(void* value);
+
+ private:
+  // Make it impossible to create heap-allocated or illegal handle
+  // scopes by disallowing certain operations.
+  HandleScope(const HandleScope&);
+  void operator=(const HandleScope&);
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
+
+  // Bookkeeping for the handles allocated in a scope.
+  class Data {
+   public:
+    int extensions;  // -1 after Initialize(); reset to 0 by HandleScope().
+    void** next;
+    void** limit;
+    inline void Initialize() {
+      extensions = -1;
+      next = limit = NULL;
+    }
+  };
+
+  static Data current_;
+  const Data previous_;
+
+  /**
+   * Re-establishes the previous scope state. Should not be called for
+   * any other scope than the current scope and not more than once.
+   */
+  void RestorePreviousState() {
+    if (current_.extensions > 0) DeleteExtensions();
+    current_ = previous_;
+#ifdef DEBUG
+    ZapRange(current_.next, current_.limit);
+#endif
+  }
+
+  // TODO(1245391): Consider creating a subclass for this.
+  bool is_closed_;
+  void** RawClose(void** value);
+
+  /** Deallocates any extensions used by the current scope. */
+  static void DeleteExtensions();
+
+#ifdef DEBUG
+  // Zaps the handles in the half-open interval [start, end).
+  static void ZapRange(void** start, void** end);
+#endif
+
+  friend class ImplementationUtilities;
+};
+
+
+// --- S p e c i a l o b j e c t s ---
+
+
+/**
+ * The superclass of values and API object templates.
+ */
+class Data {
+ private:
+  // Private constructor: Data cannot be instantiated directly by embedders.
+  Data();
+};
+
+
+/**
+ * Pre-compilation data that can be associated with a script. This
+ * data can be calculated for a script in advance of actually
+ * compiling it, and stored between compilations. When script data
+ * is given to the compile method compilation will be faster.
+ */
+class ScriptData {
+ public:
+  virtual ~ScriptData() { }
+  /** Produces pre-compilation data from the given source text. */
+  static ScriptData* PreCompile(const char* input, int length);
+  /** Creates a ScriptData from an existing data buffer of the given length. */
+  static ScriptData* New(unsigned* data, int length);
+
+  /** The length of Data(). */
+  virtual int Length() = 0;
+  /** The raw pre-compilation data. */
+  virtual unsigned* Data() = 0;
+};
+
+
+/**
+ * The origin, within a file, of a script.
+ */
+class ScriptOrigin {
+ public:
+  ScriptOrigin(Handle<String> resource_name,
+               Handle<Integer> resource_line_offset = Handle<Integer>(),
+               Handle<Integer> resource_column_offset = Handle<Integer>())
+      : resource_name_(resource_name),
+        resource_line_offset_(resource_line_offset),
+        resource_column_offset_(resource_column_offset) { }
+  /** The name of the resource the script originates from. */
+  inline Handle<String> ResourceName();
+  /** The line offset of the script within the resource. */
+  inline Handle<Integer> ResourceLineOffset();
+  /** The column offset of the script within the resource. */
+  inline Handle<Integer> ResourceColumnOffset();
+ private:
+  Handle<String> resource_name_;
+  Handle<Integer> resource_line_offset_;
+  Handle<Integer> resource_column_offset_;
+};
+
+
+/**
+ * A compiled javascript script.
+ */
+class Script {
+ public:
+
+  /**
+   * Compiles the specified script. The ScriptOrigin* and ScriptData*
+   * parameters are owned by the caller of Script::Compile. No
+   * references to these objects are kept after compilation finishes.
+   */
+  static Local<Script> Compile(Handle<String> source,
+                               ScriptOrigin* origin = NULL,
+                               ScriptData* pre_data = NULL);
+
+  /** Runs the compiled script and returns the resulting value. */
+  Local<Value> Run();
+};
+
+
+/**
+ * An error message.
+ */
+class Message {
+ public:
+  /** The message text. */
+  Local<String> Get();
+  /** The line of source code the message refers to. */
+  Local<Value> GetSourceLine();
+
+  // TODO(1241256): Rewrite (or remove) this method. We don't want to
+  // deal with ownership of the returned string and we want to use
+  // javascript data structures exclusively.
+  char* GetUnderline(char* source_line, char underline_char);
+
+  Handle<String> GetScriptResourceName();
+
+  // TODO(1240903): Remove this when no longer used in WebKit V8
+  // bindings.
+  Handle<Value> GetSourceData();
+
+  /** The line number the message refers to. */
+  int GetLineNumber();
+
+  // TODO(1245381): Print to a string instead of on a FILE.
+  static void PrintCurrentStackTrace(FILE* out);
+};
+
+
+// --- V a l u e ---
+
+
+/**
+ * The superclass of all javascript values and objects.
+ */
+class Value : public Data {
+ public:
+
+  /**
+   * Returns true if this value is the undefined value. See ECMA-262
+   * 4.3.10.
+   */
+  bool IsUndefined();
+
+  /**
+   * Returns true if this value is the null value. See ECMA-262
+   * 4.3.11.
+   */
+  bool IsNull();
+
+  /**
+   * Returns true if this value is true.
+   */
+  bool IsTrue();
+
+  /**
+   * Returns true if this value is false.
+   */
+  bool IsFalse();
+
+  /**
+   * Returns true if this value is an instance of the String type.
+   * See ECMA-262 8.4.
+   */
+  bool IsString();
+
+  /**
+   * Returns true if this value is a function.
+   */
+  bool IsFunction();
+
+  /**
+   * Returns true if this value is an array.
+   */
+  bool IsArray();
+
+  /**
+   * Returns true if this value is an object.
+   */
+  bool IsObject();
+
+  /**
+   * Returns true if this value is boolean.
+   */
+  bool IsBoolean();
+
+  /**
+   * Returns true if this value is a number.
+   */
+  bool IsNumber();
+
+  /**
+   * Returns true if this value is external.
+   */
+  bool IsExternal();
+
+  /**
+   * Returns true if this value is a 32-bit signed integer.
+   */
+  bool IsInt32();
+
+  /** Conversions to other JavaScript types. */
+  Local<Boolean> ToBoolean();
+  Local<Number> ToNumber();
+  Local<String> ToString();
+  Local<String> ToDetailString();
+  Local<Object> ToObject();
+  Local<Integer> ToInteger();
+  Local<Uint32> ToUint32();
+  Local<Int32> ToInt32();
+
+  /**
+   * Attempts to convert a string to an array index.
+   * Returns an empty handle if the conversion fails.
+   */
+  Local<Uint32> ToArrayIndex();
+
+  /** Conversions to plain C++ values. */
+  bool BooleanValue();
+  double NumberValue();
+  int64_t IntegerValue();
+  uint32_t Uint32Value();
+  int32_t Int32Value();
+
+  /** JS == */
+  bool Equals(Handle<Value> that);
+  /** JS === */
+  bool StrictEquals(Handle<Value> that);
+};
+
+
+/**
+ * The superclass of primitive values. See ECMA-262 4.3.2.
+ * Adds no members of its own; it only classifies its subclasses.
+ */
+class Primitive : public Value { };
+
+
+/**
+ * A primitive boolean value (ECMA-262, 4.3.14). Either the true
+ * or false value.
+ */
+class Boolean : public Primitive {
+ public:
+  /** The C++ value of this boolean. */
+  bool Value();
+  /** Returns a Boolean handle for the given C++ value. */
+  static inline Handle<Boolean> New(bool value);
+};
+
+
+/**
+ * A javascript string value (ECMA-262, 4.3.17).
+ */
+class String : public Primitive {
+ public:
+ int Length();
+
+ /**
+ * Write the contents of the string to an external buffer.
+ * If no arguments are given, expects that buffer is large
+ * enough to hold the entire string and NULL terminator. Copies
+ * the contents of the string and the NULL terminator into
+ * buffer.
+ *
+ * Copies up to length characters into the output buffer.
+ * Only null-terminates if there is enough space in the buffer.
+ *
+ * \param buffer The buffer into which the string will be copied.
+ * \param start The starting position within the string at which
+ * copying begins.
+ * \param length The number of bytes to copy from the string.
+ * \return The number of characters copied to the buffer
+ * excluding the NULL terminator.
+ */
+ int Write(uint16_t* buffer, int start = 0, int length = -1); // UTF-16
+ int WriteAscii(char* buffer,
+ int start = 0,
+ int length = -1); // literally ascii
+
+ /**
+ * Returns true if the string is external
+ */
+ bool IsExternal();
+
+ /**
+ * Returns true if the string is both external and ascii
+ */
+ bool IsExternalAscii();
+ /**
+ * An ExternalStringResource is a wrapper around a two-byte string
+ * buffer that resides outside the V8's heap. Implement an
+ * ExternalStringResource to manage the life cycle of the underlying
+ * buffer.
+ */
+ class ExternalStringResource {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ virtual ~ExternalStringResource() {}
+ /** The string data from the underlying buffer.*/
+ virtual const uint16_t* data() const = 0;
+ /** The length of the string. That is, the number of two-byte characters.*/
+ virtual size_t length() const = 0;
+ protected:
+ ExternalStringResource() {}
+ private:
+ ExternalStringResource(const ExternalStringResource&);
+ void operator=(const ExternalStringResource&);
+ };
+
+ /**
+ * An ExternalAsciiStringResource is a wrapper around an ascii
+ * string buffer that resides outside V8's heap. Implement an
+ * ExternalAsciiStringResource to manage the life cycle of the
+ * underlying buffer.
+ */
+
+ class ExternalAsciiStringResource {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ virtual ~ExternalAsciiStringResource() {}
+ /** The string data from the underlying buffer.*/
+ virtual const char* data() const = 0;
+ /** The number of ascii characters in the string.*/
+ virtual size_t length() const = 0;
+ protected:
+ ExternalAsciiStringResource() {}
+ private:
+ ExternalAsciiStringResource(const ExternalAsciiStringResource&);
+ void operator=(const ExternalAsciiStringResource&);
+ };
+
+ /**
+ * Get the ExternalStringResource for an external string. Only
+ * valid if IsExternal() returns true.
+ */
+ ExternalStringResource* GetExternalStringResource();
+
+ /**
+ * Get the ExternalAsciiStringResource for an external ascii string.
+ * Only valid if IsExternalAscii() returns true.
+ */
+ ExternalAsciiStringResource* GetExternalAsciiStringResource();
+
+ /** Casts a generic value to a String. */
+ static String* Cast(v8::Value* obj);
+
+ /**
+ * Allocates a new string from either utf-8 encoded or ascii data.
+ * The second parameter 'length' gives the buffer length. If it is
+ * not given (the default, -1), 'strlen' is used to determine the
+ * buffer length, which yields the wrong result if 'data' contains
+ * an embedded '\0' character. Callers supplying utf-8 encoded data
+ * must therefore be careful to pass the length explicitly.
+ */
+ static Local<String> New(const char* data, int length = -1);
+
+ /** Allocates a new string from utf16 data.*/
+ static Local<String> New(const uint16_t* data, int length = -1);
+
+ /** Creates a symbol. Returns one if it exists already.*/
+ static Local<String> NewSymbol(const char* data, int length = -1);
+
+ /**
+ * Creates a new external string using the data defined in the given
+ * resource. The resource is deleted when the external string is no
+ * longer live on V8's heap. The caller of this function should not
+ * delete or modify the resource. Neither should the underlying buffer be
+ * deallocated or modified except through the destructor of the
+ * external string resource.
+ */
+ static Local<String> NewExternal(ExternalStringResource* resource);
+
+ /**
+ * Creates a new external string using the ascii data defined in the given
+ * resource. The resource is deleted when the external string is no
+ * longer live on V8's heap. The caller of this function should not
+ * delete or modify the resource. Neither should the underlying buffer be
+ * deallocated or modified except through the destructor of the
+ * external string resource.
+ */
+ static Local<String> NewExternal(ExternalAsciiStringResource* resource);
+
+ /** Creates an undetectable string from the supplied ascii data.*/
+ static Local<String> NewUndetectable(const char* data, int length = -1);
+
+ /** Creates an undetectable string from the supplied utf16 data.*/
+ static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
+
+ /**
+ * Converts an object to an ascii string.
+ * Useful if you want to print the object.
+ */
+ class AsciiValue {
+ public:
+ explicit AsciiValue(Handle<v8::Value> obj);
+ ~AsciiValue();
+ /** Returns the converted string; presumably owned by this object and
+ released by the destructor — valid only while this AsciiValue lives. */
+ char* operator*() { return str_; }
+ private:
+ char* str_; // conversion buffer
+ };
+
+ /**
+ * Converts an object to a two-byte string.
+ */
+ class Value {
+ public:
+ explicit Value(Handle<v8::Value> obj);
+ ~Value();
+ /** Returns the converted string; presumably owned by this object and
+ released by the destructor — valid only while this Value lives. */
+ uint16_t* operator*() { return str_; }
+ private:
+ uint16_t* str_; // conversion buffer
+ };
+};
+
+
+/**
+ * A javascript number value (ECMA-262, 4.3.20)
+ */
+class Number : public Primitive {
+ public:
+ /** Returns the number's value as a double. */
+ double Value();
+ /** Creates a new number from the given double. */
+ static Local<Number> New(double value);
+ /** Casts a generic value to a Number. */
+ static Number* Cast(v8::Value* obj);
+ private:
+ Number(); // not directly constructible; use New()
+};
+
+
+/**
+ * A javascript value representing a signed integer.
+ */
+class Integer : public Number {
+ public:
+ static Local<Integer> New(int32_t value);
+ /** Returns the integer value. Note the return type is int64_t even
+ though New() takes int32_t. */
+ int64_t Value();
+ /** Casts a generic value to an Integer. */
+ static Integer* Cast(v8::Value* obj);
+ private:
+ Integer(); // not directly constructible; use New()
+};
+
+
+/**
+ * A javascript value representing a 32-bit signed integer.
+ */
+class Int32 : public Integer {
+ public:
+ /** Returns the value as a 32-bit signed integer. */
+ int32_t Value();
+ private:
+ Int32(); // not directly constructible
+};
+
+
+/**
+ * A javascript value representing a 32-bit unsigned integer.
+ */
+class Uint32 : public Integer {
+ public:
+ /** Returns the value as a 32-bit unsigned integer. */
+ uint32_t Value();
+ private:
+ Uint32(); // not directly constructible
+};
+
+
+/**
+ * An instance of the built-in Date constructor (ECMA-262, 15.9).
+ */
+class Date : public Value {
+ public:
+ /** Creates a date from 'time' (note the return type is Local<Value>,
+ not Local<Date>). */
+ static Local<Value> New(double time);
+};
+
+
+/** Attribute bits that can be set on a javascript property. */
+enum PropertyAttribute {
+ None = 0,
+ ReadOnly = 1 << 0, // the property's value cannot be changed
+ DontEnum = 1 << 1, // the property is not enumerated by for-in
+ DontDelete = 1 << 2 // the property cannot be deleted
+};
+
+/**
+ * A javascript object (ECMA-262, 4.3.3)
+ */
+class Object : public Value {
+ public:
+ /** Sets property 'key' to 'value' with the given attributes. */
+ bool Set(Handle<Value> key,
+ Handle<Value> value,
+ PropertyAttribute attribs = None);
+ /** Returns the value of property 'key'. */
+ Local<Value> Get(Handle<Value> key);
+
+ // TODO(1245389): Replace the type-specific versions of these
+ // functions with generic ones that accept a Handle<Value> key.
+ bool Has(Handle<String> key);
+ bool Delete(Handle<String> key);
+ bool Has(uint32_t index);
+ bool Delete(uint32_t index);
+
+ /**
+ * Get the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ Local<Value> GetPrototype();
+
+ /**
+ * Call builtin Object.prototype.toString on this object.
+ * This is different from Value::ToString() that may call
+ * a user-defined toString function. This one does not.
+ */
+ Local<String> ObjectProtoToString();
+
+ // TODO(1245384): Naming, consistent.
+ /** Accessors for the object's internal (embedder-defined) fields. */
+ int InternalFieldCount();
+ Local<Value> GetInternal(int index);
+ void SetInternal(int index, Handle<Value> value);
+
+ // Testers for local properties.
+ bool HasRealNamedProperty(Handle<String> key);
+ bool HasRealIndexedProperty(uint32_t index);
+ bool HasRealNamedCallbackProperty(Handle<String> key);
+
+ /**
+ * If result.IsEmpty() no real property was located in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ Handle<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+
+ /** Tests for a named lookup interceptor.*/
+ bool HasNamedLookupInterceptor();
+
+ /** Tests for an index lookup interceptor.*/
+ bool HasIndexedLookupInterceptor();
+
+
+ /** Creates a new, empty object. */
+ static Local<Object> New();
+ /** Casts a generic value to an Object. */
+ static Object* Cast(Value* obj);
+ private:
+ Object(); // not directly constructible; use New()
+};
+
+
+/**
+ * An instance of the built-in array constructor (ECMA-262, 15.4.2).
+ */
+class Array : public Object {
+ public:
+ /** Returns the length of the array. */
+ uint32_t Length();
+
+ /** Creates a new array with the given length. */
+ static Local<Array> New(int length = 0);
+ /** Casts a generic value to an Array. */
+ static Array* Cast(Value* obj);
+ private:
+ Array(); // not directly constructible; use New()
+};
+
+
+/**
+ * A javascript function object (ECMA-262, 15.3).
+ */
+class Function : public Object {
+ public:
+ /** Invokes the function as a constructor, as if by 'new'. */
+ Local<Object> NewInstance();
+ Local<Object> NewInstance(int argc, Handle<Value> argv[]);
+ /** Calls the function with the given receiver and arguments. */
+ Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
+ void SetName(Handle<String> name);
+ Handle<Value> GetName();
+ /** Casts a generic value to a Function. */
+ static Function* Cast(Value* obj);
+ private:
+ Function(); // not directly constructible
+};
+
+
+/**
+ * A javascript value that wraps a c++ void*. This type of value is
+ * mainly used to associate c++ data structures with javascript
+ * objects.
+ */
+class External : public Value {
+ public:
+ /** Wraps the given pointer in a new External value. */
+ static Local<External> New(void* value);
+ /** Casts a generic value to an External. */
+ static External* Cast(Value* obj);
+ /** Returns the wrapped pointer. */
+ void* Value();
+ private:
+ External(); // not directly constructible; use New()
+};
+
+
+// --- T e m p l a t e s ---
+
+
+/**
+ * The superclass of object and function templates.
+ */
+class Template : public Data {
+ public:
+ /** Adds a property to each instance created by this template.*/
+ void Set(Handle<String> name, Handle<Data> value,
+ PropertyAttribute attributes = None);
+ /** Convenience overload taking a raw C string as the property name. */
+ inline void Set(const char* name, Handle<Data> value);
+ private:
+ Template(); // only ObjectTemplate and FunctionTemplate may construct
+
+ friend class ObjectTemplate;
+ friend class FunctionTemplate;
+};
+
+
+/**
+ * The argument information given to function call callbacks. This
+ * class provides access to information about the context of the call,
+ * including the receiver, the number and values of arguments, and
+ * the holder of the function.
+ */
+class Arguments {
+ public:
+ /** Returns the number of arguments. */
+ inline int Length() const;
+ /** Returns the i'th argument. */
+ inline Local<Value> operator[](int i) const;
+ /** Returns the function being called. */
+ inline Local<Function> Callee() const;
+ /** Returns the receiver of the call. */
+ inline Local<Object> This() const;
+ /** Returns the object holding the called function. */
+ inline Local<Object> Holder() const;
+ /** Returns true if the function was invoked as a constructor. */
+ inline bool IsConstructCall() const;
+ /** Returns the data value passed when the callback was registered. */
+ inline Local<Value> Data() const;
+ private:
+ Arguments(); // constructed only by the V8 implementation
+ friend class ImplementationUtilities;
+ inline Arguments(Local<Value> data,
+ Local<Object> holder,
+ Local<Function> callee,
+ bool is_construct_call,
+ void** values, int length);
+ Local<Value> data_;
+ Local<Object> holder_;
+ Local<Function> callee_;
+ bool is_construct_call_;
+ void** values_;
+ int length_;
+};
+
+
+/**
+ * The information passed to an accessor callback about the context
+ * of the property access.
+ */
+class AccessorInfo {
+ public:
+ inline AccessorInfo(Local<Object> self,
+ Local<Value> data,
+ Local<Object> holder)
+ : self_(self), data_(data), holder_(holder) { }
+ /** Returns the data value passed when the accessor was registered. */
+ inline Local<Value> Data() const;
+ /** Returns the receiver of the property access. */
+ inline Local<Object> This() const;
+ /** Returns the object holding the accessed property. */
+ inline Local<Object> Holder() const;
+ private:
+ Local<Object> self_;
+ Local<Value> data_;
+ Local<Object> holder_;
+};
+
+
+/** Callback invoked when a function created from a template is called. */
+typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
+
+/** Callback used to look up a named property. See
+ FunctionTemplate::SetLookupHandler. */
+typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
+
+/**
+ * Accessor[Getter|Setter] are used as callback functions when
+ * setting|getting a particular property. See ObjectTemplate::SetAccessor.
+ */
+typedef Handle<Value> (*AccessorGetter)(Local<String> property,
+ const AccessorInfo& info);
+
+
+typedef void (*AccessorSetter)(Local<String> property,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * NamedProperty[Getter|Setter] are used as interceptors on object.
+ * See ObjectTemplate::SetNamedPropertyHandler.
+ */
+typedef Handle<Value> (*NamedPropertyGetter)(Local<String> property,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns the value if the setter intercepts the request.
+ * Otherwise, returns an empty handle.
+ */
+typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the interceptor intercepts the request.
+ * The result is true to indicate the property is found.
+ */
+typedef Handle<Boolean> (*NamedPropertyQuery)(Local<String> property,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the deleter intercepts the request.
+ * Otherwise, the return value is the value of the deleted expression.
+ */
+typedef Handle<Boolean> (*NamedPropertyDeleter)(Local<String> property,
+ const AccessorInfo& info);
+
+/**
+ * TODO(758124): Add documentation? Presumably returns an array of the
+ * property names handled by the named property interceptor.
+ */
+typedef Handle<Array> (*NamedPropertyEnumerator)(const AccessorInfo& info);
+
+/**
+ * TODO(758124): Add documentation? Presumably the indexed analogue of
+ * NamedPropertyGetter: returns the value if the getter intercepts the
+ * request, otherwise an empty handle.
+ */
+typedef Handle<Value> (*IndexedPropertyGetter)(uint32_t index,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns the value if the setter intercepts the request.
+ * Otherwise, returns an empty handle.
+ */
+typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the interceptor intercepts the request.
+ * The result is true to indicate the property is found.
+ */
+typedef Handle<Boolean> (*IndexedPropertyQuery)(uint32_t index,
+ const AccessorInfo& info);
+
+/**
+ * Returns a non-empty handle if the deleter intercepts the request.
+ * Otherwise, the return value is the value of the deleted expression.
+ */
+typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
+ const AccessorInfo& info);
+
+
+/** The indexed analogue of NamedPropertyEnumerator. */
+typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);
+
+
+/**
+ * TODO(758124): Clarify documentation? Determines whether host
+ * objects can read or write an accessor? (What does the default
+ * allow? Both or neither?) If a host object needs an access check and
+ * the check failed, some properties (accessors created by the API) are
+ * still accessible. Such properties have an AccessControl setting to
+ * allow read or write.
+ */
+enum AccessControl {
+ DEFAULT = 0,
+ ALL_CAN_READ = 1,
+ ALL_CAN_WRITE = 2
+};
+
+
+/**
+ * Undocumented security features: the kind of access being checked.
+ */
+enum AccessType {
+ ACCESS_GET,
+ ACCESS_SET,
+ ACCESS_HAS,
+ ACCESS_DELETE,
+ ACCESS_KEYS
+};
+
+/** Security callback for named property access on a global object. */
+typedef bool (*NamedSecurityCallback)(Local<Object> global,
+ Local<Value> key,
+ AccessType type,
+ Local<Value> data);
+
+/** Security callback for indexed property access on a global object. */
+typedef bool (*IndexedSecurityCallback)(Local<Object> global,
+ uint32_t index,
+ AccessType type,
+ Local<Value> data);
+
+
+/**
+ * TODO(758124): Make sure this documentation is up to date.
+ *
+ * A FunctionTemplate is used to create functions at runtime. There can only be
+ * ONE function created in an environment.
+ *
+ * A FunctionTemplate can have properties; these properties are added to the
+ * function object when it is created.
+ *
+ * A FunctionTemplate has a corresponding instance template which is used to
+ * create object instances when the function is used as a constructor.
+ * Properties added to the instance template are added to each object instance.
+ *
+ * A FunctionTemplate can have a prototype template. The prototype template
+ * is used to create the prototype object of the function.
+ *
+ * The following example illustrates the relationship between FunctionTemplate
+ * and various pieces:
+ *
+ * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ * t->Set("func_property", v8::Number::New(1));
+ *
+ * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
+ * proto_t->Set("proto_method", v8::FunctionTemplate::New(InvokeCallback));
+ * proto_t->Set("proto_const", v8::Number::New(2));
+ *
+ * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
+ * instance_t->SetAccessor("instance_accessor", InstanceAccessorCallback);
+ * instance_t->SetNamedPropertyHandler(PropertyHandlerCallback, ...);
+ * instance_t->Set("instance_property", Number::New(3));
+ *
+ * v8::Local<v8::Function> function = t->GetFunction();
+ * v8::Local<v8::Object> instance = function->NewInstance();
+ *
+ * Let's use "function" as the JS variable name of the function object
+ * and "instance" for the instance object created above; the following
+ * JavaScript statements hold:
+ *
+ * func_property in function == true
+ * function.func_property == 1
+ *
+ * function.prototype.proto_method() invokes InvokeCallback
+ * function.prototype.proto_const == 2
+ *
+ * instance instanceof function == true
+ * instance.instance_accessor calls InstanceAccessorCallback
+ * instance.instance_property == 3
+ *
+ *
+ * Inheritance:
+ *
+ * A FunctionTemplate can inherit from another one by calling the Inherit
+ * method. The following graph illustrates the semantics of inheritance:
+ *
+ * FunctionTemplate Parent -> Parent() . prototype -> { }
+ * ^ ^
+ * | Inherit(Parent) | .__proto__
+ * | |
+ * FunctionTemplate Child -> Child() . prototype -> { }
+ *
+ * When a FunctionTemplate 'Child' inherits from 'Parent', the prototype
+ * object of the Child() function has __proto__ pointing to the Parent()
+ * function's prototype object. An instance of the Child function has all
+ * properties on its parents' instance templates.
+ *
+ * Let Parent be the FunctionTemplate initialized in the previous section
+ * and create a Child function template by:
+ *
+ * Local<FunctionTemplate> parent = t;
+ * Local<FunctionTemplate> child = FunctionTemplate::New();
+ * child->Inherit(parent);
+ *
+ * Local<Function> child_function = child->GetFunction();
+ * Local<Object> child_instance = child_function->NewInstance();
+ *
+ * The following JS code holds:
+ * child_function.prototype.__proto__ == function.prototype;
+ * child_instance.instance_accessor calls InstanceAccessorCallback
+ * child_instance.instance_property == 3;
+ */
+class FunctionTemplate : public Template {
+ public:
+ /** Creates a function template.*/
+ static Local<FunctionTemplate> New(InvocationCallback callback = 0,
+ Handle<Value> data = Handle<Value>(),
+ Handle<Signature> signature =
+ Handle<Signature>());
+ /** Returns the unique function instance in the current execution context.*/
+ Local<Function> GetFunction();
+
+ /** Sets the callback invoked when the function is called.*/
+ void SetCallHandler(InvocationCallback callback,
+ Handle<Value> data = Handle<Value>());
+ void SetLookupHandler(LookupCallback handler);
+
+ /** Returns the template used to create instances of this function.*/
+ Local<ObjectTemplate> InstanceTemplate();
+
+ /** Causes the function template to inherit from a parent function template.*/
+ void Inherit(Handle<FunctionTemplate> parent);
+
+ /**
+ * A PrototypeTemplate is the template used to create the prototype object
+ * of the function created by this template.
+ */
+ Local<ObjectTemplate> PrototypeTemplate();
+
+ int InternalFieldCount();
+
+ /** Sets the number of internal fields on the object template.*/
+ void SetInternalFieldCount(int value);
+
+ void SetClassName(Handle<String> name);
+
+ /**
+ * Determines whether the __proto__ accessor ignores instances of the function template.
+ * Call with a value of true to make the __proto__ accessor ignore instances of the function template.
+ * Call with a value of false to make the __proto__ accessor not ignore instances of the function template.
+ * By default, instances of a function template are not ignored.
+ * TODO(758124): What does "not ignored" mean?
+ */
+ void SetHiddenPrototype(bool value);
+
+ /**
+ * Returns true if the given object is an instance of this function template.
+ */
+ bool HasInstance(Handle<Value> object);
+
+ private:
+ FunctionTemplate(); // not directly constructible; use New()
+ void AddInstancePropertyAccessor(Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter,
+ Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attributes);
+ void SetNamedInstancePropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data);
+ void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
+ IndexedPropertySetter setter,
+ IndexedPropertyQuery query,
+ IndexedPropertyDeleter remover,
+ IndexedPropertyEnumerator enumerator,
+ Handle<Value> data);
+ void SetInstanceCallAsFunctionHandler(InvocationCallback callback,
+ Handle<Value> data);
+
+ friend class Context;
+ friend class ObjectTemplate;
+};
+
+
+/**
+ * ObjectTemplate: a template for creating objects. (TODO(758124): Add
+ * more detailed comments.)
+ */
+class ObjectTemplate : public Template {
+ public:
+ static Local<ObjectTemplate> New();
+ /** Creates a new instance of this template.*/
+ Local<Object> NewInstance();
+
+ /**
+ * Sets an accessor on the object template.
+ * \param name (TODO(758124): Describe)
+ * \param getter (TODO(758124): Describe)
+ * \param setter (TODO(758124): Describe)
+ * \param data (TODO(758124): Describe)
+ * \param settings settings must be one of:
+ * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2
+ * \param attribute (TODO(758124): Describe)
+ */
+ void SetAccessor(Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
+
+ /**
+ * Sets a named property handler on the object template.
+ * \param getter (TODO(758124): Describe)
+ * \param setter (TODO(758124): Describe)
+ * \param query (TODO(758124): Describe)
+ * \param deleter (TODO(758124): Describe)
+ * \param enumerator (TODO(758124): Describe)
+ * \param data (TODO(758124): Describe)
+ */
+ void SetNamedPropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter = 0,
+ NamedPropertyQuery query = 0,
+ NamedPropertyDeleter deleter = 0,
+ NamedPropertyEnumerator enumerator = 0,
+ Handle<Value> data = Handle<Value>());
+
+ /**
+ * Sets an indexed property handler on the object template.
+ * \param getter (TODO(758124): Describe)
+ * \param setter (TODO(758124): Describe)
+ * \param query (TODO(758124): Describe)
+ * \param deleter (TODO(758124): Describe)
+ * \param enumerator (TODO(758124): Describe)
+ * \param data (TODO(758124): Describe)
+ */
+ void SetIndexedPropertyHandler(IndexedPropertyGetter getter,
+ IndexedPropertySetter setter = 0,
+ IndexedPropertyQuery query = 0,
+ IndexedPropertyDeleter deleter = 0,
+ IndexedPropertyEnumerator enumerator = 0,
+ Handle<Value> data = Handle<Value>());
+ /**
+ * Sets the callback to be used when calling instances created from
+ * this template as a function. If no callback is set, instances
+ * behave like normal javascript objects that cannot be called as a
+ * function.
+ */
+ void SetCallAsFunctionHandler(InvocationCallback callback,
+ Handle<Value> data = Handle<Value>());
+
+ /** Make object instances of the template as undetectable.*/
+ void MarkAsUndetectable();
+
+ /** TODO(758124): Clarify documentation: Object instances of the
+ * template need an access check.*/
+ void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
+ IndexedSecurityCallback indexed_handler,
+ Handle<Value> data = Handle<Value>());
+
+ private:
+ ObjectTemplate(); // not directly constructible; use New()
+ static Local<ObjectTemplate> New(Handle<FunctionTemplate> constructor);
+ friend class FunctionTemplate;
+};
+
+
+/**
+ * A function signature which specifies which receivers and arguments
+ * a function can legally be called with.
+ */
+class Signature : public Data {
+ public:
+ static Local<Signature> New(Handle<FunctionTemplate> receiver =
+ Handle<FunctionTemplate>(),
+ int argc = 0,
+ Handle<FunctionTemplate> argv[] = 0);
+ private:
+ Signature(); // not directly constructible; use New()
+};
+
+
+/**
+ * A utility for determining the type of objects based on which
+ * template they were constructed from.
+ */
+class TypeSwitch : public Data {
+ public:
+ static Local<TypeSwitch> New(Handle<FunctionTemplate> type);
+ static Local<TypeSwitch> New(int argc, Handle<FunctionTemplate> types[]);
+ /** Matches 'value' against the registered templates. */
+ int match(Handle<Value> value);
+ private:
+ TypeSwitch(); // not directly constructible; use New()
+};
+
+
+// --- E x t e n s i o n s ---
+
+
+/**
+ * Ignore
+ *
+ * An extension: a named piece of javascript source, optionally with
+ * native function bindings and dependencies on other extensions.
+ */
+class Extension {
+ public:
+ Extension(const char* name,
+ const char* source = 0,
+ int dep_count = 0,
+ const char** deps = 0);
+ virtual ~Extension() { }
+ /** Returns the native implementation of the named function, if any. */
+ virtual v8::Handle<v8::FunctionTemplate>
+ GetNativeFunction(v8::Handle<v8::String> name) {
+ return v8::Handle<v8::FunctionTemplate>();
+ }
+
+ const char* name() { return name_; }
+ const char* source() { return source_; }
+ int dependency_count() { return dep_count_; }
+ const char** dependencies() { return deps_; }
+ void set_auto_enable(bool value) { auto_enable_ = value; }
+ bool auto_enable() { return auto_enable_; }
+
+ private:
+ const char* name_;
+ const char* source_;
+ int dep_count_;
+ const char** deps_;
+ bool auto_enable_;
+};
+
+
+/** Registers an extension so it can be enabled in contexts. */
+void RegisterExtension(Extension* extension);
+
+
+/**
+ * Ignore
+ *
+ * Helper whose constructor registers the given extension; declare a
+ * static instance to register at program startup.
+ */
+class DeclareExtension {
+ public:
+ inline DeclareExtension(Extension* extension) {
+ RegisterExtension(extension);
+ }
+};
+
+
+// --- S t a t i c s ---
+
+
+/** Returns the undefined value. */
+Handle<Primitive> Undefined();
+/** Returns the null value. */
+Handle<Primitive> Null();
+/** Returns the boolean true value. */
+Handle<Boolean> True();
+/** Returns the boolean false value. */
+Handle<Boolean> False();
+
+
+/**
+ * A set of constraints that specifies the limits of the runtime's
+ * memory use.
+ */
+class ResourceConstraints {
+ public:
+ ResourceConstraints();
+ int max_young_space_size() { return max_young_space_size_; }
+ void set_max_young_space_size(int value) { max_young_space_size_ = value; }
+ int max_old_space_size() { return max_old_space_size_; }
+ void set_max_old_space_size(int value) { max_old_space_size_ = value; }
+ uint32_t* stack_limit() { return stack_limit_; }
+ void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+ private:
+ int max_young_space_size_; // limit on the young generation, in bytes(?)
+ int max_old_space_size_; // limit on the old generation, in bytes(?)
+ uint32_t* stack_limit_; // address treated as the stack limit
+};
+
+
+/** Applies the given constraints to the runtime. */
+bool SetResourceConstraints(ResourceConstraints* constraints);
+
+
+// --- E x c e p t i o n s ---
+
+
+/** Callback for reporting a fatal error; receives the error's location
+ and message. */
+typedef void (*FatalErrorCallback)(const char* location, const char* message);
+
+
+/** Callback for receiving messages; see V8::AddMessageListener. */
+typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> data);
+
+
+/**
+ * Schedules an exception to be thrown when returning to javascript. When an
+ * exception has been scheduled it is illegal to invoke any javascript
+ * operation; the caller must return immediately and only after the exception
+ * has been handled does it become legal to invoke javascript operations.
+ */
+Handle<Value> ThrowException(Handle<Value> exception);
+
+/**
+ * Create new error objects by calling the corresponding error object
+ * constructor with the message.
+ */
+class Exception {
+ public:
+ static Local<Value> RangeError(Handle<String> message);
+ static Local<Value> ReferenceError(Handle<String> message);
+ static Local<Value> SyntaxError(Handle<String> message);
+ static Local<Value> TypeError(Handle<String> message);
+ static Local<Value> Error(Handle<String> message);
+};
+
+
+/**
+ * Ignore
+ *
+ * Version fields filled in by V8::GetVersion.
+ */
+struct VersionInfo {
+ int major, minor, build_major, build_minor, revision;
+};
+
+// --- C o u n t e r s C a l l b a c k s
+
+/** Returns the address of the counter with the given name, or NULL(?);
+ see V8::SetCounterFunction. */
+typedef int* (*CounterLookupCallback)(const wchar_t* name);
+
+// --- F a i l e d A c c e s s C h e c k C a l l b a c k ---
+typedef void (*FailedAccessCheckCallback)(Local<Object> target,
+ AccessType type,
+ Local<Value> data);
+
+// --- G a r b a g e C o l l e c t i o n C a l l b a c k s
+
+/**
+ * Applications can register a callback function which is called
+ * before and after a major Garbage Collection.
+ * Allocations are not allowed in the callback function; you therefore
+ * cannot manipulate objects (set or delete properties for example)
+ * since it is likely such operations will result in the allocation of objects.
+ */
+typedef void (*GCCallback)();
+
+
+// --- C o n t e x t G e n e r a t o r
+
+/**
+ * Applications must provide a callback function which is called to generate
+ * a context if a context wasn't deserialized from the snapshot.
+ */
+
+typedef Persistent<Context> (*ContextGenerator)();
+
+
+/**
+ * Container class for static utility functions.
+ */
+class V8 {
+ public:
+ /** Sets the handler invoked on fatal errors. */
+ static void SetFatalErrorHandler(FatalErrorCallback that);
+
+ // TODO(758124): Clarify documentation: Prevent top level from
+ // calling V8::FatalProcessOutOfMemory if HasOutOfMemoryException();
+ static void IgnoreOutOfMemoryException();
+
+ // Check if V8 is dead.
+ static bool IsDead();
+
+ /**
+ * TODO(758124): Clarify documentation - what is the "ones" in
+ * "existing ones": Adds a message listener, does not overwrite any
+ * existing ones with the same callback function.
+ */
+ static bool AddMessageListener(MessageCallback that,
+ Handle<Value> data = Handle<Value>());
+
+ /**
+ * Remove all message listeners from the specified callback function.
+ */
+ static void RemoveMessageListeners(MessageCallback that);
+
+ /**
+ * Sets v8 flags from a string.
+ * TODO(758124): Describe flags?
+ */
+ static void SetFlagsFromString(const char* str, int length);
+
+ /** Sets the version fields in the given VersionInfo struct.*/
+ static void GetVersion(VersionInfo* info);
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * statistics counters.
+ */
+ static void SetCounterFunction(CounterLookupCallback);
+
+ /**
+ * Enables the computation of a sliding window of states. The sliding
+ * window information is recorded in statistics counters.
+ */
+ static void EnableSlidingStateWindow();
+
+ /** Callback function for reporting failed access checks.*/
+ static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
+
+ /**
+ * Enables the host application to receive a notification before a major GC.
+ * Allocations are not allowed in the callback function; you therefore
+ * cannot manipulate objects (set or delete properties for example)
+ * since it is likely such operations will result in the allocation of objects.
+ */
+ static void SetGlobalGCPrologueCallback(GCCallback);
+
+ /**
+ * Enables the host application to receive a notification after a major GC.
+ * (TODO(758124): is the following true for this one too?)
+ * Allocations are not allowed in the callback function; you therefore
+ * cannot manipulate objects (set or delete properties for example)
+ * since it is likely such operations will result in the allocation of objects.
+ */
+ static void SetGlobalGCEpilogueCallback(GCCallback);
+
+ /**
+ * Allows the host application to group objects together. If one object
+ * in the group is alive, all objects in the group are alive.
+ * After each GC, object groups are removed. It is intended to be used
+ * in the before-GC callback function to simulate DOM tree connections
+ * among JS wrapper objects.
+ */
+ static void AddObjectToGroup(void* id, Persistent<Object> obj);
+
+ /**
+ * Initializes from snapshot if possible. Otherwise, attempts to initialize
+ * from scratch.
+ */
+ static bool Initialize();
+
+ private:
+ V8(); // static-only class; never instantiated
+
+ // Internal handle bookkeeping used by the Handle/Local/Persistent
+ // friend templates below.
+ static void** GlobalizeReference(void** handle);
+ static void DisposeGlobal(void** global_handle);
+ static void MakeWeak(void** global_handle, void* data, WeakReferenceCallback);
+ static void ClearWeak(void** global_handle);
+ static bool IsGlobalNearDeath(void** global_handle);
+ static bool IsGlobalWeak(void** global_handle);
+
+ template <class T> friend class Handle;
+ template <class T> friend class Local;
+ template <class T> friend class Persistent;
+ friend class Context;
+};
+
+
+/**
+ * An external exception handler.
+ */
+class TryCatch {
+ public:
+
+ /**
+ * Creates a new try/catch block and registers it with v8.
+ */
+ TryCatch();
+
+ /**
+ * Unregisters and deletes this try/catch block.
+ */
+ ~TryCatch();
+
+ /**
+ * Returns true if an exception has been caught by this try/catch block.
+ */
+ bool HasCaught();
+
+ /**
+ * Returns the exception caught by this try/catch block. If no exception has
+ * been caught an empty handle is returned.
+ *
+ * The returned handle is valid until this TryCatch block has been destroyed.
+ */
+ Local<Value> Exception();
+
+ /**
+ * Clears any exceptions that may have been caught by this try/catch block.
+ * After this method has been called, HasCaught() will return false.
+ *
+ * It is not necessary to clear a try/catch block before using it again; if
+ * another exception is thrown the previously caught exception will just be
+ * overwritten. However, it is often a good idea since it makes it easier
+ * to determine which operation threw a given exception.
+ */
+ void Reset();
+
+ void SetVerbose(bool value);
+
+ // NOTE(review): the fields below look like implementation details of the
+ // exception machinery; they are public here but should probably be private.
+ public:
+ TryCatch* next_; // link to the enclosing TryCatch block
+ void* exception_; // the caught exception, if any
+ bool is_verbose_;
+};
+
+
+// --- C o n t e x t ---
+
+
+/**
+ * Ignore
+ *
+ * The set of extension names to enable when creating a context.
+ */
+class ExtensionConfiguration {
+ public:
+ ExtensionConfiguration(int name_count, const char* names[])
+ : name_count_(name_count), names_(names) { }
+ private:
+ friend class ImplementationUtilities;
+ int name_count_;
+ const char** names_;
+};
+
+
+/**
+ * A sandboxed execution context with its own set of built-in objects
+ * and functions.
+ */
+class Context {
+ public:
+ /** Returns the global object of this context. */
+ Local<Object> Global();
+
+ /** Creates a new context. */
+ static Persistent<Context> New(ExtensionConfiguration* extensions = 0,
+ Handle<ObjectTemplate> global_template =
+ Handle<ObjectTemplate>(),
+ Handle<Value> global_object = Handle<Value>());
+
+ /** Returns the context that is on the top of the stack.*/
+ static Local<Context> Current();
+
+ /** Returns the security context used to start JS execution.*/
+ static Local<Context> GetSecurityContext();
+
+ /**
+ * Sets the security token for the context. To access an object in
+ * another context, the security tokens must match.
+ */
+ void SetSecurityToken(Handle<Value> token);
+
+ /** Returns the security token of this context.*/
+ Handle<Value> GetSecurityToken();
+
+ /** Enter/exit this context; see also the Scope helper below. */
+ void Enter();
+ void Exit();
+
+ /** Returns true if the context has experienced an out of memory situation.*/
+ bool HasOutOfMemoryException();
+
+ /** Returns true if called from within a context.*/
+ static bool InContext();
+
+ /** Returns true if called from within a security context.*/
+ static bool InSecurityContext();
+
+ /**
+ * Stack-allocated class which sets the execution context for all
+ * operations executed within a local scope.
+ */
+ class Scope {
+ public:
+ inline Scope(Handle<Context> context) : context_(context) {
+ context_->Enter();
+ }
+ inline ~Scope() { context_->Exit(); }
+ private:
+ Handle<Context> context_;
+ };
+
+ private:
+ friend class Value;
+ friend class Script;
+ friend class Object;
+ friend class Function;
+};
+
+
+/**
+ * Multiple threads in V8 are allowed, but only one thread at a time is
+ * allowed to use V8. The definition of using V8' includes accessing
+ * handles or holding onto object pointers obtained from V8 handles.
+ * It is up to the user of V8 to ensure (perhaps with locking) that
+ * this constraint is not violated.
+ *
+ * If you wish to start using V8 in a thread you can do this by constructing
+ * a v8::Locker object. After the code using V8 has completed for the
+ * current thread you can call the destructor. This can be combined
+ * with C++ scope-based construction as follows:
+ *
+ * ...
+ * {
+ * v8::Locker locker;
+ * ...
+ * // Code using V8 goes here.
+ * ...
+ * } // Destructor called here
+ *
+ * If you wish to stop using V8 in a thread A you can do this by either
+ * by destroying the v8::Locker object as above or by constructing a
+ * v8::Unlocker object:
+ *
+ * {
+ * v8::Unlocker unlocker;
+ * ...
+ * // Code not using V8 goes here while V8 can run in another thread.
+ * ...
+ * } // Destructor called here.
+ *
+ * The Unlocker object is intended for use in a long-running callback
+ * from V8, where you want to release the V8 lock for other threads to
+ * use.
+ *
+ * The v8::Locker is a recursive lock. That is, you can lock more than
+ * once in a given thread. This can be useful if you have code that can
+ * be called either from code that holds the lock or from code that does
+ * not. The Unlocker is not recursive so you can not have several
+ * Unlockers on the stack at once, and you can not use an Unlocker in a
+ * thread that is not inside a Locker's scope.
+ *
+ * An unlocker will unlock several lockers if it has to and reinstate
+ * the correct depth of locking on its destruction. eg.:
+ *
+ * // V8 not locked.
+ * {
+ * v8::Locker locker;
+ * // V8 locked.
+ * {
+ * v8::Locker another_locker;
+ * // V8 still locked (2 levels).
+ * {
+ * v8::Unlocker unlocker;
+ * // V8 not locked.
+ * }
+ * // V8 locked again (2 levels).
+ * }
+ * // V8 still locked (1 level).
+ * }
+ * // V8 Now no longer locked.
+ */
+class Unlocker {
+ public:
+  // Releases the V8 lock held by the current thread, however many
+  // recursive Locker levels are active (see the comment above).
+  Unlocker();
+  // Restores the locking depth that was in effect at construction.
+  ~Unlocker();
+};
+
+
+class Locker {
+ public:
+  // Acquires the V8 lock for the current thread. Recursive: nested
+  // Lockers on the same thread are allowed (see the comment above).
+  Locker();
+  ~Locker();
+#ifdef DEBUG
+  static void AssertIsLocked();
+#else
+  // Assertion is a no-op in release builds.
+  static inline void AssertIsLocked() { }
+#endif
+  /*
+   * Fires a timer every n ms that will switch between
+   * multiple threads that are in contention for the V8 lock.
+   */
+  static void StartPreemption(int every_n_ms);
+  static void StopPreemption();
+ private:
+  bool has_lock_;   // Whether this Locker actually took the lock.
+  bool top_level_;  // Whether this is the outermost Locker on this thread.
+};
+
+
+
+// --- I m p l e m e n t a t i o n ---
+
+// Default-constructed handles are empty: they refer to no value.
+template <class T>
+Handle<T>::Handle() : val_(0) { }
+
+
+template <class T>
+Local<T>::Local() : Handle<T>() { }
+
+
+template <class T>
+Local<T> Local<T>::New(Handle<T> that) {
+  // Copies the value of the given handle into a fresh handle allocated in
+  // the current HandleScope. Empty handles stay empty.
+  if (that.IsEmpty()) return Local<T>();
+  void** p = reinterpret_cast<void**>(*that);
+  return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
+}
+
+
+template <class T>
+Persistent<T> Persistent<T>::New(Handle<T> that) {
+  // Promotes a (scope-bound) handle to a persistent one whose storage
+  // survives the enclosing HandleScope. Empty handles stay empty.
+  if (that.IsEmpty()) return Persistent<T>();
+  void** p = reinterpret_cast<void**>(*that);
+  return Persistent<T>(reinterpret_cast<T*>(V8::GlobalizeReference(p)));
+}
+
+
+template <class T>
+bool Persistent<T>::IsNearDeath() {
+  // Empty handles are never near death; otherwise delegate to the global
+  // handle bookkeeping in V8.
+  if (this->IsEmpty()) return false;
+  return V8::IsGlobalNearDeath(reinterpret_cast<void**>(**this));
+}
+
+
+template <class T>
+bool Persistent<T>::IsWeak() {
+  // Empty handles are never weak.
+  if (this->IsEmpty()) return false;
+  return V8::IsGlobalWeak(reinterpret_cast<void**>(**this));
+}
+
+
+template <class T>
+void Persistent<T>::Dispose() {
+  // Releases the global storage cell backing this handle; disposing an
+  // empty handle is a no-op.
+  if (this->IsEmpty()) return;
+  V8::DisposeGlobal(reinterpret_cast<void**>(**this));
+}
+
+
+template <class T>
+Persistent<T>::Persistent() : Handle<T>() { }
+
+template <class T>
+void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
+  // Marks the handle weak: 'callback' is invoked with 'parameters' when
+  // the object is only reachable through weak references.
+  V8::MakeWeak(reinterpret_cast<void**>(**this), parameters, callback);
+}
+
+template <class T>
+void Persistent<T>::ClearWeak() {
+  // Turns a weak handle back into an ordinary (strong) persistent handle.
+  V8::ClearWeak(reinterpret_cast<void**>(**this));
+}
+
+// Both operators expose the raw object pointer stored in the handle.
+template <class T>
+T* Handle<T>::operator->() {
+  return val_;
+}
+
+
+template <class T>
+T* Handle<T>::operator*() {
+  return val_;
+}
+
+
+Local<Value> Arguments::operator[](int i) const {
+  // Out-of-range indices yield undefined instead of failing.
+  if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
+  // NOTE(review): arguments appear to be laid out at decreasing addresses
+  // from values_ (note the subtraction) -- confirm against the calling
+  // convention before relying on this.
+  return Local<Value>(reinterpret_cast<Value*>(values_ - i));
+}
+
+
+// The function being invoked.
+Local<Function> Arguments::Callee() const {
+  return callee_;
+}
+
+
+// The receiver; stored one slot past values_ in the argument block.
+Local<Object> Arguments::This() const {
+  return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
+}
+
+
+// The object on which the property lookup started.
+Local<Object> Arguments::Holder() const {
+  return holder_;
+}
+
+
+// The data value supplied when the callback was registered.
+Local<Value> Arguments::Data() const {
+  return data_;
+}
+
+
+// True when the function was invoked via 'new'.
+bool Arguments::IsConstructCall() const {
+  return is_construct_call_;
+}
+
+
+// Number of arguments actually passed by the caller.
+int Arguments::Length() const {
+  return length_;
+}
+
+
+// The data value supplied when the accessor was registered.
+Local<Value> AccessorInfo::Data() const {
+  return data_;
+}
+
+
+// The receiver of the accessor call.
+Local<Object> AccessorInfo::This() const {
+  return self_;
+}
+
+
+// The object holding the accessor.
+Local<Object> AccessorInfo::Holder() const {
+  return holder_;
+}
+
+
+template <class T>
+Local<T> HandleScope::Close(Handle<T> value) {
+  // Closes this scope while escaping 'value' into the enclosing scope so
+  // the caller can keep using it after this scope is gone.
+  void** after = RawClose(reinterpret_cast<void**>(*value));
+  return Local<T>(reinterpret_cast<T*>(after));
+}
+
+// Name of the resource (e.g. file) the script came from.
+Handle<String> ScriptOrigin::ResourceName() {
+  return resource_name_;
+}
+
+
+// Line offset of the script within its resource.
+Handle<Integer> ScriptOrigin::ResourceLineOffset() {
+  return resource_line_offset_;
+}
+
+
+// Column offset of the script within its resource.
+Handle<Integer> ScriptOrigin::ResourceColumnOffset() {
+  return resource_column_offset_;
+}
+
+
+// Returns the canonical handle for the given truth value; booleans are
+// never freshly allocated.
+Handle<Boolean> Boolean::New(bool value) {
+  if (value) return True();
+  return False();
+}
+
+
+// Convenience overload: wraps the C string in a v8 String and forwards.
+void Template::Set(const char* name, v8::Handle<Data> value) {
+  Set(v8::String::New(name), value);
+}
+
+
+/**
+ * \example evaluator.cc
+ * A simple evaluator that takes a list of expressions on the
+ * command-line and executes them.
+ */
+
+
+/**
+ * \example process.cc
+ */
+
+
+} // namespace v8
+
+
+#undef EXPORT
+#undef TYPE_CHECK
+
+
+#endif // _V8
--- /dev/null
+# Copyright 2008 Google Inc. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+from os.path import join, dirname, abspath
+root_dir = dirname(File('SConstruct').rfile().abspath)
+sys.path.append(join(root_dir, 'tools'))
+import js2c
+Import('toolchain arch os mode use_snapshot library_type')
+
+
+BUILD_OPTIONS_MAP = {
+ 'gcc': {
+ 'debug': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ },
+ 'release': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ }
+ },
+ 'gcc-darwin': {
+ 'debug': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ },
+ 'release': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ }
+ },
+ 'msvc': {
+ 'debug': {
+ 'default': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /Od /Gm /MTd',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'DEBUG', '_DEBUG', 'ENABLE_DISASSEMBLER'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /DEBUG',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/W3 /WX /wd4355 /wd4800'
+ },
+ 'dtoa': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /Od /Gm /MTd',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'DEBUG', '_DEBUG', 'ENABLE_DISASSEMBLER'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /DEBUG',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4018 /wd4244'
+ },
+ 'jscre': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /Od /Gm /MTd',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'DEBUG', '_DEBUG', 'ENABLE_DISASSEMBLER', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /DEBUG',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4003 /wd4005 /wd4018 /wd4133'
+ }
+ },
+ 'release': {
+ 'default': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /O2 /MT',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /OPT:REF /OPT:ICF /SUBSYSTEM:CONSOLE',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/W3 /WX /wd4355 /wd4800'
+ },
+ 'dtoa': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /O2 /MT',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /OPT:REF /OPT:ICF /SUBSYSTEM:CONSOLE',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4018 /wd4244'
+ },
+ 'jscre': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /O2 /MT',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /OPT:REF /OPT:ICF /SUBSYSTEM:CONSOLE',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4003 /wd4005 /wd4018 /wd4133'
+ }
+ }
+ }
+}
+
+
+# C++ sources compiled for every architecture and operating system;
+# arch/os-specific files are added from PLATFORM_DEPENDENT_SOURCES below.
+PLATFORM_INDEPENDENT_SOURCES = '''
+accessors.cc
+allocation.cc
+api.cc
+assembler.cc
+ast.cc
+bootstrapper.cc
+builtins.cc
+checks.cc
+code-stubs.cc
+codegen.cc
+compiler.cc
+contexts.cc
+conversions.cc
+counters.cc
+dateparser.cc
+debug.cc
+execution.cc
+factory.cc
+flags.cc
+frames.cc
+global-handles.cc
+handles.cc
+hashmap.cc
+heap.cc
+ic.cc
+jsregexp.cc
+log.cc
+mark-compact.cc
+messages.cc
+objects-debug.cc
+objects.cc
+parser.cc
+prettyprinter.cc
+property.cc
+rewriter.cc
+runtime.cc
+scanner.cc
+scopeinfo.cc
+scopes.cc
+serialize.cc
+snapshot-common.cc
+spaces.cc
+string-stream.cc
+stub-cache.cc
+token.cc
+top.cc
+unicode.cc
+usage-analyzer.cc
+utils.cc
+v8-counters.cc
+v8.cc
+v8threads.cc
+variables.cc
+zone.cc
+'''.split()
+
+
+# Sources selected by target architecture ('arch:...') and by host
+# operating system ('os:...'); ConfigureBuild appends the matching lists.
+PLATFORM_DEPENDENT_SOURCES = {
+  'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc', 'disasm-arm.cc', 'disassembler-arm.cc', 'frames-arm.cc', 'ic-arm.cc', 'macro-assembler-arm.cc', 'simulator-arm.cc', 'stub-cache-arm.cc'],
+  'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc', 'cpu-ia32.cc', 'disasm-ia32.cc', 'disassembler-ia32.cc', 'frames-ia32.cc', 'ic-ia32.cc', 'macro-assembler-ia32.cc', 'simulator-ia32.cc', 'stub-cache-ia32.cc'],
+  'os:linux': ['platform-linux.cc'],
+  'os:macos': ['platform-macos.cc'],
+  'os:win32': ['platform-win32.cc']
+}
+
+
+# JavaScript natives compiled into the library by the JS2C builder.
+# Order matters: runtime.js and v8natives.js define helpers used by the
+# later files -- NOTE(review): presumed from the ordering, confirm before
+# reordering.
+LIBRARY_FILES = '''
+runtime.js
+v8natives.js
+array.js
+string.js
+uri.js
+math.js
+messages.js
+apinatives.js
+debug-delay.js
+mirror-delay.js
+date-delay.js
+regexp-delay.js
+'''.split()
+
+
+# Sources of the bundled jscre regular-expression library, relative to
+# third_party/jscre (the path is prepended in ConfigureBuild).
+JSCRE_FILES = '''
+pcre_compile.cpp
+pcre_exec.cpp
+pcre_tables.cpp
+pcre_ucp_searchfuncs.cpp
+pcre_xclass.cpp
+'''.split()
+
+
+def Abort(message):
+ print message
+ sys.exit(1)
+
+
+def BuildObject(env, input, **kw):
+ if library_type == 'static':
+ return env.StaticObject(input, **kw)
+ elif library_type == 'shared':
+ return env.SharedObject(input, **kw)
+ else:
+ return env.Object(input, **kw)
+
+
+def ConfigureBuild():
+  # Drives the whole build: sets up the SCons environment from
+  # BUILD_OPTIONS_MAP, generates the JS natives, builds jscre/dtoa and the
+  # core sources, optionally builds and runs mksnapshot, and finally links
+  # the v8 library. Relies on the SCons globals imported at the top of
+  # this file (toolchain, arch, os, mode, use_snapshot, library_type).
+  env = Environment()
+  options = BUILD_OPTIONS_MAP[toolchain][mode]['default']
+  env.Replace(**options)
+  env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
+  env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile $LOGFILE')
+
+  # Build the standard platform-independent source files.
+  source_files = PLATFORM_INDEPENDENT_SOURCES
+  source_files += PLATFORM_DEPENDENT_SOURCES["arch:%s" % arch]
+  source_files += PLATFORM_DEPENDENT_SOURCES["os:%s" % os]
+  full_source_files = [s for s in source_files]
+
+  # Combine the javascript library files into a single C++ file and
+  # compile it.
+  library_files = [s for s in LIBRARY_FILES]
+  library_files.append('macros.py')
+  libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries_empty.cc'], library_files)
+  libraries_obj = BuildObject(env, libraries_src, CPPPATH=['.'])
+
+  # Build JSCRE with its own (more permissive) warning options.
+  jscre_env = env.Copy()
+  jscre_options = BUILD_OPTIONS_MAP[toolchain][mode]['jscre']
+  jscre_env.Replace(**jscre_options)
+  jscre_files = [join('third_party', 'jscre', s) for s in JSCRE_FILES]
+  jscre_obj = BuildObject(jscre_env, jscre_files)
+
+  # Build dtoa, likewise with its own options.
+  dtoa_env = env.Copy()
+  dtoa_options = BUILD_OPTIONS_MAP[toolchain][mode]['dtoa']
+  dtoa_env.Replace(**dtoa_options)
+  dtoa_files = ['dtoa-config.c']
+  dtoa_obj = BuildObject(dtoa_env, dtoa_files)
+
+  full_source_objs = BuildObject(env, full_source_files)
+  non_snapshot_files = [jscre_obj, dtoa_obj, full_source_objs]
+
+  # Create snapshot if necessary.
+  empty_snapshot_obj = BuildObject(env, 'snapshot-empty.cc')
+  if use_snapshot:
+    # mksnapshot is linked against the snapshot-less VM, then executed to
+    # generate snapshot.cc for the final library.
+    mksnapshot_src = 'mksnapshot.cc'
+    mksnapshot = env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+    snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
+    snapshot_obj = BuildObject(env, snapshot_cc, CPPPATH=['.'])
+    # The snapshot embeds the natives, so link the empty libraries file.
+    libraries_obj = BuildObject(env, libraries_empty_src, CPPPATH=['.'])
+  else:
+    snapshot_obj = empty_snapshot_obj
+
+  all_files = [non_snapshot_files, libraries_obj, snapshot_obj]
+  if library_type == 'static':
+    env.StaticLibrary('v8', all_files)
+  elif library_type == 'shared':
+    # There seems to be a glitch in the way scons decides where to put
+    # .pdb files when compiling using msvc so we specify it manually.
+    # This should not affect any other platforms.
+    env.SharedLibrary('v8', all_files, PDB='v8.dll.pdb')
+  else:
+    env.Library('v8', all_files)
+
+
+ConfigureBuild()
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "execution.h"
+#include "factory.h"
+#include "scopeinfo.h"
+#include "top.h"
+#include "zone-inl.h"
+
+namespace v8 { namespace internal {
+
+
+// Walks the prototype chain of 'obj' (inclusive) until an object of type C
+// is found. Sets *found_it and returns the object on success; returns NULL
+// with *found_it left false when the chain ends (null prototype).
+template <class C>
+static C* FindInPrototypeChain(Object* obj, bool* found_it) {
+  ASSERT(!*found_it);
+  while (!Is<C>(obj)) {
+    if (obj == Heap::null_value()) return NULL;
+    obj = obj->GetPrototype();
+  }
+  *found_it = true;
+  return C::cast(obj);
+}
+
+
+// Entry point that never should be called.
+Object* Accessors::IllegalSetter(JSObject*, Object*, void*) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+Object* Accessors::IllegalGetAccessor(Object* object, void*) {
+ UNREACHABLE();
+ return object;
+}
+
+
+Object* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
+ // According to ECMA-262, section 8.6.2.2, page 28, setting
+ // read-only properties must be silently ignored.
+ return value;
+}
+
+
+//
+// Accessors::ArrayLength
+//
+
+
+Object* Accessors::ArrayGetLength(Object* object, void*) {
+  // Traverse the prototype chain until we reach an array.
+  bool found_it = false;
+  JSArray* holder = FindInPrototypeChain<JSArray>(object, &found_it);
+  // No array in the chain: report length 0 rather than failing.
+  if (!found_it) return Smi::FromInt(0);
+  return holder->length();
+}
+
+
+// The helper function will 'flatten' Number objects.
+// The helper function will 'flatten' Number objects: a JSValue wrapping a
+// number (its map is the Number function's initial map) is replaced by the
+// wrapped primitive; any other value is returned unchanged.
+Object* Accessors::FlattenNumber(Object* value) {
+  if (value->IsNumber() || !value->IsJSValue()) return value;
+  JSValue* wrapper = JSValue::cast(value);
+  ASSERT(
+      Top::context()->global_context()->number_function()->has_initial_map());
+  Map* number_map =
+      Top::context()->global_context()->number_function()->initial_map();
+  if (wrapper->map() == number_map) return wrapper->value();
+  return value;
+}
+
+
+Object* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
+  value = FlattenNumber(value);
+
+  // Need to call methods that may trigger GC.
+  HandleScope scope;
+
+  // Protect raw pointers.
+  Handle<JSObject> object_handle(object);
+  Handle<Object> value_handle(value);
+
+  bool has_exception;
+  Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
+  if (has_exception) return Failure::Exception();
+  Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
+  if (has_exception) return Failure::Exception();
+
+  // Restore raw pointers (the conversions above may have moved objects).
+  object = *object_handle;
+  value = *value_handle;
+
+  // Per ECMA-262 15.4.5.1, a length assignment is only valid when
+  // ToUint32(value) == ToNumber(value).
+  if (uint32_v->Number() == number_v->Number()) {
+    if (object->IsJSArray()) {
+      return JSArray::cast(object)->SetElementsLength(*uint32_v);
+    } else {
+      // This means one of the object's prototypes is a JSArray and
+      // the object does not have a 'length' property.
+      return object->AddProperty(Heap::length_symbol(), value, NONE);
+    }
+  }
+
+  return Top::Throw(*Factory::NewRangeError("invalid_array_length",
+                                            HandleVector<Object>(NULL, 0)));
+}
+
+
+const AccessorDescriptor Accessors::ArrayLength = {
+  ArrayGetLength,
+  ArraySetLength,
+  0
+};
+
+
+//
+// Accessors::StringLength
+//
+
+
+Object* Accessors::StringGetLength(Object* object, void*) {
+  // Unwrap a String object to its primitive value first.
+  Object* value = object;
+  if (object->IsJSValue()) value = JSValue::cast(object)->value();
+  if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
+  // If object is not a string we return 0 to be compatible with WebKit.
+  // Note: Firefox returns the length of ToString(object).
+  return Smi::FromInt(0);
+}
+
+
+const AccessorDescriptor Accessors::StringLength = {
+  StringGetLength,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptSource
+//
+
+
+// Getter for the 'source' property of script wrapper objects.
+Object* Accessors::ScriptGetSource(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->source();
+}
+
+
+const AccessorDescriptor Accessors::ScriptSource = {
+  ScriptGetSource,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptName
+//
+
+
+// Getter for the 'name' property of script wrapper objects.
+Object* Accessors::ScriptGetName(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->name();
+}
+
+
+const AccessorDescriptor Accessors::ScriptName = {
+  ScriptGetName,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptLineOffset
+//
+
+
+// Getter for the 'line_offset' property of script wrapper objects.
+Object* Accessors::ScriptGetLineOffset(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->line_offset();
+}
+
+
+const AccessorDescriptor Accessors::ScriptLineOffset = {
+  ScriptGetLineOffset,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptColumnOffset
+//
+
+
+// Getter for the 'column_offset' property of script wrapper objects.
+Object* Accessors::ScriptGetColumnOffset(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->column_offset();
+}
+
+
+const AccessorDescriptor Accessors::ScriptColumnOffset = {
+  ScriptGetColumnOffset,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptType
+//
+
+
+// Getter for the 'type' property of script wrapper objects.
+Object* Accessors::ScriptGetType(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->type();
+}
+
+
+const AccessorDescriptor Accessors::ScriptType = {
+  ScriptGetType,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::FunctionPrototype
+//
+
+
+Object* Accessors::FunctionGetPrototype(Object* object, void*) {
+  bool found_it = false;
+  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  // Lazily allocate the prototype object on first access.
+  if (!function->has_prototype()) {
+    Object* prototype = Heap::AllocateFunctionPrototype(function);
+    if (prototype->IsFailure()) return prototype;
+    Object* result = function->SetPrototype(prototype);
+    if (result->IsFailure()) return result;
+  }
+  return function->prototype();
+}
+
+
+Object* Accessors::FunctionSetPrototype(JSObject* object,
+                                        Object* value,
+                                        void*) {
+  bool found_it = false;
+  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  if (function->has_initial_map()) {
+    // If the function has allocated the initial map
+    // replace it with a copy containing the new prototype.
+    Object* new_map = function->initial_map()->Copy();
+    if (new_map->IsFailure()) return new_map;
+    Object* result = Map::cast(new_map)->EnsureNoMapTransitions();
+    if (result->IsFailure()) return result;
+    function->set_initial_map(Map::cast(new_map));
+  }
+  Object* prototype = function->SetPrototype(value);
+  if (prototype->IsFailure()) return prototype;
+  ASSERT(function->prototype() == value);
+  // Setting 'prototype' returns the function itself, not the new value.
+  return function;
+}
+
+
+const AccessorDescriptor Accessors::FunctionPrototype = {
+  FunctionGetPrototype,
+  FunctionSetPrototype,
+  0
+};
+
+
+//
+// Accessors::FunctionLength
+//
+
+
+Object* Accessors::FunctionGetLength(Object* object, void*) {
+  bool found_it = false;
+  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Smi::FromInt(0);
+  // Check if already compiled.
+  if (!function->is_compiled()) {
+    // If the function isn't compiled yet, the length is not computed
+    // correctly yet. Compile it now and return the right length.
+    HandleScope scope;
+    Handle<JSFunction> function_handle(function);
+    if (!CompileLazy(function_handle, KEEP_EXCEPTION)) {
+      return Failure::Exception();
+    }
+    return Smi::FromInt(function_handle->shared()->length());
+  } else {
+    return Smi::FromInt(function->shared()->length());
+  }
+}
+
+
+const AccessorDescriptor Accessors::FunctionLength = {
+  FunctionGetLength,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::FunctionName
+//
+
+
+// Getter for the 'name' property, read from the shared function info.
+Object* Accessors::FunctionGetName(Object* object, void*) {
+  bool found_it = false;
+  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  return holder->shared()->name();
+}
+
+
+const AccessorDescriptor Accessors::FunctionName = {
+  FunctionGetName,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::FunctionArguments
+//
+
+
+// Getter for the 'arguments' property: materializes an arguments object
+// for the top-most live invocation of the function, or null when the
+// function is not currently executing.
+Object* Accessors::FunctionGetArguments(Object* object, void*) {
+  HandleScope scope;
+  bool found_it = false;
+  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  Handle<JSFunction> function(holder);
+
+  // Find the top invocation of the function by traversing frames.
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    // Skip all frames that aren't invocations of the given function.
+    JavaScriptFrame* frame = it.frame();
+    if (frame->function() != *function) continue;
+
+    // If there is an arguments variable in the stack, we return that.
+    int index = ScopeInfo<>::StackSlotIndex(frame->FindCode(),
+                                            Heap::arguments_symbol());
+    if (index >= 0) return frame->GetExpression(index);
+
+    // If there isn't an arguments variable in the stack, we need to
+    // find the frame that holds the actual arguments passed to the
+    // function on the stack.
+    it.AdvanceToArgumentsFrame();
+    frame = it.frame();
+
+    // Get the number of arguments and construct an arguments object
+    // mirror for the right frame.
+    const int length = frame->GetProvidedParametersCount();
+    Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
+
+    // Copy the parameters to the arguments object.
+    FixedArray* array = FixedArray::cast(arguments->elements());
+    ASSERT(array->length() == length);
+    for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+
+    // Return the freshly allocated arguments object.
+    return *arguments;
+  }
+
+  // No frame corresponding to the given function found. Return null.
+  return Heap::null_value();
+}
+
+
+const AccessorDescriptor Accessors::FunctionArguments = {
+  FunctionGetArguments,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::FunctionCaller
+//
+
+
+// Getter for the 'caller' property: the closest non-top-level function
+// above the top-most live invocation of this function, or null.
+Object* Accessors::FunctionGetCaller(Object* object, void*) {
+  HandleScope scope;
+  bool found_it = false;
+  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  Handle<JSFunction> function(holder);
+
+  // Find the top invocation of the function by traversing frames.
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    // Skip all frames that aren't invocations of the given function.
+    if (it.frame()->function() != *function) continue;
+    // Once we have found the frame, we need to go to the caller
+    // frame. This may require skipping through a number of top-level
+    // frames, e.g. frames for scripts not functions.
+    while (true) {
+      it.Advance();
+      if (it.done()) return Heap::null_value();
+      JSFunction* caller = JSFunction::cast(it.frame()->function());
+      if (!caller->shared()->is_toplevel()) return caller;
+    }
+  }
+
+  // No frame corresponding to the given function found. Return null.
+  return Heap::null_value();
+}
+
+
+const AccessorDescriptor Accessors::FunctionCaller = {
+  FunctionGetCaller,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::ObjectPrototype
+//
+
+
+// Getter for __proto__: skips hidden prototypes so API-level callers only
+// ever see the first visible prototype.
+Object* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+  Object* current = receiver->GetPrototype();
+  while (current->IsJSObject() &&
+         JSObject::cast(current)->map()->is_hidden_prototype()) {
+    current = current->GetPrototype();
+  }
+  return current;
+}
+
+
+// Setter for the __proto__ accessor.  Rejects prototype cycles, then
+// installs |value| as the prototype of the last object in the receiver's
+// hidden-prototype chain.  Returns |value| on success.
+Object* Accessors::ObjectSetPrototype(JSObject* receiver,
+                                      Object* value,
+                                      void*) {
+  // Before we can set the prototype we need to be sure
+  // prototype cycles are prevented.
+  // It is sufficient to validate that the receiver is not in the new
+  // prototype chain.
+
+  // Silently ignore the change if value is not a JSObject or null.
+  // SpiderMonkey behaves this way.
+  if (!value->IsJSObject() && !value->IsNull()) return value;
+
+  for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
+    if (JSObject::cast(pt) == receiver) {
+      // Cycle detected.
+      HandleScope scope;
+      return Top::Throw(*Factory::NewError("cyclic_proto",
+                                           HandleVector<Object>(NULL, 0)));
+    }
+  }
+
+  // Find the first object in the chain whose prototype object is not
+  // hidden and set the new prototype on that object.
+  JSObject* current = receiver;
+  Object* current_proto = receiver->GetPrototype();
+  while (current_proto->IsJSObject() &&
+         JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+    current = JSObject::cast(current_proto);
+    current_proto = current_proto->GetPrototype();
+  }
+
+  // Set the new prototype of the object.  A copy of the map is needed so
+  // other objects sharing the old map keep their old prototype.
+  Object* new_map = current->map()->Copy();
+  if (new_map->IsFailure()) return new_map;
+  Object* result = Map::cast(new_map)->EnsureNoMapTransitions();
+  if (result->IsFailure()) return result;
+  Map::cast(new_map)->set_prototype(value);
+  current->set_map(Map::cast(new_map));
+
+  // To be consistent with other Set functions, return the value.
+  return value;
+}
+
+
+// Descriptor for the __proto__ accessor (getter and setter above).
+const AccessorDescriptor Accessors::ObjectPrototype = {
+  ObjectGetPrototype,
+  ObjectSetPrototype,
+  0
+};
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ACCESSORS_H_
+#define V8_ACCESSORS_H_
+
+namespace v8 { namespace internal {
+
+// The list of accessor descriptors. This is a second-order macro
+// taking a macro to be applied to all accessor descriptor names.
+#define ACCESSOR_DESCRIPTOR_LIST(V) \
+  V(FunctionPrototype)   \
+  V(FunctionLength)      \
+  V(FunctionName)        \
+  V(FunctionArguments)   \
+  V(FunctionCaller)      \
+  V(ArrayLength)         \
+  V(StringLength)        \
+  V(ScriptSource)        \
+  V(ScriptName)          \
+  V(ScriptLineOffset)    \
+  V(ScriptColumnOffset)  \
+  V(ScriptType)          \
+  V(ObjectPrototype)
+
+// Accessors contains all predefined proxy accessors.
+
+class Accessors : public AllStatic {
+ public:
+  // Accessor descriptors, one static member per name in the list above.
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+  static const AccessorDescriptor name;
+  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+
+  // One enumerator (k<Name>) per descriptor, plus a total count.
+  enum DescriptorId {
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+    k##name,
+  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+    descriptorCount
+  };
+
+  // Accessor functions called directly from the runtime system.
+  static Object* FunctionGetPrototype(Object* object, void*);
+  static Object* FunctionSetPrototype(JSObject* object, Object* value, void*);
+ private:
+  // Accessor functions only used through the descriptor.
+  static Object* FunctionGetLength(Object* object, void*);
+  static Object* FunctionGetName(Object* object, void*);
+  static Object* FunctionGetArguments(Object* object, void*);
+  static Object* FunctionGetCaller(Object* object, void*);
+  static Object* ArraySetLength(JSObject* object, Object* value, void*);
+  static Object* ArrayGetLength(Object* object, void*);
+  static Object* StringGetLength(Object* object, void*);
+  static Object* ScriptGetName(Object* object, void*);
+  static Object* ScriptGetSource(Object* object, void*);
+  static Object* ScriptGetLineOffset(Object* object, void*);
+  static Object* ScriptGetColumnOffset(Object* object, void*);
+  static Object* ScriptGetType(Object* object, void*);
+  static Object* ObjectGetPrototype(Object* receiver, void*);
+  static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
+
+  // Helper functions.
+  static Object* FlattenNumber(Object* value);
+  static Object* IllegalSetter(JSObject*, Object*, void*);
+  static Object* IllegalGetAccessor(Object* object, void*);
+  static Object* ReadOnlySetAccessor(JSObject*, Object* value, void*);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ACCESSORS_H_
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+namespace v8 { namespace internal {
+
+
+// Allocates |size| bytes with malloc; aborts the process through the
+// fatal-error handler instead of returning NULL on failure.
+void* Malloced::New(size_t size) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
+  void* result = malloc(size);
+  if (result == NULL) V8::FatalProcessOutOfMemory("Malloced operator new");
+  return result;
+}
+
+
+// Releases memory obtained from Malloced::New.
+void Malloced::Delete(void* p) {
+  free(p);
+}
+
+
+// Reports an out-of-memory condition through the V8 fatal-error machinery.
+void Malloced::FatalProcessOutOfMemory() {
+  V8::FatalProcessOutOfMemory("Out of memory");
+}
+
+
+#ifdef DEBUG
+
+// Dummy return value for the unreachable debug-only operators below.
+static void* invalid = static_cast<void*>(NULL);
+
+// Embedded instances live inside other objects or on the stack, so heap
+// allocating one is a bug; trap it in debug builds.
+void* Embedded::operator new(size_t size) {
+  UNREACHABLE();
+  return invalid;
+}
+
+
+void Embedded::operator delete(void* p) {
+  UNREACHABLE();
+}
+
+
+// AllStatic classes only hold statics; instantiating one is a bug.
+void* AllStatic::operator new(size_t size) {
+  UNREACHABLE();
+  return invalid;
+}
+
+
+void AllStatic::operator delete(void* p) {
+  UNREACHABLE();
+}
+
+#endif
+
+
+// Duplicates a NUL-terminated C string.  Unlike strdup, the copy is
+// allocated with NewArray so allocation failure goes through the V8
+// fatal-error machinery instead of returning NULL.
+char* StrDup(const char* str) {
+  int length = strlen(str);
+  char* copy = NewArray<char>(length + 1);
+  // Copy length + 1 chars so the terminating '\0' comes along.
+  memcpy(copy, str, (length + 1) * kCharSize);
+  return copy;
+}
+
+
+// Depth of nested DISALLOW NativeAllocationChecker scopes (allocation.h).
+int NativeAllocationChecker::allocation_disallowed_ = 0;
+
+
+// Sentinel heads of the doubly-linked in-use and free lists; a list is
+// empty when the sentinel links to itself.
+PreallocatedStorage PreallocatedStorage::in_use_list_(0);
+PreallocatedStorage PreallocatedStorage::free_list_(0);
+bool PreallocatedStorage::preallocated_ = false;
+
+
+// Carves one big free chunk out of |size| bytes of C++ heap and links it
+// into the free list.  Asserts the free list is still empty, so this is
+// meant to run once, before New should serve preallocated space.
+void PreallocatedStorage::Init(size_t size) {
+  ASSERT(free_list_.next_ == &free_list_);
+  ASSERT(free_list_.previous_ == &free_list_);
+  PreallocatedStorage* free_chunk =
+      reinterpret_cast<PreallocatedStorage*>(new char[size]);
+  free_list_.next_ = free_list_.previous_ = free_chunk;
+  free_chunk->next_ = free_chunk->previous_ = &free_list_;
+  // The chunk header itself occupies the front of the buffer.
+  free_chunk->size_ = size - sizeof(PreallocatedStorage);
+  preallocated_ = true;
+}
+
+
+// Allocates |size| bytes from the preallocated area: exact fit first,
+// then first fit with a split.  Falls back to malloc-based allocation
+// when Init has not been called.
+void* PreallocatedStorage::New(size_t size) {
+  if (!preallocated_) {
+    return FreeStoreAllocationPolicy::New(size);
+  }
+  ASSERT(free_list_.next_ != &free_list_);
+  ASSERT(free_list_.previous_ != &free_list_);
+  // Round the request up to pointer alignment.
+  size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
+  // Search for exact fit.
+  for (PreallocatedStorage* storage = free_list_.next_;
+       storage != &free_list_;
+       storage = storage->next_) {
+    if (storage->size_ == size) {
+      storage->Unlink();
+      storage->LinkTo(&in_use_list_);
+      // The usable payload starts right after the chunk header.
+      return reinterpret_cast<void*>(storage + 1);
+    }
+  }
+  // Search for first fit.
+  for (PreallocatedStorage* storage = free_list_.next_;
+       storage != &free_list_;
+       storage = storage->next_) {
+    if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
+      storage->Unlink();
+      storage->LinkTo(&in_use_list_);
+      // Split: the tail beyond |size| becomes a new free chunk with its
+      // own header.
+      PreallocatedStorage* left_over =
+          reinterpret_cast<PreallocatedStorage*>(
+              reinterpret_cast<char*>(storage + 1) + size);
+      left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
+      ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
+             storage->size_);
+      storage->size_ = size;
+      left_over->LinkTo(&free_list_);
+      return reinterpret_cast<void*>(storage + 1);
+    }
+  }
+  // Allocation failure.
+  ASSERT(false);
+  return NULL;
+}
+
+
+// Returns a block obtained from New to the free list.  We don't attempt
+// to coalesce adjacent free chunks.
+void PreallocatedStorage::Delete(void* p) {
+  if (p == NULL) {
+    return;
+  }
+  if (!preallocated_) {
+    FreeStoreAllocationPolicy::Delete(p);
+    return;
+  }
+  // Step back from the payload to the chunk header.
+  PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
+  ASSERT(storage->next_->previous_ == storage);
+  ASSERT(storage->previous_->next_ == storage);
+  storage->Unlink();
+  storage->LinkTo(&free_list_);
+}
+
+
+// Inserts this chunk into the list headed by |other|, right after the
+// head.  The statement order matters: next_ of |other| is read before it
+// is overwritten.
+void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
+  next_ = other->next_;
+  other->next_->previous_ = this;
+  previous_ = other;
+  other->next_ = this;
+}
+
+
+// Removes this chunk from whichever list currently contains it.
+void PreallocatedStorage::Unlink() {
+  next_->previous_ = previous_;
+  previous_->next_ = next_;
+}
+
+
+// Constructs a chunk (or list sentinel) that is linked only to itself.
+PreallocatedStorage::PreallocatedStorage(size_t size)
+  : size_(size) {
+  previous_ = next_ = this;
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_H_
+#define V8_ALLOCATION_H_
+
+namespace v8 { namespace internal {
+
+
+// A class that controls whether allocation is allowed. This is for
+// the C++ heap only!
+//
+// Instantiate with DISALLOW to forbid C++-heap allocation for the
+// lifetime of the scope; scopes nest via a static depth counter, which
+// is only maintained in debug builds.
+class NativeAllocationChecker {
+ public:
+  typedef enum { ALLOW, DISALLOW } NativeAllocationAllowed;
+  explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed)
+      : allowed_(allowed) {
+#ifdef DEBUG
+    if (allowed == DISALLOW) {
+      allocation_disallowed_++;
+    }
+#endif
+  }
+  ~NativeAllocationChecker() {
+#ifdef DEBUG
+    if (allowed_ == DISALLOW) {
+      allocation_disallowed_--;
+    }
+#endif
+    ASSERT(allocation_disallowed_ >= 0);
+  }
+  // True when no DISALLOW scope is currently active.
+  static inline bool allocation_allowed() {
+    return allocation_disallowed_ == 0;
+  }
+ private:
+  // This static counter ensures that NativeAllocationCheckers can be nested.
+  static int allocation_disallowed_;
+  // This flag applies to this particular instance.
+  NativeAllocationAllowed allowed_;
+};
+
+
+// Superclass for classes managed with new & delete.  Allocation goes
+// through Malloced::New (allocation.cc), which asserts allocation is
+// allowed and aborts, rather than returning NULL, on failure.
+class Malloced {
+ public:
+  void* operator new(size_t size) { return New(size); }
+  void operator delete(void* p) { Delete(p); }
+
+  static void FatalProcessOutOfMemory();
+  static void* New(size_t size);
+  static void Delete(void* p);
+};
+
+
+// A macro is used for defining the base class used for embedded instances.
+// The reason is some compilers allocate a minimum of one word for the
+// superclass. The macro prevents the use of new & delete in debug mode.
+// In release mode we are not willing to pay this overhead.
+
+#ifdef DEBUG
+// Superclass for classes with instances allocated inside stack
+// activations or inside other objects.
+class Embedded {
+ public:
+  // Both operators trap with UNREACHABLE in debug builds (allocation.cc).
+  void* operator new(size_t size);
+  void operator delete(void* p);
+};
+#define BASE_EMBEDDED : public Embedded
+#else
+#define BASE_EMBEDDED
+#endif
+
+
+// Superclass for classes only using statics.
+class AllStatic {
+#ifdef DEBUG
+ public:
+  // Instantiation traps with UNREACHABLE in debug builds (allocation.cc).
+  void* operator new(size_t size);
+  void operator delete(void* p);
+#endif
+};
+
+
+// Allocates an array of |size| elements of T on the C++ heap.
+// NOTE(review): standard operator new[] throws std::bad_alloc rather than
+// returning NULL, so the NULL check assumes a non-throwing toolchain
+// configuration -- confirm against the build settings.
+template <typename T>
+static T* NewArray(int size) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
+  T* result = new T[size];
+  if (result == NULL) Malloced::FatalProcessOutOfMemory();
+  return result;
+}
+
+
+// Releases an array allocated with NewArray.
+template <typename T>
+static void DeleteArray(T* array) {
+  delete[] array;
+}
+
+
+// The normal strdup function uses malloc. This version of StrDup
+// uses new and calls the FatalProcessOutOfMemory handler if
+// allocation fails.
+char* StrDup(const char* str);
+
+
+// Allocation policy for allocating in the C free store using malloc
+// and free. Used as the default policy for lists.
+class FreeStoreAllocationPolicy {
+ public:
+  INLINE(static void* New(size_t size)) { return Malloced::New(size); }
+  INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
+};
+
+
+// Allocation policy for allocating in preallocated space.
+// Used as an allocation policy for ScopeInfo when generating
+// stack traces.
+//
+// Instances double as chunk headers and as the sentinels of the
+// doubly-linked free/in-use lists maintained in allocation.cc.
+class PreallocatedStorage : public AllStatic {
+ public:
+  explicit PreallocatedStorage(size_t size);
+  size_t size() { return size_; }
+  // Serves |size| bytes from the preallocated area; falls back to the
+  // free-store policy when Init has not been called.
+  static void* New(size_t size);
+  static void Delete(void* p);
+
+  // Preallocate a set number of bytes.
+  static void Init(size_t size);
+
+ private:
+  size_t size_;  // payload size of this chunk (excludes the header)
+  PreallocatedStorage* previous_;
+  PreallocatedStorage* next_;
+  static bool preallocated_;  // true once Init has run
+
+  static PreallocatedStorage in_use_list_;
+  static PreallocatedStorage free_list_;
+
+  void LinkTo(PreallocatedStorage* other);
+  void Unlink();
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_H_
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "platform.h"
+#include "serialize.h"
+#include "snapshot.h"
+
+
+namespace i = v8::internal;
+#define LOG_API(expr) LOG(ApiEntryCall(expr))
+
+
+namespace v8 {
+
+
+// Bails out of the current API function when the VM has been shut down;
+// |code| typically returns a default value to the caller.
+#define ON_BAILOUT(location, code) \
+  if (IsDeadCheck(location)) { \
+    code; \
+    UNREACHABLE(); \
+  }
+
+
+// Opens an exception scope: bumps the per-thread API call depth and
+// declares the has_pending_exception flag read by the matching
+// EXCEPTION_BAILOUT_CHECK.
+#define EXCEPTION_PREAMBLE() \
+  thread_local.IncrementCallDepth(); \
+  ASSERT(!i::Top::external_caught_exception()); \
+  bool has_pending_exception = false
+
+
+// Closes an exception scope: pops the call depth and, if an exception is
+// pending, handles out-of-memory, optionally reschedules the exception,
+// and returns |value| from the enclosing function.
+#define EXCEPTION_BAILOUT_CHECK(value) \
+  do { \
+    thread_local.DecrementCallDepth(); \
+    if (has_pending_exception) { \
+      if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
+        if (!thread_local.IgnoreOutOfMemory()) \
+          i::V8::FatalProcessOutOfMemory(NULL); \
+      } \
+      bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
+      i::Top::optional_reschedule_exception(call_depth_is_zero); \
+      return value; \
+    } \
+  } while (false)
+
+
+// --- D a t a   t h a t   i s   s p e c i f i c   t o   a   t h r e a d ---
+
+
+// Per-thread bookkeeping for handle scopes, entered contexts and the API
+// call depth.  NOTE(review): `thread_local` is a keyword in C++11; this
+// identifier predates that standard -- confirm the toolchain accepts it.
+static i::HandleScopeImplementer thread_local;
+
+
+// --- E x c e p t i o n  B e h a v i o r ---
+
+
+// Set once the VM has become unusable (out-of-memory or API failure).
+static bool has_shut_down = false;
+// User-installed fatal error handler; NULL means "use the default".
+static FatalErrorCallback exception_behavior = NULL;
+
+
+// Default fatal error handler: forwards to API_Fatal.
+static void DefaultFatalErrorHandler(const char* location,
+                                     const char* message) {
+  API_Fatal(location, message);
+}
+
+
+
+// Returns the installed fatal error handler, lazily falling back to the
+// default one when none has been set.
+static FatalErrorCallback& GetFatalErrorHandler() {
+  if (exception_behavior == NULL) {
+    exception_behavior = DefaultFatalErrorHandler;
+  }
+  return exception_behavior;
+}
+
+
+
+// When V8 cannot allocate memory FatalProcessOutOfMemory is called.
+// The default fatal error handler is called and execution is stopped.
+void i::V8::FatalProcessOutOfMemory(const char* location) {
+  has_shut_down = true;
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  callback(location, "Allocation failed - process out of memory");
+  // If the callback returns, we stop execution.
+  UNREACHABLE();
+}
+
+
+// Installs a user fatal error handler (see GetFatalErrorHandler).
+void V8::SetFatalErrorHandler(FatalErrorCallback that) {
+  exception_behavior = that;
+}
+
+
+// Reports an API misuse through the fatal error handler, marks the VM as
+// dead, and returns false so callers can propagate the failure.
+bool Utils::ReportApiFailure(const char* location, const char* message) {
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  callback(location, message);
+  has_shut_down = true;
+  return false;
+}
+
+
+// True when the VM has been shut down (see has_shut_down above).
+bool V8::IsDead() {
+  return has_shut_down;
+}
+
+
+// Returns |condition|; on failure also reports |message| as an API
+// failure at |location|.
+static inline bool ApiCheck(bool condition,
+                            const char* location,
+                            const char* message) {
+  return condition ? true : Utils::ReportApiFailure(location, message);
+}
+
+
+// Reports use of a dead VM; always returns true (i.e. "is dead").
+static bool ReportV8Dead(const char* location) {
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  callback(location, "V8 is no longer useable");
+  return true;
+}
+
+
+// Reports a read from an empty handle; always returns true.
+static bool ReportEmptyHandle(const char* location) {
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  callback(location, "Reading from empty handle");
+  return true;
+}
+
+
+/**
+ * IsDeadCheck checks that the vm is usable.  If, for instance, the vm has
+ * been out of memory at some point this check will fail.  It should be
+ * called on entry to all methods that touch anything in the heap, except
+ * destructors which you sometimes can't avoid calling after the vm has
+ * crashed.  Functions that call EnsureInitialized or ON_BAILOUT don't have
+ * to also call IsDeadCheck.  ON_BAILOUT has the advantage over
+ * EnsureInitialized that you can arrange to return if the VM is dead.
+ * This is needed to ensure that no VM heap allocations are attempted on a
+ * dead VM.  EnsureInitialized has the advantage over ON_BAILOUT that it
+ * actually initializes the VM if this has not yet been done.
+ */
+static inline bool IsDeadCheck(const char* location) {
+  return has_shut_down ? ReportV8Dead(location) : false;
+}
+
+
+// True (after reporting) when |obj| is an empty handle.
+static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
+  return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
+}
+
+
+// Overload for raw pointers: empty means NULL.
+static inline bool EmptyCheck(const char* location, v8::Data* obj) {
+  return (obj == 0) ? ReportEmptyHandle(location) : false;
+}
+
+// --- S t a t i c s ---
+
+
+static i::StringInputBuffer write_input_buffer;
+
+
+// Initializes V8 unless it is dead already; reports initialization
+// failure through ApiCheck.
+static void EnsureInitialized(const char* location) {
+  if (IsDeadCheck(location)) return;
+  ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
+}
+
+
+// The four canonical primitive handles below check for a dead VM, make
+// sure V8 is initialized, and wrap the corresponding heap value.
+v8::Handle<v8::Primitive> ImplementationUtilities::Undefined() {
+  if (IsDeadCheck("v8::Undefined()")) return v8::Handle<v8::Primitive>();
+  EnsureInitialized("v8::Undefined()");
+  return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
+}
+
+
+v8::Handle<v8::Primitive> ImplementationUtilities::Null() {
+  if (IsDeadCheck("v8::Null()")) return v8::Handle<v8::Primitive>();
+  EnsureInitialized("v8::Null()");
+  return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::null_value()));
+}
+
+
+v8::Handle<v8::Boolean> ImplementationUtilities::True() {
+  if (IsDeadCheck("v8::True()")) return v8::Handle<v8::Boolean>();
+  EnsureInitialized("v8::True()");
+  return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::true_value()));
+}
+
+
+v8::Handle<v8::Boolean> ImplementationUtilities::False() {
+  if (IsDeadCheck("v8::False()")) return v8::Handle<v8::Boolean>();
+  EnsureInitialized("v8::False()");
+  return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::false_value()));
+}
+
+
+// Parses |str| (of length |length|) as command-line style V8 flags.
+void V8::SetFlagsFromString(const char* str, int length) {
+  i::FlagList::SetFlagsFromString(str, length);
+}
+
+
+// Schedules |value| to be thrown as a JavaScript exception and returns
+// undefined as the result of the throwing expression.
+v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
+  if (IsDeadCheck("v8::ThrowException()")) return v8::Handle<Value>();
+  i::Top::ScheduleThrow(*Utils::OpenHandle(*value));
+  return v8::Undefined();
+}
+
+
+// Head of the singly-linked list of registered extensions.
+RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
+
+
+RegisteredExtension::RegisteredExtension(Extension* extension)
+    : extension_(extension), state_(UNVISITED) { }
+
+
+// Pushes |that| onto the front of the global extension list.
+void RegisteredExtension::Register(RegisteredExtension* that) {
+  that->next_ = RegisteredExtension::first_extension_;
+  RegisteredExtension::first_extension_ = that;
+}
+
+
+// Public entry point: wraps |that| and registers it.
+void RegisterExtension(Extension* that) {
+  RegisteredExtension* extension = new RegisteredExtension(that);
+  RegisteredExtension::Register(extension);
+}
+
+
+Extension::Extension(const char* name,
+                     const char* source,
+                     int dep_count,
+                     const char** deps)
+    : name_(name),
+      source_(source),
+      dep_count_(dep_count),
+      deps_(deps),
+      auto_enable_(false) { }
+
+
+// Public accessors for the canonical primitives: log the API call and
+// delegate to ImplementationUtilities.
+v8::Handle<Primitive> Undefined() {
+  LOG_API("Undefined");
+  return ImplementationUtilities::Undefined();
+}
+
+
+v8::Handle<Primitive> Null() {
+  LOG_API("Null");
+  return ImplementationUtilities::Null();
+}
+
+
+v8::Handle<Boolean> True() {
+  LOG_API("True");
+  return ImplementationUtilities::True();
+}
+
+
+v8::Handle<Boolean> False() {
+  LOG_API("False");
+  return ImplementationUtilities::False();
+}
+
+
+// NOTE(review): members are zero/NULL-initialized here; presumably zero
+// means "use V8's default" -- confirm against Heap::ConfigureHeap.
+ResourceConstraints::ResourceConstraints()
+  : max_young_space_size_(0),
+    max_old_space_size_(0),
+    stack_limit_(NULL) { }
+
+
+// Applies the given heap size constraints and, if set, the stack limit.
+// Returns false when the heap rejects the configuration.
+bool SetResourceConstraints(ResourceConstraints* constraints) {
+  bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(),
+                                       constraints->max_old_space_size());
+  if (!result) return false;
+  if (constraints->stack_limit() != NULL) {
+    uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
+    i::StackGuard::SetStackLimit(limit);
+  }
+  return true;
+}
+
+
+// Creates a global (persistent) handle for the object |obj| refers to.
+void** V8::GlobalizeReference(void** obj) {
+  LOG_API("Persistent::New");
+  if (IsDeadCheck("V8::Persistent::New")) return NULL;
+  i::Handle<i::Object> result =
+      i::GlobalHandles::Create(*reinterpret_cast<i::Object**>(obj));
+  return reinterpret_cast<void**>(result.location());
+}
+
+
+// Makes a global handle weak; |callback| is invoked with |parameters|
+// by the global-handle machinery.
+void V8::MakeWeak(void** object, void* parameters,
+                  WeakReferenceCallback callback) {
+  LOG_API("MakeWeak");
+  i::GlobalHandles::MakeWeak(reinterpret_cast<i::Object**>(object), parameters,
+                             callback);
+}
+
+
+// Reverts MakeWeak: the handle becomes strong again.
+void V8::ClearWeak(void** obj) {
+  LOG_API("ClearWeak");
+  i::GlobalHandles::ClearWeakness(reinterpret_cast<i::Object**>(obj));
+}
+
+
+bool V8::IsGlobalNearDeath(void** obj) {
+  LOG_API("IsGlobalNearDeath");
+  if (has_shut_down) return false;
+  return i::GlobalHandles::IsNearDeath(reinterpret_cast<i::Object**>(obj));
+}
+
+
+bool V8::IsGlobalWeak(void** obj) {
+  LOG_API("IsGlobalWeak");
+  if (has_shut_down) return false;
+  return i::GlobalHandles::IsWeak(reinterpret_cast<i::Object**>(obj));
+}
+
+
+// Destroys a global handle; a no-op after shutdown.
+void V8::DisposeGlobal(void** obj) {
+  LOG_API("DisposeGlobal");
+  if (has_shut_down) return;
+  i::GlobalHandles::Destroy(reinterpret_cast<i::Object**>(obj));
+}
+
+// --- H a n d l e s ---
+
+
+// Current handle scope state: extension count, next free slot, and limit
+// of the active handle block.  extensions starts at -1; CreateHandle
+// treats a negative count as "no HandleScope on the stack".
+HandleScope::Data HandleScope::current_ = { -1, NULL, NULL };
+
+
+// Number of handles allocated in all blocks of the current thread.
+int HandleScope::NumberOfHandles() {
+  int n = thread_local.Blocks()->length();
+  if (n == 0) return 0;
+  // All blocks but the last are full; the last is filled up to next.
+  return ((n - 1) * i::kHandleBlockSize) +
+         (current_.next - thread_local.Blocks()->last());
+}
+
+
+// Allocates one handle slot holding |value|, extending the current scope
+// with an additional handle block when the active block is full.
+void** v8::HandleScope::CreateHandle(void* value) {
+  void** result = current_.next;
+  if (result == current_.limit) {
+    // Make sure there's at least one scope on the stack and that the
+    // top of the scope stack isn't a barrier.
+    if (!ApiCheck(current_.extensions >= 0,
+                  "v8::HandleScope::CreateHandle()",
+                  "Cannot create a handle without a HandleScope")) {
+      return NULL;
+    }
+    // If there's more room in the last block, we use that. This is used
+    // for fast creation of scopes after scope barriers.
+    if (!thread_local.Blocks()->is_empty()) {
+      void** limit = &thread_local.Blocks()->last()[i::kHandleBlockSize];
+      if (current_.limit != limit) {
+        current_.limit = limit;
+      }
+    }
+
+    // If we still haven't found a slot for the handle, we extend the
+    // current handle scope by allocating a new handle block.
+    if (result == current_.limit) {
+      // If there's a spare block, use it for growing the current scope.
+      result = thread_local.GetSpareOrNewBlock();
+      // Add the extension to the global list of blocks, but count the
+      // extension as part of the current scope.
+      thread_local.Blocks()->Add(result);
+      current_.extensions++;
+      current_.limit = &result[i::kHandleBlockSize];
+    }
+  }
+
+  // Update the current next field, set the value in the created
+  // handle, and return the result.
+  ASSERT(result < current_.limit);
+  current_.next = result + 1;
+  *result = value;
+  return result;
+}
+
+
+// Enters this context: saves the current context and security context as
+// global handles on the per-thread stacks, then installs |env| as both.
+void Context::Enter() {
+  if (IsDeadCheck("v8::Context::Enter()")) return;
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+
+  thread_local.AddEnteredContext(i::GlobalHandles::Create(i::Top::context()));
+  i::Top::set_context(*env);
+
+  thread_local.AddSecurityContext(
+      i::GlobalHandles::Create(i::Top::security_context()));
+  i::Top::set_security_context(*env);
+}
+
+
+// Leaves the current context: restores the previously saved context and
+// security context and releases the global handles holding them.
+void Context::Exit() {
+  if (has_shut_down) return;
+
+  // Content of 'last_context' and 'last_security_context' could be NULL.
+  i::Handle<i::Object> last_context = thread_local.RemoveLastEnteredContext();
+  i::Top::set_context(static_cast<i::Context*>(*last_context));
+  i::GlobalHandles::Destroy(last_context.location());
+
+  i::Handle<i::Object> last_security_context =
+      thread_local.RemoveLastSecurityContext();
+  i::Top::set_security_context(
+      static_cast<i::Context*>(*last_security_context));
+  i::GlobalHandles::Destroy(last_security_context.location());
+}
+
+
+// Frees the handle blocks this scope added beyond its first one.
+void v8::HandleScope::DeleteExtensions() {
+  ASSERT(current_.extensions != 0);
+  thread_local.DeleteExtensions(current_.extensions);
+}
+
+
+#ifdef DEBUG
+// Overwrites the given handle range with the zap value so use-after-close
+// bugs fail fast in debug builds.
+void HandleScope::ZapRange(void** start, void** end) {
+  if (start == NULL) return;
+  for (void** p = start; p < end; p++) {
+    *p = reinterpret_cast<void*>(v8::internal::kHandleZapValue);
+  }
+}
+#endif
+
+
+// Closes this scope while keeping |value| alive: reads the value, pops
+// the scope state, then re-creates a handle for it in the previous scope.
+void** v8::HandleScope::RawClose(void** value) {
+  if (!ApiCheck(!is_closed_,
+                "v8::HandleScope::Close()",
+                "Local scope has already been closed")) {
+    return 0;
+  }
+  LOG_API("CloseHandleScope");
+
+  // Read the result before popping the handle block.
+  i::Object* result = reinterpret_cast<i::Object*>(*value);
+  is_closed_ = true;
+  RestorePreviousState();
+
+  // Allocate a new handle on the previous handle block.
+  i::Handle<i::Object> handle(result);
+  return reinterpret_cast<void**>(handle.location());
+}
+
+
+// --- N e a n d e r ---
+
+
+// A constructor cannot easily return an error value, therefore it is necessary
+// to check for a dead VM with ON_BAILOUT before constructing any Neander
+// objects. To remind you about this there is no HandleScope in the
+// NeanderObject constructor. When you add one to the site calling the
+// constructor you should check that you ensured the VM was not dead first.
+NeanderObject::NeanderObject(int size) {
+  EnsureInitialized("v8::Nowhere");
+  value_ = i::Factory::NewNeanderObject();
+  i::Handle<i::FixedArray> elements = i::Factory::NewFixedArray(size);
+  value_->set_elements(*elements);
+}
+
+
+// Capacity of the object's backing store.
+int NeanderObject::size() {
+  return i::FixedArray::cast(value_->elements())->length();
+}
+
+
+// A NeanderArray keeps its length in slot 0 and elements from slot 1 on.
+NeanderArray::NeanderArray() : obj_(2) {
+  obj_.set(0, i::Smi::FromInt(0));
+}
+
+
+int NeanderArray::length() {
+  return i::Smi::cast(obj_.get(0))->value();
+}
+
+
+// Element access; |offset| is the logical index (slot 0 holds the length).
+i::Object* NeanderArray::get(int offset) {
+  ASSERT(0 <= offset);
+  ASSERT(offset < length());
+  return obj_.get(offset + 1);
+}
+
+
+// This method cannot easily return an error value, therefore it is necessary
+// to check for a dead VM with ON_BAILOUT before calling it. To remind you
+// about this there is no HandleScope in this method. When you add one to the
+// site calling this method you should check that you ensured the VM was not
+// dead first.
+void NeanderArray::add(i::Handle<i::Object> value) {
+  int length = this->length();
+  int size = obj_.size();
+  if (length == size - 1) {
+    // Full: grow the backing store to twice the current capacity.
+    i::Handle<i::FixedArray> new_elms = i::Factory::NewFixedArray(2 * size);
+    for (int i = 0; i < length; i++)
+      new_elms->set(i + 1, get(i));
+    obj_.value()->set_elements(*new_elms);
+  }
+  obj_.set(length + 1, *value);
+  obj_.set(0, i::Smi::FromInt(length + 1));
+}
+
+
+// Silently ignores out-of-range indices.
+void NeanderArray::set(int index, i::Object* value) {
+  if (index < 0 || index >= this->length()) return;
+  obj_.set(index + 1, value);
+}
+
+
+// --- T e m p l a t e ---
+
+
+// Tags a TemplateInfo struct with its template type.
+static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
+  that->set_tag(i::Smi::FromInt(type));
+}
+
+
+// Records a (name, value, attributes) triple on the template's property
+// list, creating the list lazily on first use.
+void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
+                   v8::PropertyAttribute attribute) {
+  if (IsDeadCheck("v8::Template::SetProperty()")) return;
+  HandleScope scope;
+  i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
+  if (list->IsUndefined()) {
+    list = NeanderArray().value();
+    Utils::OpenHandle(this)->set_property_list(*list);
+  }
+  NeanderArray array(list);
+  array.add(Utils::OpenHandle(*name));
+  array.add(Utils::OpenHandle(*value));
+  array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
+}
+
+
+// --- F u n c t i o n T e m p l a t e ---
+static void InitializeFunctionTemplate(
+ i::Handle<i::FunctionTemplateInfo> info) {
+ info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
+ info->set_flag(0);
+}
+
+
+int FunctionTemplate::InternalFieldCount() {
+ if (IsDeadCheck("v8::FunctionTemplate::InternalFieldCount()")) {
+ return 0;
+ }
+ return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
+}
+
+
// Returns the template used for the prototype object, creating an empty
// ObjectTemplate lazily on first access and caching it on this template.
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
  if (IsDeadCheck("v8::FunctionTemplate::PrototypeTemplate()")) {
    return Local<ObjectTemplate>();
  }
  i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template());
  if (result->IsUndefined()) {
    result = Utils::OpenHandle(*ObjectTemplate::New());
    Utils::OpenHandle(this)->set_prototype_template(*result);
  }
  return Local<ObjectTemplate>(ToApi<ObjectTemplate>(result));
}
+
+
// Records `value` as the parent template this template inherits from.
void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
  if (IsDeadCheck("v8::FunctionTemplate::Inherit()")) return;
  Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
}
+
+
// Sets the number of internal fields instances of this template will have.
void FunctionTemplate::SetInternalFieldCount(int value) {
  if (IsDeadCheck("v8::FunctionTemplate::SetInternalFieldCount()")) return;
  // NOTE(review): the ApiCheck result is ignored here, so if ApiCheck
  // reports and returns (rather than aborting) an out-of-smi-range count
  // would still be stored below -- confirm ApiCheck's failure behavior.
  ApiCheck(i::Smi::IsValid(value),
           "v8::FunctionTemplate::SetInternalFieldCount()",
           "Invalid internal field count");
  Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
}
+
+
// To distinguish the function templates, so that we can find them in the
// function cache of the global context.  Within this file it is only ever
// incremented (in FunctionTemplate::New), so serial numbers are unique
// for the lifetime of the process.
static int next_serial_number = 0;
+
+
// Creates a new function template.  If `callback` is non-null it becomes
// the call handler (with `data` defaulting to undefined); an optional
// `signature` restricts the receivers the resulting function accepts.
Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
    v8::Handle<Value> data, v8::Handle<Signature> signature) {
  EnsureInitialized("v8::FunctionTemplate::New()");
  LOG_API("FunctionTemplate::New");
  i::Handle<i::Struct> struct_obj =
      i::Factory::NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
  i::Handle<i::FunctionTemplateInfo> obj =
      i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
  InitializeFunctionTemplate(obj);
  // Serial numbers key the per-context function cache.
  obj->set_serial_number(i::Smi::FromInt(next_serial_number++));
  obj->set_internal_field_count(i::Smi::FromInt(0));
  if (callback != 0) {
    if (data.IsEmpty()) data = v8::Undefined();
    Utils::ToLocal(obj)->SetCallHandler(callback, data);
  }
  obj->set_undetectable(false);
  obj->set_needs_access_check(false);

  if (!signature.IsEmpty())
    obj->set_signature(*Utils::OpenHandle(*signature));
  return Utils::ToLocal(obj);
}
+
+
// Creates a signature describing the legal receiver and argument
// templates for a function.  Empty handles in `argv` leave the
// corresponding argument slot unconstrained.
Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
      int argc, Handle<FunctionTemplate> argv[]) {
  EnsureInitialized("v8::Signature::New()");
  LOG_API("Signature::New");
  i::Handle<i::Struct> struct_obj =
      i::Factory::NewStruct(i::SIGNATURE_INFO_TYPE);
  i::Handle<i::SignatureInfo> obj =
      i::Handle<i::SignatureInfo>::cast(struct_obj);
  if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
  if (argc > 0) {
    i::Handle<i::FixedArray> args = i::Factory::NewFixedArray(argc);
    for (int i = 0; i < argc; i++) {
      if (!argv[i].IsEmpty())
        args->set(i, *Utils::OpenHandle(*argv[i]));
    }
    obj->set_args(*args);
  }
  return Utils::ToLocal(obj);
}
+
+
+Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
+ Handle<FunctionTemplate> types[1] = { type };
+ return TypeSwitch::New(1, types);
+}
+
+
+Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
+ EnsureInitialized("v8::TypeSwitch::New()");
+ LOG_API("TypeSwitch::New");
+ i::Handle<i::FixedArray> vector = i::Factory::NewFixedArray(argc);
+ for (int i = 0; i < argc; i++)
+ vector->set(i, *Utils::OpenHandle(*types[i]));
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::TYPE_SWITCH_INFO_TYPE);
+ i::Handle<i::TypeSwitchInfo> obj =
+ i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
+ obj->set_types(*vector);
+ return Utils::ToLocal(obj);
+}
+
+
// Returns the 1-based index of the first type in this switch that `value`
// is an instance of, or 0 when none match.
int TypeSwitch::match(v8::Handle<Value> value) {
  LOG_API("TypeSwitch::match");
  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
  i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
  // NOTE(review): `types` is a raw pointer held across IsInstanceOf calls;
  // this assumes those calls cannot trigger a GC that moves the array --
  // confirm.
  i::FixedArray* types = i::FixedArray::cast(info->types());
  for (int i = 0; i < types->length(); i++) {
    if (obj->IsInstanceOf(i::FunctionTemplateInfo::cast(types->get(i))))
      return i + 1;
  }
  return 0;
}
+
+
// Installs `callback` as the handler invoked when functions created from
// this template are called; `data` (undefined when empty) is passed along.
void FunctionTemplate::SetCallHandler(InvocationCallback callback,
                                      v8::Handle<Value> data) {
  if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
  HandleScope scope;
  i::Handle<i::Struct> struct_obj =
      i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
  i::Handle<i::CallHandlerInfo> obj =
      i::Handle<i::CallHandlerInfo>::cast(struct_obj);
  obj->set_callback(*FromCData(callback));
  if (data.IsEmpty()) data = v8::Undefined();
  obj->set_data(*Utils::OpenHandle(*data));
  Utils::OpenHandle(this)->set_call_code(*obj);
}
+
+
// Stores `handler` as the lookup callback for this template.
void FunctionTemplate::SetLookupHandler(LookupCallback handler) {
  if (IsDeadCheck("v8::FunctionTemplate::SetLookupHandler()")) return;
  HandleScope scope;
  Utils::OpenHandle(this)->set_lookup_callback(*FromCData(handler));
}
+
+
// Appends an accessor (getter/setter pair with access-control settings and
// property attributes) to this template's property_accessors list; the
// accessors are installed on instances when they are created.  `setter`
// may be null for read-only accessors; `data` defaults to undefined.
void FunctionTemplate::AddInstancePropertyAccessor(
      v8::Handle<String> name,
      AccessorGetter getter,
      AccessorSetter setter,
      v8::Handle<Value> data,
      v8::AccessControl settings,
      v8::PropertyAttribute attributes) {
  if (IsDeadCheck("v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
    return;
  }
  HandleScope scope;
  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
  ASSERT(getter != NULL);
  obj->set_getter(*FromCData(getter));
  obj->set_setter(*FromCData(setter));
  if (data.IsEmpty()) data = v8::Undefined();
  obj->set_data(*Utils::OpenHandle(*data));
  obj->set_name(*Utils::OpenHandle(*name));
  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));

  // Lazily create the accessor list on first use.
  i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
  if (list->IsUndefined()) {
    list = NeanderArray().value();
    Utils::OpenHandle(this)->set_property_accessors(*list);
  }
  NeanderArray array(list);
  array.add(obj);
}
+
+
// Returns the template describing instances created by this function
// template, creating and caching it lazily on first access.
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
  if (IsDeadCheck("v8::FunctionTemplate::InstanceTemplate()")
      || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
    return Local<ObjectTemplate>();
  if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
    // The new ObjectTemplate points back at this template as constructor.
    Local<ObjectTemplate> templ =
        ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
    Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
  }
  i::Handle<i::ObjectTemplateInfo> result(i::ObjectTemplateInfo::cast(
      Utils::OpenHandle(this)->instance_template()));
  return Utils::ToLocal(result);
}
+
+
+// --- O b j e c t T e m p l a t e ---
+
+
// Creates an object template with no constructor template attached.
Local<ObjectTemplate> ObjectTemplate::New() {
  return New(Local<FunctionTemplate>());
}
+
+
// Creates an object template, optionally bound to the function template
// that acts as its constructor.
Local<ObjectTemplate> ObjectTemplate::New(
      v8::Handle<FunctionTemplate> constructor) {
  if (IsDeadCheck("v8::ObjectTemplate::New()")) return Local<ObjectTemplate>();
  EnsureInitialized("v8::ObjectTemplate::New()");
  LOG_API("ObjectTemplate::New");
  i::Handle<i::Struct> struct_obj =
      i::Factory::NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
  i::Handle<i::ObjectTemplateInfo> obj =
      i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
  InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
  if (!constructor.IsEmpty())
    obj->set_constructor(*Utils::OpenHandle(*constructor));
  return Utils::ToLocal(obj);
}
+
+
// Ensure that the object template has a constructor.  If no
// constructor is available we create one, wire its instance template
// back to `object_template`, and store it as the constructor.
static void EnsureConstructor(ObjectTemplate* object_template) {
  if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
    Local<FunctionTemplate> templ = FunctionTemplate::New();
    i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
    constructor->set_instance_template(*Utils::OpenHandle(object_template));
    Utils::OpenHandle(object_template)->set_constructor(*constructor);
  }
}
+
+
+void ObjectTemplate::SetAccessor(v8::Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter,
+ v8::Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attribute) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetAccessor()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->AddInstancePropertyAccessor(name,
+ getter,
+ setter,
+ data,
+ settings,
+ attribute);
+}
+
+
+void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
+}
+
+
+void ObjectTemplate::MarkAsUndetectable() {
+ if (IsDeadCheck("v8::ObjectTemplate::MarkAsUndetectable()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ cons->set_undetectable(true);
+}
+
+
// Installs security callbacks that are consulted before named/indexed
// property access on instances; also marks the constructor as requiring
// access checks.  `data` defaults to undefined.
void ObjectTemplate::SetAccessCheckCallbacks(
      NamedSecurityCallback named_callback,
      IndexedSecurityCallback indexed_callback,
      Handle<Value> data) {
  if (IsDeadCheck("v8::ObjectTemplate::SetAccessCheckCallbacks()")) return;
  HandleScope scope;
  EnsureConstructor(this);

  i::Handle<i::Struct> struct_info =
      i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
  i::Handle<i::AccessCheckInfo> info =
      i::Handle<i::AccessCheckInfo>::cast(struct_info);
  info->set_named_callback(*FromCData(named_callback));
  info->set_indexed_callback(*FromCData(indexed_callback));
  if (data.IsEmpty()) data = v8::Undefined();
  info->set_data(*Utils::OpenHandle(*data));

  i::FunctionTemplateInfo* constructor =
      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
  i::Handle<i::FunctionTemplateInfo> cons(constructor);
  cons->set_needs_access_check(true);
  cons->set_access_check_info(*info);
}
+
+
+void ObjectTemplate::SetIndexedPropertyHandler(
+ IndexedPropertyGetter getter,
+ IndexedPropertySetter setter,
+ IndexedPropertyQuery query,
+ IndexedPropertyDeleter remover,
+ IndexedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetIndexedPropertyHandler()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
+}
+
+
+void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetCallAsFunctionHandler()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data);
+}
+
+
// Sets the class name used for instances of this template.
void FunctionTemplate::SetClassName(Handle<String> name) {
  if (IsDeadCheck("v8::FunctionTemplate::SetClassName()")) return;
  Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
}
+
+
// Sets whether instances of this template act as hidden prototypes.
void FunctionTemplate::SetHiddenPrototype(bool value) {
  if (IsDeadCheck("v8::FunctionTemplate::SetHiddenPrototype()")) return;
  Utils::OpenHandle(this)->set_hidden_prototype(value);
}
+
+
// Builds an InterceptorInfo from the given named-property callbacks and
// stores it on this template.  Null callbacks leave the corresponding
// interceptor fields unset; `data` defaults to undefined.
void FunctionTemplate::SetNamedInstancePropertyHandler(
      NamedPropertyGetter getter,
      NamedPropertySetter setter,
      NamedPropertyQuery query,
      NamedPropertyDeleter remover,
      NamedPropertyEnumerator enumerator,
      Handle<Value> data) {
  if (IsDeadCheck("v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
    return;
  }
  HandleScope scope;
  i::Handle<i::Struct> struct_obj =
      i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
  i::Handle<i::InterceptorInfo> obj =
      i::Handle<i::InterceptorInfo>::cast(struct_obj);
  if (getter != 0) obj->set_getter(*FromCData(getter));
  if (setter != 0) obj->set_setter(*FromCData(setter));
  if (query != 0) obj->set_query(*FromCData(query));
  if (remover != 0) obj->set_deleter(*FromCData(remover));
  if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
  if (data.IsEmpty()) data = v8::Undefined();
  obj->set_data(*Utils::OpenHandle(*data));
  Utils::OpenHandle(this)->set_named_property_handler(*obj);
}
+
+
// Builds an InterceptorInfo from the given indexed-property callbacks and
// stores it on this template.  Null callbacks leave the corresponding
// interceptor fields unset; `data` defaults to undefined.
void FunctionTemplate::SetIndexedInstancePropertyHandler(
      IndexedPropertyGetter getter,
      IndexedPropertySetter setter,
      IndexedPropertyQuery query,
      IndexedPropertyDeleter remover,
      IndexedPropertyEnumerator enumerator,
      Handle<Value> data) {
  if (IsDeadCheck(
        "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
    return;
  }
  HandleScope scope;
  i::Handle<i::Struct> struct_obj =
      i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
  i::Handle<i::InterceptorInfo> obj =
      i::Handle<i::InterceptorInfo>::cast(struct_obj);
  if (getter != 0) obj->set_getter(*FromCData(getter));
  if (setter != 0) obj->set_setter(*FromCData(setter));
  if (query != 0) obj->set_query(*FromCData(query));
  if (remover != 0) obj->set_deleter(*FromCData(remover));
  if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
  if (data.IsEmpty()) data = v8::Undefined();
  obj->set_data(*Utils::OpenHandle(*data));
  Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
}
+
+
// Stores a CallHandlerInfo that lets instances of this template be called
// as functions; `data` defaults to undefined.
void FunctionTemplate::SetInstanceCallAsFunctionHandler(
      InvocationCallback callback,
      Handle<Value> data) {
  if (IsDeadCheck("v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
    return;
  }
  HandleScope scope;
  i::Handle<i::Struct> struct_obj =
      i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
  i::Handle<i::CallHandlerInfo> obj =
      i::Handle<i::CallHandlerInfo>::cast(struct_obj);
  obj->set_callback(*FromCData(callback));
  if (data.IsEmpty()) data = v8::Undefined();
  obj->set_data(*Utils::OpenHandle(*data));
  Utils::OpenHandle(this)->set_instance_call_handler(*obj);
}
+
+
// Pre-parses UTF-8 source and returns data that can speed up a later
// compilation of the same script.  The caller owns the returned object.
ScriptData* ScriptData::PreCompile(const char* input, int length) {
  unibrow::Utf8InputBuffer<> buf(input, length);
  return i::PreParse(&buf, NULL);
}
+
+
// Wraps previously produced pre-parse data; the caller owns the result.
ScriptData* ScriptData::New(unsigned* data, int length) {
  return new i::ScriptDataImpl(i::Vector<unsigned>(data, length));
}
+
+
+// --- S c r i p t ---
+
+
// Compiles `source` into a Script bound to the current global context.
// `origin` (optional) supplies resource name and line/column offsets for
// error reporting; `script_data` (optional) supplies pre-parse data.
// Returns an empty handle if compilation throws.
Local<Script> Script::Compile(v8::Handle<String> source,
                              v8::ScriptOrigin* origin,
                              v8::ScriptData* script_data) {
  ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
  LOG_API("Script::Compile");
  i::Handle<i::String> str = Utils::OpenHandle(*source);
  i::Handle<i::String> name_obj;
  int line_offset = 0;
  int column_offset = 0;
  if (origin != NULL) {
    if (!origin->ResourceName().IsEmpty()) {
      name_obj = Utils::OpenHandle(*origin->ResourceName());
    }
    if (!origin->ResourceLineOffset().IsEmpty()) {
      line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
    }
    if (!origin->ResourceColumnOffset().IsEmpty()) {
      column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
    }
  }
  EXCEPTION_PREAMBLE();
  i::ScriptDataImpl* pre_data = static_cast<i::ScriptDataImpl*>(script_data);
  // We assert that the pre-data is sane, even though we can actually
  // handle it if it turns out not to be in release mode.
  ASSERT(pre_data == NULL || pre_data->SanityCheck());
  // If the pre-data isn't sane we simply ignore it
  if (pre_data != NULL && !pre_data->SanityCheck())
    pre_data = NULL;
  i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
                                                              name_obj,
                                                              line_offset,
                                                              column_offset,
                                                              NULL,
                                                              pre_data);
  has_pending_exception = boilerplate.is_null();
  EXCEPTION_BAILOUT_CHECK(Local<Script>());
  // Instantiate the shared boilerplate in the current global context.
  i::Handle<i::JSFunction> result =
      i::Factory::NewFunctionFromBoilerplate(boilerplate,
                                             i::Top::global_context());
  return Local<Script>(ToApi<Script>(result));
}
+
+
// Runs the script in the current context and returns its result, or an
// empty handle if it throws.
Local<Value> Script::Run() {
  ON_BAILOUT("v8::Script::Run()", return Local<Value>());
  LOG_API("Script::Run");
  i::Object* raw_result = NULL;
  {
    // The inner HandleScope keeps intermediate handles local; the raw
    // result pointer is deliberately carried out and rewrapped below so
    // the returned handle outlives this scope.
    HandleScope scope;
    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
    EXCEPTION_PREAMBLE();
    i::Handle<i::Object> global(i::Top::context()->global());
    i::Handle<i::Object> result =
        i::Execution::Call(fun, global, 0, NULL, &has_pending_exception);
    EXCEPTION_BAILOUT_CHECK(Local<Value>());
    raw_result = *result;
  }
  i::Handle<i::Object> result(raw_result);
  return Utils::ToLocal(result);
}
+
+
+// --- E x c e p t i o n s ---
+
+
// Registers this handler on top of the thread's try/catch chain.  The
// hole value in exception_ means "no exception caught yet".
v8::TryCatch::TryCatch()
    : next_(i::Top::try_catch_handler()),
      exception_(i::Heap::the_hole_value()),
      is_verbose_(false) {
  i::Top::RegisterTryCatchHandler(this);
}
+
+
// Unlinks this handler from the thread's try/catch chain.
v8::TryCatch::~TryCatch() {
  i::Top::UnregisterTryCatchHandler(this);
}
+
+
// True once an exception has been stored here (exception_ is no longer
// the hole sentinel set by the constructor/Reset).
bool v8::TryCatch::HasCaught() {
  return !reinterpret_cast<i::Object*>(exception_)->IsTheHole();
}
+
+
// Returns the caught exception, or an empty handle if none was caught.
v8::Local<Value> v8::TryCatch::Exception() {
  if (HasCaught()) {
    // Check for out of memory exception.
    // NOTE(review): no out-of-memory check is actually performed here;
    // the comment above appears stale or anticipates future code.
    i::Object* exception = reinterpret_cast<i::Object*>(exception_);
    return v8::Utils::ToLocal(i::Handle<i::Object>(exception));
  } else {
    return v8::Local<Value>();
  }
}
+
+
// Clears the caught exception so HasCaught() reports false again.
void v8::TryCatch::Reset() {
  exception_ = i::Heap::the_hole_value();
}
+
+
// Controls whether caught exceptions are also reported verbosely.
void v8::TryCatch::SetVerbose(bool value) {
  is_verbose_ = value;
}
+
+
+// --- M e s s a g e ---
+
+
// Returns the formatted message text for this message object.
Local<String> Message::Get() {
  ON_BAILOUT("v8::Message::Get()", return Local<String>());
  HandleScope scope;
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj);
  Local<String> result = Utils::ToLocal(raw_result);
  return scope.Close(result);
}
+
+
// Returns the resource name of the script that produced this message, or
// an empty handle when the script has no string name.
v8::Handle<String> Message::GetScriptResourceName() {
  if (IsDeadCheck("v8::Message::GetScriptResourceName()")) {
    return Local<String>();
  }
  HandleScope scope;
  i::Handle<i::JSObject> obj =
      i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
  // Return this.script.name.
  i::Handle<i::JSValue> script =
      i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
  i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name());
  if (!resource_name->IsString()) {
    return Local<String>();
  }
  Local<String> result =
      Utils::ToLocal(i::Handle<i::String>::cast(resource_name));
  return scope.Close(result);
}
+
+
// TODO(1240903): Remove this when no longer used in WebKit V8 bindings.
// Legacy alias: returns the script resource name, or undefined when the
// script has no string name.
Handle<Value> Message::GetSourceData() {
  Handle<String> data = GetScriptResourceName();
  if (data.IsEmpty()) return v8::Undefined();
  return data;
}
+
// Looks up function `name` on the builtins object and calls it with the
// given receiver and arguments, propagating any pending exception through
// `has_pending_exception`.
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                               i::Handle<i::Object> recv,
                                               int argc,
                                               i::Object** argv[],
                                               bool* has_pending_exception) {
  i::Handle<i::String> fmt_str = i::Factory::LookupAsciiSymbol(name);
  i::Object* object_fun = i::Top::builtins()->GetProperty(*fmt_str);
  i::Handle<i::JSFunction> fun =
      i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
  i::Handle<i::Object> value =
      i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
  return value;
}
+
+
// Convenience overload: calls builtin `name` with the builtins object as
// receiver and `data` as the single argument.
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                               i::Handle<i::Object> data,
                                               bool* has_pending_exception) {
  i::Object** argv[1] = { data.location() };
  return CallV8HeapFunction(name,
                            i::Top::builtins(),
                            1,
                            argv,
                            has_pending_exception);
}
+
+
// Returns the line number this message refers to.
int Message::GetLineNumber() {
  ON_BAILOUT("v8::Message::GetLineNumber()", return -1);
  HandleScope scope;
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
                                                   Utils::OpenHandle(this),
                                                   &has_pending_exception);
  // NOTE(review): the bailout above returns -1 but this exception path
  // returns 0 -- confirm whether the inconsistency is intentional.
  EXCEPTION_BAILOUT_CHECK(0);
  return static_cast<int>(result->Number());
}
+
+
// Returns the text of the source line this message refers to.
Local<Value> Message::GetSourceLine() {
  ON_BAILOUT("v8::Message::GetSourceLine()", return Local<Value>());
  HandleScope scope;
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
                                                   Utils::OpenHandle(this),
                                                   &has_pending_exception);
  EXCEPTION_BAILOUT_CHECK(Local<v8::Value>());
  return scope.Close(Utils::ToLocal(result));
}
+
+
+char* Message::GetUnderline(char* source_line, char underline_char) {
+ if (IsDeadCheck("v8::Message::GetUnderline()")) return 0;
+ HandleScope scope;
+
+ i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
+ int start_pos = static_cast<int>(GetProperty(data_obj, "startPos")->Number());
+ int end_pos = static_cast<int>(GetProperty(data_obj, "endPos")->Number());
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
+ "GetPositionInLine",
+ data_obj,
+ &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(0);
+ int start_col = static_cast<int>(start_col_obj->Number());
+ int end_col = start_col + (end_pos - start_pos);
+
+ // Any tabs before or between the selected columns have to be
+ // expanded into spaces. We assume that a tab character advances
+ // the cursor up until the next 8-character boundary and at least
+ // one character.
+ int real_start_col = 0;
+ for (int i = 0; i < start_col; i++) {
+ real_start_col++;
+ if (source_line[i] == '\t') {
+ real_start_col++;
+ while (real_start_col % 8 != 0)
+ real_start_col++;
+ }
+ }
+ int real_end_col = real_start_col;
+ for (int i = start_col; i < end_col; i++) {
+ real_end_col++;
+ if (source_line[i] == '\t') {
+ while (real_end_col % 8 != 0)
+ real_end_col++;
+ }
+ }
+ char* result = i::NewArray<char>(real_end_col + 1);
+ for (int i = 0; i < real_start_col; i++)
+ result[i] = ' ';
+ for (int i = real_start_col; i < real_end_col; i++)
+ result[i] = underline_char;
+ result[real_end_col] = '\0';
+ return result;
+}
+
+
// Prints the current JavaScript stack trace to `out`.
void Message::PrintCurrentStackTrace(FILE* out) {
  if (IsDeadCheck("v8::Message::PrintCurrentStackTrace()")) return;
  i::Top::PrintCurrentStackTrace(out);
}
+
+
+// --- D a t a ---
+
+bool Value::IsUndefined() {
+ if (IsDeadCheck("v8::Value::IsUndefined()")) return false;
+ return Utils::OpenHandle(this)->IsUndefined();
+}
+
+
+bool Value::IsNull() {
+ if (IsDeadCheck("v8::Value::IsNull()")) return false;
+ return Utils::OpenHandle(this)->IsNull();
+}
+
+
+bool Value::IsTrue() {
+ if (IsDeadCheck("v8::Value::IsTrue()")) return false;
+ return Utils::OpenHandle(this)->IsTrue();
+}
+
+
+bool Value::IsFalse() {
+ if (IsDeadCheck("v8::Value::IsFalse()")) return false;
+ return Utils::OpenHandle(this)->IsFalse();
+}
+
+
+bool Value::IsFunction() {
+ if (IsDeadCheck("v8::Value::IsFunction()")) return false;
+ return Utils::OpenHandle(this)->IsJSFunction();
+}
+
+
+bool Value::IsString() {
+ if (IsDeadCheck("v8::Value::IsString()")) return false;
+ return Utils::OpenHandle(this)->IsString();
+}
+
+
+bool Value::IsArray() {
+ if (IsDeadCheck("v8::Value::IsArray()")) return false;
+ return Utils::OpenHandle(this)->IsJSArray();
+}
+
+
+bool Value::IsObject() {
+ if (IsDeadCheck("v8::Value::IsObject()")) return false;
+ return Utils::OpenHandle(this)->IsJSObject();
+}
+
+
+bool Value::IsNumber() {
+ if (IsDeadCheck("v8::Value::IsNumber()")) return false;
+ return Utils::OpenHandle(this)->IsNumber();
+}
+
+
+bool Value::IsBoolean() {
+ if (IsDeadCheck("v8::Value::IsBoolean()")) return false;
+ return Utils::OpenHandle(this)->IsBoolean();
+}
+
+
+bool Value::IsExternal() {
+ if (IsDeadCheck("v8::Value::IsExternal()")) return false;
+ return Utils::OpenHandle(this)->IsProxy();
+}
+
+
// True if this value is a number that round-trips through int32.
bool Value::IsInt32() {
  if (IsDeadCheck("v8::Value::IsInt32()")) return false;
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  if (obj->IsSmi()) return true;
  if (obj->IsNumber()) {
    double value = obj->Number();
    // NOTE(review): -0.0 round-trips through this conversion and is thus
    // reported as an int32 -- confirm that is intended.
    return i::FastI2D(i::FastD2I(value)) == value;
  }
  return false;
}
+
+
+Local<String> Value::ToString() {
+ if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
+ LOG_API("ToString");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> str;
+ if (obj->IsString()) {
+ str = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ str = i::Execution::ToString(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<String>());
+ }
+ return Local<String>(ToApi<String>(str));
+}
+
+
+Local<String> Value::ToDetailString() {
+ if (IsDeadCheck("v8::Value::ToDetailString()")) return Local<String>();
+ LOG_API("ToDetailString");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> str;
+ if (obj->IsString()) {
+ str = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ str = i::Execution::ToDetailString(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<String>());
+ }
+ return Local<String>(ToApi<String>(str));
+}
+
+
+Local<v8::Object> Value::ToObject() {
+ if (IsDeadCheck("v8::Value::ToObject()")) return Local<v8::Object>();
+ LOG_API("ToObject");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> val;
+ if (obj->IsJSObject()) {
+ val = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ val = i::Execution::ToObject(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+ }
+ return Local<v8::Object>(ToApi<Object>(val));
+}
+
+
+Local<Boolean> Value::ToBoolean() {
+ if (IsDeadCheck("v8::Value::ToBoolean()")) return Local<Boolean>();
+ LOG_API("ToBoolean");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> val =
+ obj->IsBoolean() ? obj : i::Execution::ToBoolean(obj);
+ return Local<Boolean>(ToApi<Boolean>(val));
+}
+
+
+Local<Number> Value::ToNumber() {
+ if (IsDeadCheck("v8::Value::ToNumber()")) return Local<Number>();
+ LOG_API("ToNumber");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsNumber()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToNumber(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Number>());
+ }
+ return Local<Number>(ToApi<Number>(num));
+}
+
+
+Local<Integer> Value::ToInteger() {
+ if (IsDeadCheck("v8::Value::ToInteger()")) return Local<Integer>();
+ LOG_API("ToInteger");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsSmi()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToInteger(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Integer>());
+ }
+ return Local<Integer>(ToApi<Integer>(num));
+}
+
+
// Downcasts `that` to External after checking it wraps a proxy object.
External* External::Cast(v8::Value* that) {
  if (IsDeadCheck("v8::External::Cast()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsProxy(),
           "v8::External::Cast()",
           "Could not convert to external");
  return static_cast<External*>(that);
}
+
+
// Downcasts `that` to Object after checking it is a JS object.
v8::Object* v8::Object::Cast(Value* that) {
  if (IsDeadCheck("v8::Object::Cast()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsJSObject(),
           "v8::Object::Cast()",
           "Could not convert to object");
  return static_cast<v8::Object*>(that);
}
+
+
// Downcasts `that` to Function after checking it is a JS function.
v8::Function* v8::Function::Cast(Value* that) {
  if (IsDeadCheck("v8::Function::Cast()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsJSFunction(),
           "v8::Function::Cast()",
           "Could not convert to function");
  return static_cast<v8::Function*>(that);
}
+
+
// Downcasts `that` to String after checking it is a string.
v8::String* v8::String::Cast(v8::Value* that) {
  if (IsDeadCheck("v8::String::Cast()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsString(),
           "v8::String::Cast()",
           "Could not convert to string");
  return static_cast<v8::String*>(that);
}
+
+
// Downcasts `that` to Number after checking it is a number.
v8::Number* v8::Number::Cast(v8::Value* that) {
  if (IsDeadCheck("v8::Number::Cast()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsNumber(),
           "v8::Number::Cast()",
           "Could not convert to number");
  return static_cast<v8::Number*>(that);
}
+
+
// Downcasts `that` to Integer.  Note: only IsNumber is checked (no
// integer-ness test), mirroring Number::Cast.
v8::Integer* v8::Integer::Cast(v8::Value* that) {
  if (IsDeadCheck("v8::Integer::Cast()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsNumber(),
           "v8::Integer::Cast()",
           "Could not convert to number");
  return static_cast<v8::Integer*>(that);
}
+
+
// Downcasts `that` to Array after checking it is a JS array.
v8::Array* v8::Array::Cast(Value* that) {
  if (IsDeadCheck("v8::Array::Cast()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsJSArray(),
           "v8::Array::Cast()",
           "Could not convert to array");
  return static_cast<v8::Array*>(that);
}
+
+
+bool Value::BooleanValue() {
+ if (IsDeadCheck("v8::Value::BooleanValue()")) return false;
+ LOG_API("BooleanValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> value =
+ obj->IsBoolean() ? obj : i::Execution::ToBoolean(obj);
+ return value->IsTrue();
+}
+
+
+double Value::NumberValue() {
+ if (IsDeadCheck("v8::Value::NumberValue()")) return i::OS::nan_value();
+ LOG_API("NumberValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsNumber()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToNumber(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(i::OS::nan_value());
+ }
+ return num->Number();
+}
+
+
+int64_t Value::IntegerValue() {
+ if (IsDeadCheck("v8::Value::IntegerValue()")) return 0;
+ LOG_API("IntegerValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsNumber()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToInteger(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(0);
+ }
+ if (num->IsSmi()) {
+ return i::Smi::cast(*num)->value();
+ } else {
+ return static_cast<int64_t>(num->Number());
+ }
+}
+
+
+Local<Int32> Value::ToInt32() {
+ if (IsDeadCheck("v8::Value::ToInt32()")) return Local<Int32>();
+ LOG_API("ToInt32");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsSmi()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToInt32(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Int32>());
+ }
+ return Local<Int32>(ToApi<Int32>(num));
+}
+
+
+Local<Uint32> Value::ToUint32() {
+ if (IsDeadCheck("v8::Value::ToUint32()")) return Local<Uint32>();
+ LOG_API("ToUInt32");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsSmi()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToUint32(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
+ }
+ return Local<Uint32>(ToApi<Uint32>(num));
+}
+
+
// Returns this value as an array index, or an empty handle when it does
// not denote one (negative smi, non-index string, or conversion throw).
Local<Uint32> Value::ToArrayIndex() {
  if (IsDeadCheck("v8::Value::ToArrayIndex()")) return Local<Uint32>();
  LOG_API("ToArrayIndex");
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  if (obj->IsSmi()) {
    if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
    return Local<Uint32>();
  }
  // Non-smi: stringify and test for the canonical array-index form.
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> string_obj =
      i::Execution::ToString(obj, &has_pending_exception);
  EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
  i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
  uint32_t index;
  if (str->AsArrayIndex(&index)) {
    i::Handle<i::Object> value;
    if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
      value = i::Handle<i::Object>(i::Smi::FromInt(index));
    } else {
      // Indices above the smi range are boxed as heap numbers.
      value = i::Factory::NewNumber(index);
    }
    return Utils::Uint32ToLocal(value);
  }
  return Local<Uint32>();
}
+
+
// Returns this value converted to int32; 0 on a dead VM or when the
// ToInt32 conversion throws.
int32_t Value::Int32Value() {
  if (IsDeadCheck("v8::Value::Int32Value()")) return 0;
  LOG_API("Int32Value");
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  if (obj->IsSmi()) {
    return i::Smi::cast(*obj)->value();
  } else {
    // The slow path logs a second event so conversions can be counted
    // separately from plain smi reads.
    LOG_API("Int32Value (slow)");
    EXCEPTION_PREAMBLE();
    i::Handle<i::Object> num =
        i::Execution::ToInt32(obj, &has_pending_exception);
    EXCEPTION_BAILOUT_CHECK(0);
    if (num->IsSmi()) {
      return i::Smi::cast(*num)->value();
    } else {
      return static_cast<int32_t>(num->Number());
    }
  }
}
+
+
// JavaScript loose equality (==): delegates to the EQUALS builtin and
// compares its result against the EQUAL outcome.  False on dead VM,
// empty handles, or throw.
bool Value::Equals(Handle<Value> that) {
  if (IsDeadCheck("v8::Value::Equals()")
      || EmptyCheck("v8::Value::Equals()", this)
      || EmptyCheck("v8::Value::Equals()", that))
    return false;
  LOG_API("Equals");
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  i::Handle<i::Object> other = Utils::OpenHandle(*that);
  i::Object** args[1] = { other.location() };
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> result =
      CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
  EXCEPTION_BAILOUT_CHECK(false);
  return *result == i::Smi::FromInt(i::EQUAL);
}
+
+
// JS strict equality ('===') between this value and |that|, computed
// without calling into JS.  Returns false if either handle is empty or
// V8 is dead.
bool Value::StrictEquals(Handle<Value> that) {
  if (IsDeadCheck("v8::Value::StrictEquals()")
      || EmptyCheck("v8::Value::StrictEquals()", this)
      || EmptyCheck("v8::Value::StrictEquals()", that))
    return false;
  LOG_API("StrictEquals");
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  i::Handle<i::Object> other = Utils::OpenHandle(*that);
  // Must check HeapNumber first, since NaN !== NaN.
  if (obj->IsHeapNumber()) {
    if (!other->IsNumber()) return false;
    double x = obj->Number();
    double y = other->Number();
    // Must check explicitly for NaN:s on Windows, but -0 works fine.
    return x == y && !isnan(x) && !isnan(y);
  } else if (*obj == *other) {  // Also covers Booleans.
    // Same heap object (or same Smi) implies strict equality.
    return true;
  } else if (obj->IsSmi()) {
    return other->IsNumber() && obj->Number() == other->Number();
  } else if (obj->IsString()) {
    // Strings compare by content, not identity.
    return other->IsString() &&
        i::String::cast(*obj)->Equals(i::String::cast(*other));
  } else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
    return other->IsUndefined() || other->IsUndetectableObject();
  } else {
    return false;
  }
}
+
+
// Converts this value to a uint32_t (ToUint32 semantics); returns 0 if
// V8 is dead or the conversion throws.
uint32_t Value::Uint32Value() {
  if (IsDeadCheck("v8::Value::Uint32Value()")) return 0;
  LOG_API("Uint32Value");
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  if (obj->IsSmi()) {
    // Note: a negative Smi is implicitly reinterpreted as unsigned here.
    return i::Smi::cast(*obj)->value();
  } else {
    // Slow path: may run JS and therefore throw.
    EXCEPTION_PREAMBLE();
    i::Handle<i::Object> num =
        i::Execution::ToUint32(obj, &has_pending_exception);
    EXCEPTION_BAILOUT_CHECK(0);
    if (num->IsSmi()) {
      return i::Smi::cast(*num)->value();
    } else {
      return static_cast<uint32_t>(num->Number());
    }
  }
}
+
+
// Sets |key| to |value| on this object with the given property
// attributes.  Returns false if the store throws or V8 has bailed out.
bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
                     v8::PropertyAttribute attribs) {
  ON_BAILOUT("v8::Object::Set()", return false);
  i::Handle<i::Object> self = Utils::OpenHandle(this);
  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> obj = i::SetProperty(
      self,
      key_obj,
      value_obj,
      static_cast<PropertyAttributes>(attribs));
  // i::SetProperty signals failure with a null handle, so translate
  // that into the pending-exception protocol used by the bailout check.
  has_pending_exception = obj.is_null();
  EXCEPTION_BAILOUT_CHECK(false);
  return true;
}
+
+
// Reads property |key| from this object; returns an empty handle if
// the load throws (e.g. via an accessor) or V8 has bailed out.
Local<Value> v8::Object::Get(v8::Handle<Value> key) {
  ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
  i::Handle<i::Object> self = Utils::OpenHandle(this);
  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> result = i::GetProperty(self, key_obj);
  // i::GetProperty signals failure with a null handle.
  has_pending_exception = result.is_null();
  EXCEPTION_BAILOUT_CHECK(Local<Value>());
  return Utils::ToLocal(result);
}
+
+
+Local<Value> v8::Object::GetPrototype() {
+ ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
+ i::Handle<i::Object> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result = i::GetPrototype(self);
+ return Utils::ToLocal(result);
+}
+
+
// C++ implementation of Object.prototype.toString for API callers:
// builds "[object <ClassName>]" from the object's internal class name.
Local<String> v8::Object::ObjectProtoToString() {
  ON_BAILOUT("v8::Object::ObjectProtoToString()", return Local<v8::String>());
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);

  i::Handle<i::Object> name(self->class_name());

  // Native implementation of Object.prototype.toString (v8natives.js):
  //   var c = %ClassOf(this);
  //   if (c === 'Arguments') c  = 'Object';
  //   return "[object " + c + "]";

  if (!name->IsString()) {
    // No class name available; emit the bracket form with no name.
    return v8::String::New("[object ]");

  } else {
    i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
    if (class_name->IsEqualTo(i::CStrVector("Arguments"))) {
      // Arguments objects report themselves as plain objects.
      return v8::String::New("[object Object]");

    } else {
      const char* prefix = "[object ";
      Local<String> str = Utils::ToLocal(class_name);
      const char* postfix = "]";

      size_t prefix_len = strlen(prefix);
      size_t str_len = str->Length();
      size_t postfix_len = strlen(postfix);

      // Assemble prefix + class name + postfix in a temporary buffer
      // (no NUL terminator needed; the length is passed explicitly).
      size_t buf_len = prefix_len + str_len + postfix_len;
      char* buf = i::NewArray<char>(buf_len);

      // Write prefix.
      char* ptr = buf;
      memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
      ptr += prefix_len;

      // Write real content.
      str->WriteAscii(ptr, 0, str_len);
      ptr += str_len;

      // Write postfix.
      memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);

      // Copy the buffer into a heap-allocated string and return it.
      Local<String> result = v8::String::New(buf, buf_len);
      i::DeleteArray(buf);
      return result;
    }
  }
}
+
+
+bool v8::Object::Delete(v8::Handle<String> key) {
+ ON_BAILOUT("v8::Object::Delete()", return false);
+ HandleScope scope;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ return i::DeleteProperty(self, key_obj)->IsTrue();
+}
+
+
+bool v8::Object::Has(v8::Handle<String> key) {
+ ON_BAILOUT("v8::Object::Has()", return false);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ return self->HasProperty(*key_obj);
+}
+
+
+bool v8::Object::Delete(uint32_t index) {
+ ON_BAILOUT("v8::Object::DeleteProperty()", return false);
+ HandleScope scope;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ return i::DeleteElement(self, index)->IsTrue();
+}
+
+
+bool v8::Object::Has(uint32_t index) {
+ ON_BAILOUT("v8::Object::HasProperty()", return false);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ return self->HasElement(index);
+}
+
+
+bool v8::Object::HasRealNamedProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
+ return Utils::OpenHandle(this)->HasRealNamedProperty(
+ *Utils::OpenHandle(*key));
+}
+
+
+bool v8::Object::HasRealIndexedProperty(uint32_t index) {
+ ON_BAILOUT("v8::Object::HasRealIndexedProperty()", return false);
+ return Utils::OpenHandle(this)->HasRealElementProperty(index);
+}
+
+
+bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::HasRealNamedCallbackProperty()", return false);
+ return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
+ *Utils::OpenHandle(*key));
+}
+
+
+bool v8::Object::HasNamedLookupInterceptor() {
+ ON_BAILOUT("v8::Object::HasNamedLookupInterceptor()", return false);
+ return Utils::OpenHandle(this)->HasNamedInterceptor();
+}
+
+
+bool v8::Object::HasIndexedLookupInterceptor() {
+ ON_BAILOUT("v8::Object::HasIndexedLookupInterceptor()", return false);
+ return Utils::OpenHandle(this)->HasIndexedInterceptor();
+}
+
+
// Looks up |key| as a real (non-interceptor) property along this
// object's prototype chain; returns an empty handle when not found.
Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
      Handle<String> key) {
  ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
             return Local<Value>());
  i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
  i::LookupResult lookup;
  self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
  if (lookup.IsValid()) {
    PropertyAttributes attributes;
    // Fetch the property value using the lookup result; attributes are
    // received but not surfaced through this API.
    i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
                                                      &lookup,
                                                      *key_obj,
                                                      &attributes));
    return Utils::ToLocal(result);
  }
  return Local<Value>();  // No real property was found in protoype chain.
}
+
+
// Zero-argument convenience overload; delegates to the general version.
Local<v8::Object> Function::NewInstance() {
  return NewInstance(0, NULL);
}
+
+
// Invokes this function as a constructor with |argc| arguments.
// Returns an empty handle if construction throws.
Local<v8::Object> Function::NewInstance(int argc,
                                        v8::Handle<v8::Value> argv[]) {
  ON_BAILOUT("v8::Function::NewInstance()", return Local<v8::Object>());
  LOG_API("Function::NewInstance");
  HandleScope scope;
  i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
  // API handles and internal handle locations have identical layout, so
  // argv can be reinterpreted in place instead of copied.
  STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
  i::Object*** args = reinterpret_cast<i::Object***>(argv);
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> returned =
      i::Execution::New(function, argc, args, &has_pending_exception);
  EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
  // Close the scope so the result is re-wrapped in the caller's scope.
  return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
}
+
+
+Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
+ v8::Handle<v8::Value> argv[]) {
+ ON_BAILOUT("v8::Function::Call()", return Local<v8::Value>());
+ LOG_API("Function::Call");
+ i::Object* raw_result = NULL;
+ {
+ HandleScope scope;
+ i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+ i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+ STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> returned =
+ i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Object>());
+ raw_result = *returned;
+ }
+ i::Handle<i::Object> result(raw_result);
+ return Utils::ToLocal(result);
+}
+
+
+void Function::SetName(v8::Handle<v8::String> name) {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ func->shared()->set_name(*Utils::OpenHandle(*name));
+}
+
+
+Handle<Value> Function::GetName() {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name()));
+}
+
+
+int String::Length() {
+ if (IsDeadCheck("v8::String::Length()")) return 0;
+ return Utils::OpenHandle(this)->length();
+}
+
+
// Writes up to |length| characters of this string (starting at |start|)
// into |buffer| as 8-bit chars; embedded NULs become spaces.  A NUL
// terminator is appended only when length == -1 or fewer than |length|
// characters were written.  Returns the count written (terminator not
// included).
int String::WriteAscii(char* buffer, int start, int length) {
  if (IsDeadCheck("v8::String::WriteAscii()")) return 0;
  LOG_API("String::WriteAscii");
  ASSERT(start >= 0 && length >= -1);
  i::Handle<i::String> str = Utils::OpenHandle(this);
  // Flatten the string for efficiency.  This applies whether we are
  // using StringInputBuffer or Get(i) to access the characters.
  str->TryFlatten();
  // Clamp the copy count to the characters actually available.
  int end = length;
  if ( (length == -1) || (length > str->length() - start) )
    end = str->length() - start;
  if (end < 0) return 0;
  // write_input_buffer is declared at file scope and reused across
  // calls (not visible in this chunk).
  write_input_buffer.Reset(start, *str);
  int i;
  for (i = 0; i < end; i++) {
    char c = static_cast<char>(write_input_buffer.GetNext());
    if (c == '\0') c = ' ';  // Replace embedded NULs.
    buffer[i] = c;
  }
  if (length == -1 || i < length)
    buffer[i] = '\0';
  return i;
}
+
+
// Writes up to |length| 16-bit characters of this string (starting at
// |start|) into |buffer|.  A NUL terminator is appended only when
// length == -1 or fewer than |length| characters were written.
// Returns the count written (terminator not included).
int String::Write(uint16_t* buffer, int start, int length) {
  if (IsDeadCheck("v8::String::Write()")) return 0;
  LOG_API("String::Write");
  ASSERT(start >= 0 && length >= -1);
  i::Handle<i::String> str = Utils::OpenHandle(this);
  // Flatten the string for efficiency.  This applies whether we are
  // using StringInputBuffer or Get(i) to access the characters.
  str->TryFlatten();
  // Clamp the copy count to the characters actually available.
  int end = length;
  if ( (length == -1) || (length > str->length() - start) )
    end = str->length() - start;
  if (end < 0) return 0;
  // write_input_buffer is declared at file scope and reused across
  // calls (not visible in this chunk).
  write_input_buffer.Reset(start, *str);
  int i;
  for (i = 0; i < end; i++)
    buffer[i] = write_input_buffer.GetNext();
  if (length == -1 || i < length)
    buffer[i] = '\0';
  return i;
}
+
+
+bool v8::String::IsExternal() {
+ EnsureInitialized("v8::String::IsExternal()");
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ return str->IsExternalTwoByteString();
+}
+
+
+bool v8::String::IsExternalAscii() {
+ EnsureInitialized("v8::String::IsExternalAscii()");
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ return str->IsExternalAsciiString();
+}
+
+
// Returns the external two-byte resource backing this string.  Callers
// must only use this when IsExternal() is true (ASSERT-checked below).
v8::String::ExternalStringResource* v8::String::GetExternalStringResource() {
  EnsureInitialized("v8::String::GetExternalStringResource()");
  i::Handle<i::String> str = Utils::OpenHandle(this);
  ASSERT(str->IsExternalTwoByteString());
  // The internal string stores the resource as an opaque pointer.
  void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
  return reinterpret_cast<ExternalStringResource*>(resource);
}
+
+
// Returns the external ASCII resource backing this string.  Callers
// must only use this when IsExternalAscii() is true (ASSERT-checked).
v8::String::ExternalAsciiStringResource*
      v8::String::GetExternalAsciiStringResource() {
  EnsureInitialized("v8::String::GetExternalAsciiStringResource()");
  i::Handle<i::String> str = Utils::OpenHandle(this);
  ASSERT(str->IsExternalAsciiString());
  // The internal string stores the resource as an opaque pointer.
  void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
  return reinterpret_cast<ExternalAsciiStringResource*>(resource);
}
+
+
+double Number::Value() {
+ if (IsDeadCheck("v8::Number::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->Number();
+}
+
+
+bool Boolean::Value() {
+ if (IsDeadCheck("v8::Boolean::Value()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->IsTrue();
+}
+
+
+int64_t Integer::Value() {
+ if (IsDeadCheck("v8::Integer::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ return static_cast<int64_t>(obj->Number());
+ }
+}
+
+
+int32_t Int32::Value() {
+ if (IsDeadCheck("v8::Int32::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ return static_cast<int32_t>(obj->Number());
+ }
+}
+
+
// Returns the raw pointer wrapped by this External (0 if V8 is dead).
void* External::Value() {
  if (IsDeadCheck("v8::External::Value()")) return 0;
  i::Handle<i::Object> obj = Utils::OpenHandle(this);
  // Externals are represented internally as Proxy objects holding the
  // address.
  return reinterpret_cast<void*>(i::Proxy::cast(*obj)->proxy());
}
+
+
+int v8::Object::InternalFieldCount() {
+ if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ return obj->GetInternalFieldCount();
+}
+
+
+Local<Value> v8::Object::GetInternal(int index) {
+ if (IsDeadCheck("v8::Object::GetInternal()")) return Local<Value>();
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> value(obj->GetInternalField(index));
+ return Utils::ToLocal(value);
+}
+
+
+void v8::Object::SetInternal(int index, v8::Handle<Value> value) {
+ if (IsDeadCheck("v8::Object::SetInternal()")) return;
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ obj->SetInternalField(index, *val);
+}
+
+
+// --- E n v i r o n m e n t ---
+
// Initializes V8, preferring a heap snapshot when one is available.
// Returns true when V8 is (or already was) set up.
bool v8::V8::Initialize() {
  if (i::V8::HasBeenSetup()) return true;
  HandleScope scope;
  if (i::Snapshot::Initialize()) {
    // Booted from a snapshot; serializing this heap again is not
    // supported, so disable the serializer.
    i::Serializer::disable();
    return true;
  } else {
    // No snapshot: perform a full bootstrap.
    return i::V8::Initialize(NULL);
  }
}
+
+
// Creates a new context with the given extensions, global template and
// global object.  Returns an empty persistent handle if environment
// creation fails.
Persistent<Context> v8::Context::New(v8::ExtensionConfiguration* extensions,
                                     v8::Handle<ObjectTemplate> global_template,
                                     v8::Handle<Value> global_object) {
  EnsureInitialized("v8::Context::New()");
  LOG_API("Context::New");
  ON_BAILOUT("v8::Context::New()", return Persistent<Context>());
  // Make sure that the global_template has a constructor.
  if (!global_template.IsEmpty() &&
      Utils::OpenHandle(*global_template)->constructor()->IsUndefined()) {
    // Synthesize a function template and wire it up as the instance
    // template's constructor.
    Local<FunctionTemplate> templ = FunctionTemplate::New();
    Utils::OpenHandle(*templ)->set_instance_template(
        *Utils::OpenHandle(*global_template));
    i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
    Utils::OpenHandle(*global_template)->set_constructor(*constructor);
  }

  i::Handle<i::Context> env = i::Bootstrapper::CreateEnvironment(
      Utils::OpenHandle(*global_object),
      global_template, extensions);
  if (!ApiCheck(!env.is_null(),
                "v8::Context::New()",
                "Could not initialize environment"))
    return Persistent<Context>();
  return Persistent<Context>(Utils::ToLocal(env));
}
+
+
// Installs |token| as this context's security token.
void v8::Context::SetSecurityToken(Handle<Value> token) {
  i::Handle<i::Context> env = Utils::OpenHandle(this);
  i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
  // The global object of an environment is always a real global
  // object with security token and reference to the builtins object.
  i::JSGlobalObject::cast(env->global())->set_security_token(*token_handle);
}
+
+
// Returns this context's security token as a local handle.
Handle<Value> v8::Context::GetSecurityToken() {
  i::Handle<i::Context> env = Utils::OpenHandle(this);
  i::Object* security_token =
      i::JSGlobalObject::cast(env->global())->security_token();
  i::Handle<i::Object> token_handle(security_token);
  return Utils::ToLocal(token_handle);
}
+
+
+bool Context::HasOutOfMemoryException() {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ return env->has_out_of_memory();
+}
+
+
// True iff the current thread is executing inside some V8 context.
bool Context::InContext() {
  return i::Top::context() != NULL;
}
+
+
// True iff a security context is currently active.
bool Context::InSecurityContext() {
  return i::Top::security_context() != NULL;
}
+
+
// Returns the current context.  NOTE(review): this wraps Top's *global*
// context handle; confirm that is the intended notion of "current".
v8::Local<v8::Context> Context::Current() {
  if (IsDeadCheck("v8::Context::Current()")) return Local<Context>();
  i::Handle<i::Context> context(i::Top::global_context());
  return Utils::ToLocal(context);
}
+
+
// Returns the currently active security context.  ASSERTs (debug only)
// that one is active.
v8::Local<v8::Context> Context::GetSecurityContext() {
  if (IsDeadCheck("v8::Context::GetSecurityContext()")) return Local<Context>();
  ASSERT(i::Top::security_context() != NULL);
  i::Handle<i::Context> context(i::Top::security_context());
  return Utils::ToLocal(context);
}
+
+
// Returns this context's global object.
v8::Local<v8::Object> Context::Global() {
  if (IsDeadCheck("v8::Context::Global()")) return Local<v8::Object>();
  // An API Context is a pointer to an internal object slot, so recover
  // the internal handle by reinterpreting |this| (manual equivalent of
  // Utils::OpenHandle for contexts).
  i::Object** ctx = reinterpret_cast<i::Object**>(this);
  i::Handle<i::Context> context =
      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
  i::Handle<i::JSObject> global(context->global());
  return Utils::ToLocal(global);
}
+
+
// Instantiates a new object from this template; returns an empty handle
// if instantiation throws.
Local<v8::Object> ObjectTemplate::NewInstance() {
  ON_BAILOUT("v8::ObjectTemplate::NewInstance()", return Local<v8::Object>());
  LOG_API("ObjectTemplate::NewInstance");
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> obj =
      i::Execution::InstantiateObject(Utils::OpenHandle(this),
                                      &has_pending_exception);
  EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
  return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
}
+
+
// Instantiates (or retrieves) the function object for this template;
// returns an empty handle if instantiation throws.
Local<v8::Function> FunctionTemplate::GetFunction() {
  ON_BAILOUT("v8::FunctionTemplate::GetFunction()",
             return Local<v8::Function>());
  LOG_API("FunctionTemplate::GetFunction");
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> obj =
      i::Execution::InstantiateFunction(Utils::OpenHandle(this),
                                        &has_pending_exception);
  EXCEPTION_BAILOUT_CHECK(Local<v8::Function>());
  return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
}
+
+
+bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
+ ON_BAILOUT("v8::FunctionTemplate::HasInstanceOf()", return false);
+ i::Object* obj = *Utils::OpenHandle(*value);
+ return obj->IsInstanceOf(*Utils::OpenHandle(this));
+}
+
+
// Wraps the raw pointer |data| in a new External object.
Local<External> v8::External::New(void* data) {
  // Internally the pointer is stored as an Address inside a Proxy, so
  // the two representations must have identical size.
  STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
  LOG_API("External::New");
  EnsureInitialized("v8::External::New()");
  i::Handle<i::Proxy> obj = i::Factory::NewProxy(static_cast<i::Address>(data));
  return Utils::ToLocal(obj);
}
+
+
// Creates a string from UTF-8 |data|; length == -1 means NUL-terminated.
Local<String> v8::String::New(const char* data, int length) {
  EnsureInitialized("v8::String::New()");
  LOG_API("String::New(char)");
  if (length == -1) length = strlen(data);
  i::Handle<i::String> result =
      i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
  return Utils::ToLocal(result);
}
+
+
// Like String::New(char), but marks the result undetectable (it
// compares as undefined in typeof/equality checks).
Local<String> v8::String::NewUndetectable(const char* data, int length) {
  EnsureInitialized("v8::String::NewUndetectable()");
  LOG_API("String::NewUndetectable(char)");
  if (length == -1) length = strlen(data);
  i::Handle<i::String> result =
      i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
  result->MarkAsUndetectable();
  return Utils::ToLocal(result);
}
+
+
// Returns the number of 16-bit code units in |data| before the first
// NUL terminator.
static int TwoByteStringLength(const uint16_t* data) {
  const uint16_t* p = data;
  while (*p != '\0') p++;
  return static_cast<int>(p - data);
}
+
+
// Creates a string from two-byte |data|; length == -1 means
// NUL-terminated.
Local<String> v8::String::New(const uint16_t* data, int length) {
  EnsureInitialized("v8::String::New()");
  LOG_API("String::New(uint16_)");
  if (length == -1) length = TwoByteStringLength(data);
  i::Handle<i::String> result =
      i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
  return Utils::ToLocal(result);
}
+
+
// Like String::New(uint16_t), but marks the result undetectable.
Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
  EnsureInitialized("v8::String::NewUndetectable()");
  LOG_API("String::NewUndetectable(uint16_)");
  if (length == -1) length = TwoByteStringLength(data);
  i::Handle<i::String> result =
      i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
  result->MarkAsUndetectable();
  return Utils::ToLocal(result);
}
+
+
+i::Handle<i::String> NewExternalStringHandle(
+ v8::String::ExternalStringResource* resource) {
+ i::Handle<i::String> result =
+ i::Factory::NewExternalStringFromTwoByte(resource);
+ return result;
+}
+
+
+i::Handle<i::String> NewExternalAsciiStringHandle(
+ v8::String::ExternalAsciiStringResource* resource) {
+ i::Handle<i::String> result =
+ i::Factory::NewExternalStringFromAscii(resource);
+ return result;
+}
+
+
// Weak-handle callback for external two-byte strings: releases the
// resource and the global handle once the string is garbage.
static void DisposeExternalString(v8::Persistent<v8::Object> obj,
                                  void* parameter) {
  v8::String::ExternalStringResource* resource =
      reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
  // Undo the accounting done in String::NewExternal.
  const size_t total_size = resource->length() * sizeof(*resource->data());
  i::Counters::total_external_string_memory.Decrement(total_size);
  delete resource;
  obj.Dispose();
}
+
+
// Weak-handle callback for external ASCII strings: releases the
// resource and the global handle once the string is garbage.
static void DisposeExternalAsciiString(v8::Persistent<v8::Object> obj,
                                       void* parameter) {
  v8::String::ExternalAsciiStringResource* resource =
      reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
  // Undo the accounting done in String::NewExternal.
  const size_t total_size = resource->length() * sizeof(*resource->data());
  i::Counters::total_external_string_memory.Decrement(total_size);
  delete resource;
  obj.Dispose();
}
+
+
// Creates a string backed by an external two-byte resource.  V8 takes
// ownership: the resource is deleted by a weak-handle callback when the
// string is collected.
Local<String> v8::String::NewExternal(
      v8::String::ExternalStringResource* resource) {
  EnsureInitialized("v8::String::NewExternal()");
  LOG_API("String::NewExternal");
  // Account for the externally held memory (decremented on dispose).
  const size_t total_size = resource->length() * sizeof(*resource->data());
  i::Counters::total_external_string_memory.Increment(total_size);
  i::Handle<i::String> result = NewExternalStringHandle(resource);
  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
  i::GlobalHandles::MakeWeak(handle.location(),
                             resource,
                             &DisposeExternalString);
  return Utils::ToLocal(result);
}
+
+
// Creates a string backed by an external ASCII resource.  V8 takes
// ownership: the resource is deleted by a weak-handle callback when the
// string is collected.
Local<String> v8::String::NewExternal(
      v8::String::ExternalAsciiStringResource* resource) {
  EnsureInitialized("v8::String::NewExternal()");
  LOG_API("String::NewExternal");
  // Account for the externally held memory (decremented on dispose).
  const size_t total_size = resource->length() * sizeof(*resource->data());
  i::Counters::total_external_string_memory.Increment(total_size);
  i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
  i::GlobalHandles::MakeWeak(handle.location(),
                             resource,
                             &DisposeExternalAsciiString);
  return Utils::ToLocal(result);
}
+
+
+Local<v8::Object> v8::Object::New() {
+ EnsureInitialized("v8::Object::New()");
+ LOG_API("Object::New");
+ i::Handle<i::JSObject> obj =
+ i::Factory::NewJSObject(i::Top::object_function());
+ return Utils::ToLocal(obj);
+}
+
+
// Creates a JS Date from |time| (milliseconds since the epoch, per the
// internal NewDate contract — confirm against i::Execution::NewDate).
// Returns an empty handle if construction throws.
Local<v8::Value> v8::Date::New(double time) {
  EnsureInitialized("v8::Date::New()");
  LOG_API("Date::New");
  EXCEPTION_PREAMBLE();
  i::Handle<i::Object> obj =
      i::Execution::NewDate(time, &has_pending_exception);
  EXCEPTION_BAILOUT_CHECK(Local<v8::Value>());
  return Utils::ToLocal(obj);
}
+
+
+Local<v8::Array> v8::Array::New(int length) {
+ EnsureInitialized("v8::Array::New()");
+ LOG_API("Array::New");
+ i::Handle<i::JSArray> obj = i::Factory::NewJSArray(length);
+ return Utils::ToLocal(obj);
+}
+
+
// Returns this array's length (0 if V8 is dead).
uint32_t v8::Array::Length() {
  if (IsDeadCheck("v8::Array::Length()")) return 0;
  i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
  i::Object* length = obj->length();
  if (length->IsSmi()) {
    return i::Smi::cast(length)->value();
  } else {
    // Lengths above the Smi range are stored as heap numbers.
    return static_cast<uint32_t>(length->Number());
  }
}
+
+
// Interns |data| in the symbol table and returns the canonical string;
// length == -1 means NUL-terminated.
Local<String> v8::String::NewSymbol(const char* data, int length) {
  EnsureInitialized("v8::String::NewSymbol()");
  LOG_API("String::NewSymbol(char)");
  if (length == -1) length = strlen(data);
  i::Handle<i::String> result =
      i::Factory::LookupSymbol(i::Vector<const char>(data, length));
  return Utils::ToLocal(result);
}
+
+
+Local<Number> v8::Number::New(double value) {
+ EnsureInitialized("v8::Number::New()");
+ i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ return Utils::NumberToLocal(result);
+}
+
+
// Creates an Integer holding |value|, using the immediate Smi
// representation when the value fits.
Local<Integer> v8::Integer::New(int32_t value) {
  EnsureInitialized("v8::Integer::New()");
  if (i::Smi::IsValid(value)) {
    return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value)));
  }
  // Out of Smi range: allocate a heap number.
  i::Handle<i::Object> result = i::Factory::NewNumber(value);
  return Utils::IntegerToLocal(result);
}
+
+
// Makes the current thread ignore out-of-memory exceptions.
void V8::IgnoreOutOfMemoryException() {
  thread_local.SetIgnoreOutOfMemory(true);
}
+
+
// Registers a message callback with optional |data|; the pair is stored
// as a two-slot Neander object in the global listener list.  Returns
// true on success, false on bailout.
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
  EnsureInitialized("v8::V8::AddMessageListener()");
  ON_BAILOUT("v8::V8::AddMessageListener()", return false);
  HandleScope scope;
  NeanderArray listeners(i::Factory::message_listeners());
  NeanderObject obj(2);
  // Slot 0: the C callback wrapped in a proxy; slot 1: user data.
  obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
  obj.set(1, data.IsEmpty() ?
             i::Heap::undefined_value() :
             *Utils::OpenHandle(*data));
  listeners.add(obj.value());
  return true;
}
+
+
// Removes every registered listener whose callback equals |that| by
// overwriting its slot with undefined (the list is never compacted).
// NOTE(review): the EnsureInitialized label says "RemoveMessageListener"
// (singular) — confirm whether that mismatch is intentional.
void V8::RemoveMessageListeners(MessageCallback that) {
  EnsureInitialized("v8::V8::RemoveMessageListener()");
  ON_BAILOUT("v8::V8::RemoveMessageListeners()", return);
  HandleScope scope;
  NeanderArray listeners(i::Factory::message_listeners());
  for (int i = 0; i < listeners.length(); i++) {
    if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones

    NeanderObject listener(i::JSObject::cast(listeners.get(i)));
    i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
    if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
      listeners.set(i, i::Heap::undefined_value());
    }
  }
}
+
+
// Installs the embedder's counter-lookup callback for stats counters.
void V8::SetCounterFunction(CounterLookupCallback callback) {
  if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
  i::StatsTable::SetCounterFunction(callback);
}
+
+
// Enables the logger's sliding state window.
void V8::EnableSlidingStateWindow() {
  if (IsDeadCheck("v8::V8::EnableSlidingStateWindow()")) return;
  i::Logger::EnableSlidingStateWindow();
}
+
+
// Installs the callback invoked when a security access check fails.
void V8::SetFailedAccessCheckCallbackFunction(
      FailedAccessCheckCallback callback) {
  if (IsDeadCheck("v8::V8::SetFailedAccessCheckCallbackFunction()")) return;
  i::Top::SetFailedAccessCheckCallback(callback);
}
+
+
// Adds |obj| to the GC object group identified by |group_id|.
void V8::AddObjectToGroup(void* group_id, Persistent<Object> obj) {
  if (IsDeadCheck("v8::V8::AddObjectToGroup()")) return;
  // A persistent handle dereferences to the internal object slot.
  i::GlobalHandles::AddToGroup(group_id, reinterpret_cast<i::Object**>(*obj));
}
+
+
// Installs a callback run before each garbage collection.
void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
  if (IsDeadCheck("v8::V8::SetGlobalGCPrologueCallback()")) return;
  i::Heap::SetGlobalGCPrologueCallback(callback);
}
+
+
// Installs a callback run after each garbage collection.
void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
  if (IsDeadCheck("v8::V8::SetGlobalGCEpilogueCallback()")) return;
  i::Heap::SetGlobalGCEpilogueCallback(callback);
}
+
+
// Converts |obj| to a string and copies it into a NUL-terminated ASCII
// buffer owned by this object (freed in the destructor).
// NOTE(review): if ToString() throws and returns an empty handle, the
// subsequent Length() call dereferences it — confirm callers guard this.
String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj) {
  EnsureInitialized("v8::String::AsciiValue::AsciiValue()");
  HandleScope scope;
  Handle<String> str = obj->ToString();
  int length = str->Length();
  // +1 for the NUL terminator WriteAscii appends when length == -1.
  str_ = i::NewArray<char>(length + 1);
  str->WriteAscii(str_);
}
+
+
// Releases the buffer allocated in the constructor.
String::AsciiValue::~AsciiValue() {
  i::DeleteArray(str_);
}
+
+
// Converts |obj| to a string and copies it into a NUL-terminated
// two-byte buffer owned by this object (freed in the destructor).
// NOTE(review): if ToString() throws and returns an empty handle, the
// subsequent Length() call dereferences it — confirm callers guard this.
String::Value::Value(v8::Handle<v8::Value> obj) {
  EnsureInitialized("v8::String::Value::Value()");
  HandleScope scope;
  Handle<String> str = obj->ToString();
  int length = str->Length();
  // +1 for the NUL terminator Write appends when length == -1.
  str_ = i::NewArray<uint16_t>(length + 1);
  str->Write(str_);
}
+
+
// Releases the buffer allocated in the constructor.
String::Value::~Value() {
  i::DeleteArray(str_);
}
+
// Creates a RangeError object carrying |raw_message|.
Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
  LOG_API("RangeError");
  ON_BAILOUT("v8::Exception::RangeError()", return Local<Value>());
  i::Object* error;
  {
    HandleScope scope;
    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
    i::Handle<i::Object> result = i::Factory::NewRangeError(message);
    // Extract the raw object so it can be re-wrapped after the
    // temporary scope closes.
    error = *result;
  }
  i::Handle<i::Object> result(error);
  return Utils::ToLocal(result);
}
+
// Creates a ReferenceError object carrying |raw_message|.
Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
  LOG_API("ReferenceError");
  ON_BAILOUT("v8::Exception::ReferenceError()", return Local<Value>());
  i::Object* error;
  {
    HandleScope scope;
    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
    i::Handle<i::Object> result = i::Factory::NewReferenceError(message);
    // Extract the raw object so it can be re-wrapped after the
    // temporary scope closes.
    error = *result;
  }
  i::Handle<i::Object> result(error);
  return Utils::ToLocal(result);
}
+
// Creates a SyntaxError object carrying |raw_message|.
Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
  LOG_API("SyntaxError");
  ON_BAILOUT("v8::Exception::SyntaxError()", return Local<Value>());
  i::Object* error;
  {
    HandleScope scope;
    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
    i::Handle<i::Object> result = i::Factory::NewSyntaxError(message);
    // Extract the raw object so it can be re-wrapped after the
    // temporary scope closes.
    error = *result;
  }
  i::Handle<i::Object> result(error);
  return Utils::ToLocal(result);
}
+
// Creates a TypeError object carrying |raw_message|.
Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
  LOG_API("TypeError");
  ON_BAILOUT("v8::Exception::TypeError()", return Local<Value>());
  i::Object* error;
  {
    HandleScope scope;
    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
    i::Handle<i::Object> result = i::Factory::NewTypeError(message);
    // Extract the raw object so it can be re-wrapped after the
    // temporary scope closes.
    error = *result;
  }
  i::Handle<i::Object> result(error);
  return Utils::ToLocal(result);
}
+
// Creates a generic Error object carrying |raw_message|.
Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
  LOG_API("Error");
  ON_BAILOUT("v8::Exception::Error()", return Local<Value>());
  i::Object* error;
  {
    HandleScope scope;
    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
    i::Handle<i::Object> result = i::Factory::NewError(message);
    // Extract the raw object so it can be re-wrapped after the
    // temporary scope closes.
    error = *result;
  }
  i::Handle<i::Object> result(error);
  return Utils::ToLocal(result);
}
+
+
+// --- D e b u g S u p p o r t ---
+
+
// Registers a C debug-event callback with optional |data| and refreshes
// the debugger's active state.  NOTE(review): the check/bailout labels
// say "v8::V8::..." rather than "v8::Debug::..." — confirm intent.
bool Debug::AddDebugEventListener(DebugEventCallback that, Handle<Value> data) {
  EnsureInitialized("v8::V8::AddDebugEventListener()");
  ON_BAILOUT("v8::V8::AddDebugEventListener()", return false);
  HandleScope scope;
  NeanderArray listeners(i::Factory::debug_event_listeners());
  NeanderObject obj(2);
  // Slot 0: the C callback wrapped in a proxy; slot 1: user data.
  obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
  obj.set(1, data.IsEmpty() ?
             i::Heap::undefined_value() :
             *Utils::OpenHandle(*data));
  listeners.add(obj.value());
  i::Debugger::UpdateActiveDebugger();
  return true;
}
+
+
// Registers a JavaScript debug-event listener function with optional
// |data| and refreshes the debugger's active state.
bool Debug::AddDebugEventListener(v8::Handle<v8::Function> that,
                                  Handle<Value> data) {
  ON_BAILOUT("v8::V8::AddDebugEventListener()", return false);
  HandleScope scope;
  NeanderArray listeners(i::Factory::debug_event_listeners());
  NeanderObject obj(2);
  // Slot 0: the JS function itself; slot 1: user data.
  obj.set(0, *Utils::OpenHandle(*that));
  obj.set(1, data.IsEmpty() ?
             i::Heap::undefined_value() :
             *Utils::OpenHandle(*data));
  listeners.add(obj.value());
  i::Debugger::UpdateActiveDebugger();
  return true;
}
+
+
// Unregisters the C debug-event callback |that| by overwriting matching
// list slots with undefined, then refreshes the debugger state.
void Debug::RemoveDebugEventListener(DebugEventCallback that) {
  EnsureInitialized("v8::V8::RemoveDebugEventListener()");
  ON_BAILOUT("v8::V8::RemoveDebugEventListener()", return);
  HandleScope scope;
  NeanderArray listeners(i::Factory::debug_event_listeners());
  for (int i = 0; i < listeners.length(); i++) {
    if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones

    NeanderObject listener(i::JSObject::cast(listeners.get(i)));
    // When removing a C debug event listener only consider proxy objects.
    if (listener.get(0)->IsProxy()) {
      i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
      if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
        listeners.set(i, i::Heap::undefined_value());
      }
    }
  }
  i::Debugger::UpdateActiveDebugger();
}
+
+
// Unregisters the JavaScript debug-event listener |that| by overwriting
// matching list slots with undefined, then refreshes the debugger state.
void Debug::RemoveDebugEventListener(v8::Handle<v8::Function> that) {
  ON_BAILOUT("v8::V8::RemoveDebugEventListener()", return);
  HandleScope scope;
  NeanderArray listeners(i::Factory::debug_event_listeners());
  for (int i = 0; i < listeners.length(); i++) {
    if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones

    NeanderObject listener(i::JSObject::cast(listeners.get(i)));
    // When removing a JavaScript debug event listener only consider JavaScript
    // function objects.
    if (listener.get(0)->IsJSFunction()) {
      i::JSFunction* callback = i::JSFunction::cast(listener.get(0));
      i::Handle<i::JSFunction> callback_fun(callback);
      if (callback_fun.is_identical_to(Utils::OpenHandle(*that))) {
        listeners.set(i, i::Heap::undefined_value());
      }
    }
  }
  i::Debugger::UpdateActiveDebugger();
}
+
+
+// Requests a debugger break; the break is taken at the next stack guard
+// check rather than immediately.
+void Debug::DebugBreak() {
+ i::StackGuard::DebugBreak();
+}
+
+
+// Installs the callback that receives debugger protocol messages. |data| is
+// an opaque pointer handed back to the handler on each call.
+void Debug::SetMessageHandler(v8::DebugMessageHandler handler, void* data) {
+ i::Debugger::SetMessageHandler(handler, data);
+}
+
+
+// Queues a debugger command (|length| UTF-16 code units) for processing.
+void Debug::SendCommand(const uint16_t* command, int length) {
+ i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length));
+}
+
+
+namespace internal {
+
+
+// Returns the singleton holding the currently-executing thread's handle
+// scope bookkeeping (copied in and out of per-thread storage on switches).
+HandleScopeImplementer* HandleScopeImplementer::instance() {
+ return &thread_local;
+}
+
+
+char* HandleScopeImplementer::ArchiveThread(char* storage) {
+ return thread_local.ArchiveThreadHelper(storage);
+}
+
+
+// Saves this object's state into |storage| with a shallow memcpy (the
+// blocks list is copied by value) and resets both this object and the
+// current handle scope data for the next thread to use.
+char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
+ ImplementationUtilities::HandleScopeData* current =
+ ImplementationUtilities::CurrentHandleScope();
+ // Capture the live handle scope data before snapshotting ourselves.
+ handle_scope_data_ = *current;
+ memcpy(storage, this, sizeof(*this));
+
+ Initialize();
+ current->Initialize();
+
+ return storage + ArchiveSpacePerThread();
+}
+
+
+// Bytes needed per thread; must match the memcpy size used above/below.
+int HandleScopeImplementer::ArchiveSpacePerThread() {
+ return sizeof(thread_local);
+}
+
+
+char* HandleScopeImplementer::RestoreThread(char* storage) {
+ return thread_local.RestoreThreadHelper(storage);
+}
+
+
+// Inverse of ArchiveThreadHelper: restores the snapshot and republishes
+// the archived handle scope data as the current one.
+char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
+ memcpy(this, storage, sizeof(*this));
+ *ImplementationUtilities::CurrentHandleScope() = handle_scope_data_;
+ return storage + ArchiveSpacePerThread();
+}
+
+
+// GC root iteration over a set of handle blocks. Full blocks are visited
+// completely; only the portion of the last block up to |handle_data->next|
+// contains live handles.
+void HandleScopeImplementer::Iterate(
+ ObjectVisitor* v,
+ List<void**>* blocks,
+ ImplementationUtilities::HandleScopeData* handle_data) {
+ // Iterate over all handles in the blocks except for the last.
+ for (int i = blocks->length() - 2; i >= 0; --i) {
+ Object** block =
+ reinterpret_cast<Object**>(blocks->at(i));
+ v->VisitPointers(block, &block[kHandleBlockSize]);
+ }
+
+ // Iterate over live handles in the last block (if any).
+ if (!blocks->is_empty()) {
+ v->VisitPointers(reinterpret_cast<Object**>(blocks->last()),
+ reinterpret_cast<Object**>(handle_data->next));
+ }
+}
+
+
+// Iterates the running thread's handles.
+void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
+ ImplementationUtilities::HandleScopeData* current =
+ ImplementationUtilities::CurrentHandleScope();
+ Iterate(v, thread_local.Blocks(), current);
+}
+
+
+// Iterates the handles of an archived (non-running) thread whose state was
+// saved into |storage| by ArchiveThread.
+char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
+ HandleScopeImplementer* thread_local =
+ reinterpret_cast<HandleScopeImplementer*>(storage);
+ List<void**>* blocks_of_archived_thread = thread_local->Blocks();
+ ImplementationUtilities::HandleScopeData* handle_data_of_archived_thread =
+ &thread_local->handle_scope_data_;
+ Iterate(v, blocks_of_archived_thread, handle_data_of_archived_thread);
+
+ return storage + ArchiveSpacePerThread();
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_API_H_
+#define V8_API_H_
+
+#include "factory.h"
+
+namespace v8 {
+
+// Constants used in the implementation of the API. The most natural thing
+// would usually be to place these with the classes that use them, but
+// we want to keep them out of v8.h because it is an externally
+// visible file.
+class Consts {
+ public:
+ // Tag values stored in template info objects to distinguish function
+ // templates from object templates.
+ enum TemplateType {
+ FUNCTION_TEMPLATE = 0,
+ OBJECT_TEMPLATE = 1
+ };
+};
+
+
+// Utilities for working with neander-objects, primitive
+// env-independent JSObjects used by the api.
+// A fixed-size, env-independent JSObject wrapper; elements are accessed by
+// integer slot index via get/set.
+class NeanderObject {
+ public:
+ explicit NeanderObject(int size);
+ inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
+ inline NeanderObject(v8::internal::Object* obj);
+ inline v8::internal::Object* get(int index);
+ inline void set(int index, v8::internal::Object* value);
+ inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
+ int size();
+ private:
+ v8::internal::Handle<v8::internal::JSObject> value_;
+};
+
+
+// Utilities for working with neander-arrays, a simple extensible
+// array abstraction built on neander-objects.
+class NeanderArray {
+ public:
+ NeanderArray();
+ inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
+ inline v8::internal::Handle<v8::internal::JSObject> value() {
+ return obj_.value();
+ }
+
+ // Appends a value, growing the array as needed.
+ void add(v8::internal::Handle<v8::internal::Object> value);
+
+ int length();
+
+ v8::internal::Object* get(int index);
+ // Set the value at an index to the given value. If the index is
+ // out of bounds, the request is ignored.
+ void set(int index, v8::internal::Object* value);
+ private:
+ NeanderObject obj_;
+};
+
+
+// Wraps an existing object handle; the handle must refer to a JSObject.
+NeanderObject::NeanderObject(v8::internal::Handle<v8::internal::Object> obj)
+ : value_(v8::internal::Handle<v8::internal::JSObject>::cast(obj)) { }
+
+
+NeanderObject::NeanderObject(v8::internal::Object* obj)
+ : value_(v8::internal::Handle<v8::internal::JSObject>(
+ v8::internal::JSObject::cast(obj))) { }
+
+
+NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
+ : obj_(obj) { }
+
+
+// Reads slot |offset| from the backing fast-elements FixedArray.
+v8::internal::Object* NeanderObject::get(int offset) {
+ ASSERT(value()->HasFastElements());
+ return v8::internal::FixedArray::cast(value()->elements())->get(offset);
+}
+
+
+// Writes slot |offset| in the backing fast-elements FixedArray.
+void NeanderObject::set(int offset, v8::internal::Object* value) {
+ ASSERT(value_->HasFastElements());
+ v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
+}
+
+
+// Extracts the C entity (function pointer etc.) of type T stored in a
+// Proxy object. T must be pointer-sized. The round-trip goes through
+// intptr_t rather than int so the address is not truncated on 64-bit
+// targets (int is 32 bits on common LP64/LLP64 ABIs).
+template <typename T> static inline T ToCData(v8::internal::Object* obj) {
+ STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ return reinterpret_cast<T>(
+ reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
+}
+
+
+// Inverse of ToCData: wraps a pointer-sized C entity in a new Proxy
+// object. Uses intptr_t (not int) as the intermediate integer type so the
+// address survives intact on 64-bit targets.
+template <typename T>
+static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+ STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ return v8::internal::Factory::NewProxy(
+ reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
+}
+
+
+// Plain member-wise constructor for the public Arguments wrapper; only the
+// API implementation (via ImplementationUtilities::NewArguments) may build
+// these.
+v8::Arguments::Arguments(v8::Local<v8::Value> data,
+ v8::Local<v8::Object> holder,
+ v8::Local<v8::Function> callee,
+ bool is_construct_call,
+ void** values, int length)
+ : data_(data), holder_(holder), callee_(callee),
+ is_construct_call_(is_construct_call),
+ values_(values), length_(length) { }
+
+
+// Traversal states used while installing extensions and their
+// dependencies (VISITED marks in-progress nodes for cycle handling).
+enum ExtensionTraversalState {
+ UNVISITED, VISITED, INSTALLED
+};
+
+
+// Node in the global singly-linked list of registered extensions. There is
+// a second chain (next_auto_) — presumably linking auto-enabled
+// extensions; the maintaining code is not in this file, so confirm before
+// relying on it.
+class RegisteredExtension {
+ public:
+ explicit RegisteredExtension(Extension* extension);
+ static void Register(RegisteredExtension* that);
+ Extension* extension() { return extension_; }
+ RegisteredExtension* next() { return next_; }
+ RegisteredExtension* next_auto() { return next_auto_; }
+ ExtensionTraversalState state() { return state_; }
+ void set_state(ExtensionTraversalState value) { state_ = value; }
+ static RegisteredExtension* first_extension() { return first_extension_; }
+ private:
+ Extension* extension_;
+ RegisteredExtension* next_;
+ RegisteredExtension* next_auto_;
+ ExtensionTraversalState state_; // traversal mark used during install
+ static RegisteredExtension* first_extension_;
+ static RegisteredExtension* first_auto_extension_;
+};
+
+
+// Grab-bag of helpers that need friend access to public API classes
+// (ExtensionConfiguration, Arguments, HandleScope) without exposing their
+// internals in v8.h.
+class ImplementationUtilities {
+ public:
+ static v8::Handle<v8::Primitive> Undefined();
+ static v8::Handle<v8::Primitive> Null();
+ static v8::Handle<v8::Boolean> True();
+ static v8::Handle<v8::Boolean> False();
+
+ static int GetNameCount(ExtensionConfiguration* that) {
+ return that->name_count_;
+ }
+
+ static const char** GetNames(ExtensionConfiguration* that) {
+ return that->names_;
+ }
+
+ // Only way to construct a v8::Arguments (its constructor is private).
+ static v8::Arguments NewArguments(Local<Value> data,
+ Local<Object> holder,
+ Local<Function> callee,
+ bool is_construct_call,
+ void** argv, int argc) {
+ return v8::Arguments(data, holder, callee, is_construct_call, argv, argc);
+ }
+
+ // Introduce an alias for the handle scope data to allow non-friends
+ // to access the HandleScope data.
+ typedef v8::HandleScope::Data HandleScopeData;
+
+ static HandleScopeData* CurrentHandleScope() {
+ return &v8::HandleScope::current_;
+ }
+
+#ifdef DEBUG
+ // Debug-only: fills a retired handle range with recognizable garbage.
+ static void ZapHandleRange(void** begin, void** end) {
+ v8::HandleScope::ZapRange(begin, end);
+ }
+#endif
+};
+
+
+// Conversions between the public API handle types and internal handles.
+// ToLocal wraps an internal handle as a Local<T>; OpenHandle does the
+// reverse. Both rely on the two handle representations having identical
+// layout (a single Object** location), so they are pure reinterpretations.
+class Utils {
+ public:
+ static bool ReportApiFailure(const char* location, const char* message);
+
+ static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
+ static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
+
+ static inline Local<Context> ToLocal(
+ v8::internal::Handle<v8::internal::Context> obj);
+ static inline Local<Value> ToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Function> ToLocal(
+ v8::internal::Handle<v8::internal::JSFunction> obj);
+ static inline Local<String> ToLocal(
+ v8::internal::Handle<v8::internal::String> obj);
+ static inline Local<Object> ToLocal(
+ v8::internal::Handle<v8::internal::JSObject> obj);
+ static inline Local<Array> ToLocal(
+ v8::internal::Handle<v8::internal::JSArray> obj);
+ static inline Local<External> ToLocal(
+ v8::internal::Handle<v8::internal::Proxy> obj);
+ static inline Local<Message> MessageToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Number> NumberToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Integer> IntegerToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Uint32> Uint32ToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<FunctionTemplate> ToLocal(
+ v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
+ static inline Local<ObjectTemplate> ToLocal(
+ v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
+ static inline Local<Signature> ToLocal(
+ v8::internal::Handle<v8::internal::SignatureInfo> obj);
+ static inline Local<TypeSwitch> ToLocal(
+ v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
+
+ static inline v8::internal::Handle<v8::internal::TemplateInfo>
+ OpenHandle(Template* that);
+ static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
+ OpenHandle(FunctionTemplate* that);
+ static inline v8::internal::Handle<v8::internal::ObjectTemplateInfo>
+ OpenHandle(ObjectTemplate* that);
+ static inline v8::internal::Handle<v8::internal::Object>
+ OpenHandle(Data* data);
+ static inline v8::internal::Handle<v8::internal::JSObject>
+ OpenHandle(v8::Object* data);
+ static inline v8::internal::Handle<v8::internal::JSArray>
+ OpenHandle(v8::Array* data);
+ static inline v8::internal::Handle<v8::internal::String>
+ OpenHandle(String* data);
+ static inline v8::internal::Handle<v8::internal::JSFunction>
+ OpenHandle(Script* data);
+ static inline v8::internal::Handle<v8::internal::JSFunction>
+ OpenHandle(Function* data);
+ static inline v8::internal::Handle<v8::internal::JSObject>
+ OpenHandle(Message* message);
+ static inline v8::internal::Handle<v8::internal::Context>
+ OpenHandle(v8::Context* context);
+ static inline v8::internal::Handle<v8::internal::SignatureInfo>
+ OpenHandle(v8::Signature* sig);
+ static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
+ OpenHandle(v8::TypeSwitch* that);
+};
+
+
+// Reinterprets an internal handle's storage location as a public API
+// pointer (same trick as Utils::ToLocal, but unchecked).
+template <class T>
+static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+ return reinterpret_cast<T*>(obj.location());
+}
+
+
+// Escapes this handle out of |scope| by round-tripping through the public
+// API: convert to a Local, Close the scope with it, reopen as internal.
+template <class T>
+v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
+ HandleScope* scope) {
+ return Utils::OpenHandle(*scope->Close(Utils::ToLocal(*this)));
+}
+
+
+// Implementations of ToLocal
+
+// Stamps out one Utils::*ToLocal conversion per (internal From, public To)
+// pair; each is a layout-compatible reinterpretation of the handle slot.
+#define MAKE_TO_LOCAL(Name, From, To) \
+ Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
+ return Local<To>(reinterpret_cast<To*>(obj.location())); \
+ }
+
+MAKE_TO_LOCAL(ToLocal, Context, Context)
+MAKE_TO_LOCAL(ToLocal, Object, Value)
+MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
+MAKE_TO_LOCAL(ToLocal, String, String)
+MAKE_TO_LOCAL(ToLocal, JSObject, Object)
+MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, Proxy, External)
+MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
+MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
+MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
+MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
+MAKE_TO_LOCAL(MessageToLocal, Object, Message)
+MAKE_TO_LOCAL(NumberToLocal, Object, Number)
+MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
+MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+
+#undef MAKE_TO_LOCAL
+
+
+// Implementations of OpenHandle
+
+// Stamps out one Utils::OpenHandle per (public From, internal To) pair:
+// the public object pointer is itself the internal handle's Object** slot.
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle(v8::From* that) { \
+ return v8::internal::Handle<v8::internal::To>( \
+ reinterpret_cast<v8::internal::To**>(that)); \
+ }
+
+MAKE_OPEN_HANDLE(Template, TemplateInfo)
+MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
+MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
+MAKE_OPEN_HANDLE(Signature, SignatureInfo)
+MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
+MAKE_OPEN_HANDLE(Data, Object)
+MAKE_OPEN_HANDLE(Object, JSObject)
+MAKE_OPEN_HANDLE(Array, JSArray)
+MAKE_OPEN_HANDLE(String, String)
+MAKE_OPEN_HANDLE(Script, JSFunction)
+MAKE_OPEN_HANDLE(Function, JSFunction)
+MAKE_OPEN_HANDLE(Message, JSObject)
+MAKE_OPEN_HANDLE(Context, Context)
+
+#undef MAKE_OPEN_HANDLE
+
+
+namespace internal {
+
+// This class is here in order to be able to declare it a friend of
+// HandleScope. Moving these methods to be members of HandleScope would be
+// neat in some ways, but it would expose external implementation details in
+// our public header file, which is undesirable.
+//
+// There is a singleton instance of this class to hold the per-thread data.
+// For multithreaded V8 programs this data is copied in and out of storage
+// so that the currently executing thread always has its own copy of this
+// data.
+class HandleScopeImplementer {
+ public:
+
+ HandleScopeImplementer()
+ : blocks(0),
+ entered_contexts(0),
+ security_contexts(0) {
+ Initialize();
+ }
+
+ // Resets all per-thread state; also called after archiving so the
+ // singleton is clean for the next thread.
+ void Initialize() {
+ blocks.Initialize(0);
+ entered_contexts.Initialize(0);
+ security_contexts.Initialize(0);
+ spare = NULL;
+ ignore_out_of_memory = false;
+ call_depth = 0;
+ }
+
+ static HandleScopeImplementer* instance();
+
+ // Threading support for handle data.
+ static int ArchiveSpacePerThread();
+ static char* RestoreThread(char* from);
+ static char* ArchiveThread(char* to);
+
+ // Garbage collection support.
+ static void Iterate(v8::internal::ObjectVisitor* v);
+ static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
+
+
+ inline void** GetSpareOrNewBlock();
+ inline void DeleteExtensions(int extensions);
+
+ // Tracks nesting of callbacks into the VM (see CallDepthIsZero).
+ inline void IncrementCallDepth() {call_depth++;}
+ inline void DecrementCallDepth() {call_depth--;}
+ inline bool CallDepthIsZero() { return call_depth == 0; }
+
+ inline void AddEnteredContext(Handle<Object>);
+ inline Handle<Object> RemoveLastEnteredContext();
+ inline bool HasEnteredContexts();
+ inline void AddSecurityContext(Handle<Object>);
+ inline Handle<Object> RemoveLastSecurityContext();
+ inline bool HasSecurityContexts();
+
+ inline List<void**>* Blocks() { return &blocks; }
+
+ inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
+ inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
+
+ private:
+ List<void**> blocks; // allocated handle blocks, oldest first
+ Object** spare; // one retired block kept for quick reuse
+ int call_depth;
+ // Used as a stack to keep track of contexts entered.
+ List<Handle<Object> > entered_contexts;
+ // Used as a stack to keep track of security contexts entered.
+ List<Handle<Object> > security_contexts;
+ bool ignore_out_of_memory;
+ // This is only used for threading support.
+ ImplementationUtilities::HandleScopeData handle_scope_data_;
+
+ static void Iterate(ObjectVisitor* v,
+ List<void**>* blocks,
+ ImplementationUtilities::HandleScopeData* handle_data);
+ char* RestoreThreadHelper(char* from);
+ char* ArchiveThreadHelper(char* to);
+
+ DISALLOW_EVIL_CONSTRUCTORS(HandleScopeImplementer);
+};
+
+
+// Handles per block. NOTE(review): "KB - 2" presumably leaves room for
+// allocator overhead so a block fits one page on 32-bit — confirm.
+static const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
+
+
+// Stack operations for the entered-context and security-context stacks;
+// RemoveLast* must not be called on an empty stack (check Has* first).
+void HandleScopeImplementer::AddEnteredContext(Handle<Object> context) {
+ entered_contexts.Add(context);
+}
+
+
+Handle<Object> HandleScopeImplementer::RemoveLastEnteredContext() {
+ return entered_contexts.RemoveLast();
+}
+
+
+bool HandleScopeImplementer::HasEnteredContexts() {
+ return !entered_contexts.is_empty();
+}
+
+void HandleScopeImplementer::AddSecurityContext(Handle<Object> context) {
+ security_contexts.Add(context);
+}
+
+
+Handle<Object> HandleScopeImplementer::RemoveLastSecurityContext() {
+ return security_contexts.RemoveLast();
+}
+
+
+bool HandleScopeImplementer::HasSecurityContexts() {
+ return !security_contexts.is_empty();
+}
+
+
+// If there's a spare block, use it for growing the current scope.
+void** HandleScopeImplementer::GetSpareOrNewBlock() {
+ void** block = (spare != NULL) ?
+ reinterpret_cast<void**>(spare) :
+ NewArray<void*>(kHandleBlockSize);
+ spare = NULL;
+ return block;
+}
+
+
+void HandleScopeImplementer::DeleteExtensions(int extensions) {
+ if (spare != NULL) {
+ DeleteArray(spare);
+ spare = NULL;
+ }
+ for (int i = extensions; i > 1; --i) {
+ void** block = blocks.RemoveLast();
+#ifdef DEBUG
+ ImplementationUtilities::ZapHandleRange(block, &block[kHandleBlockSize]);
+#endif
+ DeleteArray(block);
+ }
+ spare = reinterpret_cast<Object**>(blocks.RemoveLast());
+#ifdef DEBUG
+ ImplementationUtilities::ZapHandleRange(
+ reinterpret_cast<void**>(spare),
+ reinterpret_cast<void**>(&spare[kHandleBlockSize]));
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_API_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains infrastructure used by the API. See
+// v8natives.js for an explanation of these files are processed and
+// loaded.
+
+
+function CreateDate(time) {
+ var date = new ORIGINAL_DATE();
+ date.setTime(time);
+ return date;
+};
+
+
+// Cache of instantiated API functions, keyed by template serial number.
+// functionCache is an alias — presumably kept for other code that still
+// uses the old name; confirm before removing.
+const kApiFunctionCache = {};
+const functionCache = kApiFunctionCache;
+
+
+// Turns template info objects into live functions/objects; non-template
+// values pass through unchanged.
+function Instantiate(data) {
+ if (!%IsTemplate(data)) return data;
+ var tag = %GetTemplateField(data, kApiTagOffset);
+ switch (tag) {
+ case kFunctionTag:
+ return InstantiateFunction(data);
+ case kNewObjectTag:
+ // Object template: construct via its constructor template if it has
+ // one, otherwise start from a plain object, then configure.
+ var Constructor = %GetTemplateField(data, kApiConstructorOffset);
+ var result = Constructor ? new (Instantiate(Constructor))() : {};
+ ConfigureTemplateInstance(result, data);
+ return result;
+ default:
+ throw 'Unknown API tag <' + tag + '>';
+ }
+};
+
+
+// Instantiates (and memoizes by serial number) the function for a function
+// template, wiring up its prototype chain and configured properties.
+function InstantiateFunction(data) {
+ var serialNumber = %GetTemplateField(data, kApiSerialNumberOffset);
+ if (!(serialNumber in kApiFunctionCache)) {
+ // Seed the cache entry before creating the function — presumably so a
+ // reentrant instantiation of the same template does not loop; confirm.
+ kApiFunctionCache[serialNumber] = null;
+ var fun = %CreateApiFunction(data);
+ kApiFunctionCache[serialNumber] = fun;
+ var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+ fun.prototype = prototype ? Instantiate(prototype) : {};
+ %AddProperty(fun.prototype, "constructor", fun, DONT_ENUM);
+ var parent = %GetTemplateField(data, kApiParentTemplateOffset);
+ if (parent) {
+ // Inheritance between templates maps to prototype chaining.
+ var parent_fun = Instantiate(parent);
+ fun.prototype.__proto__ = parent_fun.prototype;
+ }
+ ConfigureTemplateInstance(fun, data);
+ }
+ return kApiFunctionCache[serialNumber];
+};
+
+
+// Installs the template's configured properties on obj. The property list
+// layout (as used here): slot 0 holds the number of payload slots, then
+// triples of (name, property template data, attributes).
+function ConfigureTemplateInstance(obj, data) {
+ var properties = %GetTemplateField(data, kApiPropertyListOffset);
+ if (properties) {
+ for (var i = 0; i < properties[0]; i += 3) {
+ var name = properties[i + 1];
+ var prop_data = properties[i + 2];
+ var attributes = properties[i + 3];
+ // Property values may themselves be templates; instantiate them.
+ var value = Instantiate(prop_data);
+ %SetProperty(obj, name, value, attributes);
+ }
+ }
+};
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARGUMENTS_H_
+#define V8_ARGUMENTS_H_
+
+namespace v8 { namespace internal {
+
+// Arguments provides access to runtime call parameters.
+//
+// It uses the fact that the instance fields of Arguments
+// (length_, arguments_) are "overlayed" with the parameters
+// (no. of parameters, and the parameter pointer) passed so
+// that inside the C++ function, the parameters passed can
+// be accessed conveniently:
+//
+// Object* Runtime_function(Arguments args) {
+// ... use args[i] here ...
+// }
+
+class Arguments BASE_EMBEDDED {
+ public:
+ Object*& operator[] (int index) {
+ ASSERT(0 <= index && index <= length_);
+ return arguments_[-index];
+ }
+
+ template <class S> Handle<S> at(int index) {
+ Object** value = &((*this)[index]);
+ // This cast checks that the object we're accessing does indeed have the
+ // expected type.
+ S::cast(*value);
+ return Handle<S>(reinterpret_cast<S**>(value));
+ }
+
+ // Get the total number of arguments including the receiver.
+ int length() const { return length_ + 1; }
+
+ private:
+ int length_;
+ Object** arguments_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARGUMENTS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// const $Array = global.Array;
+
+// -------------------------------------------------------------------
+
+// Determines if the array contains the element.
+function Contains(array, element) {
+ var length = array.length;
+ for (var i = 0; i < length; i++) {
+ if (array[i] === element) return true;
+ }
+ return false;
+};
+
+
+// Global list of arrays visited during toString, toLocaleString and
+// join invocations. Used by Join as a cycle guard: arrays already on
+// this stack render as the empty string.
+var visited_arrays = new $Array();
+
+
+// Gets a sorted array of array keys. Useful for operations on sparse
+// arrays. Dupes have not been removed.
+function GetSortedArrayKeys(array, intervals) {
+ var length = intervals.length;
+ var keys = [];
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var limit = j + intervals[++k];
+ for (; j < limit; j++) {
+ var e = array[j];
+ if (!IS_UNDEFINED(e) || j in array) {
+ keys.push(j);
+ }
+ }
+ } else {
+ // The case where key is undefined also ends here.
+ if (!IS_UNDEFINED(key)) {
+ var e = array[key];
+ if (!IS_UNDEFINED(e) || key in array) {
+ keys.push(key);
+ }
+ }
+ }
+ }
+ keys.sort(function(a, b) { return a - b; });
+ return keys;
+}
+
+
+// Optimized for sparse arrays if separator is ''.
+function SparseJoin(array, len, convert) {
+ var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+ var builder = new StringBuilder();
+ var last_key = -1;
+ var keys_length = keys.length;
+ for (var i = 0; i < keys_length; i++) {
+ var key = keys[i];
+ if (key != last_key) {
+ var e = array[key];
+ builder.add(convert(e));
+ last_key = key;
+ }
+ }
+ return builder.generate();
+}
+
+
+// Heuristic: use the sparse code path only for genuine arrays that are
+// long (> 1000) and less than a quarter populated (or whose length does
+// not even fit in a Smi).
+function UseSparseVariant(object, length, is_array) {
+ return is_array &&
+ length > 1000 &&
+ (!%_IsSmi(length) ||
+ %EstimateNumberOfElements(object) < (length >> 2));
+}
+
+
+// Joins the first |length| elements of array with |separator|, converting
+// each element with |convert|. Cyclic arrays are cut off via the global
+// visited_arrays stack (a revisited array contributes '').
+function Join(array, length, separator, convert) {
+ if (length == 0) return '';
+
+ var is_array = IS_ARRAY(array);
+
+ if (is_array) {
+ // If the array is cyclic, return the empty string for already
+ // visited arrays.
+ if (Contains(visited_arrays, array)) return '';
+ visited_arrays[visited_arrays.length] = array;
+ }
+
+ // Attempt to convert the elements.
+ try {
+ if (UseSparseVariant(array, length, is_array) && separator === '') {
+ return SparseJoin(array, length, convert);
+ }
+
+ var builder = new StringBuilder();
+
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ if (i != 0) builder.add(separator);
+ // Holes contribute nothing; stored undefineds go through convert.
+ if (!IS_UNDEFINED(e) || (i in array)) {
+ builder.add(convert(e));
+ }
+ }
+ return builder.generate();
+ } finally {
+ // Make sure to pop the visited array no matter what happens.
+ if (is_array) visited_arrays.pop();
+ }
+};
+
+
+// Element-to-string conversion for join: null and undefined (e == null
+// matches both) become the empty string.
+function ConvertToString(e) {
+ if (e == null) return '';
+ else return ToString(e);
+};
+
+
+// Locale-aware variant used by toLocaleString.
+function ConvertToLocaleString(e) {
+ if (e == null) return '';
+ else {
+ // e_obj's toLocaleString might be overwritten, check if it is a function.
+ // Call ToString if toLocaleString is not a function.
+ // See issue 877615.
+ var e_obj = ToObject(e);
+ if (IS_FUNCTION(e_obj.toLocaleString))
+ return e_obj.toLocaleString();
+ else
+ return ToString(e);
+ }
+};
+
+
+// This function implements the optimized splice implementation that can use
+// special array operations to handle sparse arrays in a sensible fashion.
+function SmartSlice(array, start_i, del_count, len, deleted_elements) {
+ // Move deleted elements to a new array (the return value from splice).
+ // Intervals array can contain keys and intervals. See comment in Concat.
+ var intervals = %GetArrayKeys(array, start_i + del_count);
+ var length = intervals.length;
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var interval_limit = j + intervals[++k];
+ if (j < start_i) {
+ j = start_i;
+ }
+ for (; j < interval_limit; j++) {
+ // ECMA-262 15.4.4.12 line 10. The spec could also be
+ // interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[j];
+ if (!IS_UNDEFINED(current) || j in array) {
+ deleted_elements[j - start_i] = current;
+ }
+ }
+ } else {
+ if (!IS_UNDEFINED(key)) {
+ if (key >= start_i) {
+ // ECMA-262 15.4.4.12 line 10. The spec could also be
+ // interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array) {
+ deleted_elements[key - start_i] = current;
+ }
+ }
+ }
+ }
+ }
+};
+
+
+// This function implements the optimized splice implementation that can use
+// special array operations to handle sparse arrays in a sensible fashion.
+function SmartMove(array, start_i, del_count, len, num_additional_args) {
+ // Move data to new array.
+ var new_array = new $Array(len - del_count + num_additional_args);
+ var intervals = %GetArrayKeys(array, len);
+ var length = intervals.length;
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var interval_limit = j + intervals[++k];
+ while (j < start_i && j < interval_limit) {
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[j];
+ if (!IS_UNDEFINED(current) || j in array)
+ new_array[j] = current;
+ j++;
+ }
+ j = start_i + del_count;
+ while (j < interval_limit) {
+ // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also be
+ // interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[j];
+ if (!IS_UNDEFINED(current) || j in array)
+ new_array[j - del_count + num_additional_args] = current;
+ j++;
+ }
+ } else {
+ if (!IS_UNDEFINED(key)) {
+ if (key < start_i) {
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array)
+ new_array[key] = current;
+ } else if (key >= start_i + del_count) {
+ // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also
+ // be interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array)
+ new_array[key - del_count + num_additional_args] = current;
+ }
+ }
+ }
+ }
+ // Move contents of new_array into this array
+ %MoveArrayContents(new_array, array);
+};
+
+
+// This is part of the old simple-minded splice. We are using it either
+// because the receiver is not an array (so we have no choice) or because we
+// know we are not deleting or moving a lot of elements.
+function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
+ for (var i = 0; i < del_count; i++) {
+ var index = start_i + i;
+ // The spec could also be interpreted such that %HasLocalProperty
+ // would be the appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[index];
+ if (!IS_UNDEFINED(current) || index in array)
+ deleted_elements[i] = current;
+ }
+};
+
+
+// Element-by-element counterpart of SmartMove: replaces del_count
+// elements at start_i with num_additional_args slots by shifting the
+// tail of the array. Used for non-arrays or small affected regions.
+function SimpleMove(array, start_i, del_count, len, num_additional_args) {
+ if (num_additional_args !== del_count) {
+ // Move the existing elements after the elements to be deleted
+ // to the right position in the resulting array.
+ if (num_additional_args > del_count) {
+ // Growing: copy right-to-left so sources are read before they are
+ // overwritten.
+ for (var i = len - del_count; i > start_i; i--) {
+ var from_index = i + del_count - 1;
+ var to_index = i + num_additional_args - 1;
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[from_index];
+ if (!IS_UNDEFINED(current) || from_index in array) {
+ array[to_index] = current;
+ } else {
+ // Source is a hole: make the destination a hole too.
+ delete array[to_index];
+ }
+ }
+ } else {
+ // Shrinking: copy left-to-right, then delete the stale tail slots.
+ for (var i = start_i; i < len - del_count; i++) {
+ var from_index = i + del_count;
+ var to_index = i + num_additional_args;
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[from_index];
+ if (!IS_UNDEFINED(current) || from_index in array) {
+ array[to_index] = current;
+ } else {
+ delete array[to_index];
+ }
+ }
+ for (var i = len; i > len - del_count + num_additional_args; i--) {
+ delete array[i - 1];
+ }
+ }
+ }
+};
+
+
+// -------------------------------------------------------------------
+
+
+function ArrayToString() {
+ if (!IS_ARRAY(this)) {
+ throw new $TypeError('Array.prototype.toString is not generic');
+ }
+ return Join(this, this.length, ',', ConvertToString);
+};
+
+
+function ArrayToLocaleString() {
+ if (!IS_ARRAY(this)) {
+ throw new $TypeError('Array.prototype.toString is not generic');
+ }
+ return Join(this, this.length, ',', ConvertToLocaleString);
+};
+
+
+function ArrayJoin(separator) {
+ if (IS_UNDEFINED(separator)) separator = ',';
+ else separator = ToString(separator);
+ return Join(this, ToUint32(this.length), separator, ConvertToString);
+};
+
+
+// Removes the last element from the array and returns it. See
+// ECMA-262, section 15.4.4.6.
+function ArrayPop() {
+ var n = ToUint32(this.length);
+ if (n == 0) {
+ this.length = n;
+ return;
+ }
+ n--;
+ var value = this[n];
+ this.length = n;
+ delete this[n];
+ return value;
+};
+
+
+// Appends the arguments to the end of the array and returns the new
+// length of the array. See ECMA-262, section 15.4.4.7.
+function ArrayPush() {
+ var n = ToUint32(this.length);
+ var m = %_ArgumentsLength();
+ for (var i = 0; i < m; i++) {
+ this[i+n] = %_Arguments(i);
+ }
+ this.length = n + m;
+ return this.length;
+};
+
+
+// ECMA-262 15.4.4.4: returns a new array of the receiver followed by
+// the arguments, with array arguments flattened one level.
+function ArrayConcat(arg1) { // length == 1
+ var arg_number = 0, arg_count = %_ArgumentsLength();
+ var n = 0;
+
+ var A = $Array(1 + arg_count);
+ var E = this;
+
+ // The first pass processes the receiver itself; subsequent passes
+ // process each argument in turn.
+ while (true) {
+ if (IS_ARRAY(E)) {
+ // This is an array of intervals or an array of keys. Keys are
+ // represented by non-negative integers. Intervals are represented by
+ // negative integers, followed by positive counts. The interval start
+ // is determined by subtracting the entry from -1. There may also be
+ // undefined entries in the array which should be skipped.
+ var intervals = %GetArrayKeys(E, E.length);
+ var length = intervals.length;
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var limit = j + intervals[++k];
+ for (; j < limit; j++) {
+ if (j in E) {
+ A[n + j] = E[j];
+ }
+ }
+ } else {
+ // The case where key is undefined also ends here.
+ if (!IS_UNDEFINED(key)) {
+ A[n + key] = E[key];
+ }
+ }
+ }
+ n += E.length;
+ } else {
+ // Non-array values are appended as single elements.
+ A[n++] = E;
+ }
+ if (arg_number == arg_count) break;
+ E = %_Arguments(arg_number++);
+ }
+
+ A.length = n; // may contain empty arrays
+ return A;
+};
+
+
+// For implementing reverse() on large, sparse arrays.
+// Walks the sorted key list from both ends, swapping each element with
+// its mirror position (len - key - 1) while preserving holes.
+function SparseReverse(array, len) {
+ var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+ var high_counter = keys.length - 1;
+ var low_counter = 0;
+ while (low_counter <= high_counter) {
+ var i = keys[low_counter];
+ var j = keys[high_counter];
+
+ // Distance of key j from the end of the array.
+ var j_complement = len - j - 1;
+ var low, high;
+
+ // Consume whichever side comes first in mirror order (both when the
+ // positions coincide); skip duplicate key entries while advancing.
+ if (j_complement <= i) {
+ high = j;
+ while (keys[--high_counter] == j);
+ low = j_complement;
+ }
+ if (j_complement >= i) {
+ low = i;
+ while (keys[++low_counter] == i);
+ high = len - i - 1;
+ }
+
+ var current_i = array[low];
+ if (!IS_UNDEFINED(current_i) || low in array) {
+ var current_j = array[high];
+ if (!IS_UNDEFINED(current_j) || high in array) {
+ // Both present: plain swap.
+ array[low] = current_j;
+ array[high] = current_i;
+ } else {
+ // Only low present: move it across and leave a hole behind.
+ array[high] = current_i;
+ delete array[low];
+ }
+ } else {
+ var current_j = array[high];
+ if (!IS_UNDEFINED(current_j) || high in array) {
+ // Only high present: move it across and leave a hole behind.
+ array[low] = current_j;
+ delete array[high];
+ }
+ }
+ }
+}
+
+
+// Reverses the array in place and returns it. See ECMA-262 15.4.4.8.
+function ArrayReverse() {
+ var j = ToUint32(this.length) - 1;
+
+ // Large sparse arrays are handled by walking only their keys.
+ if (UseSparseVariant(this, j, IS_ARRAY(this))) {
+ SparseReverse(this, j+1);
+ return this;
+ }
+
+ // Swap pairs from both ends; a hole swapped with an element must stay
+ // a hole on the opposite side.
+ for (var i = 0; i < j; i++, j--) {
+ var current_i = this[i];
+ if (!IS_UNDEFINED(current_i) || i in this) {
+ var current_j = this[j];
+ if (!IS_UNDEFINED(current_j) || j in this) {
+ this[i] = current_j;
+ this[j] = current_i;
+ } else {
+ this[j] = current_i;
+ delete this[i];
+ }
+ } else {
+ var current_j = this[j];
+ if (!IS_UNDEFINED(current_j) || j in this) {
+ this[i] = current_j;
+ delete this[j];
+ }
+ }
+ }
+ return this;
+};
+
+
+function ArrayShift() {
+ var len = ToUint32(this.length);
+
+ if (len === 0) {
+ this.length = 0;
+ return;
+ }
+
+ var first = this[0];
+
+ if (IS_ARRAY(this))
+ SmartMove(this, 0, 1, len, 0);
+ else
+ SimpleMove(this, 0, 1, len, 0);
+
+ this.length = len - 1;
+
+ return first;
+};
+
+
+function ArrayUnshift(arg1) { // length == 1
+ var len = ToUint32(this.length);
+ var num_arguments = %_ArgumentsLength();
+
+ if (IS_ARRAY(this))
+ SmartMove(this, 0, 0, len, num_arguments);
+ else
+ SimpleMove(this, 0, 0, len, num_arguments);
+
+ for (var i = 0; i < num_arguments; i++) {
+ this[i] = %_Arguments(i);
+ }
+
+ this.length = len + num_arguments;
+
+ return len + num_arguments;
+};
+
+
+function ArraySlice(start, end) {
+ var len = ToUint32(this.length);
+ var start_i = TO_INTEGER(start);
+ var end_i = len;
+
+ if (end !== void 0) end_i = TO_INTEGER(end);
+
+ if (start_i < 0) {
+ start_i += len;
+ if (start_i < 0) start_i = 0;
+ } else {
+ if (start_i > len) start_i = len;
+ }
+
+ if (end_i < 0) {
+ end_i += len;
+ if (end_i < 0) end_i = 0;
+ } else {
+ if (end_i > len) end_i = len;
+ }
+
+ var result = [];
+
+ if (end_i < start_i)
+ return result;
+
+ if (IS_ARRAY(this))
+ SmartSlice(this, start_i, end_i - start_i, len, result);
+ else
+ SimpleSlice(this, start_i, end_i - start_i, len, result);
+
+ result.length = end_i - start_i;
+
+ return result;
+};
+
+
+function ArraySplice(start, delete_count) {
+ var num_arguments = %_ArgumentsLength();
+
+ // SpiderMonkey and KJS return undefined in the case where no
+ // arguments are given instead of using the implicit undefined
+ // arguments. This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ if (num_arguments == 0) return;
+
+ var len = ToUint32(this.length);
+ var start_i = TO_INTEGER(start);
+
+ if (start_i < 0) {
+ start_i += len;
+ if (start_i < 0) start_i = 0;
+ } else {
+ if (start_i > len) start_i = len;
+ }
+
+ // SpiderMonkey and KJS treat the case where no delete count is
+ // given differently from when an undefined delete count is given.
+ // This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ var del_count = 0;
+ if (num_arguments > 1) {
+ del_count = TO_INTEGER(delete_count);
+ if (del_count < 0) del_count = 0;
+ if (del_count > len - start_i) del_count = len - start_i;
+ } else {
+ del_count = len - start_i;
+ }
+
+ var deleted_elements = [];
+ deleted_elements.length = del_count;
+
+ // Number of elements to add.
+ var num_additional_args = 0;
+ if (num_arguments > 2) {
+ num_additional_args = num_arguments - 2;
+ }
+
+ var use_simple_splice = true;
+
+ if (IS_ARRAY(this) && num_additional_args !== del_count) {
+ // If we are only deleting/moving a few things near the end of the
+ // array then the simple version is going to be faster, because it
+ // doesn't touch most of the array.
+ var estimated_non_hole_elements = %EstimateNumberOfElements(this);
+ if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
+ use_simple_splice = false;
+ }
+ }
+
+ if (use_simple_splice) {
+ SimpleSlice(this, start_i, del_count, len, deleted_elements);
+ SimpleMove(this, start_i, del_count, len, num_additional_args);
+ } else {
+ SmartSlice(this, start_i, del_count, len, deleted_elements);
+ SmartMove(this, start_i, del_count, len, num_additional_args);
+ }
+
+ // Insert the arguments into the resulting array in
+ // place of the deleted elements.
+ var i = start_i;
+ var arguments_index = 2;
+ var arguments_length = %_ArgumentsLength();
+ while (arguments_index < arguments_length) {
+ this[i++] = %_Arguments(arguments_index++);
+ }
+ this.length = len - del_count + num_additional_args;
+
+ // Return the deleted elements.
+ return deleted_elements;
+};
+
+
+// Sorts the array in place with comparefn (or lexicographic string
+// comparison when none is given) and returns it. Undefined elements
+// sort to the end. NOTE: heapsort is not a stable sort.
+function ArraySort(comparefn) {
+ // Standard in-place HeapSort algorithm.
+
+ // Three-way comparator; undefined compares greater than everything so
+ // undefined values end up last.
+ function Compare(x,y) {
+ if (IS_UNDEFINED(x)) {
+ if (IS_UNDEFINED(y)) return 0;
+ return 1;
+ }
+ if (IS_UNDEFINED(y)) return -1;
+
+ if (IS_FUNCTION(comparefn)) {
+ return comparefn.call(null, x, y);
+ }
+ x = ToString(x);
+ y = ToString(y);
+ if (x == y) return 0;
+ else return x < y ? -1 : 1;
+ };
+
+ var old_length = ToUint32(this.length);
+
+ // Compact away holes so the heap operates on a dense prefix.
+ %RemoveArrayHoles(this);
+
+ var length = ToUint32(this.length);
+
+ // Bottom-up max-heap construction.
+ for (var i = 1; i < length; ++i) {
+ var child_index = i;
+ while (child_index > 0) {
+ var parent_index = ((child_index + 1) >> 1) - 1;
+ var parent_value = this[parent_index], child_value = this[child_index];
+ if (Compare(parent_value, child_value) < 0) {
+ this[parent_index] = child_value;
+ this[child_index] = parent_value;
+ } else {
+ break;
+ }
+ child_index = parent_index;
+ }
+ }
+
+ // Extract element and create sorted array.
+ for (var i = length - 1; i > 0; --i) {
+ // Put the max element at the back of the array.
+ var t0 = this[0]; this[0] = this[i]; this[i] = t0;
+ // Sift down the new top element.
+ var parent_index = 0;
+ while (true) {
+ var child_index = ((parent_index + 1) << 1) - 1;
+ if (child_index >= i) break;
+ var child1_value = this[child_index];
+ var child2_value = this[child_index + 1];
+ var parent_value = this[parent_index];
+ if (child_index + 1 >= i || Compare(child1_value, child2_value) > 0) {
+ if (Compare(parent_value, child1_value) > 0) break;
+ this[child_index] = parent_value;
+ this[parent_index] = child1_value;
+ parent_index = child_index;
+ } else {
+ if (Compare(parent_value, child2_value) > 0) break;
+ this[child_index + 1] = parent_value;
+ this[parent_index] = child2_value;
+ parent_index = child_index + 1;
+ }
+ }
+ }
+
+ // We only changed the length of the this object (in
+ // RemoveArrayHoles) if it was an array. We are not allowed to set
+ // the length of the this object if it is not an array because this
+ // might introduce a new length property.
+ if (IS_ARRAY(this)) {
+ this.length = old_length;
+ }
+
+ return this;
+};
+
+
+// The following functions cannot be made efficient on sparse arrays while
+// preserving the semantics, since the calls to the receiver function can add
+// or delete elements from the array.
+
+function ArrayFilter(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ var result = [];
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (f.call(receiver, current, i, this)) result.push(current);
+ }
+ }
+ return result;
+};
+
+
+function ArrayForEach(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ f.call(receiver, current, i, this);
+ }
+ }
+};
+
+
+// Executes the function once for each element present in the
+// array until it finds one where callback returns true.
+function ArraySome(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (f.call(receiver, current, i, this)) return true;
+ }
+ }
+ return false;
+};
+
+
+function ArrayEvery(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (!f.call(receiver, current, i, this)) return false;
+ }
+ }
+
+ return true;
+};
+
+
+function ArrayMap(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ var result = new $Array(length);
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ result[i] = f.call(receiver, current, i, this);
+ }
+ }
+ return result;
+};
+
+
+function ArrayIndexOf(element, index) {
+ var length = this.length;
+ if (index == null) {
+ index = 0;
+ } else {
+ index = TO_INTEGER(index);
+ // If index is negative, index from the end of the array.
+ if (index < 0) index = length + index;
+ // If index is still negative, search the entire array.
+ if (index < 0) index = 0;
+ }
+ // Lookup through the array.
+ for (var i = index; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (current === element) return i;
+ }
+ }
+ return -1;
+};
+
+
+function ArrayLastIndexOf(element, index) {
+ var length = this.length;
+ if (index == null) {
+ index = length - 1;
+ } else {
+ index = TO_INTEGER(index);
+ // If index is negative, index from end of the array.
+ if (index < 0) index = length + index;
+ // If index is still negative, do not search the array.
+ if (index < 0) index = -1;
+ else if (index >= length) index = length - 1;
+ }
+ // Lookup through the array.
+ for (var i = index; i >= 0; i--) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (current === element) return i;
+ }
+ }
+ return -1;
+};
+
+
+// -------------------------------------------------------------------
+
+function InstallProperties(prototype, attributes, properties) {
+ for (var key in properties) {
+ %AddProperty(prototype, key, properties[key], attributes);
+ }
+};
+
+
+// Overrides the 'length' property of the functions named by the keys
+// of the argument object with the corresponding values.
+// NOTE(review): invoked without an explicit receiver (see SetupArray),
+// so 'this' is presumably the global object where ArrayPush etc. are
+// defined — confirm.
+function UpdateFunctionLengths(lengths) {
+ for (var key in lengths) {
+ %FunctionSetLength(this[key], lengths[key]);
+ }
+};
+
+
+// -------------------------------------------------------------------
+
+// Installs the Array.prototype builtins defined above and fixes up
+// their 'length' properties. Called once at the bottom of this file.
+function SetupArray() {
+ // Setup non-enumerable properties of the Array.prototype object.
+ InstallProperties($Array.prototype, DONT_ENUM, {
+ constructor: $Array,
+ toString: ArrayToString,
+ toLocaleString: ArrayToLocaleString,
+ join: ArrayJoin,
+ pop: ArrayPop,
+ push: ArrayPush,
+ concat: ArrayConcat,
+ reverse: ArrayReverse,
+ shift: ArrayShift,
+ unshift: ArrayUnshift,
+ slice: ArraySlice,
+ splice: ArraySplice,
+ sort: ArraySort,
+ filter: ArrayFilter,
+ forEach: ArrayForEach,
+ some: ArraySome,
+ every: ArrayEvery,
+ map: ArrayMap,
+ indexOf: ArrayIndexOf,
+ lastIndexOf: ArrayLastIndexOf
+ });
+
+ // Manipulate the length of some of the functions to meet
+ // expectations set by ECMA-262 or Mozilla.
+ UpdateFunctionLengths({
+ ArrayFilter: 1,
+ ArrayForEach: 1,
+ ArraySome: 1,
+ ArrayEvery: 1,
+ ArrayMap: 1,
+ ArrayIndexOf: 1,
+ ArrayLastIndexOf: 1,
+ ArrayPush: 1
+ });
+};
+
+
+SetupArray();
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ASSEMBLER_ARM_INL_H_
+#define V8_ASSEMBLER_ARM_INL_H_
+
+#include "assembler-arm.h"
+#include "cpu.h"
+
+
+namespace v8 { namespace internal {
+
+// Returns the logically opposite condition code. Implemented as an XOR
+// with ne, which presumably flips the low bit of the condition encoding
+// so each condition maps to its negation — al is excluded because its
+// counterpart (nv) is reserved.
+Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
+
+
+void RelocInfo::apply(int delta) {
+ // We do not use pc relative addressing on ARM, so there is nothing to do.
+}
+
+
+// Reads the code target address, resolved indirectly via the constant
+// pool (see Assembler::target_address_at).
+Address RelocInfo::target_address() {
+ ASSERT(is_code_target(rmode_));
+ return Assembler::target_address_at(pc_);
+}
+
+
+// Patches the code target address in the constant pool.
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(is_code_target(rmode_));
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+// The embedded object pointer, stored in the same constant-pool slot a
+// code target address would use.
+Object* RelocInfo::target_object() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+// Address of the constant-pool slot holding the embedded object, so the
+// GC can update it in place.
+Object** RelocInfo::target_object_address() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+// External references are stored directly at pc_, not via the pool.
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == external_reference);
+ return reinterpret_cast<Address*>(pc_);
+}
+
+
+// Patched-call (debugger break-at-call) support is not implemented on
+// ARM; all of the following accessors abort via UNIMPLEMENTED().
+Address RelocInfo::call_address() {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+}
+
+
+Object* RelocInfo::call_object() {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+}
+
+
+bool RelocInfo::is_call_instruction() {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
+// Immediate operand with an explicit relocation mode.
+Operand::Operand(int32_t immediate, RelocMode rmode) {
+ rm_ = no_reg;
+ imm32_ = immediate;
+ rmode_ = rmode;
+}
+
+
+// Embedded C string; the pointer itself is the immediate value.
+Operand::Operand(const char* s) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(s);
+ rmode_ = embedded_string;
+}
+
+
+// External (C++) reference; the referenced address is the immediate.
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(f.address());
+ rmode_ = external_reference;
+}
+
+
+// Raw object-slot pointer; needs no relocation.
+Operand::Operand(Object** opp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(opp);
+ rmode_ = no_reloc;
+}
+
+
+// Raw context-slot pointer; needs no relocation.
+Operand::Operand(Context** cpp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(cpp);
+ rmode_ = no_reloc;
+}
+
+
+// Smis are encoded in the pointer value itself; no relocation needed.
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = no_reloc;
+}
+
+
+// Plain register operand: rm with a no-op LSL #0 shift.
+Operand::Operand(Register rm) {
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+}
+
+
+// Grows the code buffer when fewer than kGap bytes remain, and flushes
+// the constant pool when the periodic check point is reached.
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() > next_buffer_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+// Writes one instruction word at pc_, ensuring buffer space first.
+void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+}
+
+
+// Decodes the pc-relative ldr at pc and returns the address of the
+// constant-pool slot it loads from. The +8 accounts for the ARM pc
+// reading ahead of the executing instruction.
+Address Assembler::target_address_address_at(Address pc) {
+ Instr instr = Memory::int32_at(pc);
+ // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+ int offset = instr & 0xfff; // offset_12 is unsigned
+ if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
+ // Verify that the constant pool comes after the instruction referencing it.
+ ASSERT(offset >= -4);
+ return pc + offset + 8;
+}
+
+
+// Reads the target address stored in the constant-pool slot.
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(target_address_address_at(pc));
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(target_address_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to flush the instruction cache
+ // after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction was actually patched by the assignment
+ // above; the target address is not part of an instruction, it is patched in
+ // the constant pool and is read via a data access; the instruction accessing
+ // this address in the constant pool remains unchanged.
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_ARM_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "assembler-arm-inl.h"
+
+namespace v8 { namespace internal {
+
+DEFINE_bool(debug_code, false,
+ "generate extra code (comments, assertions) for debugging");
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and CRegister
+
+// Sentinel used where an instruction form takes no register.
+Register no_reg = { -1 };
+
+Register r0 = { 0 };
+Register r1 = { 1 };
+Register r2 = { 2 };
+Register r3 = { 3 };
+Register r4 = { 4 };
+Register r5 = { 5 };
+Register r6 = { 6 };
+Register r7 = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+// r11-r15 are named for their dedicated roles.
+Register fp = { 11 };
+Register ip = { 12 };
+Register sp = { 13 };
+Register lr = { 14 };
+Register pc = { 15 };
+
+
+CRegister no_creg = { -1 };
+
+CRegister cr0 = { 0 };
+CRegister cr1 = { 1 };
+CRegister cr2 = { 2 };
+CRegister cr3 = { 3 };
+CRegister cr4 = { 4 };
+CRegister cr5 = { 5 };
+CRegister cr6 = { 6 };
+CRegister cr7 = { 7 };
+CRegister cr8 = { 8 };
+CRegister cr9 = { 9 };
+CRegister cr10 = { 10 };
+CRegister cr11 = { 11 };
+CRegister cr12 = { 12 };
+CRegister cr13 = { 13 };
+CRegister cr14 = { 14 };
+CRegister cr15 = { 15 };
+
+
+// In order to determine the pc store offset, we execute a small code sequence.
+// See ARM Architecture Reference Manual section A-2.4.3
+// Note that 'str pc, [sp]' and 'stmia sp, {pc}' were using different offsets
+// under the QEMU emulator (now fixed), so we are careful to test the actual
+// instruction we are interested in (stmia).
+// Returns the number of bytes ahead of the stmia instruction that the
+// stored pc value points (8 or 12, enforced by the assert below).
+int PcStoreOffset() {
+#if !defined(__arm__)
+ // Building an ARM emulator based target. The emulator is wired for 8 byte
+ // pc offsets as is the default in the spec.
+ static int pc_store_offset = 8;
+#elif defined(__arm__) && !defined(__thumb__)
+ // __arm__ may be defined in thumb mode.
+ static int pc_store_offset = -1;
+ asm volatile(
+ "sub sp, sp, #4 \n\t"
+ "sub r1, pc, #4 \n\t"
+ "stmia sp, {pc} \n\t"
+ "ldr r0, [sp] \n\t"
+ "add sp, sp, #4 \n\t"
+ "sub %0, r0, r1 \n\t"
+ : "=r" (pc_store_offset) : : "r0", "r1", "memory");
+#elif defined(__thumb__)
+ // Same measurement, but we must switch to ARM mode first because the
+ // probe sequence uses ARM instructions.
+ static int pc_store_offset = -1;
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ "adr r2, 1f \n\t"
+ "bx r2 \n\t"
+ ".ALIGN 4 \n\t"
+ ".ARM \n"
+ "1: sub sp, sp, #4 \n\t"
+ "sub r1, pc, #4 \n\t"
+ "stmia sp, {pc} \n\t"
+ "ldr r0, [sp] \n\t"
+ "add sp, sp, #4 \n\t"
+ "sub %0, r0, r1 \n"
+ "@ Enter THUMB Mode\n\t"
+ "adr r2, 2f+1 \n\t"
+ "bx r2 \n\t"
+ ".THUMB \n"
+ "2: \n\t"
+ : "=r" (pc_store_offset) : : "r0", "r1", "r2", "memory");
+#else
+#error unsupported architecture
+#endif
+ ASSERT(pc_store_offset == 8 || pc_store_offset == 12);
+ return pc_store_offset;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// No relocation modes require RelocInfo::apply on ARM (targets are
+// reached via the constant pool; see assembler-arm-inl.h).
+const int RelocInfo::kApplyMask = 0;
+
+
+void RelocInfo::patch_code(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ UNIMPLEMENTED();
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::patch_code_with_call(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-arm-inl.h for inlined constructors
+
+// Immediate operand for a heap handle. Heap objects are referenced via
+// the handle location (so the GC can update them); smis are embedded
+// directly as immediates.
+Operand::Operand(Handle<Object> handle) {
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = embedded_object;
+ } else {
+ // no relocation needed
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = no_reloc;
+ }
+}
+
+
+// Register operand shifted by an immediate, e.g. <Rm>, LSL #<imm>.
+// RRX is accepted and canonicalized to its encoding: ROR with a zero
+// shift amount.
+Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
+ ASSERT(is_uint5(shift_imm));
+ ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ if (shift_op == RRX) {
+ // encoded as ROR with shift_imm == 0
+ ASSERT(shift_imm == 0);
+ shift_op_ = ROR;
+ shift_imm_ = 0;
+ }
+}
+
+
+// Register operand shifted by a register, e.g. <Rm>, LSL <Rs>.
+// RRX has no register-shift form, hence the assert.
+Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
+ ASSERT(shift_op != RRX);
+ rm_ = rm;
+ shift_op_ = shift_op;
+ rs_ = rs; // removed a dead store that first assigned no_reg here
+}
+
+
+// [rn +/- offset], with the given addressing mode (offset / pre / post).
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
+ rn_ = rn;
+ rm_ = no_reg;
+ offset_ = offset;
+ am_ = am;
+}
+
+// [rn +/- rm], i.e. a register offset with a no-op LSL #0 shift.
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+ am_ = am;
+}
+
+
+// [rn +/- rm, <shift_op> #shift_imm], a scaled register offset.
+MemOperand::MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am) {
+ ASSERT(is_uint5(shift_imm));
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ am_ = am;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Instruction encoding bits
+// Several bit positions carry different meanings in different
+// instruction classes, hence the doubled names (L/S at bit 20, W/A at
+// bit 21, B/N at bit 22). S6 is the signed bit at position 6, named to
+// avoid clashing with S (bit 20).
+enum {
+ H = 1 << 5, // halfword (or byte)
+ S6 = 1 << 6, // signed (or unsigned)
+ L = 1 << 20, // load (or store)
+ S = 1 << 20, // set condition code (or leave unchanged)
+ W = 1 << 21, // writeback base register (or leave unchanged)
+ A = 1 << 21, // accumulate in multiply instruction (or not)
+ B = 1 << 22, // unsigned byte (or word)
+ N = 1 << 22, // long (or short)
+ U = 1 << 23, // positive (or negative) offset/index
+ P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
+ I = 1 << 25, // immediate shifter operand (or not)
+
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+
+ // Instruction bit masks
+ CondMask = 15 << 28,
+ OpCodeMask = 15 << 21, // in data-processing instructions
+ Imm24Mask = (1 << 24) - 1,
+ Off12Mask = (1 << 12) - 1,
+ // Reserved condition
+ nv = 15 << 28
+};
+
+
+// spare_buffer_
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+// If buffer is NULL the assembler allocates (or reuses the cached spare)
+// buffer of at least kMinimalBufferSize bytes and owns it; otherwise the
+// caller-provided buffer is used and ownership stays with the caller.
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ // Reuse the cached spare buffer when available; it is exactly
+ // kMinimalBufferSize bytes (see ~Assembler).
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // setup buffer pointers: instructions grow upwards from the start of the
+ // buffer, relocation info grows downwards from its end.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ num_prinfo_ = 0;
+ next_buffer_check_ = 0;
+ no_const_pool_before_ = 0;
+ last_const_pool_end_ = 0;
+ last_bound_pos_ = 0;
+ last_position_ = kNoPosition;
+ last_position_is_statement_ = false;
+}
+
+
+// Returns an owned minimal-size buffer to the spare_buffer_ cache when the
+// cache slot is free; otherwise the buffer is deallocated. Externally
+// provided buffers are never freed here.
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+// Finalizes code generation and fills in *desc with the buffer layout:
+// instructions occupy the first instr_size bytes, relocation info the
+// last reloc_size bytes of the buffer.
+void Assembler::GetCode(CodeDesc* desc) {
+ // finalize code
+ if (unbound_label_.is_linked())
+ bind_to(&unbound_label_, binding_pos_);
+
+ // emit constant pool if necessary
+ CheckConstPool(true, false);
+ ASSERT(num_prinfo_ == 0); // all pending reloc entries must be flushed now
+
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+// Pads the instruction stream with nops until pc_offset() is a multiple of
+// m; m must be a power of two and at least word-sized.
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label. The branch instructions using a linked
+// label form a chain: each branch's immediate holds the position of the
+// previous branch in the chain.
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
+
+
+// Decodes the branch at pos and returns its absolute target position; for
+// a branch on a label's link chain this is the previous link (or
+// kEndOfChain at the end of the chain).
+int Assembler::target_at(int pos) {
+ Instr instr = instr_at(pos);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ // Sign-extend the 24-bit immediate and convert words to bytes: << 8
+ // moves bit 23 into the sign bit, >> 6 shifts back for a net << 2.
+ // NOTE(review): relies on arithmetic right shift of a negative signed
+ // int, which is implementation-defined in C++ -- confirm the supported
+ // toolchains guarantee it.
+ int imm26 = ((instr & Imm24Mask) << 8) >> 6;
+ if ((instr & CondMask) == nv && (instr & B24) != 0)
+ // blx uses bit 24 to encode bit 2 of imm26
+ imm26 += 2;
+
+ // +8 because the ARM pc reads two instructions ahead of the branch.
+ return pos + 8 + imm26;
+}
+
+
+// Patches the branch at pos so it targets target_pos, preserving the
+// instruction's condition and link bits.
+void Assembler::target_at_put(int pos, int target_pos) {
+ int imm26 = target_pos - pos - 8; // -8: pc reads two instructions ahead
+ Instr instr = instr_at(pos);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ if ((instr & CondMask) == nv) {
+ // blx uses bit 24 to encode bit 2 of imm26
+ ASSERT((imm26 & 1) == 0); // blx targets are halfword-aligned
+ instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+ } else {
+ ASSERT((imm26 & 3) == 0); // b/bl targets are word-aligned
+ instr &= ~Imm24Mask;
+ }
+ int imm24 = imm26 >> 2;
+ ASSERT(is_int24(imm24));
+ instr_at_put(pos, instr | (imm24 & Imm24Mask));
+}
+
+
+// Debug helper: prints the state of label L. For a linked label it walks
+// a copy of the link chain and prints each pending branch instruction.
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L; // walk a copy so L itself is left untouched
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
+ int cond = instr & CondMask;
+ const char* b;
+ const char* c;
+ if (cond == nv) {
+ b = "blx";
+ c = "";
+ } else {
+ if ((instr & B24) != 0)
+ b = "bl";
+ else
+ b = "b";
+
+ switch (cond) {
+ case eq: c = "eq"; break;
+ case ne: c = "ne"; break;
+ case hs: c = "hs"; break;
+ case lo: c = "lo"; break;
+ case mi: c = "mi"; break;
+ case pl: c = "pl"; break;
+ case vs: c = "vs"; break;
+ case vc: c = "vc"; break;
+ case hi: c = "hi"; break;
+ case ls: c = "ls"; break;
+ case ge: c = "ge"; break;
+ case lt: c = "lt"; break;
+ case gt: c = "gt"; break;
+ case le: c = "le"; break;
+ case al: c = ""; break;
+ default:
+ c = "";
+ UNREACHABLE();
+ }
+ }
+ PrintF("%s%s\n", b, c);
+ next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+DEFINE_bool(eliminate_jumps, true, "eliminate jumps to jumps in assembly code");
+DEFINE_bool(print_jump_elimination, false,
+ "print elimination of jumps to jumps in assembly code");
+
+// Patches every branch on L's link chain to target code position pos and
+// marks the label bound there.
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ next(L); // call next before overwriting link with target at fixup_pos
+ target_at_put(fixup_pos, pos);
+ }
+ L->bind_to(pos);
+
+ // do not eliminate jump instructions before the last bound position
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+// Splices appendix's link chain onto the end of L's chain (or moves it
+// into L when L is empty); appendix is left unused afterwards.
+void Assembler::link_to(Label* L, Label* appendix) {
+ if (appendix->is_linked()) {
+ if (L->is_linked()) {
+ // append appendix to L's list
+ int fixup_pos;
+ int link = L->pos();
+ do {
+ fixup_pos = link;
+ link = target_at(fixup_pos);
+ } while (link > 0); // walk to the end of L's chain
+ ASSERT(link == kEndOfChain);
+ // Replace L's chain terminator with a link to appendix's chain head.
+ target_at_put(fixup_pos, appendix->pos());
+ } else {
+ // L is empty, simply use appendix
+ *L = *appendix;
+ }
+ }
+ appendix->Unuse(); // appendix should not be used anymore
+}
+
+
+// Binds L to the current code position. With --eliminate_jumps, first
+// resolves any previously deferred label, then removes trailing
+// "b<cond> next-instruction" branches on L's chain, and finally defers
+// L itself as the new unbound label (bound at the current offset by the
+// trailing bind_to, since L was Unuse()d by then).
+void Assembler::bind(Label* L) {
+ ASSERT(!L->is_bound()); // label can only be bound once
+ if (FLAG_eliminate_jumps) {
+ // Resolve unbound label.
+ if (unbound_label_.is_linked()) {
+ // Unbound label exists => link it with L if same binding
+ // position, otherwise fix it.
+ if (binding_pos_ == pc_offset()) {
+ // Link it to L's list.
+ link_to(L, &unbound_label_);
+ } else {
+ // Otherwise bind unbound label.
+ ASSERT(binding_pos_ < pc_offset());
+ bind_to(&unbound_label_, binding_pos_);
+ }
+ }
+ ASSERT(!unbound_label_.is_linked());
+ // Try to eliminate jumps to next instruction.
+ Instr instr;
+ // Do not remove an already bound jump target.
+ // NOTE: the loop condition assigns instr as a side effect.
+ while (last_bound_pos_ < pc_offset() &&
+ reloc_info_writer.last_pc() <= pc_ - kInstrSize &&
+ L->is_linked() && L->pos() == pc_offset() - kInstrSize &&
+ (((instr = instr_at(L->pos())) & CondMask) != nv && // not blx
+ (instr & 15*B24) == 10*B24)) { // b<cond>, but not bl<cond>
+ // Previous instruction is b<cond> jumping immediately after it
+ // => eliminate it
+ if (FLAG_print_jump_elimination)
+ PrintF("@ %d jump to next eliminated\n", L->pos());
+ // Remove first entry from label list.
+ next(L);
+ // Eliminate instruction (set code pointers back).
+ pc_ -= kInstrSize;
+ // Make sure not to skip relocation information when rewinding.
+ ASSERT(reloc_info_writer.last_pc() <= pc_);
+ }
+ // delay fixup of L => store it as unbound label
+ unbound_label_ = *L;
+ binding_pos_ = pc_offset();
+ L->Unuse();
+ }
+ bind_to(L, pc_offset());
+}
+
+
+// Advances L to the next entry of its link chain, or marks the label
+// unused when the chain terminator is reached.
+void Assembler::next(Label* L) {
+ ASSERT(L->is_linked());
+ int link = target_at(L->pos());
+ if (link <= 0) {
+ // The only legal non-positive link value is the chain terminator.
+ ASSERT(link == kEndOfChain);
+ L->Unuse();
+ return;
+ }
+ L->link_to(link);
+}
+
+
+// Low-level code emission routines depending on the addressing mode
+
+// Returns true if imm32 can be encoded as an ARM immediate shifter
+// operand, i.e. an 8-bit value rotated right by 2*rotate_imm bits; on
+// success *rotate_imm and *immed_8 receive the encoding. If instr is
+// non-NULL and holds a mov/mvn opcode, the complemented immediate is also
+// tried, flipping mov<->mvn in *instr on success.
+static bool fits_shifter(uint32_t imm32,
+ uint32_t* rotate_imm,
+ uint32_t* immed_8,
+ Instr* instr) {
+ // imm32 must be unsigned
+ for (int rot = 0; rot < 16; rot++) {
+ // Rotate left by 2*rot. The rot == 0 case must be special-cased:
+ // shifting a 32-bit value by 32 is undefined behavior in C++.
+ uint32_t imm8 =
+ rot == 0 ? imm32 : (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+ if (imm8 <= 0xff) {
+ *rotate_imm = rot;
+ *immed_8 = imm8;
+ return true;
+ }
+ }
+ // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+ if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= 0x2*B21; // toggle between mov (13) and mvn (15)
+ return true;
+ }
+ }
+ return false;
+}
+
+
+// Emits a data-processing instruction (addressing mode 1). Handles the
+// three shifter-operand forms: immediate, immediate-shifted register and
+// register-shifted register. Unencodable immediates are materialized via
+// a constant-pool load into ip (or directly into rd for a plain mov).
+void Assembler::addrmod1(Instr instr,
+ Register rn,
+ Register rd,
+ const Operand& x) {
+ CheckBuffer();
+ ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+ if (!x.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if ((x.rmode_ != no_reloc && x.rmode_ != external_reference) ||
+ !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, so load
+ // it first to register ip and change the original instruction to use ip.
+ // However, if the original instruction is a 'mov rd, x' (not setting the
+ // condition code), then replace it with a 'ldr rd, [pc]'
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ASSERT(!rn.is(ip)); // rn should never be ip, or will be trashed
+ Condition cond = static_cast<Condition>(instr & CondMask);
+ if ((instr & ~CondMask) == 13*B21) { // mov, S not set
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ ldr(ip, MemOperand(pc, 0), cond);
+ addrmod1(instr, rn, rd, Operand(ip));
+ }
+ return;
+ }
+ instr |= I | rotate_imm*B8 | immed_8;
+ } else if (!x.rs_.is_valid()) {
+ // immediate shift
+ instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ } else {
+ // register shift
+ ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+ instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+ }
+ emit(instr | rn.code()*B16 | rd.code()*B12);
+ if (rn.is(pc) || x.rm_.is(pc))
+ // block constant pool emission for one instruction after reading pc
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+}
+
+
+// Emits a word/byte load or store (addressing mode 2): 12-bit immediate
+// offset or (scaled) register offset. Negative immediates flip the U bit;
+// immediates that do not fit in 12 bits are loaded into ip first.
+void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | B | L)) == B26);
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_12 = x.offset_;
+ if (offset_12 < 0) {
+ offset_12 = -offset_12;
+ am ^= U; // encode magnitude, flip the offset direction bit
+ }
+ if (!is_uint12(offset_12)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_12 >= 0); // no masking needed
+ instr |= offset_12;
+ } else {
+ // register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // register offset the constructors make sure than both shift_imm_
+ // and shift_op_ are initialized
+ ASSERT(!x.rm_.is(pc));
+ instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+// Emits a halfword / signed-byte load or store (addressing mode 3):
+// 8-bit split immediate offset or plain register offset. This mode has no
+// scaled-register form, so scaled indices are computed into ip first.
+void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+ ASSERT(x.rn_.is_valid());
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_8 = x.offset_;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U; // encode magnitude, flip the offset direction bit
+ }
+ if (!is_uint8(offset_8)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_8 >= 0); // no masking needed
+ // The 8-bit immediate is split: high nibble in bits 8-11, low in 0-3.
+ instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+ } else if (x.shift_imm_ != 0) {
+ // scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ } else {
+ // register offset
+ ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
+ instr |= x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+// Emits a load/store-multiple instruction (addressing mode 4); rl is the
+// register-list bitmask in the instruction's low 16 bits.
+void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+ ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+ ASSERT(rl != 0); // an empty register list is not encodable
+ ASSERT(!rn.is(pc));
+ emit(instr | rn.code()*B16 | rl);
+}
+
+
+// Emits a coprocessor load/store (addressing mode 5) with an 8-bit,
+// word-scaled immediate offset.
+void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+ // unindexed addressing is not encoded by this function
+ ASSERT((instr & ~(CondMask | P | U | N | W | L)) == (B27 | B26));
+ ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+ int am = x.am_;
+ int offset_8 = x.offset_;
+ ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
+ offset_8 >>= 2; // the encoding counts words, not bytes
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+
+ // post-indexed addressing requires W == 1; different than in addrmod2/3
+ if ((am & P) == 0)
+ am |= W;
+
+ ASSERT(offset_8 >= 0); // no masking needed
+ emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+}
+
+
+// Computes the pc-relative branch offset to label L for a branch emitted
+// at the current position, linking L when it is still unbound; also
+// performs jump-to-jump elimination bookkeeping for unconditional
+// branches that start exactly at the deferred unbound label's position.
+int Assembler::branch_offset(Label* L, Condition cond) {
+ // if we emit an unconditional jump/call and if the current position is the
+ // target of the unbound label, we can change the binding position of the
+ // unbound label, thereby eliminating an unnessary jump
+ bool can_eliminate = false;
+ if (cond == al && FLAG_eliminate_jumps &&
+ unbound_label_.is_linked() && binding_pos_ == pc_offset()) {
+ can_eliminate = true;
+ if (FLAG_print_jump_elimination) {
+ PrintF("eliminated jumps/calls to %d from ", binding_pos_);
+ print(&unbound_label_);
+ }
+ }
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ if (can_eliminate)
+ binding_pos_ = target_pos;
+ } else {
+ if (can_eliminate)
+ link_to(L, &unbound_label_); // may modify L's link
+ if (L->is_linked())
+ target_pos = L->pos(); // L's link
+ else
+ target_pos = kEndOfChain; // start a new chain
+ L->link_to(pc_offset());
+ }
+
+ // Block the emission of the constant pool, since the branch instruction must
+ // be emitted at the pc offset recorded by the label
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+
+ // -8 because the ARM pc reads two instructions ahead of the branch.
+ return target_pos - pc_offset() - 8;
+}
+
+
+// Branch instructions
+
+// b<cond>: pc-relative branch; branch_offset must be word-aligned.
+void Assembler::b(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+
+ if (cond == al)
+ // dead code is a good location to emit the constant pool
+ CheckConstPool(false, false);
+}
+
+
+// bl<cond>: branch with link (B24 set).
+void Assembler::bl(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+}
+
+
+// blx imm: halfword-aligned target; bit 1 of the offset goes into the
+// h bit (instruction bit 24); condition field is nv (15).
+void Assembler::blx(int branch_offset) { // v5 and above
+ ASSERT((branch_offset & 1) == 0);
+ int h = ((branch_offset & 2) >> 1)*B24;
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
+}
+
+
+// blx reg: branch with link and exchange to the address in target.
+void Assembler::blx(Register target, Condition cond) { // v5 and above
+ ASSERT(!target.is(pc));
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+}
+
+
+// bx: branch and exchange to the address in target.
+void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+ ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+}
+
+
+// Data-processing instructions
+//
+// Each emitter passes its 4-bit opcode (instruction bits 21-24, written
+// as n*B21) to addrmod1, which encodes the shifter operand. The compare
+// instructions tst/teq/cmp/cmn always set S and have no destination (r0
+// is passed as a dummy rd); mov/mvn have no first source (dummy rn).
+void Assembler::and_(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 0*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::eor(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 1*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sub(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 2*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsb(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 3*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::add(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 4*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::adc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 5*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sbc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 6*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 7*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 8*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 9*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 10*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 11*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::orr(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 12*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 13*B21 | s, r0, dst, src);
+}
+
+
+void Assembler::bic(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 14*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 15*B21 | s, r0, dst, src);
+}
+
+
+// Multiply instructions
+//
+// Register constraints (pc forbidden, dst distinct from src1) are the
+// architectural restrictions and are checked by the ASSERTs below.
+void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ ASSERT(!dst.is(src1));
+ emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::mul(Register dst, Register src1, Register src2,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dst.is(src1));
+ emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Signed multiply-accumulate long: dstH:dstL += src1 * src2.
+void Assembler::smlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Signed multiply long: dstH:dstL = src1 * src2.
+void Assembler::smull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Unsigned multiply-accumulate long: dstH:dstL += src1 * src2.
+void Assembler::umlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Unsigned multiply long: dstH:dstL = src1 * src2.
+void Assembler::umull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Miscellaneous arithmetic instructions
+
+// Count leading zeros of src into dst.
+void Assembler::clz(Register dst, Register src, Condition cond) {
+ // v5 and above.
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
+ 15*B8 | B4 | src.code());
+}
+
+
+// Status register access instructions
+
+// Move status register (CPSR/SPSR selected by s) to dst.
+void Assembler::mrs(Register dst, SRegister s, Condition cond) {
+ ASSERT(!dst.is(pc));
+ emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+}
+
+
+// Move an immediate or register into the status register fields selected
+// by the field mask (bits 16-19). Immediates that cannot be encoded as a
+// shifter operand are loaded into ip via the constant pool first.
+void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
+ Condition cond) {
+ ASSERT(fields >= B16 && fields < B20); // at least one field set
+ Instr instr;
+ if (!src.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if ((src.rmode_ != no_reloc && src.rmode_ != external_reference)||
+ !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+ // immediate operand cannot be encoded, load it first to register ip
+ RecordRelocInfo(src.rmode_, src.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ msr(fields, Operand(ip), cond);
+ return;
+ }
+ instr = I | rotate_imm*B8 | immed_8;
+ } else {
+ ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
+ instr = src.rm_.code();
+ }
+ emit(cond | instr | B24 | B21 | fields | 15*B12);
+}
+
+
+// Load/Store instructions
+//
+// Word/byte accesses go through addrmod2 (L = load, B = byte); halfword
+// and signed accesses go through addrmod3 (H = halfword, S6 = signed).
+void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | L, dst, src);
+}
+
+
+void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26, src, dst);
+}
+
+
+void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | B | L, dst, src);
+}
+
+
+void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26 | B, src, dst);
+}
+
+
+void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | H | B4, dst, src);
+}
+
+
+void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
+ addrmod3(cond | B7 | H | B4, src, dst);
+}
+
+
+void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | B4, dst, src);
+}
+
+
+void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+}
+
+
+// Load/Store multiple instructions
+
+// Load multiple registers; may flush the constant pool after an
+// unconditional function-return ldm (one whose list includes pc).
+void Assembler::ldm(BlockAddrMode am,
+ Register base,
+ RegList dst,
+ Condition cond) {
+ // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+ ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+
+ addrmod4(cond | B27 | am | L, base, dst);
+
+ // emit the constant pool after a function return implemented by ldm ..{..pc}
+ if (cond == al && (dst & pc.bit()) != 0) {
+ // There is a slight chance that the ldm instruction was actually a call,
+ // in which case it would be wrong to return into the constant pool; we
+ // recognize this case by checking if the emission of the pool was blocked
+ // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
+ // the case, we emit a jump over the pool.
+ CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
+ }
+}
+
+
+// Store multiple registers.
+void Assembler::stm(BlockAddrMode am,
+ Register base,
+ RegList src,
+ Condition cond) {
+ addrmod4(cond | B27 | am, base, src);
+}
+
+
+// Semaphore instructions
+
+// Atomically swap the word at [base] with src, old value into dst.
+void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+// Byte variant of swp (B bit set).
+void Assembler::swpb(Register dst,
+ Register src,
+ Register base,
+ Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+// Exception-generating instructions and debugging support
+
+// Emits an instruction with the reserved nv condition, embedding the
+// message pointer in the low bits of the instruction word.
+// NOTE(review): this assumes the pointer fits in the remaining 28 bits
+// (e.g. 32-bit hosts with low heap addresses) -- confirm for the
+// targeted platforms.
+void Assembler::stop(const char* msg) {
+ emit(15 << 28 | ((intptr_t) msg));
+}
+
+
+// Breakpoint; the 16-bit immediate is split into bits 8-19 and 0-3.
+void Assembler::bkpt(uint32_t imm16) { // v5 and above
+ ASSERT(is_uint16(imm16));
+ emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+}
+
+
+// Software interrupt with a 24-bit comment field.
+void Assembler::swi(uint32_t imm24, Condition cond) {
+ ASSERT(is_uint24(imm24));
+ emit(cond | 15*B24 | imm24);
+}
+
+
+// Coprocessor instructions
+//
+// The *2 variants (cdp2/mcr2/mrc2) are the unconditional v5 encodings,
+// produced by passing the reserved nv condition to the base emitter.
+void Assembler::cdp(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
+ crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+}
+
+
+void Assembler::cdp2(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+// Move rd to coprocessor register (B4 set distinguishes mcr from cdp).
+void Assembler::mcr(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mcr2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+// Move coprocessor register to rd (L bit set distinguishes mrc from mcr).
+void Assembler::mrc(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mrc2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+// Coprocessor load (ldc) with a MemOperand: offset addressing via
+// addrmod5; L selects load, the l flag selects the long (N) form.
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+}
+
+
+// Coprocessor load, unindexed form: the 8-bit option field is passed
+// through to the coprocessor unchanged.
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+// Unconditional (nv) variants, v5 and above.
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Coprocessor store (stc): same encodings as ldc without the L bit.
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& dst,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+// Unconditional (nv) coprocessor store, MemOperand form.
+void Assembler::stc2(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& dst,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+}
+
+
+// Unconditional (nv) coprocessor store, unindexed form.
+void Assembler::stc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Pseudo instructions
+
+// Load effective address: computes the address denoted by x into dst
+// using mov/add/sub, mirroring addrmod2's addressing modes.
+void Assembler::lea(Register dst,
+ const MemOperand& x,
+ SBit s,
+ Condition cond) {
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ if ((am & P) == 0) // post indexing: address is just the base
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.offset_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.offset_), s, cond);
+ } else {
+ // Register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // register offset the constructors make sure than both shift_imm_
+ // and shift_op_ are initialized.
+ ASSERT(!x.rm_.is(pc));
+ if ((am & P) == 0) // post indexing: address is just the base
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ }
+}
+
+
+// Debugging
+
+// Records a code comment as relocation info; only emitted when
+// --debug-code is on. The pointed-to message must outlive the code.
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ CheckBuffer();
+ RecordRelocInfo(comment, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+// Records a source position for the next emitted instruction; kNoPosition
+// and positions equal to the last recorded one are ignored.
+void Assembler::RecordPosition(int pos) {
+ if (pos == kNoPosition) return;
+ // Fixed: was ASSERT(position >= 0), which tested the RelocMode
+ // enumerator 'position' (always true) instead of the parameter.
+ ASSERT(pos >= 0);
+ if (pos == last_position_) return;
+ CheckBuffer();
+ RecordRelocInfo(position, pos);
+ last_position_ = pos;
+ last_position_is_statement_ = false;
+}
+
+
+// Records a statement position for the next emitted instruction;
+// duplicate positions are ignored.
+// NOTE(review): unlike RecordPosition there is no kNoPosition guard
+// here -- confirm callers never pass kNoPosition.
+void Assembler::RecordStatementPosition(int pos) {
+ if (pos == last_position_) return;
+ CheckBuffer();
+ RecordRelocInfo(statement_position, pos);
+ last_position_ = pos;
+ last_position_is_statement_ = true;
+}
+
+
+// Grows an owned code buffer (doubling below 1MB, +1MB above), moving
+// the instruction region to the new buffer's start and the relocation
+// info region to its end, then fixes up all internal pointers and the
+// pending relocation entries.
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else if (buffer_size_ < 1*MB) {
+ desc.buffer_size = 2*buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1*MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // none of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries
+
+ // relocate pending relocation entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != comment && rinfo.rmode() != position);
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+}
+
+
+// Records relocation info at the current pc. Comment/position modes are
+// only written to the reloc stream; all other modes are also buffered in
+// prinfo_ for the next constant pool and briefly block pool emission.
+void Assembler::RecordRelocInfo(RelocMode rmode, intptr_t data) {
+ RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ if (rmode >= comment && rmode <= statement_position) {
+ // adjust code for new modes
+ ASSERT(is_comment(rmode) || is_position(rmode));
+ // these modes do not need an entry in the constant pool
+ } else {
+ ASSERT(num_prinfo_ < kMaxNumPRInfo);
+ prinfo_[num_prinfo_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+ }
+ if (rinfo.rmode() != no_reloc) {
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ reloc_info_writer.Write(&rinfo);
+ }
+}
+
+
+// Emit the constant pool for the pending relocation entries if it is due
+// (or if force_emit is set). When require_jump is true, a branch over the
+// pool is emitted so that execution continues past the pool data. Each
+// pending ldr/str [pc, #0] is patched here with the actual pool offset.
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Calculate the offset of the next check. It will be overwritten
+ // when a const pool is generated or when const pools are being
+ // blocked for a specific range.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+ // There is nothing to do if there are no pending relocation info entries
+ if (num_prinfo_ == 0) return;
+
+ // We emit a constant pool at regular intervals of about kDistBetweenPools
+ // or when requested by parameter force_emit (e.g. after each function).
+ // We prefer not to emit a jump unless the max distance is reached or if we
+ // are running low on slots, which can happen if a lot of constants are being
+ // emitted (e.g. --debug-code and many static references).
+ int dist = pc_offset() - last_const_pool_end_;
+ if (!force_emit && dist < kMaxDistBetweenPools &&
+ (require_jump || dist < kDistBetweenPools) &&
+ // TODO(1236125): Cleanup the "magic" number below. We know that
+ // the code generation will test every kCheckConstIntervalInst.
+ // Thus we are safe as long as we generate less than 7 constant
+ // entries per instruction.
+ (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+ return;
+ }
+
+ // If we did not return by now, we need to emit the constant pool soon.
+
+ // However, some small sequences of instructions must not be broken up by the
+ // insertion of a constant pool; such sequences are protected by setting
+ // no_const_pool_before_, which is checked here. Also, recursive calls to
+ // CheckConstPool are blocked by no_const_pool_before_.
+ if (pc_offset() < no_const_pool_before_) {
+ // Emission is currently blocked; make sure we try again as soon as possible
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Something is wrong if emission is forced and blocked at the same time
+ ASSERT(!force_emit);
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstrSize : 0;
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool and relocation information (include the jump over the pool and the
+ // constant pool marker).
+ int max_needed_space =
+ jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+ while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+ // Block recursive calls to CheckConstPool
+ BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+ num_prinfo_*kInstrSize);
+ // Don't bother to check for the emit calls below.
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Emit jump over constant pool if necessary
+ Label after_pool;
+ if (require_jump) b(&after_pool);
+
+ RecordComment("[ Constant Pool");
+
+ // Put down constant pool marker
+ // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ emit(0x03000000 | num_prinfo_);
+
+ // Emit constant pool entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != comment && rinfo.rmode() != position &&
+ rinfo.rmode() != statement_position);
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be a ldr/str [pc, #offset]
+ // P and U set, B and W clear, Rn == pc, offset12 still 0
+ ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+ (2*B25 | P | U | pc.code()*B16));
+ // The -8 accounts for the ARM pc reading 8 bytes ahead of the
+ // instruction being patched (pipeline effect).
+ int delta = pc_ - rinfo.pc() - 8;
+ ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
+ if (delta < 0) {
+ // Negative offset: clear the U (add) bit and store the magnitude.
+ instr &= ~U;
+ delta = -delta;
+ }
+ ASSERT(is_uint12(delta));
+ instr_at_put(rinfo.pc(), instr + delta);
+ emit(rinfo.data());
+ }
+ num_prinfo_ = 0;
+ last_const_pool_end_ = pc_offset();
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A light-weight ARM Assembler
+// Generates user mode instructions for the ARM architecture up to version 5
+
+#ifndef V8_ASSEMBLER_ARM_H_
+#define V8_ASSEMBLER_ARM_H_
+
+#include "assembler.h"
+
+namespace v8 { namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// Core register
+struct Register {
+ // Valid core-register codes are 0 through 15.
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ // Identity comparison by register code.
+ bool is(Register reg) const { return code_ == reg.code_; }
+ // The register number; asserts validity.
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ // Mask with only this register's bit set (e.g. for register lists).
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern Register no_reg;
+extern Register r0;
+extern Register r1;
+extern Register r2;
+extern Register r3;
+extern Register r4;
+extern Register r5;
+extern Register r6;
+extern Register r7;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register fp;
+extern Register ip;
+extern Register sp;
+extern Register lr;
+extern Register pc;
+
+
+// Coprocessor register
+struct CRegister {
+ // Valid coprocessor-register codes are 0 through 15.
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ // Identity comparison by register code.
+ bool is(CRegister creg) const { return code_ == creg.code_; }
+ // The register number; asserts validity.
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ // Mask with only this register's bit set.
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern CRegister no_creg;
+extern CRegister cr0;
+extern CRegister cr1;
+extern CRegister cr2;
+extern CRegister cr3;
+extern CRegister cr4;
+extern CRegister cr5;
+extern CRegister cr6;
+extern CRegister cr7;
+extern CRegister cr8;
+extern CRegister cr9;
+extern CRegister cr10;
+extern CRegister cr11;
+extern CRegister cr12;
+extern CRegister cr13;
+extern CRegister cr14;
+extern CRegister cr15;
+
+
+// Coprocessor number
+// Coprocessor numbers p0-p15, used by the coprocessor instructions
+// (cdp, mcr, mrc, ldc, stc and their v5 variants) declared below.
+enum Coprocessor {
+ p0 = 0,
+ p1 = 1,
+ p2 = 2,
+ p3 = 3,
+ p4 = 4,
+ p5 = 5,
+ p6 = 6,
+ p7 = 7,
+ p8 = 8,
+ p9 = 9,
+ p10 = 10,
+ p11 = 11,
+ p12 = 12,
+ p13 = 13,
+ p14 = 14,
+ p15 = 15
+};
+
+
+// Condition field in instructions
+// Values occupy bits 28-31, the ARM condition field of every instruction.
+enum Condition {
+ eq = 0 << 28, // equal (Z set)
+ ne = 1 << 28, // not equal (Z clear)
+ cs = 2 << 28, // carry set
+ hs = 2 << 28, // unsigned higher or same -- alias of cs
+ cc = 3 << 28, // carry clear
+ lo = 3 << 28, // unsigned lower -- alias of cc
+ mi = 4 << 28, // negative (N set)
+ pl = 5 << 28, // positive or zero (N clear)
+ vs = 6 << 28, // overflow (V set)
+ vc = 7 << 28, // no overflow (V clear)
+ hi = 8 << 28, // unsigned higher
+ ls = 9 << 28, // unsigned lower or same
+ ge = 10 << 28, // signed greater than or equal
+ lt = 11 << 28, // signed less than
+ gt = 12 << 28, // signed greater than
+ le = 13 << 28, // signed less than or equal
+ al = 14 << 28 // always (unconditional)
+};
+
+
+// Returns the equivalent of !cc.
+INLINE(Condition NegateCondition(Condition cc));
+
+
+// The pc store offset may be 8 or 12 depending on the processor implementation.
+int PcStoreOffset();
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants
+
+// Shifter operand shift operation
+// Shifter operand shift operation; values occupy bits 5-6 of the shifter
+// operand. RRX has no bits of its own here: -1 is a marker value for
+// special handling by the encoder.
+enum ShiftOp {
+ LSL = 0 << 5,
+ LSR = 1 << 5,
+ ASR = 2 << 5,
+ ROR = 3 << 5,
+ RRX = -1
+};
+
+
+// Condition code updating mode
+// Condition code updating mode: the S bit (bit 20) of data-processing
+// instructions.
+enum SBit {
+ SetCC = 1 << 20, // set condition code
+ LeaveCC = 0 << 20 // leave condition code unchanged
+};
+
+
+// Status register selection
+// Status register selection: bit 22 chooses between CPSR and SPSR.
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
+};
+
+
+// Status register fields
+// Status register fields: the field-mask bits occupy bits 16-19, combined
+// with the CPSR/SPSR selector from SRegister. Values may be or'ed together
+// into an SRegisterFieldMask.
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values)
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode
+// Memory operand addressing mode. The three encoded flags are
+// P (bit 24, pre-index), U (bit 23, add offset) and W (bit 21, writeback).
+enum AddrMode {
+ // bit encoding P U W
+ Offset = (8|4|0) << 21, // offset (without writeback to base)
+ PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
+ PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
+ NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
+ NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
+ NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
+};
+
+
+// Load/store multiple addressing mode
+// Load/store multiple addressing mode; same P/U/W bit positions as AddrMode.
+enum BlockAddrMode {
+ // bit encoding P U W
+ da = (0|0|0) << 21, // decrement after
+ ia = (0|4|0) << 21, // increment after
+ db = (8|0|0) << 21, // decrement before
+ ib = (8|4|0) << 21, // increment before
+ da_w = (0|0|1) << 21, // decrement after with writeback to base
+ ia_w = (0|4|1) << 21, // increment after with writeback to base
+ db_w = (8|0|1) << 21, // decrement before with writeback to base
+ ib_w = (8|4|1) << 21 // increment before with writeback to base
+};
+
+
+// Coprocessor load/store operand size
+// Coprocessor load/store operand size: bit 22 of ldc/stc instructions.
+enum LFlag {
+ Long = 1 << 22, // long load/store coprocessor
+ Short = 0 << 22 // short load/store coprocessor
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+ // Exactly one of three forms is in use at a time -- immediate,
+ // register shifted by immediate, or register shifted by register --
+ // discriminated by the validity conditions noted on the fields below.
+
+ // immediate
+ INLINE(explicit Operand(int32_t immediate, RelocMode rmode = no_reloc));
+ INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const char* s));
+ INLINE(explicit Operand(Object** opp));
+ INLINE(explicit Operand(Context** cpp));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // rm
+ INLINE(explicit Operand(Register rm));
+
+ // rm <shift_op> shift_imm
+ explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+
+ // rm <shift_op> rs
+ explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+
+ private:
+ Register rm_;
+ Register rs_;
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ int32_t imm32_; // valid if rm_ == no_reg
+ RelocMode rmode_; // relocation mode for imm32_ (no_reloc for plain ints)
+
+ friend class Assembler;
+};
+
+
+// Class MemOperand represents a memory operand in load and store instructions
+class MemOperand BASE_EMBEDDED {
+ public:
+ // [rn +/- offset] Offset/NegOffset
+ // [rn +/- offset]! PreIndex/NegPreIndex
+ // [rn], +/- offset PostIndex/NegPostIndex
+ // offset is any signed 32-bit value; offset is first loaded to register ip if
+ // it does not fit the addressing mode (12-bit unsigned and sign bit)
+ explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
+
+ // [rn +/- rm] Offset/NegOffset
+ // [rn +/- rm]! PreIndex/NegPreIndex
+ // [rn], +/- rm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
+
+ // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
+ // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
+ // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+
+ private:
+ Register rn_; // base
+ Register rm_; // register offset
+ int32_t offset_; // valid if rm_ == no_reg
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ AddrMode am_; // bits P, U, and W (offset/pre/post-index and sign)
+
+ friend class Assembler;
+};
+
+
+typedef int32_t Instr;
+
+
+class Assembler : public Malloced {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Returns the branch offset to the given label from the current code position
+ // Links the label to the current position if it is still unbound
+ // Manages the jump elimination optimization if necessary
+ int branch_offset(Label* L, Condition cond);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ INLINE(static Address target_address_address_at(Address pc));
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ INLINE(static Address target_address_at(Address pc));
+ INLINE(static void set_target_address_at(Address pc, Address target));
+
+ // Distance between the instruction referring to the address of the call
+ // target (ldr pc, [target addr in const pool]) and the return address
+ static const int kTargetAddrToReturnAddrDist = sizeof(Instr);
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ // Branch instructions
+ void b(int branch_offset, Condition cond = al);
+ void bl(int branch_offset, Condition cond = al);
+ void blx(int branch_offset); // v5 and above
+ void blx(Register target, Condition cond = al); // v5 and above
+ void bx(Register target, Condition cond = al); // v5 and above, plus v4t
+
+ // Convenience branch instructions using labels
+ void b(Label* L, Condition cond = al) { b(branch_offset(L, cond), cond); }
+ void b(Condition cond, Label* L) { b(branch_offset(L, cond), cond); }
+ void bl(Label* L, Condition cond = al) { bl(branch_offset(L, cond), cond); }
+ void bl(Condition cond, Label* L) { bl(branch_offset(L, cond), cond); }
+ void blx(Label* L) { blx(branch_offset(L, al)); } // v5 and above
+
+ // Data-processing instructions
+ void and_(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void eor(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sub(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void rsb(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void add(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void adc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sbc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void rsc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void tst(Register src1, const Operand& src2, Condition cond = al);
+
+ void teq(Register src1, const Operand& src2, Condition cond = al);
+
+ void cmp(Register src1, const Operand& src2, Condition cond = al);
+
+ void cmn(Register src1, const Operand& src2, Condition cond = al);
+
+ void orr(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mov(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void bic(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mvn(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Multiply instructions
+
+ void mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mul(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Miscellaneous arithmetic instructions
+
+ void clz(Register dst, Register src, Condition cond = al); // v5 and above
+
+ // Status register access instructions
+
+ void mrs(Register dst, SRegister s, Condition cond = al);
+ void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
+
+ // Load/Store instructions
+ void ldr(Register dst, const MemOperand& src, Condition cond = al);
+ void str(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrb(Register dst, const MemOperand& src, Condition cond = al);
+ void strb(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrh(Register dst, const MemOperand& src, Condition cond = al);
+ void strh(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
+ void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
+
+ // Load/Store multiple instructions
+ void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
+ void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
+
+ // Semaphore instructions
+ void swp(Register dst, Register src, Register base, Condition cond = al);
+ void swpb(Register dst, Register src, Register base, Condition cond = al);
+
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg);
+ void untested(const char* msg);
+ void unimplemented(const char* msg);
+ void unreachable(const char* msg);
+
+ void bkpt(uint32_t imm16); // v5 and above
+ void swi(uint32_t imm24, Condition cond = al);
+ // To generate a breakpoint on ARM Linux you can use swi(0x9f0001).
+ // For some reason stepi or cont will not work in gdb until you have done:
+ // set $pc = $pc + 4
+ inline void int3() { swi(0x9f0001); }
+
+ // Coprocessor instructions
+
+ void cdp(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2, Condition cond = al);
+
+ void cdp2(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2); // v5 and above
+
+ void mcr(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mcr2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void mrc(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mrc2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short, Condition cond = al);
+ void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short); // v5 and above
+ void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short, Condition cond = al);
+ void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short); // v5 and above
+ void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ // Pseudo instructions
+ void nop() { mov(r0, Operand(r0)); }
+
+ // Push: store src at [sp, #-4]! (full-descending stack).
+ void push(Register src, Condition cond = al) {
+ str(src, MemOperand(sp, 4, NegPreIndex), cond);
+ }
+
+ // Pop: load dst from [sp], #4.
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
+ }
+
+ // Load effective address of memory operand x into register dst
+ void lea(Register dst, const MemOperand& x,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Jump unconditionally to given label.
+ void jmp(Label* L) { b(L, al); }
+
+
+ // Debugging
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ // Record the given source position for the code being generated.
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+
+ // Offset of the current emission point from the start of the buffer.
+ int pc_offset() const { return pc_ - buffer_; }
+ int last_position() const { return last_position_; }
+ bool last_position_is_statement() const {
+ return last_position_is_statement_;
+ }
+
+ protected:
+ // Bytes remaining between the emission point and the (downward-growing)
+ // relocation info.
+ int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Read/patch instructions
+ Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
+ // Decode branch instruction at pos and return branch target pos
+ int target_at(int pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos
+ void target_at_put(int pos, int target_pos);
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes
+ static const int kBufferCheckInterval = 1*KB/2;
+ int next_buffer_check_; // pc offset of next buffer check
+
+ // Code generation
+ static const int kInstrSize = sizeof(Instr); // signed size
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+ byte* pc_; // the program counter; moves forward
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+ // Constants in the pool may be addresses of functions that gets relocated;
+ // if so, a relocation info entry is associated to the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+
+ // Pools are emitted after function return and in dead code at (more or less)
+ // regular intervals of kDistBetweenPools bytes
+ static const int kDistBetweenPools = 1*KB;
+
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant. We satisfy this constraint by limiting the
+ // distance between pools.
+ static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences
+ int no_const_pool_before_; // block emission before this pc offset
+
+ // Keep track of the last emitted pool to guarantee a maximal distance
+ int last_const_pool_end_; // pc offset following the last constant pool
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+ static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+ RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
+ int num_prinfo_; // number of pending reloc info entries in the buffer
+
+ // Jump-to-jump elimination:
+ // The last label to be bound to _binding_pos, if unbound.
+ Label unbound_label_;
+ // The position to which _unbound_label has to be bound, if present.
+ int binding_pos_;
+ // The position before which jumps cannot be eliminated.
+ int last_bound_pos_;
+
+ // source position information
+ int last_position_;
+ bool last_position_is_statement_;
+
+ // Code emission
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+
+ // Instruction generation
+ void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
+ void addrmod2(Instr instr, Register rd, const MemOperand& x);
+ void addrmod3(Instr instr, Register rd, const MemOperand& x);
+ void addrmod4(Instr instr, Register rn, RegList rl);
+ void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+
+ // Labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+ void next(Label* L);
+
+ // Record reloc info for current pc_
+ void RecordRelocInfo(RelocMode rmode, intptr_t data = 0);
+
+ // Check if is time to emit a constant pool for pending reloc info entries
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Block the emission of the constant pool before pc_offset
+ void BlockConstPoolBefore(int pc_offset) {
+ if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_ARM_H_
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_ASSEMBLER_IA32_INL_H_
+#define V8_ASSEMBLER_IA32_INL_H_
+
+#include "cpu.h"
+
+namespace v8 { namespace internal {
+
+// Return the logical complement of condition cc. IA32 condition codes come
+// in complementary pairs that differ only in the low bit, so toggling bit 0
+// negates the condition.
+Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
+
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(int delta) {
+ if (rmode_ == runtime_entry || is_code_target(rmode_)) {
+ // Adjust the stored 32-bit value by -delta to compensate for the code
+ // having moved by delta bytes (presumably these entries are pc-relative).
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p -= delta; // relocate entry
+ } else if (rmode_ == js_return && is_call_instruction()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted). pc_ + 1 skips the call opcode byte.
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // relocate entry
+ }
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(is_code_target(rmode_) || rmode_ == runtime_entry);
+ return Assembler::target_address_at(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(is_code_target(rmode_) || rmode_ == runtime_entry);
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+// Accessors for an object pointer embedded directly (absolute, 32 bits)
+// in the instruction stream at pc_.
+Object* RelocInfo::target_object() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return *reinterpret_cast<Object**>(pc_);
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ *reinterpret_cast<Object**>(pc_) = target;
+}
+
+
+// Address of an external (non-heap) reference embedded at pc_.
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == external_reference);
+ return reinterpret_cast<Address*>(pc_);
+}
+
+
+// Accessors for the 32-bit operand of a patched-in call instruction;
+// pc_ points at the opcode byte, so the operand starts at pc_ + 1.
+Address RelocInfo::call_address() {
+ ASSERT(is_call_instruction());
+ return Assembler::target_address_at(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(is_call_instruction());
+ Assembler::set_target_address_at(pc_ + 1, target);
+}
+
+
+Object* RelocInfo::call_object() {
+ ASSERT(is_call_instruction());
+ return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(is_call_instruction());
+ return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ ASSERT(is_call_instruction());
+ *call_object_address() = target;
+}
+
+
+// 0xE8 is the IA-32 opcode for CALL rel32.
+bool RelocInfo::is_call_instruction() {
+ return *pc_ == 0xE8;
+}
+
+
+// Immediate constructors: store the 32-bit payload in x_ and record the
+// relocation mode needed when the value is later emitted.
+Immediate::Immediate(int x) {
+ x_ = x;
+ rmode_ = no_reloc;
+}
+
+
+Immediate::Immediate(const ExternalReference& ext) {
+ x_ = reinterpret_cast<int32_t>(ext.address());
+ rmode_ = external_reference;
+}
+
+Immediate::Immediate(const char* s) {
+ x_ = reinterpret_cast<int32_t>(s);
+ rmode_ = embedded_string;
+}
+
+
+Immediate::Immediate(Handle<Object> handle) {
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ // Heap objects are referenced via the handle location so the GC can
+ // update the code when the object moves.
+ x_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = embedded_object;
+ } else {
+ // no relocation needed
+ x_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = no_reloc;
+ }
+}
+
+
+// Smis are immediate (tagged) values, never moved by the GC.
+Immediate::Immediate(Smi* value) {
+ x_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = no_reloc;
+}
+
+
+// Append a raw 32-bit value to the instruction stream, advancing pc_.
+void Assembler::emit(uint32_t x) {
+ *reinterpret_cast<uint32_t*>(pc_) = x;
+ pc_ += sizeof(uint32_t);
+}
+
+
+// Emit an object reference, recording embedded_object relocation for
+// heap objects (via the handle location) and nothing for Smis.
+void Assembler::emit(Handle<Object> handle) {
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ emit(reinterpret_cast<intptr_t>(handle.location()), embedded_object);
+ } else {
+ // no relocation needed
+ emit(reinterpret_cast<intptr_t>(obj));
+ }
+}
+
+
+// Emit a 32-bit value, first recording relocation info if required.
+void Assembler::emit(uint32_t x, RelocMode rmode) {
+ if (rmode != no_reloc) RecordRelocInfo(rmode);
+ emit(x);
+}
+
+
+// Emit an Immediate together with its relocation mode.
+void Assembler::emit(const Immediate& x) {
+ if (x.rmode_ != no_reloc) RecordRelocInfo(x.rmode_);
+ emit(x.x_);
+}
+
+
+// Convert between the pc-relative 32-bit displacement stored at |pc| and
+// the absolute target address. The displacement is relative to the end
+// of the 4-byte field (next instruction).
+Address Assembler::target_address_at(Address pc) {
+ return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
+}
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc);
+ *p = target - (pc + sizeof(int32_t));
+ // Keep the instruction cache coherent after patching code in place.
+ CPU::FlushICache(p, sizeof(int32_t));
+}
+
+// Encode a ModR/M byte (mod in bits 7..6, r/m in bits 2..0; the reg
+// field is filled in later by set_reg / emit_operand).
+void Operand::set_modrm(int mod, // reg == 0
+ Register rm) {
+ ASSERT((mod & -4) == 0);
+ buf_[0] = mod << 6 | rm.code();
+ len_ = 1;
+}
+
+
+// Append a 32-bit displacement (after ModR/M and optional SIB) and
+// remember its relocation mode.
+void Operand::set_dispr(int32_t disp, RelocMode rmode) {
+ ASSERT(len_ == 1 || len_ == 2);
+ *reinterpret_cast<int32_t*>(&buf_[len_]) = disp;
+ len_ += sizeof(int32_t);
+ rmode_ = rmode;
+}
+
+Operand::Operand(Register reg) {
+ // reg
+ set_modrm(3, reg);
+}
+
+
+Operand::Operand(int32_t disp, RelocMode rmode) {
+ // [disp/r]
+ // mod == 0 with r/m == ebp encodes an absolute 32-bit displacement.
+ set_modrm(0, ebp);
+ set_dispr(disp, rmode);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_IA32_INL_H_
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+
+namespace v8 { namespace internal {
+
+DEFINE_bool(debug_code, false,
+ "generate extra code (comments, assertions) for debugging");
+DEFINE_bool(emit_branch_hints, false, "emit branch hints");
+
+// -----------------------------------------------------------------------------
+// Implementation of Register
+
+// The general-purpose registers, in IA-32 encoding order (0-7).
+Register eax = { 0 };
+Register ecx = { 1 };
+Register edx = { 2 };
+Register ebx = { 3 };
+Register esp = { 4 };
+Register ebp = { 5 };
+Register esi = { 6 };
+Register edi = { 7 };
+// Sentinel for "no register"; its code is not a valid encoding.
+Register no_reg = { -1 };
+
+XMMRegister xmm0 = { 0 };
+XMMRegister xmm1 = { 1 };
+XMMRegister xmm2 = { 2 };
+XMMRegister xmm3 = { 3 };
+XMMRegister xmm4 = { 4 };
+XMMRegister xmm5 = { 5 };
+XMMRegister xmm6 = { 6 };
+XMMRegister xmm7 = { 7 };
+
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
+
+// Safe default is no features.
+uint32_t CpuFeatures::supported_ = 0;
+uint32_t CpuFeatures::enabled_ = 0;
+
+
+typedef int (*F0)();
+
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
+// Allocation failure is silent and leads to safe default.
+// Probe assembles and runs a small stub that (1) checks whether CPUID is
+// available by toggling EFLAGS bit 21, and (2) if so, executes CPUID with
+// eax == 1 and returns the feature bits from edx.
+void CpuFeatures::Probe() {
+ supported_ = 0;
+ if (Serializer::enabled()) return; // No features if we might serialize.
+ Assembler assm(NULL, 0);
+ Label done;
+#define __ assm.
+ // Save old esp, since we are going to modify the stack.
+ __ push(ebp);
+ __ pushfd();
+ __ push(ecx);
+ __ push(edx);
+ __ push(ebx);
+ __ mov(ebp, Operand(esp));
+ // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+ __ pushfd();
+ __ pop(eax);
+ __ mov(edx, Operand(eax));
+ __ xor_(eax, 0x200000); // Flip bit 21.
+ __ push(eax);
+ __ popfd();
+ __ pushfd();
+ __ pop(eax);
+ __ xor_(eax, Operand(edx)); // Different if CPUID is supported.
+ __ j(zero, &done);
+ // Invoke CPUID with 1 in eax to get feature information in edx.
+ __ mov(eax, 1);
+ // Temporarily force CPUID support, since we know it is safe here.
+ supported_ = (1 << CPUID);
+ { Scope fscope(CPUID);
+ __ cpuid();
+ }
+ supported_ = 0;
+ // Return result in eax.
+ __ mov(eax, Operand(edx));
+ __ bind(&done);
+ __ mov(esp, Operand(ebp));
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(ecx);
+ __ popfd();
+ __ pop(ebp);
+ __ ret(0);
+#undef __
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB));
+ // Allocation failure returns a non-Code object; keep the safe default.
+ if (!code->IsCode()) return;
+ F0 f = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ uint32_t res = f();
+ supported_ = (res | (1 << CPUID));
+}
+
+
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32bit immediate field of an instruction which
+// may be used together with a Label in order to refer to a yet unknown code
+// position. Displacements stored in the instruction stream are used to describe
+// the instruction and to chain a list of instructions using the same Label.
+// A Displacement contains 2 different fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement _data field layout
+//
+// |31.....1|.......0|
+// | next   | type   |
+
+// Encodes a 32-bit displacement word: bit 0 is the instruction type and
+// bits 31..1 link to the previous use of the same (unbound) Label.
+class Displacement BASE_EMBEDDED {
+ private:
+ enum Type {
+ UNCONDITIONAL_JUMP,
+ OTHER
+ };
+
+ int data_;
+
+ class TypeField: public BitField<Type, 0, 1> {};
+ class NextField: public BitField<int, 1, 32-1> {};
+
+ // Pack |type| and the label's current link position into data_.
+ void init(Label* L, Type type) {
+ ASSERT(!L->is_bound());
+ int next = 0;
+ if (L->is_linked()) {
+ next = L->pos();
+ ASSERT(next > 0); // Displacements must be at positions > 0
+ }
+ // Ensure that we _never_ overflow the next field.
+ ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+ data_ = NextField::encode(next) | TypeField::encode(type);
+ }
+
+ int data() const { return data_; }
+ Type type() const { return TypeField::decode(data_); }
+ // Advance |L| to the next link in the chain, or mark it unused at the end.
+ void next(Label* L) const {
+ int n = NextField::decode(data_);
+ n > 0 ? L->link_to(n) : L->Unuse();
+ }
+ // Re-link this displacement to |L|, keeping the existing type.
+ void link_to(Label* L) { init(L, type()); }
+
+ explicit Displacement(int data) { data_ = data; }
+
+ Displacement(Label* L, Type type) { init(L, type); }
+
+ void print() {
+ PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+ NextField::decode(data_));
+ }
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+
+// TODO(1236137): Stop using macros here. The reason for using them is
+// to avoid declaring the Displacement class in the .h file and have
+// functions on the assembler that return them. Maybe that's not a
+// big issue?
+// disp_at:     read the Displacement stored at L's position.
+// disp_at_put: overwrite the Displacement stored at L's position.
+// emit_disp:   emit a new Displacement and link L to it.
+#define disp_at(L) \
+ Displacement(long_at((L)->pos()))
+
+#define disp_at_put(L, disp) \
+ long_at_put((L)->pos(), (disp).data())
+
+#define emit_disp(L, type) { \
+ Displacement disp((L), (type)); \
+ (L)->link_to(pc_offset()); \
+ emit(static_cast<int>(disp.data())); \
+ }
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+// Modes whose stored values are pc-relative and must be fixed up by
+// RelocInfo::apply when code moves.
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << runtime_entry | 1 << js_return;
+
+
+void RelocInfo::patch_code(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc_ + i) = *(instructions + i);
+ }
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::patch_code_with_call(Address target, int guard_bytes) {
+ // Call instruction takes up 5 bytes and int3 takes up one byte.
+ int code_size = 5 + guard_bytes;
+
+ // Patch the code.
+ CodePatcher patcher(pc_, code_size);
+ patcher.masm()->call(target, no_reloc);
+
+ // Add the requested number of int3 instructions after the call.
+ for (int i = 0; i < guard_bytes; i++) {
+ patcher.masm()->int3();
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+// Encode [base + disp], choosing the shortest form: no displacement,
+// disp8, or disp32. esp as base always needs a SIB byte; ebp with
+// mod == 0 would mean absolute addressing, so it always takes a disp.
+Operand::Operand(Register base, int32_t disp, RelocMode rmode) {
+ // [base + disp/r]
+ if (disp == 0 && rmode == no_reloc && !base.is(ebp)) {
+ // [base]
+ set_modrm(0, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ } else if (is_int8(disp) && rmode == no_reloc) {
+ // [base + disp8]
+ set_modrm(1, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_disp8(disp);
+ } else {
+ // [base + disp/r]
+ set_modrm(2, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_dispr(disp, rmode);
+ }
+}
+
+
+// Encode [base + index*scale + disp] with a SIB byte (r/m == esp selects
+// SIB addressing), again choosing the shortest displacement form.
+Operand::Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocMode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [base + index*scale + disp/r]
+ if (disp == 0 && rmode == no_reloc && !base.is(ebp)) {
+ // [base + index*scale]
+ set_modrm(0, esp);
+ set_sib(scale, index, base);
+ } else if (is_int8(disp) && rmode == no_reloc) {
+ // [base + index*scale + disp8]
+ set_modrm(1, esp);
+ set_sib(scale, index, base);
+ set_disp8(disp);
+ } else {
+ // [base + index*scale + disp/r]
+ set_modrm(2, esp);
+ set_sib(scale, index, base);
+ set_dispr(disp, rmode);
+ }
+}
+
+
+// Encode [index*scale + disp]: SIB with ebp as base and mod == 0 means
+// no base register, disp32 only.
+Operand::Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocMode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [index*scale + disp/r]
+ set_modrm(0, esp);
+ set_sib(scale, index, ebp);
+ set_dispr(disp, rmode);
+}
+
+
+// SIB byte: scale in bits 7..6, index in 5..3, base in 2..0.
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+ ASSERT(len_ == 1);
+ ASSERT((scale & -4) == 0);
+ buf_[1] = scale << 6 | index.code() << 3 | base.code();
+ len_ = 2;
+}
+
+
+void Operand::set_disp8(int8_t disp) {
+ ASSERT(len_ == 1 || len_ == 2);
+ *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
+// Fill in the reg field (bits 5..3) of the ModR/M byte.
+void Operand::set_reg(Register reg) const {
+ ASSERT(len_ > 0);
+ buf_[0] = (buf_[0] & ~0x38) | static_cast<byte>(reg.code() << 3);
+}
+
+
+// True iff this operand is the register-direct form of |reg|.
+bool Operand::is_reg(Register reg) const {
+ return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
+ && ((buf_[0] & 0x07) == reg.code()); // register codes match.
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Emit a single byte. Must always be inlined.
+#define EMIT(x) \
+ *pc_++ = (x)
+
+
+// spare_buffer_
+// One retired minimal-size buffer is cached here and reused by the next
+// Assembler instead of being freed (see constructor/destructor).
+static byte* spare_buffer_ = NULL;
+
+// Construct an assembler writing into |buffer| of |buffer_size| bytes.
+// With buffer == NULL the assembler allocates (or reuses the spare)
+// buffer itself and owns it; otherwise the caller retains ownership.
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Clear the buffer in debug mode unless it was provided by the
+ // caller in which case we can't be sure it's okay to overwrite
+ // existing code in it; see CodePatcher::CodePatcher(...).
+ if (kDebug && own_buffer_) {
+ memset(buffer_, 0xCC, buffer_size); // int3
+ }
+
+ // setup buffer pointers
+ // Instructions grow forward from the start; relocation info grows
+ // backward from the end of the buffer.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+ last_pc_ = NULL;
+ last_bound_pos_ = 0;
+ last_position_ = kNoPosition;
+ last_position_is_statement_ = false;
+}
+
+
+// Return an owned minimal buffer to the spare slot for reuse, or free it.
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+// Finalize assembly and fill in |desc| with buffer, instruction size and
+// relocation-info size (reloc info was written backward from buffer end).
+void Assembler::GetCode(CodeDesc* desc) {
+ // finalize code
+ if (unbound_label_.is_linked())
+ bind_to(&unbound_label_, binding_pos_);
+
+ // (at this point overflow() may be true, but the gap ensures that
+ // we are still not overlapping instructions and relocation info)
+ ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+// Pad with nops until pc_offset() is a multiple of m (a power of 2).
+void Assembler::Align(int m) {
+ ASSERT(IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+// CPUID (0F A2); only legal once the CPUID feature has been probed/enabled.
+void Assembler::cpuid() {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xA2);
+}
+
+
+// PUSHAD: push all general-purpose registers.
+void Assembler::pushad() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x60);
+}
+
+
+// POPAD: pop all general-purpose registers.
+void Assembler::popad() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x61);
+}
+
+
+// PUSHFD: push EFLAGS.
+void Assembler::pushfd() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x9C);
+}
+
+
+// POPFD: pop EFLAGS.
+void Assembler::popfd() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x9D);
+}
+
+
+// Push an immediate, using the short sign-extended imm8 form when possible.
+void Assembler::push(const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (x.is_int8()) {
+ EMIT(0x6a);
+ EMIT(x.x_);
+ } else {
+ EMIT(0x68);
+ emit(x);
+ }
+}
+
+
+DEFINE_bool(push_pop_elimination, true,
+ "eliminate redundant push/pops in assembly code");
+DEFINE_bool(print_push_pop_elimination, false,
+ "print elimination of redundant push/pops in assembly code");
+
+void Assembler::push(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x50 | src.code());
+}
+
+
+void Assembler::push(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(esi, src);
+}
+
+
+// Pop into a register, with a peephole pass over the immediately
+// preceding instruction: push reg / pop same reg cancels out;
+// push [op] / pop reg becomes mov reg,[op]; mov [esp],eax / pop eax
+// becomes add esp,4. The rewrite is only safe when no relocation info
+// was emitted after last_pc_ (checked against reloc_info_writer).
+void Assembler::pop(Register dst) {
+ ASSERT(reloc_info_writer.last_pc() != NULL);
+ if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+ // (last_pc_ != NULL) is rolled into the above check
+ // If a last_pc_ is set, we need to make sure that there has not been any
+ // relocation information generated between the last instruction and this
+ // pop instruction.
+ byte instr = last_pc_[0];
+ if (instr == (0x50 | dst.code())) {
+ pc_ = last_pc_;
+ last_pc_ = NULL;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
+ }
+ return;
+ } else if (instr == 0xff) { // push of an operand, convert to a move
+ byte op1 = last_pc_[1];
+ // Check if the operation is really a push
+ if ((op1 & 0x38) == (6 << 3)) {
+ // Rewrite the push opcode into mov and retarget the ModR/M reg
+ // field at the pop destination.
+ op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
+ last_pc_[0] = 0x8b;
+ last_pc_[1] = op1;
+ last_pc_ = NULL;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
+ }
+ return;
+ }
+ } else if ((instr == 0x89) &&
+ (last_pc_[1] == 0x04) &&
+ (last_pc_[2] == 0x24)) {
+ // 0x71283c 396 890424 mov [esp],eax
+ // 0x71283f 399 58 pop eax
+ if (dst.is(eax)) {
+ // change to
+ // 0x710fac 216 83c404 add esp,0x4
+ last_pc_[0] = 0x83;
+ last_pc_[1] = 0xc4;
+ last_pc_[2] = 0x04;
+ last_pc_ = NULL;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
+ }
+ return;
+ }
+ }
+ // Other potential patterns for peephole:
+ // 0x712716 102 890424 mov [esp], eax
+ // 0x712719 105 8b1424 mov edx, [esp]
+ }
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x58 | dst.code());
+}
+
+
+void Assembler::pop(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8F);
+ emit_operand(eax, dst);
+}
+
+
+// Byte, word (0x66 operand-size prefix) and dword moves, plus
+// sign/zero-extending loads. In emit_operand calls the register argument
+// selects the /digit or reg field of the ModR/M byte.
+void Assembler::mov_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8A);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC6);
+ emit_operand(eax, dst);
+ EMIT(imm8);
+}
+
+
+void Assembler::mov_b(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x88);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_w(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xB8 | dst.code());
+ emit(imm32);
+}
+
+
+void Assembler::mov(Register dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xB8 | dst.code());
+ emit(handle);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(x);
+}
+
+
+void Assembler::mov(const Operand& dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(handle);
+}
+
+
+void Assembler::mov(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+// movsx/movzx: sign- and zero-extending byte/word loads (0F BE/BF/B6/B7).
+void Assembler::movsx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xBE);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movsx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xBF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xB6);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xB7);
+ emit_operand(dst, src);
+}
+
+
+// Conditional moves: declared for completeness but not yet implemented;
+// each variant asserts CMOV support and then aborts via UNIMPLEMENTED().
+void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ UNIMPLEMENTED();
+ USE(cc);
+ USE(dst);
+ USE(imm32);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ UNIMPLEMENTED();
+ USE(cc);
+ USE(dst);
+ USE(handle);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ UNIMPLEMENTED();
+ USE(cc);
+ USE(dst);
+ USE(src);
+}
+
+
+// Group-1 arithmetic. emit_arith's first argument is the /digit opcode
+// extension selecting the operation (0=add, 1=or, 2=adc, 4=and, 5=sub,
+// 6=xor, 7=cmp).
+void Assembler::adc(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(2, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::adc(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x13);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x03);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(0, dst, x);
+}
+
+
+void Assembler::and_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(4, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::and_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x23);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::and_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(4, dst, x);
+}
+
+
+void Assembler::and_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x21);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cmp(Register reg, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(7, Operand(reg), Immediate(imm32));
+}
+
+
+void Assembler::cmp(Register reg, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(7, Operand(reg), Immediate(handle));
+}
+
+
+void Assembler::cmp(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x3B);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmp(const Operand& op, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(7, op, imm);
+}
+
+
+// dec/inc use the short one-byte register forms (0x48+r / 0x40+r);
+// div/mul/neg/not share opcode 0xF7 with a /digit register encoding.
+void Assembler::dec_b(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFE);
+ EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::dec(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x48 | dst.code());
+}
+
+
+void Assembler::dec(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(ecx, dst);
+}
+
+
+// CDQ: sign-extend eax into edx:eax (used before idiv).
+void Assembler::cdq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x99);
+}
+
+
+void Assembler::idiv(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xF8 | src.code());
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xAF);
+ emit_operand(dst, src);
+}
+
+
+// imul with immediate: short form 0x6B for sign-extended imm8,
+// long form 0x69 with imm32 otherwise.
+void Assembler::imul(Register dst, Register src, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (is_int8(imm32)) {
+ EMIT(0x6B);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ EMIT(imm32);
+ } else {
+ EMIT(0x69);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ emit(imm32);
+ }
+}
+
+
+void Assembler::inc(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x40 | dst.code());
+}
+
+
+void Assembler::inc(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(eax, dst);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8D);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mul(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xE0 | src.code());
+}
+
+
+void Assembler::neg(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xD8 | dst.code());
+}
+
+
+void Assembler::not_(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xD0 | dst.code());
+}
+
+
+// or_ variants, and the shift/rotate group. Shift-by-immediate uses the
+// one-byte-shorter 0xD1 form when the count is 1, otherwise 0xC1 + imm8;
+// shift-by-cl uses 0xD3. Counts are asserted to fit in 5 bits.
+void Assembler::or_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(1, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::or_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::or_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(1, dst, x);
+}
+
+
+void Assembler::or_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x09);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xD0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xD0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::sar(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xF8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xF8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+// sar by cl.
+void Assembler::sar(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD3);
+ EMIT(0xF8 | dst.code());
+}
+
+
+void Assembler::sbb(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x1B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shld(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xA5);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+// shl by cl.
+void Assembler::shl(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD3);
+ EMIT(0xE0 | dst.code());
+}
+
+
+void Assembler::shrd(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xAD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shr(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ EMIT(0xC1);
+ EMIT(0xE8 | dst.code());
+ EMIT(imm8);
+}
+
+
+// shr by cl.
+void Assembler::shr(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD3);
+ EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::sub(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(5, dst, x);
+}
+
+
+void Assembler::sub(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x2B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::sub(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x29);
+ emit_operand(dst, src);
+}
+
+
+// Test register against an immediate, preferring the byte forms when the
+// value fits in 8 bits and the register has a byte variant.
+void Assembler::test(Register reg, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (imm.rmode_ == no_reloc && is_uint8(imm.x_) && reg.code() < 4) {
+ uint8_t imm8 = imm.x_;
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ }
+ } else {
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ }
+ emit(imm);
+ }
+}
+
+
+void Assembler::test(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x85);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::test(const Operand& op, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ emit_operand(eax, op);
+ emit(imm);
+}
+
+
+void Assembler::xor_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(6, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::xor_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x33);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& src, Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x31);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(6, dst, x);
+}
+
+
+// BTS: bit test and set (0F AB).
+void Assembler::bts(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xAB);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::hlt() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF4);
+}
+
+
+// INT3: software breakpoint.
+void Assembler::int3() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xCC);
+}
+
+
+void Assembler::nop() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x90);
+}
+
+
+// RDTSC: read time-stamp counter; requires the probed RDTSC feature.
+void Assembler::rdtsc() {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0x31);
+}
+
+
+// RET, optionally popping imm16 bytes of arguments (RET imm16 form).
+void Assembler::ret(int imm16) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint16(imm16));
+ if (imm16 == 0) {
+ EMIT(0xC3);
+ } else {
+ EMIT(0xC2);
+ EMIT(imm16 & 0xFF);
+ EMIT((imm16 >> 8) & 0xFF);
+ }
+}
+
+
+void Assembler::leave() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC9);
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the 32bit
+// Displacement of the last instruction using the label.
+
+
+// Debug helper: dump a label's state, walking the linked-displacement
+// chain for unbound labels. Works on a local copy so *L is not mutated.
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;  // iterate over a copy; leave the real label intact
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      Displacement disp = disp_at(&l);
+      PrintF("@ %d ", l.pos());
+      disp.print();
+      PrintF("\n");
+      disp.next(&l);  // advance to the previous link in the chain
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+DEFINE_bool(eliminate_jumps, true, "eliminate jumps to jumps in assembly code");
+DEFINE_bool(print_jump_elimination, false,
+            "print elimination of jumps to jumps in assembly code");
+
+// Bind L to code offset pos: patch every 32-bit displacement on L's
+// linked chain to be pc-relative to pos, then mark L bound.
+void Assembler::bind_to(Label* L, int pos) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    Displacement disp = disp_at(L);
+    int fixup_pos = L->pos();  // offset of the disp32 to patch
+    if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
+      ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
+    }
+    // Displacement is relative to the end of the 4-byte field.
+    int imm32 = pos - (fixup_pos + sizeof(int32_t));
+    long_at_put(fixup_pos, imm32);
+    disp.next(L);  // follow the chain to the next fixup
+  }
+  L->bind_to(pos);
+
+  // do not eliminate jump instructions before the last bound position
+  if (pos > last_bound_pos_)
+    last_bound_pos_ = pos;
+}
+
+
+// Splice appendix's linked chain onto the end of L's chain, leaving
+// appendix unused. If L has no chain, L simply takes over appendix's.
+void Assembler::link_to(Label* L, Label* appendix) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // append appendix to L's list
+      Label p;
+      Label q = *L;
+      do {
+        p = q;  // p trails q; after the loop p is the last link
+        Displacement disp = disp_at(&q);
+        disp.next(&q);
+      } while (q.is_linked());
+      Displacement disp = disp_at(&p);
+      disp.link_to(appendix);  // rewrite the tail link to point at appendix
+      disp_at_put(&p, disp);
+      p.Unuse();  // to avoid assertion failure in ~Label
+    } else {
+      // L is empty, simply use appendix
+      *L = *appendix;
+    }
+  }
+  appendix->Unuse();  // appendix should not be used anymore
+}
+
+
+// Bind L to the current pc offset. When FLAG_eliminate_jumps is set,
+// this additionally (a) resolves any previously delayed unbound label,
+// (b) removes trailing "jmp to next instruction" sequences targeting L,
+// and (c) delays L's own fixup via unbound_label_ so a following jmp
+// can be merged. NOTE(review): after the flag block L has been handed
+// to unbound_label_ and Unuse()d, so the final bind_to binds an empty
+// label — the delayed fixup happens on the next bind() — presumably
+// intentional; confirm against callers before restructuring.
+void Assembler::bind(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  ASSERT(!L->is_bound());  // label can only be bound once
+  if (FLAG_eliminate_jumps) {
+    // Resolve unbound label.
+    if (unbound_label_.is_linked()) {
+      // Unbound label exists => link it with L if same binding
+      // position, otherwise fix it.
+      if (binding_pos_ == pc_offset()) {
+        // Link it to L's list.
+        link_to(L, &unbound_label_);
+      } else {
+        // Otherwise bind unbound label.
+        ASSERT(binding_pos_ < pc_offset());
+        bind_to(&unbound_label_, binding_pos_);
+      }
+    }
+    ASSERT(!unbound_label_.is_linked());
+    // try to eliminate jumps to next instruction
+    const int absolute_jump_size = 5;  // E9 + disp32
+    // Do not remove an already bound jump target.
+    while (last_bound_pos_ < pc_offset() &&
+           reloc_info_writer.last_pc() <= pc_ - absolute_jump_size &&
+           L->is_linked() &&
+           (L->pos() + static_cast<int>(sizeof(int32_t)) == pc_offset()) &&
+           (disp_at(L).type() == Displacement::UNCONDITIONAL_JUMP)) {
+      // Previous instruction is jump jumping immediately after it =>
+      // eliminate it.
+      // jmp expected.
+      ASSERT(byte_at(pc_offset() - absolute_jump_size) == 0xE9);
+      if (FLAG_print_jump_elimination) {
+        PrintF("@ %d jump to next eliminated\n", L->pos());
+      }
+      // Remove first entry from label list.
+      Displacement disp = disp_at(L);
+      disp.next(L);
+      // Eliminate instruction (set code pointers back).
+      pc_ -= absolute_jump_size;
+      // Make sure not to skip relocation information when rewinding.
+      ASSERT(reloc_info_writer.last_pc() <= pc_);
+    }
+    // Delay fixup of L => store it as unbound label.
+    unbound_label_ = *L;
+    binding_pos_ = pc_offset();
+    L->Unuse();
+  }
+  bind_to(L, pc_offset());
+}
+
+
+// call to a label: E8 rel32. Bound labels get a concrete backward
+// displacement; unbound labels are linked for later fixup.
+void Assembler::call(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int long_size = 5;  // E8 + disp32
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);  // bound targets are always behind us
+    // 1110 1000 #32-bit disp
+    EMIT(0xE8);
+    emit(offs - long_size);  // relative to the end of this instruction
+  } else {
+    // 1110 1000 #32-bit disp
+    EMIT(0xE8);
+    emit_disp(L, Displacement::OTHER);
+  }
+}
+
+
+// call to an absolute code address, recorded as pc-relative via rmode.
+void Assembler::call(byte* entry, RelocMode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(!is_code_target(rmode));
+  EMIT(0xE8);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);  // rel32 from next insn
+}
+
+
+// indirect call through r/m32 (FF /2).
+void Assembler::call(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(edx, adr);  // edx (code 2) supplies the /2 extension
+}
+
+
+// call to a Code object; the target is patched via relocation info.
+void Assembler::call(Handle<Code> code, RelocMode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_code_target(rmode));
+  EMIT(0xE8);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+// jmp to a label. Bound targets use the short EB rel8 form when the
+// displacement fits in 8 bits, E9 rel32 otherwise. Unbound targets
+// always use the long form and are linked for later fixup; if the
+// current position is itself a pending jump target, the pending
+// label's chain is merged into L (jump-to-jump elimination).
+void Assembler::jmp(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int short_size = 2;  // EB + disp8
+    const int long_size = 5;   // E9 + disp32
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    if (is_int8(offs - short_size)) {
+      // 1110 1011 #8-bit disp
+      EMIT(0xEB);
+      EMIT((offs - short_size) & 0xFF);
+    } else {
+      // 1110 1001 #32-bit disp
+      EMIT(0xE9);
+      emit(offs - long_size);
+    }
+  } else {
+    if (FLAG_eliminate_jumps &&
+        unbound_label_.is_linked() &&
+        binding_pos_ == pc_offset()) {
+      // Current position is target of jumps
+      if (FLAG_print_jump_elimination) {
+        PrintF("eliminated jumps/calls to %d from ", binding_pos_);
+        print(&unbound_label_);
+      }
+      link_to(L, &unbound_label_);
+    }
+    // 1110 1001 #32-bit disp
+    EMIT(0xE9);
+    emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
+  }
+}
+
+
+// jmp to an absolute code address, encoded pc-relative (E9 rel32).
+void Assembler::jmp(byte* entry, RelocMode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(!is_code_target(rmode));
+  EMIT(0xE9);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);  // rel32 from next insn
+}
+
+
+// indirect jmp through r/m32 (FF /4).
+void Assembler::jmp(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(esp, adr);  // esp (code 4) supplies the /4 extension
+}
+
+
+// jmp to a Code object; the target is patched via relocation info.
+void Assembler::jmp(Handle<Code> code, RelocMode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_code_target(rmode));
+  EMIT(0xE9);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+
+// Conditional jump to a label (70+cc rel8 short form when bound and
+// near, 0F 80+cc rel32 otherwise). An optional 2E/3E branch-hint
+// prefix byte is emitted when hints are enabled.
+void Assembler::j(Condition cc, Label* L, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(0 <= cc && cc < 16);
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  if (L->is_bound()) {
+    const int short_size = 2;  // 7x + disp8
+    const int long_size = 6;   // 0F 8x + disp32
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    if (is_int8(offs - short_size)) {
+      // 0111 tttn #8-bit disp
+      EMIT(0x70 | cc);
+      EMIT((offs - short_size) & 0xFF);
+    } else {
+      // 0000 1111 1000 tttn #32-bit disp
+      EMIT(0x0F);
+      EMIT(0x80 | cc);
+      emit(offs - long_size);
+    }
+  } else {
+    // 0000 1111 1000 tttn #32-bit disp
+    // Note: could eliminate cond. jumps to this jump if condition
+    // is the same however, seems to be rather unlikely case.
+    EMIT(0x0F);
+    EMIT(0x80 | cc);
+    emit_disp(L, Displacement::OTHER);
+  }
+}
+
+
+// Conditional jump to an absolute address, encoded pc-relative.
+void Assembler::j(Condition cc, byte* entry, RelocMode rmode, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT((0 <= cc) && (cc < 16));
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  // 0000 1111 1000 tttn #32-bit disp
+  EMIT(0x0F);
+  EMIT(0x80 | cc);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+// Conditional jump to a Code object; patched via relocation info.
+void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  // 0000 1111 1000 tttn #32-bit disp
+  EMIT(0x0F);
+  EMIT(0x80 | cc);
+  emit(reinterpret_cast<intptr_t>(code.location()), code_target);
+}
+
+
+// FPU instructions
+
+
+// fld st(i): push a copy of st(i) onto the FPU stack (D9 C0+i).
+void Assembler::fld(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC0, i);
+}
+
+
+// fld1: push the constant +1.0 (D9 E8).
+void Assembler::fld1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE8);
+}
+
+
+// fldz: push the constant +0.0 (D9 EE).
+void Assembler::fldz() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xEE);
+}
+
+
+// fld_s: push a 32-bit float from memory (D9 /0).
+void Assembler::fld_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  emit_operand(eax, adr);  // /0
+}
+
+
+// fld_d: push a 64-bit double from memory (DD /0).
+void Assembler::fld_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDD);
+  emit_operand(eax, adr);  // /0
+}
+
+
+// fstp_s: store st(0) to memory as 32-bit float and pop (D9 /3).
+void Assembler::fstp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  emit_operand(ebx, adr);  // /3
+}
+
+
+// fstp_d: store st(0) to memory as 64-bit double and pop (DD /3).
+void Assembler::fstp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDD);
+  emit_operand(ebx, adr);  // /3
+}
+
+
+// fild_s: push a 32-bit signed integer from memory (DB /0).
+void Assembler::fild_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(eax, adr);  // /0
+}
+
+
+// fild_d: push a 64-bit signed integer from memory (DF /5).
+void Assembler::fild_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  emit_operand(ebp, adr);  // /5
+}
+
+
+// fistp_s: store st(0) as 32-bit integer and pop (DB /3).
+void Assembler::fistp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(ebx, adr);  // /3
+}
+
+
+// fist_s: store st(0) as 32-bit integer without popping (DB /2).
+void Assembler::fist_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(edx, adr);  // /2
+}
+
+
+// fistp_d: store st(0) as 64-bit integer and pop (DF /7).
+void Assembler::fistp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  emit_operand(edi, adr);  // /7
+}
+
+
+// fabs: st(0) = |st(0)| (D9 E1).
+void Assembler::fabs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE1);
+}
+
+
+// fchs: st(0) = -st(0) (D9 E0).
+void Assembler::fchs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE0);
+}
+
+
+// fadd: st(i) += st(0) (DC C0+i).
+void Assembler::fadd(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC0, i);
+}
+
+
+// fsub: st(i) -= st(0) (DC E8+i).
+void Assembler::fsub(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xE8, i);
+}
+
+
+// fisub_s: st(0) -= 32-bit integer from memory (DA /4).
+void Assembler::fisub_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDA);
+  emit_operand(esp, adr);  // /4
+}
+
+
+// fmul: st(i) *= st(0) (DC C8+i).
+void Assembler::fmul(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC8, i);
+}
+
+
+// fdiv: st(i) /= st(0) (DC F8+i).
+void Assembler::fdiv(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xF8, i);
+}
+
+
+// faddp: st(i) += st(0), then pop (DE C0+i).
+void Assembler::faddp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC0, i);
+}
+
+
+// fsubp: st(i) -= st(0), then pop (DE E8+i).
+void Assembler::fsubp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE8, i);
+}
+
+
+// fsubrp: st(i) = st(0) - st(i), then pop (DE E0+i).
+void Assembler::fsubrp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE0, i);
+}
+
+
+// fmulp: st(i) *= st(0), then pop (DE C8+i).
+void Assembler::fmulp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC8, i);
+}
+
+
+// fdivp: st(i) /= st(0), then pop (DE F8+i).
+void Assembler::fdivp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xF8, i);
+}
+
+
+// fprem: partial remainder of st(0)/st(1), truncating (D9 F8).
+void Assembler::fprem() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF8);
+}
+
+
+// fprem1: IEEE partial remainder of st(0)/st(1) (D9 F5).
+void Assembler::fprem1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF5);
+}
+
+
+// fxch: exchange st(0) and st(i) (D9 C8+i).
+void Assembler::fxch(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC8, i);
+}
+
+
+// fincstp: increment the FPU stack-top pointer (D9 F7).
+void Assembler::fincstp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF7);
+}
+
+
+// ffree: mark st(i) as empty (DD C0+i).
+void Assembler::ffree(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xC0, i);
+}
+
+
+// ftst: compare st(0) with 0.0, setting FPU condition codes (D9 E4).
+void Assembler::ftst() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE4);
+}
+
+
+// fucomp: unordered compare st(0) with st(i), then pop (DD E8+i).
+void Assembler::fucomp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xE8, i);
+}
+
+
+// fucompp: unordered compare st(0) with st(1), popping both (DA E9).
+void Assembler::fucompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDA);
+  EMIT(0xE9);
+}
+
+
+// fcompp: compare st(0) with st(1), popping both (DE D9).
+void Assembler::fcompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDE);
+  EMIT(0xD9);
+}
+
+
+// fnstsw_ax: store the FPU status word in AX without checking for
+// pending unmasked exceptions (DF E0).
+void Assembler::fnstsw_ax() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);  // normalized from 0xdF: same byte, matches the file's uppercase hex
+  EMIT(0xE0);
+}
+
+
+// fwait: wait for pending FPU exceptions (9B).
+void Assembler::fwait() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9B);
+}
+
+
+// frndint: round st(0) to integer per the current rounding mode (D9 FC).
+void Assembler::frndint() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFC);
+}
+
+
+// sahf: load AH into the low byte of EFLAGS (9E); used after fnstsw_ax
+// to move FPU condition codes into CPU flags.
+void Assembler::sahf() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9E);
+}
+
+
+// cvttss2si: truncating float32 -> int32 conversion (F3 0F 2C).
+// All SSE2 emitters below require the SSE2 CpuFeatures scope.
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x2C);
+  emit_operand(dst, src);
+}
+
+
+// cvttsd2si: truncating float64 -> int32 conversion (F2 0F 2C).
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x2C);
+  emit_operand(dst, src);
+}
+
+
+// cvtsi2sd: int32 -> float64 conversion into an XMM register (F2 0F 2A).
+void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+// addsd: scalar double add, dst += src (F2 0F 58).
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+// mulsd: scalar double multiply, dst *= src (F2 0F 59).
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+// subsd: scalar double subtract, dst -= src (F2 0F 5C).
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+// divsd: scalar double divide, dst /= src (F2 0F 5E).
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+// movdbl: load a double; currently an alias for movsd (load form).
+void Assembler::movdbl(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  movsd(dst, src);
+}
+
+
+// movdbl: store a double; currently an alias for movsd (store form).
+void Assembler::movdbl(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  movsd(dst, src);
+}
+
+
+// movsd store form: memory <- XMM (F2 0F 11).
+void Assembler::movsd(const Operand& dst, XMMRegister src ) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);  // double
+  EMIT(0x0F);
+  EMIT(0x11);  // store
+  emit_sse_operand(src, dst);
+}
+
+
+// movsd load form: XMM <- memory (F2 0F 10).
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);  // double
+  EMIT(0x0F);
+  EMIT(0x10);  // load
+  emit_sse_operand(dst, src);
+}
+
+
+// Emit a ModR/M operand whose reg field is an XMM register, by
+// wrapping its code in a general-purpose Register for emit_operand.
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+  Register ireg = { reg.code() };
+  emit_operand(ireg, adr);
+}
+
+
+// Emit a register-to-register ModR/M byte (mod=11) for two XMM regs.
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+  EMIT(0xC0 | dst.code() << 3 | src.code());
+}
+
+
+// Debug helper: disassemble everything emitted so far to stdout.
+void Assembler::Print() {
+  Disassembler::Decode(stdout, buffer_, pc_);
+}
+
+
+// Mark the current pc as a JS function return site in the reloc info.
+void Assembler::RecordJSReturn() {
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(js_return);
+}
+
+
+// Attach a comment to the current pc (debug-code builds only). The
+// message pointer is stored in the reloc data, so msg must outlive
+// the generated code's reloc info.
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(comment, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+// Record a source position for the code about to be emitted.
+// No-ops for kNoPosition and for a repeat of the last position.
+void Assembler::RecordPosition(int pos) {
+  if (pos == kNoPosition) return;
+  // Was ASSERT(position >= 0), which vacuously tested the RelocMode
+  // enum constant 'position' instead of the parameter.
+  ASSERT(pos >= 0);
+  if (pos == last_position_) return;
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(position, pos);
+  last_position_ = pos;
+  last_position_is_statement_ = false;
+}
+
+
+// Record a statement-level source position for the code about to be
+// emitted. No-ops for kNoPosition and for a repeat of the last position.
+void Assembler::RecordStatementPosition(int pos) {
+  if (pos == kNoPosition) return;
+  // Was ASSERT(position >= 0), which vacuously tested the RelocMode
+  // enum constant 'position' instead of the parameter.
+  ASSERT(pos >= 0);
+  if (pos == last_position_) return;
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(statement_position, pos);
+  last_position_ = pos;
+  last_position_is_statement_ = true;
+}
+
+
+// Grow the code buffer (doubling, 4KB minimum), copying instructions
+// to the front and reloc info to the back of the new buffer, then
+// rebase all internal pointers and runtime-entry displacements.
+void Assembler::GrowBuffer() {
+  ASSERT(overflow());  // should not call this otherwise
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // compute new buffer size
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else {
+    desc.buffer_size = 2*buffer_size_;
+  }
+  // Some internal data structures overflow for very large buffers,
+  // they must ensure that kMaximalBufferSize is not too large.
+  if ((desc.buffer_size > kMaximalBufferSize) ||
+      (desc.buffer_size > Heap::OldGenerationSize())) {
+    V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+  }
+
+  // setup new buffer
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.instr_size = pc_offset();
+  // Reloc info grows downward from the end of the buffer.
+  desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+  // Clear the buffer in debug mode. Use 'int3' instructions to make
+  // sure to get into problems if we ever run uninitialized code.
+  if (kDebug) {
+    memset(desc.buffer, 0xCC, desc.buffer_size);
+  }
+
+  // copy the data
+  int pc_delta = desc.buffer - buffer_;                     // shift for code pointers
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(rc_delta + reloc_info_writer.pos(),
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // switch buffers
+  if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+    spare_buffer_ = buffer_;  // keep one minimal buffer for reuse
+  } else {
+    DeleteArray(buffer_);
+  }
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  if (last_pc_ != NULL) {
+    last_pc_ += pc_delta;
+  }
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // relocate runtime entries
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocMode rmode = it.rinfo()->rmode();
+    if (rmode == runtime_entry) {
+      // Runtime entries are pc-relative, so moving the code by
+      // pc_delta requires adjusting each stored displacement.
+      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+      *p -= pc_delta;  // relocate entry
+    }
+  }
+
+  ASSERT(!overflow());
+}
+
+
+// Emit a two-byte arithmetic op on an 8-bit register with an 8-bit
+// immediate: op1, op2|reg, imm8.
+void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+  ASSERT(is_uint8(op1) && is_uint8(op2));  // wrong opcode
+  ASSERT(is_uint8(imm8));
+  ASSERT((op1 & 0x01) == 0);  // should be 8bit operation
+  EMIT(op1);
+  EMIT(op2 | dst.code());
+  EMIT(imm8);
+}
+
+
+// Emit a group-1 arithmetic instruction (add/or/adc/sbb/and/sub/xor/cmp,
+// chosen by sel 0..7) with an immediate, picking the shortest encoding:
+// 83 /sel ib for int8 immediates, the one-byte eax short form, or 81 /sel id.
+void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
+  ASSERT((0 <= sel) && (sel <= 7));
+  Register ireg = { sel };  // sel rides in the ModR/M reg field
+  if (x.is_int8()) {
+    EMIT(0x83);  // using a sign-extended 8-bit immediate.
+    emit_operand(ireg, dst);
+    EMIT(x.x_ & 0xFF);
+  } else if (dst.is_reg(eax)) {
+    EMIT((sel << 3) | 0x05);  // short form if the destination is eax.
+    emit(x);
+  } else {
+    EMIT(0x81);  // using a literal 32-bit immediate.
+    emit_operand(ireg, dst);
+    emit(x);
+  }
+}
+
+
+// Emit the ModR/M-SIB-displacement bytes of adr with reg patched into
+// the ModR/M reg field, recording reloc info for a disp32 if needed.
+void Assembler::emit_operand(Register reg, const Operand& adr) {
+  adr.set_reg(reg);
+  memmove(pc_, adr.buf_, adr.len_);
+  pc_ += adr.len_;
+  if (adr.len_ >= sizeof(int32_t) && adr.rmode_ != no_reloc) {
+    pc_ -= sizeof(int32_t);  // pc_ must be *at* disp32
+    RecordRelocInfo(adr.rmode_);
+    pc_ += sizeof(int32_t);
+  }
+}
+
+
+// Argument-order convenience overload; identical behavior to
+// emit_operand(Register, const Operand&) above.
+void Assembler::emit_operand(const Operand& adr, Register reg) {
+  adr.set_reg(reg);
+  memmove(pc_, adr.buf_, adr.len_);
+  pc_ += adr.len_;
+  if (adr.len_ >= sizeof(int32_t) && adr.rmode_ != no_reloc) {
+    pc_ -= sizeof(int32_t);  // pc_ must be *at* disp32
+    RecordRelocInfo(adr.rmode_);
+    pc_ += sizeof(int32_t);
+  }
+}
+
+
+// Emit a two-byte x87 instruction whose second byte encodes a stack
+// slot: b1, b2+i with i in [0, 8).
+void Assembler::emit_farith(int b1, int b2, int i) {
+  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  ASSERT(0 <= i && i < 8);  // illegal stack offset
+  EMIT(b1);
+  EMIT(b2 + i);
+}
+
+
+// Write a relocation record for the current pc with optional payload.
+void Assembler::RecordRelocInfo(RelocMode rmode, intptr_t data) {
+  ASSERT(rmode != no_reloc);
+  RelocInfo rinfo(pc_, rmode, data);
+  reloc_info_writer.Write(&rinfo);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_ASSEMBLER_IA32_H_
+#define V8_ASSEMBLER_IA32_H_
+
+namespace v8 { namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// A general-purpose ia32 register, identified by its 3-bit encoding
+// (0..7). Deliberately a plain struct so instances can be C-style
+// initialized; see the rationale comment above.
+struct Register {
+  // True iff code_ is a legal ia32 register number.
+  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  // Identity comparison by register number.
+  bool is(Register reg) const { return code_ == reg.code_; }
+  // The 3-bit encoding used in ModR/M and opcode bytes.
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+  // A single-bit mask for this register (used e.g. for pushad-style sets).
+  int bit() const {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // (unfortunately we can't make this private in a struct)
+  int code_;
+};
+
+extern Register eax;
+extern Register ecx;
+extern Register edx;
+extern Register ebx;
+extern Register esp;
+extern Register ebp;
+extern Register esi;
+extern Register edi;
+extern Register no_reg;
+
+
+// An SSE register. Same plain-struct design as Register; only xmm0
+// and xmm1 are considered valid for now (see is_valid).
+struct XMMRegister {
+  bool is_valid() const { return 0 <= code_ && code_ < 2; }  // currently
+  // The 3-bit encoding used in ModR/M bytes.
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  // Register number; public for C-style initialization.
+  int code_;
+};
+
+extern XMMRegister xmm0;
+extern XMMRegister xmm1;
+extern XMMRegister xmm2;
+extern XMMRegister xmm3;
+extern XMMRegister xmm4;
+extern XMMRegister xmm5;
+extern XMMRegister xmm6;
+extern XMMRegister xmm7;
+
+// ia32 condition codes, matching the 4-bit 'tttn' field of the Jcc /
+// SETcc / CMOVcc encodings (below/above are unsigned, less/greater
+// are signed comparisons).
+enum Condition {
+  // any value < 0 is considered no_condition
+  no_condition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  below         =  2,
+  above_equal   =  3,
+  equal         =  4,
+  not_equal     =  5,
+  below_equal   =  6,
+  above         =  7,
+  sign          =  8,
+  not_sign      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+
+  // aliases
+  zero          = equal,
+  not_zero      = not_equal,
+  negative      = sign,
+  positive      = not_sign
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case below:
+ return above;
+ case above:
+ return below;
+ case above_equal:
+ return below_equal;
+ case below_equal:
+ return above_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ };
+}
+
+// Branch-hint prefix bytes emitted before conditional jumps
+// (2E = not taken, 3E = taken); no_hint emits nothing.
+enum Hint {
+  no_hint = 0,
+  not_taken = 0x2e,
+  taken = 0x3e
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+// A 32-bit immediate operand, optionally carrying a relocation mode
+// (for external references, object handles, etc.). Only the Assembler
+// reads the raw fields.
+class Immediate BASE_EMBEDDED {
+ public:
+  inline explicit Immediate(int x);
+  inline explicit Immediate(const char* s);
+  inline explicit Immediate(const ExternalReference& ext);
+  inline explicit Immediate(Handle<Object> handle);
+  inline explicit Immediate(Smi* value);
+
+  // Relocated immediates never qualify — their value is patched later.
+  bool is_zero() const { return x_ == 0 && rmode_ == no_reloc; }
+  bool is_int8() const { return -128 <= x_ && x_ < 128 && rmode_ == no_reloc; }
+
+ private:
+  int x_;            // the immediate value
+  RelocMode rmode_;  // relocation mode; no_reloc for plain constants
+
+  friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Index scale for SIB addressing; the enum value is the 2-bit 'ss'
+// field (scale = 1 << value).
+enum ScaleFactor {
+  times_1 = 0,
+  times_2 = 1,
+  times_4 = 2,
+  times_8 = 3
+};
+
+
+// A pre-encoded ia32 memory/register operand: the ModR/M byte,
+// optional SIB byte, and optional displacement are built into buf_
+// at construction; the Assembler later patches in the reg field and
+// copies the bytes into the instruction stream.
+class Operand BASE_EMBEDDED {
+ public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
+  // [disp/r]
+  INLINE(explicit Operand(int32_t disp, RelocMode rmode));
+  // disp only must always be relocated
+
+  // [base + disp/r]
+  explicit Operand(Register base, int32_t disp, RelocMode rmode = no_reloc);
+
+  // [base + index*scale + disp/r]
+  explicit Operand(Register base,
+                   Register index,
+                   ScaleFactor scale,
+                   int32_t disp,
+                   RelocMode rmode = no_reloc);
+
+  // [index*scale + disp/r]
+  explicit Operand(Register index,
+                   ScaleFactor scale,
+                   int32_t disp,
+                   RelocMode rmode = no_reloc);
+
+  // Absolute address of a VM-external variable, with reloc info.
+  static Operand StaticVariable(const ExternalReference& ext) {
+    return Operand(reinterpret_cast<int32_t>(ext.address()),
+                   external_reference);
+  }
+
+  // Indexed access into a VM-external array, with reloc info.
+  static Operand StaticArray(Register index,
+                             ScaleFactor scale,
+                             const ExternalReference& arr) {
+    return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
+                   external_reference);
+  }
+
+  // Returns true if this Operand is a wrapper for the specified register.
+  bool is_reg(Register reg) const;
+
+ private:
+  // Mutable because reg in ModR/M byte is set by Assembler via set_reg().
+  mutable byte buf_[6];  // ModR/M [+ SIB] [+ disp8/disp32]
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocMode rmode_;
+
+  inline void set_modrm(int mod,  // reg == 0
+                        Register rm);
+  inline void set_sib(ScaleFactor scale, Register index, Register base);
+  inline void set_disp8(int8_t disp);
+  inline void set_dispr(int32_t disp, RelocMode rmode);
+  inline void set_reg(Register reg) const;
+
+  friend class Assembler;
+};
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+// Example:
+// if (CpuFeatures::IsSupported(SSE2)) {
+// CpuFeatures::Scope fscope(SSE2);
+// // Generate SSE2 floating point code.
+// } else {
+// // Generate standard x87 floating point code.
+// }
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+// Example:
+//   if (CpuFeatures::IsSupported(SSE2)) {
+//     CpuFeatures::Scope fscope(SSE2);
+//     // Generate SSE2 floating point code.
+//   } else {
+//     // Generate standard x87 floating point code.
+//   }
+class CpuFeatures : public AllStatic {
+ public:
+  // Feature flags bit positions. They are mostly based on the CPUID spec.
+  // (We assign CPUID itself to one of the currently reserved bits --
+  // feel free to change this if needed.)
+  enum Feature { SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
+  // Check whether a feature is supported by the target CPU.
+  static bool IsSupported(Feature f) { return supported_ & (1 << f); }
+  // Check whether a feature is currently enabled.
+  static bool IsEnabled(Feature f) { return enabled_ & (1 << f); }
+  // Enable a specified feature within a scope. In release builds the
+  // Scope is a no-op; the enabled-bit bookkeeping exists only so debug
+  // builds can ASSERT that emitters run inside an appropriate Scope.
+  class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+   public:
+    explicit Scope(Feature f) {
+      ASSERT(CpuFeatures::IsSupported(f));
+      old_enabled_ = CpuFeatures::enabled_;
+      CpuFeatures::enabled_ |= (1 << f);
+    }
+    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }  // restore on exit
+   private:
+    uint32_t old_enabled_;
+#else
+   public:
+    explicit Scope(Feature f) {}
+#endif
+  };
+ private:
+  static uint32_t supported_;  // features present on this CPU
+  static uint32_t enabled_;    // features currently enabled by a Scope
+};
+
+
+class Assembler : public Malloced {
+ private:
+ // The relocation writer's position is kGap bytes below the end of
+ // the generated instructions. This leaves enough space for the
+ // longest possible ia32 instruction (17 bytes as of 9/26/06) and
+ // allows for a single, fast space check per instruction.
+ static const int kGap = 32;
+
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Read/Modify the code target in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc);
+ inline static void set_target_address_at(Address pc, Address target);
+
+ // Distance between the address of the code target in the call instruction
+ // and the return address
+ static const int kTargetAddrToReturnAddrDist = kPointerSize;
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+ //
+ // - function names correspond one-to-one to ia32 instruction mnemonics
+ // - unless specified otherwise, instructions operate on 32bit operands
+ // - instructions on 8bit (byte) operands/registers have a trailing '_b'
+ // - instructions on 16bit (word) operands/registers have a trailing '_w'
+ // - naming conflicts with C++ keywords are resolved via a trailing '_'
+
+ // NOTE ON INTERFACE: Currently, the interface is not very consistent
+  // in the sense that some operations (e.g. mov()) can be called in more
+  // than one way to generate the same instruction: The Register argument
+ // can in some cases be replaced with an Operand(Register) argument.
+  // This should be cleaned up and made more orthogonal. The question
+ // is: should we always use Operands instead of Registers where an
+ // Operand is possible, or should we have a Register (overloaded) form
+  // instead? We must be careful to make sure that the selected instruction
+ // is obvious from the parameters to avoid hard-to-find code generation
+ // bugs.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2.
+ void Align(int m);
+
+ // Stack
+ void pushad();
+ void popad();
+
+ void pushfd();
+ void popfd();
+
+ void push(const Immediate& x);
+ void push(Register src);
+ void push(const Operand& src);
+
+ void pop(Register dst);
+ void pop(const Operand& dst);
+
+ // Moves
+ void mov_b(Register dst, const Operand& src);
+ void mov_b(const Operand& dst, int8_t imm8);
+ void mov_b(const Operand& dst, Register src);
+
+ void mov_w(Register dst, const Operand& src);
+ void mov_w(const Operand& dst, Register src);
+
+ void mov(Register dst, int32_t imm32);
+ void mov(Register dst, Handle<Object> handle);
+ void mov(Register dst, const Operand& src);
+ void mov(const Operand& dst, const Immediate& x);
+ void mov(const Operand& dst, Handle<Object> handle);
+ void mov(const Operand& dst, Register src);
+
+ void movsx_b(Register dst, const Operand& src);
+
+ void movsx_w(Register dst, const Operand& src);
+
+ void movzx_b(Register dst, const Operand& src);
+
+ void movzx_w(Register dst, const Operand& src);
+
+ // Conditional moves
+ void cmov(Condition cc, Register dst, int32_t imm32);
+ void cmov(Condition cc, Register dst, Handle<Object> handle);
+ void cmov(Condition cc, Register dst, const Operand& src);
+
+ // Arithmetics
+ void adc(Register dst, int32_t imm32);
+ void adc(Register dst, const Operand& src);
+
+ void add(Register dst, const Operand& src);
+ void add(const Operand& dst, const Immediate& x);
+
+ void and_(Register dst, int32_t imm32);
+ void and_(Register dst, const Operand& src);
+ void and_(const Operand& src, Register dst);
+ void and_(const Operand& dst, const Immediate& x);
+
+ void cmp(Register reg, int32_t imm32);
+ void cmp(Register reg, Handle<Object> handle);
+ void cmp(Register reg, const Operand& op);
+ void cmp(const Operand& op, const Immediate& imm);
+
+ void dec_b(Register dst);
+
+ void dec(Register dst);
+ void dec(const Operand& dst);
+
+ void cdq();
+
+ void idiv(Register src);
+
+ void imul(Register dst, const Operand& src);
+ void imul(Register dst, Register src, int32_t imm32);
+
+ void inc(Register dst);
+ void inc(const Operand& dst);
+
+ void lea(Register dst, const Operand& src);
+
+ void mul(Register src);
+
+ void neg(Register dst);
+
+ void not_(Register dst);
+
+ void or_(Register dst, int32_t imm32);
+ void or_(Register dst, const Operand& src);
+ void or_(const Operand& dst, Register src);
+ void or_(const Operand& dst, const Immediate& x);
+
+ void rcl(Register dst, uint8_t imm8);
+
+ void sar(Register dst, uint8_t imm8);
+ void sar(Register dst);
+
+ void sbb(Register dst, const Operand& src);
+
+ void shld(Register dst, const Operand& src);
+
+ void shl(Register dst, uint8_t imm8);
+ void shl(Register dst);
+
+ void shrd(Register dst, const Operand& src);
+
+ void shr(Register dst, uint8_t imm8);
+ void shr(Register dst);
+
+ void sub(const Operand& dst, const Immediate& x);
+ void sub(Register dst, const Operand& src);
+ void sub(const Operand& dst, Register src);
+
+ void test(Register reg, const Immediate& imm);
+ void test(Register reg, const Operand& op);
+ void test(const Operand& op, const Immediate& imm);
+
+ void xor_(Register dst, int32_t imm32);
+ void xor_(Register dst, const Operand& src);
+ void xor_(const Operand& src, Register dst);
+ void xor_(const Operand& dst, const Immediate& x);
+
+ // Bit operations.
+ void bts(const Operand& dst, Register src);
+
+ // Miscellaneous
+ void hlt();
+ void int3();
+ void nop();
+ void rdtsc();
+ void ret(int imm16);
+ void leave();
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Calls
+ void call(Label* L);
+ void call(byte* entry, RelocMode rmode);
+ void call(const Operand& adr);
+ void call(Handle<Code> code, RelocMode rmode);
+
+ // Jumps
+ void jmp(Label* L); // unconditional jump to L
+ void jmp(byte* entry, RelocMode rmode);
+ void jmp(const Operand& adr);
+ void jmp(Handle<Code> code, RelocMode rmode);
+
+ // Conditional jumps
+ void j(Condition cc, Label* L, Hint hint = no_hint);
+ void j(Condition cc, byte* entry, RelocMode rmode, Hint hint = no_hint);
+ void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+
+ // Floating-point operations
+ void fld(int i);
+
+ void fld1();
+ void fldz();
+
+ void fld_s(const Operand& adr);
+ void fld_d(const Operand& adr);
+
+ void fstp_s(const Operand& adr);
+ void fstp_d(const Operand& adr);
+
+ void fild_s(const Operand& adr);
+ void fild_d(const Operand& adr);
+
+ void fist_s(const Operand& adr);
+
+ void fistp_s(const Operand& adr);
+ void fistp_d(const Operand& adr);
+
+ void fabs();
+ void fchs();
+
+ void fadd(int i);
+ void fsub(int i);
+ void fmul(int i);
+ void fdiv(int i);
+
+ void fisub_s(const Operand& adr);
+
+ void faddp(int i = 1);
+ void fsubp(int i = 1);
+ void fsubrp(int i = 1);
+ void fmulp(int i = 1);
+ void fdivp(int i = 1);
+ void fprem();
+ void fprem1();
+
+ void fxch(int i = 1);
+ void fincstp();
+ void ffree(int i = 0);
+
+ void ftst();
+ void fucomp(int i);
+ void fucompp();
+ void fcompp();
+ void fnstsw_ax();
+ void fwait();
+
+ void frndint();
+
+ void sahf();
+
+ void cpuid();
+
+ // SSE2 instructions
+ void cvttss2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, const Operand& src);
+
+ void cvtsi2sd(XMMRegister dst, const Operand& src);
+
+ void addsd(XMMRegister dst, XMMRegister src);
+ void subsd(XMMRegister dst, XMMRegister src);
+ void mulsd(XMMRegister dst, XMMRegister src);
+ void divsd(XMMRegister dst, XMMRegister src);
+
+ // Use either movsd or movlpd.
+ void movdbl(XMMRegister dst, const Operand& src);
+ void movdbl(const Operand& dst, XMMRegister src);
+
+ // Debugging
+ void Print();
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+
+ int pc_offset() const { return pc_ - buffer_; }
+ int last_position() const { return last_position_; }
+ bool last_position_is_statement() const {
+ return last_position_is_statement_;
+ }
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512*MB;
+ static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
+ void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister dst, XMMRegister src);
+
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // code generation
+ byte* pc_; // the program counter; moves forward
+ RelocInfoWriter reloc_info_writer;
+
+ // push-pop elimination
+ byte* last_pc_;
+
+ // Jump-to-jump elimination:
+ // The last label to be bound to _binding_pos, if unbound.
+ Label unbound_label_;
+ // The position to which _unbound_label has to be bound, if present.
+ int binding_pos_;
+ // The position before which jumps cannot be eliminated.
+ int last_bound_pos_;
+
+ // source position information
+ int last_position_;
+ bool last_position_is_statement_;
+
+ // Raw accessors into the code buffer, addressed by byte offset from
+ // buffer_. long_at/long_at_put reinterpret four bytes at the offset as
+ // a uint32_t (used to patch 32-bit fields in already-emitted code).
+ byte* addr_at(int pos) { return buffer_ + pos; }
+ byte byte_at(int pos) { return buffer_[pos]; }
+ uint32_t long_at(int pos) {
+ return *reinterpret_cast<uint32_t*>(addr_at(pos));
+ }
+ void long_at_put(int pos, uint32_t x) {
+ *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+ }
+
+ // code emission
+ void GrowBuffer();
+ inline void emit(uint32_t x);
+ inline void emit(Handle<Object> handle);
+ inline void emit(uint32_t x, RelocMode rmode);
+ inline void emit(const Immediate& x);
+
+ // instruction generation
+ void emit_arith_b(int op1, int op2, Register dst, int imm8);
+
+ // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
+ // with a given destination expression and an immediate operand. It attempts
+ // to use the shortest encoding possible.
+ // sel specifies the /n in the modrm byte (see the Intel PRM).
+ void emit_arith(int sel, Operand dst, const Immediate& x);
+
+ void emit_operand(Register reg, const Operand& adr);
+ void emit_operand(const Operand& adr, Register reg);
+
+ void emit_farith(int b1, int b2, int i);
+
+ // labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+
+ // record reloc info for current pc_
+ void RecordRelocInfo(RelocMode rmode, intptr_t data = 0);
+
+ friend class CodePatcher;
+ friend class EnsureSpace;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information. The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ // Grows the assembler's buffer up front if fewer than kGap bytes are
+ // available (see Assembler::overflow()), so the following emit cannot
+ // run off the end of the buffer.
+ explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (assembler_->overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+ // Remember how much room was left so the destructor can verify the
+ // emitted code stayed within the guaranteed gap.
+ space_before_ = assembler_->available_space();
+#endif
+ }
+
+#ifdef DEBUG
+ // Debug-only check: the code generated while this object was live must
+ // fit in the kGap bytes the constructor guaranteed.
+ ~EnsureSpace() {
+ int bytes_generated = space_before_ - assembler_->available_space();
+ ASSERT(bytes_generated < assembler_->kGap);
+ }
+#endif
+
+ private:
+ Assembler* assembler_;
+#ifdef DEBUG
+ int space_before_;  // available_space() snapshot taken at construction
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_IA32_H_
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+#include "v8.h"
+
+#include "arguments.h"
+#include "execution.h"
+#include "ic-inl.h"
+#include "factory.h"
+#include "runtime.h"
+#include "serialize.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Label
+
+// Decode pos_ into a plain code position. The sign of pos_ encodes the
+// label state (negative = bound, positive = linked, zero = unused; see
+// Label in assembler.h) and the magnitude encodes position + 1, so both
+// branches undo the +1 bias. Calling this on an unused label is an error.
+int Label::pos() const {
+ if (pos_ < 0) return -pos_ - 1;  // bound: stored as -(position + 1)
+ if (pos_ > 0) return pos_ - 1;   // linked: stored as position + 1
+ UNREACHABLE();
+ return 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfoWriter and RelocIterator
+//
+// Encoding
+//
+// The most common modes are given single-byte encodings. Also, it is
+// easy to identify the type of reloc info and skip unwanted modes in
+// an iteration.
+//
+// The encoding relies on the fact that there are less than 14
+// different relocation modes.
+//
+// embedded_object: [6 bits pc delta] 00
+//
+// code_target: [6 bits pc delta] 01
+//
+// position: [6 bits pc delta] 10,
+// [7 bits signed data delta] 0
+//
+// statement_position: [6 bits pc delta] 10,
+// [7 bits signed data delta] 1
+//
+// any nondata mode: 00 [4 bits rmode] 11,
+// 00 [6 bits pc delta]
+//
+// pc-jump: 00 1111 11,
+// 00 [6 bits pc delta]
+//
+// pc-jump: 01 1111 11,
+// (variable length) 7 - 26 bit pc delta, written in chunks of 7
+// bits, the lowest 7 bits written first.
+//
+// data-jump + pos: 00 1110 11,
+// signed int, lowest byte written first
+//
+// data-jump + st.pos: 01 1110 11,
+// signed int, lowest byte written first
+//
+// data-jump + comm.: 10 1110 11,
+// signed int, lowest byte written first
+//
+// Upper bound on distinct RelocModes the encoding above can represent
+// (asserted in RelocInfoWriter::Write).
+const int kMaxRelocModes = 14;
+
+// Low 2-bit tag present on every entry's first byte (see the encoding
+// diagram above).
+const int kTagBits = 2;
+const int kTagMask = (1 << kTagBits) - 1;
+// Extra 4-bit tag used by the kDefaultTag escape encoding.
+const int kExtraTagBits = 4;
+// One bit distinguishing statement positions from plain positions.
+const int kPositionTypeTagBits = 1;
+// Bits left in a data byte after the position-type tag.
+const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
+
+// Values of the low 2-bit tag.
+const int kEmbeddedObjectTag = 0;
+const int kCodeTargetTag = 1;
+const int kPositionTag = 2;
+const int kDefaultTag = 3;  // escape: extra tag follows in the same byte
+
+// Extra tag reserved for pc jumps (all extra-tag bits set).
+const int kPCJumpTag = (1 << kExtraTagBits) - 1;
+
+// Bits of pc delta that fit alongside the 2-bit tag in a single byte.
+const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
+const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
+
+// Top tag selecting the variable-length pc-jump form.
+const int kVariableLengthPCJumpTopTag = 1;
+// Variable-length pc jumps are written in 7-bit chunks, lowest first;
+// the last chunk is marked with kLastChunkTag in its low bit.
+const int kChunkBits = 7;
+const int kChunkMask = (1 << kChunkBits) - 1;
+const int kLastChunkTagBits = 1;
+const int kLastChunkTagMask = 1;
+const int kLastChunkTag = 1;
+
+
+// Extra tag for entries that carry a data component.
+const int kDataJumpTag = kPCJumpTag - 1;
+
+// Top tags for data-carrying entries.
+const int kNonstatementPositionTag = 0;
+const int kStatementPositionTag = 1;
+const int kCommentTag = 2;
+
+
+// If pc_delta does not fit in kSmallPCDeltaBits, emit a variable-length
+// pc-jump entry for the excess bits and return only the low
+// kSmallPCDeltaBits for the caller to encode inline. Note the writer
+// emits backwards (*--pos_), so chunks land in reverse of write order.
+uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
+ // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
+ // Otherwise write a variable length PC jump for the bits that do
+ // not fit in the kSmallPCDeltaBits bits.
+ if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
+ WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
+ uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
+ ASSERT(pc_jump > 0);
+ // Write kChunkBits size chunks of the pc_jump.
+ for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
+ byte b = pc_jump & kChunkMask;
+ *--pos_ = b << kLastChunkTagBits;
+ }
+ // Tag the last chunk so it can be identified.
+ *pos_ = *pos_ | kLastChunkTag;
+ // Return the remaining kSmallPCDeltaBits of the pc_delta.
+ return pc_delta & kSmallPCDeltaMask;
+}
+
+
+// Emit one byte: [6 bits pc delta][2-bit tag]. Deltas too large for 6
+// bits are first drained via a variable-length pc jump.
+void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
+ // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
+ pc_delta = WriteVariableLengthPCJump(pc_delta);
+ *--pos_ = pc_delta << kTagBits | tag;
+}
+
+
+// Emit one byte: [7 bits signed data delta][1-bit position-type tag].
+// Caller guarantees data_delta fits in kSmallDataBits (see Write()).
+void RelocInfoWriter::WriteTaggedData(int32_t data_delta, int tag) {
+ *--pos_ = data_delta << kPositionTypeTagBits | tag;
+}
+
+
+// Emit the escape byte: [2-bit top tag][4-bit extra tag][kDefaultTag].
+void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
+ *--pos_ = top_tag << (kTagBits + kExtraTagBits) |
+ extra_tag << kTagBits |
+ kDefaultTag;
+}
+
+
+// Emit the two-byte escape form: escape byte followed by a full byte of
+// pc delta (larger deltas drained via a variable-length pc jump first).
+void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
+ // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
+ pc_delta = WriteVariableLengthPCJump(pc_delta);
+ WriteExtraTag(extra_tag, 0);
+ *--pos_ = pc_delta;
+}
+
+
+// Emit a data-carrying escape entry: escape byte, then the signed
+// data delta as kIntSize bytes, lowest byte written first.
+void RelocInfoWriter::WriteExtraTaggedData(int32_t data_delta, int top_tag) {
+ WriteExtraTag(kDataJumpTag, top_tag);
+ for (int i = 0; i < kIntSize; i++) {
+ *--pos_ = data_delta;
+ data_delta = ArithmeticShiftRight(data_delta, kBitsPerByte);
+ }
+}
+
+
+// Append one relocation entry, delta-encoded against the previously
+// written entry (last_pc_ / last_data_). Entries are written backwards
+// in memory (pos_ decreases); RelocIterator decodes them in reverse.
+// Chooses the cheapest encoding the mode allows (see diagram above).
+void RelocInfoWriter::Write(const RelocInfo* rinfo) {
+#ifdef DEBUG
+ byte* begin_pos = pos_;
+#endif
+ Counters::reloc_info_count.Increment();
+ // Deltas are unsigned: pc must be monotonically non-decreasing.
+ ASSERT(rinfo->pc() - last_pc_ >= 0);
+ // Sanity check that the mode enum still fits the encoding.
+ ASSERT(reloc_mode_count < kMaxRelocModes);
+ // Use unsigned delta-encoding for pc.
+ uint32_t pc_delta = rinfo->pc() - last_pc_;
+ RelocMode rmode = rinfo->rmode();
+
+ // The two most common modes are given small tags, and usually fit in a byte.
+ if (rmode == embedded_object) {
+ WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
+ } else if (rmode == code_target) {
+ WriteTaggedPC(pc_delta, kCodeTargetTag);
+ } else if (rmode == position || rmode == statement_position) {
+ // Use signed delta-encoding for data.
+ int32_t data_delta = rinfo->data() - last_data_;
+ int pos_type_tag = rmode == position ? kNonstatementPositionTag
+ : kStatementPositionTag;
+ // Check if data is small enough to fit in a tagged byte.
+ if (is_intn(data_delta, kSmallDataBits)) {
+ WriteTaggedPC(pc_delta, kPositionTag);
+ WriteTaggedData(data_delta, pos_type_tag);
+ last_data_ = rinfo->data();
+ } else {
+ // Otherwise, use costly encoding.
+ WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+ WriteExtraTaggedData(data_delta, pos_type_tag);
+ last_data_ = rinfo->data();
+ }
+ } else if (rmode == comment) {
+ // Comments are normally not generated, so we use the costly encoding.
+ WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+ WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
+ last_data_ = rinfo->data();
+ } else {
+ // For all other modes we simply use the mode as the extra tag.
+ // None of these modes need a data component.
+ ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
+ WriteExtraTaggedPC(pc_delta, rmode);
+ }
+ last_pc_ = rinfo->pc();
+#ifdef DEBUG
+ // No entry may exceed the per-entry worst case the caller reserved.
+ ASSERT(begin_pos - pos_ <= kMaxSize);
+#endif
+}
+
+
+// Decoding helpers: the iterator walks the stream backwards (pos_ is
+// pre-decremented), mirroring how RelocInfoWriter emitted it.
+
+// Step to the next entry byte and return its low 2-bit tag.
+inline int RelocIterator::AdvanceGetTag() {
+ return *--pos_ & kTagMask;
+}
+
+
+// Extract the 4-bit extra tag from the current escape byte.
+inline int RelocIterator::GetExtraTag() {
+ return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
+}
+
+
+// Extract the 2-bit top tag from the current escape byte.
+inline int RelocIterator::GetTopTag() {
+ return *pos_ >> (kTagBits + kExtraTagBits);
+}
+
+
+// Apply the 6-bit pc delta stored alongside the tag in the current byte.
+inline void RelocIterator::ReadTaggedPC() {
+ rinfo_.pc_ += *pos_ >> kTagBits;
+}
+
+
+// Read a full byte of pc delta (two-byte escape form) and apply it.
+inline void RelocIterator::AdvanceReadPC() {
+ rinfo_.pc_ += *--pos_;
+}
+
+
+// Read a kIntSize-byte signed data delta (lowest byte first, matching
+// WriteExtraTaggedData) and apply it to rinfo_.data_.
+void RelocIterator::AdvanceReadData() {
+ int32_t x = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ x |= *--pos_ << i * kBitsPerByte;
+ }
+ rinfo_.data_ += x;
+}
+
+
+void RelocIterator::AdvanceReadVariableLengthPCJump() {
+ // Read the 32-kSmallPCDeltaBits most significant bits of the
+ // pc jump in kChunkBits bit chunks and shift them into place.
+ // Stop when the last chunk is encountered.
+ uint32_t pc_jump = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ byte pc_jump_part = *--pos_;
+ pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
+ if ((pc_jump_part & kLastChunkTagMask) == 1) break;
+ }
+ // The least significant kSmallPCDeltaBits bits will be added
+ // later.
+ rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
+}
+
+
+// Extract the position-type bit from the current data byte.
+inline int RelocIterator::GetPositionTypeTag() {
+ return *pos_ & ((1 << kPositionTypeTagBits) - 1);
+}
+
+
+// Sign-extending read of the 7-bit data delta in the current byte.
+inline void RelocIterator::ReadTaggedData() {
+ int8_t signed_b = *pos_;
+ rinfo_.data_ += ArithmeticShiftRight(signed_b, kPositionTypeTagBits);
+}
+
+
+// Map a position-type / data tag back to its RelocMode.
+inline RelocMode RelocIterator::DebugInfoModeFromTag(int tag) {
+ if (tag == kStatementPositionTag) {
+ return statement_position;
+ } else if (tag == kNonstatementPositionTag) {
+ return position;
+ } else {
+ ASSERT(tag == kCommentTag);
+ return comment;
+ }
+}
+
+
+// Advance to the next relocation entry whose mode is selected by
+// mode_mask_, decoding (in reverse) what RelocInfoWriter::Write
+// produced. Sets done_ when the stream is exhausted.
+//
+// NOTE: an earlier revision contained a leftover debug probe in the
+// kCodeTargetTag branch that dereferenced rinfo_.pc() and dead-stored
+// the local 'tag'; it had no effect on iteration and has been removed.
+void RelocIterator::next() {
+ ASSERT(!done());
+ // Basically, do the opposite of RelocInfoWriter::Write.
+ // Reading of data is as far as possible avoided for unwanted modes,
+ // but we must always update the pc.
+ //
+ // We exit this loop by returning when we find a mode we want.
+ while (pos_ > end_) {
+ int tag = AdvanceGetTag();
+ if (tag == kEmbeddedObjectTag) {
+ ReadTaggedPC();
+ if (SetMode(embedded_object)) return;
+ } else if (tag == kCodeTargetTag) {
+ ReadTaggedPC();
+ if (SetMode(code_target)) return;
+ } else if (tag == kPositionTag) {
+ ReadTaggedPC();
+ Advance();
+ // Check if we want source positions.
+ if (mode_mask_ & RelocInfo::kPositionMask) {
+ // Check if we want this type of source position.
+ if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) {
+ // Finally read the data before returning.
+ ReadTaggedData();
+ return;
+ }
+ }
+ } else {
+ ASSERT(tag == kDefaultTag);
+ int extra_tag = GetExtraTag();
+ if (extra_tag == kPCJumpTag) {
+ int top_tag = GetTopTag();
+ if (top_tag == kVariableLengthPCJumpTopTag) {
+ AdvanceReadVariableLengthPCJump();
+ } else {
+ AdvanceReadPC();
+ }
+ } else if (extra_tag == kDataJumpTag) {
+ // Check if we want debug modes (the only ones with data).
+ if (mode_mask_ & RelocInfo::kDebugMask) {
+ int top_tag = GetTopTag();
+ AdvanceReadData();
+ if (SetMode(DebugInfoModeFromTag(top_tag))) return;
+ } else {
+ // Otherwise, just skip over the data.
+ Advance(kIntSize);
+ }
+ } else {
+ AdvanceReadPC();
+ if (SetMode(static_cast<RelocMode>(extra_tag))) return;
+ }
+ }
+ }
+ done_ = true;
+}
+
+
+// Iterate the relocation info attached to a Code object. Decoding runs
+// from the end of the reloc area down to its start; next() positions the
+// iterator on the first wanted entry.
+RelocIterator::RelocIterator(Code* code, int mode_mask) {
+ rinfo_.pc_ = code->instruction_start();
+ rinfo_.data_ = 0;
+ // relocation info is read backwards
+ pos_ = code->relocation_start() + code->relocation_size();
+ end_ = code->relocation_start();
+ done_ = false;
+ mode_mask_ = mode_mask;
+ // An empty mask matches nothing; jump straight to the end so the
+ // first next() terminates immediately.
+ if (mode_mask_ == 0) pos_ = end_;
+ next();
+}
+
+
+// Same as above, but for a not-yet-committed code buffer described by a
+// CodeDesc (reloc info grows down from the end of the buffer).
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+ rinfo_.pc_ = desc.buffer;
+ rinfo_.data_ = 0;
+ // relocation info is read backwards
+ pos_ = desc.buffer + desc.buffer_size;
+ end_ = pos_ - desc.reloc_size;
+ done_ = false;
+ mode_mask_ = mode_mask;
+ // An empty mask matches nothing; see comment in the Code* constructor.
+ if (mode_mask_ == 0) pos_ = end_;
+ next();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+#ifdef DEBUG
+// Human-readable name for a RelocMode, for debug printing only.
+// Falls through to "unknown relocation type" for out-of-range values.
+const char* RelocInfo::RelocModeName(RelocMode rmode) {
+ switch (rmode) {
+ case no_reloc:
+ return "no reloc";
+ case embedded_object:
+ return "embedded object";
+ case embedded_string:
+ return "embedded string";
+ case js_construct_call:
+ return "code target (js construct call)";
+ case exit_js_frame:
+ return "code target (exit js frame)";
+ case code_target_context:
+ return "code target (context)";
+ case code_target:
+ return "code target";
+ case runtime_entry:
+ return "runtime entry";
+ case js_return:
+ return "js return";
+ case comment:
+ return "comment";
+ case position:
+ return "position";
+ case statement_position:
+ return "statement position";
+ case external_reference:
+ return "external reference";
+ case reloc_mode_count:
+ // Pseudo-value, never stored in actual reloc info.
+ UNREACHABLE();
+ return "reloc_mode_count";
+ }
+ return "unknown relocation type";
+}
+
+
+// Print a human-readable description of this relocation entry (DEBUG
+// builds only).
+void RelocInfo::Print() {
+ PrintF("%p %s", pc_, RelocModeName(rmode_));
+ if (rmode_ == comment) {
+ // data_ presumably holds the comment's char* (set via RecordComment
+ // -- confirm); cast so the vararg matches the %s conversion instead
+ // of passing a raw intptr_t, which is undefined behavior in general.
+ PrintF(" (%s)", reinterpret_cast<char*>(data_));
+ } else if (rmode_ == embedded_object) {
+ PrintF(" (");
+ target_object()->ShortPrint();
+ PrintF(")");
+ } else if (rmode_ == external_reference) {
+ ExternalReferenceEncoder ref_encoder;
+ PrintF(" (%s) (%p)",
+ ref_encoder.NameOfAddress(*target_reference_address()),
+ *target_reference_address());
+ } else if (is_code_target(rmode_)) {
+ Code* code = Debug::GetCodeTarget(target_address());
+ PrintF(" (%s) (%p)", Code::Kind2String(code->kind()), target_address());
+ } else if (is_position(rmode_)) {
+ // Make the vararg width match %d explicitly (data() may be wider
+ // than int on some targets).
+ PrintF(" (%d)", static_cast<int>(data()));
+ }
+
+ PrintF("\n");
+}
+
+
+// Debug-build consistency check for this relocation entry. Pointer-type
+// entries must reference valid heap objects; code targets must resolve
+// to a Code object the heap can find. Non-pointer modes need no check.
+void RelocInfo::Verify() {
+ switch (rmode_) {
+ case embedded_object:
+ Object::VerifyPointer(target_object());
+ break;
+ case js_construct_call:
+ case exit_js_frame:
+ case code_target_context:
+ case code_target: {
+ // convert inline target address to code object
+ Address addr = target_address();
+ ASSERT(addr != NULL);
+ // Check that we can find the right code object.
+ HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
+ Object* found = Heap::FindCodeObject(addr);
+ ASSERT(found->IsCode());
+ ASSERT(code->address() == HeapObject::cast(found)->address());
+ break;
+ }
+ case embedded_string:
+ case runtime_entry:
+ case js_return:
+ case comment:
+ case position:
+ case statement_position:
+ case external_reference:
+ case no_reloc:
+ // These modes carry no heap pointer; nothing to verify.
+ break;
+ case reloc_mode_count:
+ // Pseudo-value, never stored in actual reloc info.
+ UNREACHABLE();
+ break;
+ }
+}
+#endif // DEBUG
+
+
+// -----------------------------------------------------------------------------
+// Implementation of ExternalReference
+
+// Each constructor resolves a VM-internal entity (builtin, runtime
+// function, IC utility, debug hook, counter, Top slot, stub-cache table
+// entry) to the raw address that generated code will reference.
+
+ExternalReference::ExternalReference(Builtins::CFunctionId id)
+ : address_(Builtins::c_function_address(id)) {}
+
+
+ExternalReference::ExternalReference(Builtins::Name name)
+ : address_(Builtins::builtin_address(name)) {}
+
+
+ExternalReference::ExternalReference(Runtime::FunctionId id)
+ : address_(Runtime::FunctionForId(id)->entry) {}
+
+
+ExternalReference::ExternalReference(Runtime::Function* f)
+ : address_(f->entry) {}
+
+
+ExternalReference::ExternalReference(const IC_Utility& ic_utility)
+ : address_(ic_utility.address()) {}
+
+
+ExternalReference::ExternalReference(const Debug_Address& debug_address)
+ : address_(debug_address.address()) {}
+
+
+ExternalReference::ExternalReference(StatsCounter* counter)
+ : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
+
+
+ExternalReference::ExternalReference(Top::AddressId id)
+ : address_(Top::get_address_from_id(id)) {}
+
+
+ExternalReference::ExternalReference(const SCTableReference& table_ref)
+ : address_(table_ref.address()) {}
+
+
+// Named factories for frequently used external addresses, so call sites
+// don't have to know which subsystem owns each one.
+
+ExternalReference ExternalReference::builtin_passed_function() {
+ return ExternalReference(&Builtins::builtin_passed_function);
+}
+
+ExternalReference ExternalReference::the_hole_value_location() {
+ return ExternalReference(Factory::the_hole_value().location());
+}
+
+
+ExternalReference ExternalReference::address_of_stack_guard_limit() {
+ return ExternalReference(StackGuard::address_of_jslimit());
+}
+
+
+ExternalReference ExternalReference::debug_break() {
+ return ExternalReference(FUNCTION_ADDR(Debug::Break));
+}
+
+
+ExternalReference ExternalReference::new_space_start() {
+ return ExternalReference(Heap::NewSpaceStart());
+}
+
+ExternalReference ExternalReference::new_space_allocation_top_address() {
+ return ExternalReference(Heap::NewSpaceAllocationTopAddress());
+}
+
+ExternalReference ExternalReference::new_space_allocation_limit_address() {
+ return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
+}
+
+ExternalReference ExternalReference::debug_step_in_fp_address() {
+ return ExternalReference(Debug::step_in_fp_addr());
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+#ifndef V8_ASSEMBLER_H_
+#define V8_ASSEMBLER_H_
+
+#include "runtime.h"
+#include "top.h"
+#include "zone-inl.h"
+
+namespace v8 { namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Labels represent pc locations; they are typically jump or call targets.
+// After declaration, a label can be freely used to denote known or (yet)
+// unknown pc location. Assembler::bind() is used to bind a label to the
+// current pc. A label can be bound only once.
+
+class Label : public ZoneObject { // LabelShadows are dynamically allocated.
+ public:
+ INLINE(Label()) { Unuse(); }
+ // A label must not be destroyed while jumps are still linked to it.
+ INLINE(~Label()) { ASSERT(!is_linked()); }
+
+ // Reset to the unused state (also discards any link chain).
+ INLINE(void Unuse()) { pos_ = 0; }
+
+ // State queries; exactly one of these is true at any time.
+ INLINE(bool is_bound() const) { return pos_ < 0; }
+ INLINE(bool is_unused() const) { return pos_ == 0; }
+ INLINE(bool is_linked() const) { return pos_ > 0; }
+
+ // Returns the position of bound or linked labels. Cannot be used
+ // for unused labels.
+ int pos() const;
+
+ private:
+ // pos_ encodes both the binding state (via its sign)
+ // and the binding position (via its value) of a label.
+ //
+ // pos_ < 0 bound label, pos() returns the jump target position
+ // pos_ == 0 unused label
+ // pos_ > 0 linked label, pos() returns the last reference position
+ int pos_;
+
+ // Encode as bound: store -(pos + 1) so position 0 is distinguishable
+ // from the unused state.
+ void bind_to(int pos) {
+ pos_ = -pos - 1;
+ ASSERT(is_bound());
+ }
+ // Encode as linked: store pos + 1 (same zero-avoidance bias).
+ void link_to(int pos) {
+ pos_ = pos + 1;
+ ASSERT(is_linked());
+ }
+
+ friend class Assembler;
+ friend class Displacement;
+ friend class LabelShadow;
+};
+
+
+// A LabelShadow is a label that temporarily shadows another label. It
+// is used to catch linking and binding of labels in certain scopes,
+// e.g. try blocks. LabelShadows are themselves labels which can be
+// used (only) after they are not shadowing anymore.
+class LabelShadow: public Label {
+ public:
+ // Saves the shadowed label's encoded state and resets it to unused,
+ // so any linking/binding inside the shadowed scope is captured by
+ // the original label afresh.
+ explicit LabelShadow(Label* shadowed) {
+ ASSERT(shadowed != NULL);
+ shadowed_ = shadowed;
+ shadowed_pos_ = shadowed->pos_;
+ shadowed->Unuse();
+#ifdef DEBUG
+ is_shadowing_ = true;
+#endif
+ }
+
+ // StopShadowing() must have been called before destruction.
+ ~LabelShadow() {
+ ASSERT(!is_shadowing_);
+ }
+
+ // Transfers the state accumulated while shadowing into this object
+ // (making it usable as a Label) and restores the original label's
+ // saved state. Requires that this shadow itself is still unused.
+ void StopShadowing() {
+ ASSERT(is_shadowing_ && is_unused());
+ pos_ = shadowed_->pos_;
+ shadowed_->pos_ = shadowed_pos_;
+#ifdef DEBUG
+ is_shadowing_ = false;
+#endif
+ }
+
+ Label* shadowed() const { return shadowed_; }
+
+ private:
+ Label* shadowed_;     // the label being shadowed
+ int shadowed_pos_;    // its encoded pos_ saved at construction
+#ifdef DEBUG
+ bool is_shadowing_;   // true between construction and StopShadowing()
+#endif
+};
+
+
+// -----------------------------------------------------------------------------
+// Relocation information
+
+// The constant kNoPosition is used with the collecting of source positions
+// in the relocation information. Two types of source positions are collected
+// "position" (RelocMode position) and "statement position" (RelocMode
+// statement_position). The "position" is collected at places in the source
+// code which are of interest when making stack traces to pin-point the source
+// location of a stack frame as close as possible. The "statement position" is
+// collected at the beginning of each statement, and is used to indicate
+// possible break locations. kNoPosition is used to indicate an
+// invalid/uninitialized position value.
+static const int kNoPosition = -1;
+
+
+enum RelocMode {
+  // Please note the order is important (see is_code_target).
+  // NOTE(review): the original comment on js_construct_call duplicated the
+  // exit_js_frame text; going by the name it marks a construct-call target.
+  js_construct_call,   // code target that is a JavaScript construct call.
+  exit_js_frame,       // code target that is an exit JavaScript frame stub.
+  code_target_context, // code target used for contextual loads.
+  code_target,         // code target which is not any of the above.
+  embedded_object,
+  embedded_string,
+
+  // Everything after runtime_entry (inclusive) is not GC'ed.
+  runtime_entry,
+  js_return,           // Marks start of the ExitJSFrame code.
+  comment,
+  position,            // See comment for kNoPosition above.
+  statement_position,  // See comment for kNoPosition above.
+  external_reference,  // The address of an external C++ function.
+  // add more as needed
+  no_reloc,            // never recorded
+
+  // Pseudo-types; not real relocation modes.
+  reloc_mode_count,             // number of modes, for sizing/iteration.
+  last_code_enum = code_target  // all modes <= this are code targets.
+};
+
+
+// Returns a bit mask with only the bit for the given mode set. Used to
+// build mode masks for filtering in RelocIterator.
+inline int RelocMask(RelocMode mode) {
+  return 1 << mode;
+}
+
+
+inline bool is_js_construct_call(RelocMode mode) {
+  return mode == js_construct_call;
+}
+
+
+inline bool is_exit_js_frame(RelocMode mode) {
+  return mode == exit_js_frame;
+}
+
+
+// True for all code-target modes; relies on the ordering of RelocMode
+// (everything up to and including last_code_enum is a code target).
+inline bool is_code_target(RelocMode mode) {
+  return mode <= last_code_enum;
+}
+
+
+inline bool is_js_return(RelocMode mode) {
+  return mode == js_return;
+}
+
+
+inline bool is_comment(RelocMode mode) {
+  return mode == comment;
+}
+
+
+// True for both plain source positions and statement positions.
+inline bool is_position(RelocMode mode) {
+  return mode == position || mode == statement_position;
+}
+
+
+inline bool is_statement_position(RelocMode mode) {
+  return mode == statement_position;
+}
+
+inline bool is_external_reference(RelocMode mode) {
+  return mode == external_reference;
+}
+
+// Relocation information consists of the address (pc) of the datum
+// to which the relocation information applies, the relocation mode
+// (rmode), and an optional data field. The relocation mode may be
+// "descriptive" and not indicate a need for relocation, but simply
+// describe a property of the datum. Such rmodes are useful for GC
+// and nice disassembly output.
+
+class RelocInfo BASE_EMBEDDED {
+ public:
+  RelocInfo() {}
+  RelocInfo(byte* pc, RelocMode rmode, intptr_t data)
+      : pc_(pc), rmode_(rmode), data_(data) {
+  }
+
+  // Accessors
+  byte* pc() const { return pc_; }
+  void set_pc(byte* pc) { pc_ = pc; }
+  RelocMode rmode() const { return rmode_; }
+  intptr_t data() const { return data_; }
+
+  // Apply a relocation by delta bytes
+  INLINE(void apply(int delta));
+
+  // Read/modify the code target in the branch/call instruction this relocation
+  // applies to; can only be called if this->is_code_target(rmode_)
+  INLINE(Address target_address());
+  INLINE(void set_target_address(Address target));
+  INLINE(Object* target_object());
+  INLINE(Object** target_object_address());
+  INLINE(void set_target_object(Object* target));
+
+  // Read/modify the reference in the instruction this relocation
+  // applies to; can only be called if rmode_ is external_reference
+  INLINE(Address* target_reference_address());
+
+  // Read/modify the address of a call instruction. This is used to relocate
+  // the break points where straight-line code is patched with a call
+  // instruction.
+  INLINE(Address call_address());
+  INLINE(void set_call_address(Address target));
+  INLINE(Object* call_object());
+  INLINE(Object** call_object_address());
+  INLINE(void set_call_object(Object* target));
+
+  // Patch the code with some other code.
+  void patch_code(byte* instructions, int instruction_count);
+
+  // Patch the code with a call.
+  void patch_code_with_call(Address target, int guard_bytes);
+  INLINE(bool is_call_instruction());
+
+#ifdef DEBUG
+  // Debugging
+  void Print();
+  void Verify();
+  static const char* RelocModeName(RelocMode rmode);
+#endif
+
+  // Mask covering all code-target modes (bits 0..last_code_enum).
+  static const int kCodeTargetMask = (1 << (last_code_enum + 1)) - 1;
+  // Mask covering the two source-position modes.
+  static const int kPositionMask = 1 << position | 1 << statement_position;
+  // Debug-only modes: positions and comments.
+  static const int kDebugMask = kPositionMask | 1 << comment;
+  static const int kApplyMask;  // Modes affected by apply. Depends on arch.
+
+ private:
+  // On ARM, note that pc_ is the address of the constant pool entry
+  // to be relocated and not the address of the instruction
+  // referencing the constant pool entry (except when rmode_ ==
+  // comment).
+  byte* pc_;
+  RelocMode rmode_;
+  intptr_t data_;
+  friend class RelocIterator;
+};
+
+
+// RelocInfoWriter serializes a stream of relocation info. It writes towards
+// lower addresses.
+class RelocInfoWriter BASE_EMBEDDED {
+ public:
+  RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
+  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
+                                         last_data_(0) {}
+
+  byte* pos() const { return pos_; }
+  byte* last_pc() const { return last_pc_; }
+
+  // Appends one entry to the stream (moving pos_ towards lower addresses).
+  void Write(const RelocInfo* rinfo);
+
+  // Update the state of the stream after reloc info buffer
+  // and/or code is moved while the stream is active.
+  void Reposition(byte* pos, byte* pc) {
+    pos_ = pos;
+    last_pc_ = pc;
+  }
+
+  // Max size (bytes) of a written RelocInfo.
+  static const int kMaxSize = 12;
+
+ private:
+  // Low-level encoding helpers; pc values are written as deltas against
+  // the previous entry (see the *_delta parameters).
+  inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
+  inline void WriteTaggedPC(uint32_t pc_delta, int tag);
+  inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
+  inline void WriteExtraTaggedData(int32_t data_delta, int top_tag);
+  inline void WriteTaggedData(int32_t data_delta, int tag);
+  inline void WriteExtraTag(int extra_tag, int top_tag);
+
+  byte* pos_;      // current write position (decreases as entries are written)
+  byte* last_pc_;  // pc of the most recently written entry
+  intptr_t last_data_;
+  DISALLOW_EVIL_CONSTRUCTORS(RelocInfoWriter);
+};
+
+
+// A RelocIterator iterates over relocation information.
+// Typical use:
+//
+//   for (RelocIterator it(code); !it.done(); it.next()) {
+//     // do something with it.rinfo() here
+//   }
+//
+// A mask can be specified to skip unwanted modes.
+class RelocIterator: public Malloced {
+ public:
+  // Create a new iterator positioned at
+  // the beginning of the reloc info.
+  // Relocation information with mode k is included in the
+  // iteration iff bit k of mode_mask is set.
+  explicit RelocIterator(Code* code, int mode_mask = -1);
+  explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
+
+  // Iteration
+  bool done() const { return done_; }
+  void next();
+
+  // Return pointer valid until next next().
+  RelocInfo* rinfo() {
+    ASSERT(!done());
+    return &rinfo_;
+  }
+
+ private:
+  // Advance* moves the position before/after reading.
+  // *Read* reads from current byte(s) into rinfo_.
+  // *Get* just reads and returns info on current byte.
+  // Note: the stream was written towards lower addresses (see
+  // RelocInfoWriter), hence Advance decrements pos_.
+  void Advance(int bytes = 1) { pos_ -= bytes; }
+  int AdvanceGetTag();
+  int GetExtraTag();
+  int GetTopTag();
+  void ReadTaggedPC();
+  void AdvanceReadPC();
+  void AdvanceReadData();
+  void AdvanceReadVariableLengthPCJump();
+  int GetPositionTypeTag();
+  void ReadTaggedData();
+
+  static RelocMode DebugInfoModeFromTag(int tag);
+
+  // If the given mode is wanted, set it in rinfo_ and return true.
+  // Else return false. Used for efficiently skipping unwanted modes.
+  bool SetMode(RelocMode mode) {
+    return (mode_mask_ & 1 << mode) ? (rinfo_.rmode_ = mode, true) : false;
+  }
+
+  byte* pos_;        // current read position in the reloc info stream
+  byte* end_;        // end of the stream
+  RelocInfo rinfo_;  // current entry, exposed via rinfo()
+  bool done_;
+  int mode_mask_;    // bit k set => include mode k in the iteration
+  DISALLOW_EVIL_CONSTRUCTORS(RelocIterator);
+};
+
+
+//------------------------------------------------------------------------------
+// External function
+
+//----------------------------------------------------------------------------
+class IC_Utility;
+class Debug_Address;
+class SCTableReference;
+
+// An ExternalReference represents a C++ address called from the generated
+// code. All references to C++ functions must be encapsulated in an
+// ExternalReference instance. This is done in order to track the origin of
+// all external references in the code.
+class ExternalReference BASE_EMBEDDED {
+ public:
+  // General-pattern constructors: each resolves the given id/handle to
+  // the corresponding C++ address.
+  explicit ExternalReference(Builtins::CFunctionId id);
+
+  explicit ExternalReference(Builtins::Name name);
+
+  explicit ExternalReference(Runtime::FunctionId id);
+
+  explicit ExternalReference(Runtime::Function* f);
+
+  explicit ExternalReference(const IC_Utility& ic_utility);
+
+  explicit ExternalReference(const Debug_Address& debug_address);
+
+  explicit ExternalReference(StatsCounter* counter);
+
+  explicit ExternalReference(Top::AddressId id);
+
+  explicit ExternalReference(const SCTableReference& table_ref);
+
+  // One-of-a-kind references. These references are not part of a general
+  // pattern. This means that they have to be added to the
+  // ExternalReferenceTable in serialize.cc manually.
+
+  static ExternalReference builtin_passed_function();
+
+  // Static variable Factory::the_hole_value.location()
+  static ExternalReference the_hole_value_location();
+
+  // Static variable StackGuard::address_of_limit()
+  static ExternalReference address_of_stack_guard_limit();
+
+  // Function Debug::Break()
+  static ExternalReference debug_break();
+
+  // Static variable Heap::NewSpaceStart()
+  static ExternalReference new_space_start();
+
+  // Used for fast allocation in generated code.
+  static ExternalReference new_space_allocation_top_address();
+  static ExternalReference new_space_allocation_limit_address();
+
+  // Used to check if single stepping is enabled in generated code.
+  static ExternalReference debug_step_in_fp_address();
+
+  Address address() const {return address_;}
+
+ private:
+  // Raw-address constructor; private so that every external address is
+  // created through one of the tracked public constructors above.
+  explicit ExternalReference(void* address)
+    : address_(reinterpret_cast<Address>(address)) {}
+
+  Address address_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+// Move these into inline file?
+
+// Returns true iff x fits in a signed n-bit two's-complement field,
+// i.e. -(2^(n-1)) <= x < 2^(n-1).
+static inline bool is_intn(int x, int n) {
+  const int limit = 1 << (n - 1);
+  return -limit <= x && x < limit;
+}
+
+static inline bool is_int24(int x) { return is_intn(x, 24); }
+static inline bool is_int8(int x) { return is_intn(x, 8); }
+
+// Returns true iff x fits in an unsigned n-bit field: no bit at position
+// n or above may be set (negative values never fit).
+static inline bool is_uintn(int x, int n) {
+  const int high_bits = -(1 << n);  // mask of all bits at position >= n
+  return (x & high_bits) == 0;
+}
+
+static inline bool is_uint3(int x) { return is_uintn(x, 3); }
+static inline bool is_uint4(int x) { return is_uintn(x, 4); }
+static inline bool is_uint5(int x) { return is_uintn(x, 5); }
+static inline bool is_uint8(int x) { return is_uintn(x, 8); }
+static inline bool is_uint12(int x) { return is_uintn(x, 12); }
+static inline bool is_uint16(int x) { return is_uintn(x, 16); }
+static inline bool is_uint24(int x) { return is_uintn(x, 24); }
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+
+namespace v8 { namespace internal {
+
+
+// Definitions of the static singleton sentinel nodes (declared in ast.h);
+// they stand in for real AST nodes where a placeholder is sufficient.
+VariableProxySentinel VariableProxySentinel::this_proxy_(true);
+VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
+ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
+Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
+Call Call::sentinel_(NULL, NULL, false, 0);
+
+
+// ----------------------------------------------------------------------------
+// All the Accept member functions for each syntax tree node type.
+
+// Generates type::Accept(v) for every concrete node type in NODE_LIST.
+// Each Accept bails out if the visitor detected a stack overflow, then
+// double-dispatches to the visitor's Visit##type method.
+#define DECL_ACCEPT(type) \
+  void type::Accept(Visitor* v) { \
+    if (v->CheckStackOverflow()) return; \
+    v->Visit##type(this); \
+  }
+NODE_LIST(DECL_ACCEPT)
+#undef DECL_ACCEPT
+
+
+// ----------------------------------------------------------------------------
+// Implementation of other node functionality.
+
+// Creates an unresolved reference to the named variable. The proxy starts
+// out unbound (var_ == NULL); BindTo() resolves it to a Variable later.
+VariableProxy::VariableProxy(Handle<String> name,
+                             bool is_this,
+                             bool inside_with)
+  : name_(name),
+    var_(NULL),
+    is_this_(is_this),
+    inside_with_(inside_with) {
+  // names must be canonicalized for fast equality checks
+  ASSERT(name->IsSymbol());
+  // at least one access, otherwise no need for a VariableProxy
+  var_uses_.RecordAccess(1);
+}
+
+
+// Minimal constructor (used for sentinel proxies). The original version
+// left var_ and inside_with_ uninitialized; var_ must start out NULL so
+// that reads such as the ASSERT(var_ == NULL) in BindTo() are
+// well-defined, and inside_with_ gets a defined default as well.
+VariableProxy::VariableProxy(bool is_this)
+  : var_(NULL),
+    is_this_(is_this),
+    inside_with_(false) {
+}
+
+
+// Resolves this proxy to the given variable and transfers the use counts
+// accumulated on the proxy to the variable.
+void VariableProxy::BindTo(Variable* var) {
+  ASSERT(var_ == NULL);  // must be bound only once
+  ASSERT(var != NULL);  // must bind
+  ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
+  // Ideally CONST-ness should match. However, this is very hard to achieve
+  // because we don't know the exact semantics of conflicting (const and
+  // non-const) multiple variable declarations, const vars introduced via
+  // eval() etc. Const-ness and variable declarations are a complete mess
+  // in JS. Sigh...
+  // ASSERT(var->mode() == Variable::CONST || !is_const());
+  var_ = var;
+  var->var_uses()->RecordUses(&var_uses_);
+  var->obj_uses()->RecordUses(&obj_uses_);
+}
+
+
+#ifdef DEBUG
+
+// Maps the loop type to its keyword, for debug printing only.
+const char* LoopStatement::OperatorString() const {
+  switch (type()) {
+    case DO_LOOP:
+      return "DO";
+    case FOR_LOOP:
+      return "FOR";
+    case WHILE_LOOP:
+      return "WHILE";
+  }
+  return NULL;
+}
+
+#endif // DEBUG
+
+
+// Returns the binary operator underlying a compound assignment operator,
+// e.g. ADD for ASSIGN_ADD. Must only be called for compound assignments;
+// any other operator trips UNREACHABLE().
+Token::Value Assignment::binary_op() const {
+  switch (op_) {
+    case Token::ASSIGN_BIT_OR:
+      return Token::BIT_OR;
+    case Token::ASSIGN_BIT_XOR:
+      return Token::BIT_XOR;
+    case Token::ASSIGN_BIT_AND:
+      return Token::BIT_AND;
+    case Token::ASSIGN_SHL:
+      return Token::SHL;
+    case Token::ASSIGN_SAR:
+      return Token::SAR;
+    case Token::ASSIGN_SHR:
+      return Token::SHR;
+    case Token::ASSIGN_ADD:
+      return Token::ADD;
+    case Token::ASSIGN_SUB:
+      return Token::SUB;
+    case Token::ASSIGN_MUL:
+      return Token::MUL;
+    case Token::ASSIGN_DIV:
+      return Token::DIV;
+    case Token::ASSIGN_MOD:
+      return Token::MOD;
+    default:
+      UNREACHABLE();
+  }
+  return Token::ILLEGAL;
+}
+
+
+// Delegates to the function's scope, which decides whether lazy
+// compilation is possible for this function.
+bool FunctionLiteral::AllowsLazyCompilation() {
+  return scope()->AllowsLazyCompilation();
+}
+
+
+// Classifies an object literal property at construction time: a key equal
+// to Heap::Proto_symbol() makes it a PROTOTYPE property, a literal value
+// makes it CONSTANT, anything else is COMPUTED.
+ObjectLiteral::Property::Property(Literal* key, Expression* value) {
+  key_ = key;
+  value_ = value;
+  Object* raw_key = *key->handle();
+  const bool is_proto_key =
+      raw_key->IsSymbol() &&
+      Heap::Proto_symbol()->Equals(String::cast(raw_key));
+  if (is_proto_key) {
+    kind_ = PROTOTYPE;
+  } else if (value_->AsLiteral() != NULL) {
+    kind_ = CONSTANT;
+  } else {
+    kind_ = COMPUTED;
+  }
+}
+
+
+// Constructs a getter or setter property for the given function literal;
+// the property key is the function's name.
+ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+  key_ = new Literal(value->name());
+  value_ = value;
+  if (is_getter) {
+    kind_ = GETTER;
+  } else {
+    kind_ = SETTER;
+  }
+}
+
+
+
+// Adds the label to the collector unless the same label pointer was
+// already collected, so each label appears at most once.
+void LabelCollector::AddLabel(Label* label) {
+  bool already_present = false;
+  const int count = labels_->length();
+  for (int i = 0; i < count && !already_present; i++) {
+    already_present = (labels_->at(i) == label);
+  }
+  if (!already_present) labels_->Add(label);
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Visitor
+
+
+// Visits every statement in the list, in source order. length() is
+// re-read each iteration, exactly as in the original.
+void Visitor::VisitStatements(ZoneList<Statement*>* statements) {
+  for (int index = 0; index < statements->length(); index++) {
+    Visit(statements->at(index));
+  }
+}
+
+
+// Visits every non-NULL expression in the list, in source order.
+void Visitor::VisitExpressions(ZoneList<Expression*>* expressions) {
+  for (int index = 0; index < expressions->length(); index++) {
+    // The variable statement visiting code may pass NULL expressions
+    // to this code. Maybe this should be handled by introducing an
+    // undefined expression or literal? Revisit this code if this
+    // changes
+    Expression* expr = expressions->at(index);
+    if (expr == NULL) continue;
+    Visit(expr);
+  }
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_AST_H_
+#define V8_AST_H_
+
+#include "execution.h"
+#include "factory.h"
+#include "runtime.h"
+#include "token.h"
+#include "variables.h"
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+// The abstract syntax tree is an intermediate, light-weight
+// representation of the parsed JavaScript code suitable for
+// compilation to native code.
+
+// Nodes are allocated in a separate zone, which allows faster
+// allocation and constant-time deallocation of the entire syntax
+// tree.
+
+
+// ----------------------------------------------------------------------------
+// Nodes of the abstract syntax tree. Only concrete classes are
+// enumerated here.
+
+#define NODE_LIST(V) \
+ V(Block) \
+ V(Declaration) \
+ V(ExpressionStatement) \
+ V(EmptyStatement) \
+ V(IfStatement) \
+ V(ContinueStatement) \
+ V(BreakStatement) \
+ V(ReturnStatement) \
+ V(WithEnterStatement) \
+ V(WithExitStatement) \
+ V(SwitchStatement) \
+ V(LoopStatement) \
+ V(ForInStatement) \
+ V(TryCatch) \
+ V(TryFinally) \
+ V(DebuggerStatement) \
+ V(FunctionLiteral) \
+ V(FunctionBoilerplateLiteral) \
+ V(Conditional) \
+ V(Slot) \
+ V(VariableProxy) \
+ V(Literal) \
+ V(RegExpLiteral) \
+ V(ObjectLiteral) \
+ V(ArrayLiteral) \
+ V(Assignment) \
+ V(Throw) \
+ V(Property) \
+ V(Call) \
+ V(CallNew) \
+ V(CallRuntime) \
+ V(UnaryOperation) \
+ V(CountOperation) \
+ V(BinaryOperation) \
+ V(CompareOperation) \
+ V(ThisFunction)
+
+
+#define DEF_FORWARD_DECLARATION(type) class type;
+NODE_LIST(DEF_FORWARD_DECLARATION)
+#undef DEF_FORWARD_DECLARATION
+
+
+// Typedef only introduced to avoid unreadable code.
+// Please do appreciate the required space in "> >".
+typedef ZoneList<Handle<String> > ZoneStringList;
+
+
+// Superclass of all AST nodes. Nodes are allocated in a zone (see the
+// ZoneObject base class), so the whole tree is deallocated at once.
+class Node: public ZoneObject {
+ public:
+  Node(): statement_pos_(kNoPosition) { }
+  virtual ~Node() { }
+  // Double-dispatches to the matching Visit* method on the visitor
+  // (implemented per node type via the DECL_ACCEPT macro in ast.cc).
+  virtual void Accept(Visitor* v) = 0;
+
+  // Type testing & conversion. Each As*() returns this for nodes of the
+  // matching type (overridden in the subclasses) and NULL otherwise.
+  virtual Statement* AsStatement() { return NULL; }
+  virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
+  virtual EmptyStatement* AsEmptyStatement() { return NULL; }
+  virtual Expression* AsExpression() { return NULL; }
+  virtual Literal* AsLiteral() { return NULL; }
+  virtual Slot* AsSlot() { return NULL; }
+  virtual VariableProxy* AsVariableProxy() { return NULL; }
+  virtual Property* AsProperty() { return NULL; }
+  virtual Call* AsCall() { return NULL; }
+  virtual LabelCollector* AsLabelCollector() { return NULL; }
+  virtual BreakableStatement* AsBreakableStatement() { return NULL; }
+  virtual IterationStatement* AsIterationStatement() { return NULL; }
+  virtual UnaryOperation* AsUnaryOperation() { return NULL; }
+  virtual BinaryOperation* AsBinaryOperation() { return NULL; }
+  virtual Assignment* AsAssignment() { return NULL; }
+  virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
+
+  // Source position of the statement; kNoPosition until set.
+  void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
+  int statement_pos() const { return statement_pos_; }
+
+ private:
+  int statement_pos_;
+};
+
+
+// Base class of all statement nodes.
+class Statement: public Node {
+ public:
+  virtual Statement* AsStatement() { return this; }
+  virtual ReturnStatement* AsReturnStatement() { return NULL; }
+
+  // True iff this is an EmptyStatement, e.g. an implicit missing else-part.
+  bool IsEmpty() { return AsEmptyStatement() != NULL; }
+};
+
+
+// Base class of all expression nodes.
+class Expression: public Node {
+ public:
+  virtual Expression* AsExpression() { return this; }
+
+  // True iff the expression may appear on the left-hand side of an
+  // assignment; overridden by the node types for which this holds.
+  virtual bool IsValidLeftHandSide() { return false; }
+
+  // Mark the expression as being compiled as an expression
+  // statement. This is used to transform postfix increments to
+  // (faster) prefix increments.
+  virtual void MarkAsStatement() { /* do nothing */ }
+};
+
+
+/**
+ * A sentinel used during pre parsing that represents some expression
+ * that is a valid left hand side without having to actually build
+ * the expression.
+ */
+class ValidLeftHandSideSentinel: public Expression {
+ public:
+ virtual bool IsValidLeftHandSide() { return true; }
+ virtual void Accept(Visitor* v) { UNREACHABLE(); }
+ static ValidLeftHandSideSentinel* instance() { return &instance_; }
+ private:
+ static ValidLeftHandSideSentinel instance_;
+};
+
+
+// Base class for statements that can be the target of a break
+// (see also IterationStatement and SwitchStatement).
+class BreakableStatement: public Statement {
+ public:
+  enum Type {
+    TARGET_FOR_ANONYMOUS,    // may be targeted without an explicit label
+    TARGET_FOR_NAMED_ONLY    // only targetable via one of its labels
+  };
+
+  // The labels associated with this statement. May be NULL;
+  // if it is != NULL, guaranteed to contain at least one entry.
+  ZoneStringList* labels() const { return labels_; }
+
+  // Type testing & conversion.
+  virtual BreakableStatement* AsBreakableStatement() { return this; }
+
+  // Code generation
+  Label* break_target() { return &break_target_; }
+
+  // Used during code generation for restoring the stack when a
+  // break/continue crosses a statement that keeps stuff on the stack.
+  int break_stack_height() { return break_stack_height_; }
+  void set_break_stack_height(int height) { break_stack_height_ = height; }
+
+  // Testers.
+  bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
+
+ protected:
+  BreakableStatement(ZoneStringList* labels, Type type)
+    : labels_(labels), type_(type) {
+    ASSERT(labels == NULL || labels->length() > 0);
+  }
+
+ private:
+  ZoneStringList* labels_;
+  Type type_;
+  Label break_target_;
+  int break_stack_height_;
+};
+
+
+// A list of statements, e.g. a brace-enclosed block. Breakable only via
+// an explicit label (TARGET_FOR_NAMED_ONLY).
+class Block: public BreakableStatement {
+ public:
+  Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
+    : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+      statements_(capacity),
+      is_initializer_block_(is_initializer_block) { }
+
+  virtual void Accept(Visitor* v);
+
+  void AddStatement(Statement* statement) { statements_.Add(statement); }
+
+  ZoneList<Statement*>* statements() { return &statements_; }
+  // NOTE(review): presumably marks synthesized blocks holding variable
+  // initializations — confirm against the parser before relying on it.
+  bool is_initializer_block() const { return is_initializer_block_; }
+
+ private:
+  ZoneList<Statement*> statements_;
+  bool is_initializer_block_;
+};
+
+
+// A variable or function declaration: introduces proxy with the given
+// mode, optionally together with a function literal.
+class Declaration: public Node {
+ public:
+  Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun)
+    : proxy_(proxy),
+      mode_(mode),
+      fun_(fun) {
+    ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+    // At the moment there are no "const functions" in JavaScript...
+    ASSERT(fun == NULL || mode == Variable::VAR);
+  }
+
+  virtual void Accept(Visitor* v);
+
+  VariableProxy* proxy() const { return proxy_; }
+  Variable::Mode mode() const { return mode_; }
+  FunctionLiteral* fun() const { return fun_; } // may be NULL
+
+ private:
+  VariableProxy* proxy_;
+  Variable::Mode mode_;
+  FunctionLiteral* fun_;
+};
+
+
+// Common base of the loop constructs (LoopStatement, ForInStatement).
+// Holds the loop body and the continue target used by code generation.
+class IterationStatement: public BreakableStatement {
+ public:
+  // Type testing & conversion.
+  virtual IterationStatement* AsIterationStatement() { return this; }
+
+  Statement* body() const { return body_; }
+
+  // Code generation
+  Label* continue_target() { return &continue_target_; }
+
+ protected:
+  explicit IterationStatement(ZoneStringList* labels)
+    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
+
+  // Two-phase construction: the body is supplied after the node is
+  // created, via the subclasses' Initialize methods.
+  void Initialize(Statement* body) {
+    body_ = body;
+  }
+
+ private:
+  Statement* body_;
+  Label continue_target_;
+};
+
+
+class LoopStatement: public IterationStatement {
+ public:
+ enum Type { DO_LOOP, FOR_LOOP, WHILE_LOOP };
+
+ LoopStatement(ZoneStringList* labels, Type type)
+ : IterationStatement(labels), type_(type), init_(NULL),
+ cond_(NULL), next_(NULL) { }
+
+ void Initialize(Statement* init,
+ Expression* cond,
+ Statement* next,
+ Statement* body) {
+ ASSERT(init == NULL || type_ == FOR_LOOP);
+ ASSERT(next == NULL || type_ == FOR_LOOP);
+ IterationStatement::Initialize(body);
+ init_ = init;
+ cond_ = cond;
+ next_ = next;
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Type type() const { return type_; }
+ Statement* init() const { return init_; }
+ Expression* cond() const { return cond_; }
+ Statement* next() const { return next_; }
+
+#ifdef DEBUG
+ const char* OperatorString() const;
+#endif
+
+ private:
+ Type type_;
+ Statement* init_;
+ Expression* cond_;
+ Statement* next_;
+};
+
+
+class ForInStatement: public IterationStatement {
+ public:
+ explicit ForInStatement(ZoneStringList* labels)
+ : IterationStatement(labels), each_(NULL), enumerable_(NULL) { }
+
+ void Initialize(Expression* each, Expression* enumerable, Statement* body) {
+ IterationStatement::Initialize(body);
+ each_ = each;
+ enumerable_ = enumerable;
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* each() const { return each_; }
+ Expression* enumerable() const { return enumerable_; }
+
+ private:
+ Expression* each_;
+ Expression* enumerable_;
+};
+
+
+class ExpressionStatement: public Statement {
+ public:
+ explicit ExpressionStatement(Expression* expression)
+ : expression_(expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual ExpressionStatement* AsExpressionStatement() { return this; }
+
+ void set_expression(Expression* e) { expression_ = e; }
+ Expression* expression() { return expression_; }
+
+ private:
+ Expression* expression_;
+};
+
+
+class ContinueStatement: public Statement {
+ public:
+ explicit ContinueStatement(IterationStatement* target)
+ : target_(target) { }
+
+ virtual void Accept(Visitor* v);
+
+ IterationStatement* target() const { return target_; }
+
+ private:
+ IterationStatement* target_;
+};
+
+
+class BreakStatement: public Statement {
+ public:
+ explicit BreakStatement(BreakableStatement* target)
+ : target_(target) { }
+
+ virtual void Accept(Visitor* v);
+
+ BreakableStatement* target() const { return target_; }
+
+ private:
+ BreakableStatement* target_;
+};
+
+
+class ReturnStatement: public Statement {
+ public:
+ explicit ReturnStatement(Expression* expression)
+ : expression_(expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual ReturnStatement* AsReturnStatement() { return this; }
+
+ Expression* expression() { return expression_; }
+
+ private:
+ Expression* expression_;
+};
+
+
+class WithEnterStatement: public Statement {
+ public:
+ explicit WithEnterStatement(Expression* expression)
+ : expression_(expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* expression() const { return expression_; }
+
+ private:
+ Expression* expression_;
+};
+
+
+class WithExitStatement: public Statement {
+ public:
+ WithExitStatement() { }
+
+ virtual void Accept(Visitor* v);
+};
+
+
+// A single clause in a switch statement: an optional label expression and
+// the clause's statements. A NULL label denotes the default clause.
+class CaseClause: public ZoneObject {
+ public:
+  CaseClause(Expression* label, ZoneList<Statement*>* statements)
+    : label_(label), statements_(statements) { }
+
+  bool is_default() const { return label_ == NULL; }
+  // Must not be called for the default clause (CHECKed).
+  Expression* label() const {
+    CHECK(!is_default());
+    return label_;
+  }
+  ZoneList<Statement*>* statements() const { return statements_; }
+
+ private:
+  Expression* label_;
+  ZoneList<Statement*>* statements_;
+};
+
+
+class SwitchStatement: public BreakableStatement {
+ public:
+ explicit SwitchStatement(ZoneStringList* labels)
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+ tag_(NULL), cases_(NULL) { }
+
+ void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
+ tag_ = tag;
+ cases_ = cases;
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* tag() const { return tag_; }
+ ZoneList<CaseClause*>* cases() const { return cases_; }
+
+ private:
+ Expression* tag_;
+ ZoneList<CaseClause*>* cases_;
+};
+
+
+// If-statements always have non-null references to their then- and
+// else-parts. When parsing if-statements with no explicit else-part,
+// the parser implicitly creates an empty statement. Use the
+// HasThenStatement() and HasElseStatement() functions to check if a
+// given if-statement has a then- or an else-part containing code.
+class IfStatement: public Statement {
+ public:
+ IfStatement(Expression* condition,
+ Statement* then_statement,
+ Statement* else_statement)
+ : condition_(condition),
+ then_statement_(then_statement),
+ else_statement_(else_statement) { }
+
+ virtual void Accept(Visitor* v);
+
+ bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
+ bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
+
+ Expression* condition() const { return condition_; }
+ Statement* then_statement() const { return then_statement_; }
+ Statement* else_statement() const { return else_statement_; }
+
+ private:
+ Expression* condition_;
+ Statement* then_statement_;
+ Statement* else_statement_;
+};
+
+
+// NOTE: LabelCollectors are represented as nodes to fit in the target
+// stack in the compiler; this should probably be reworked.
+class LabelCollector: public Node {
+ public:
+ explicit LabelCollector(ZoneList<Label*>* labels) : labels_(labels) { }
+
+ // Adds a label to the collector. The collector stores a pointer not
+ // a copy of the label to make binding work, so make sure not to
+ // pass in references to something on the stack.
+ void AddLabel(Label* label);
+
+ // Virtual behaviour. LabelCollectors are never part of the AST.
+ virtual void Accept(Visitor* v) { UNREACHABLE(); }
+ virtual LabelCollector* AsLabelCollector() { return this; }
+
+ ZoneList<Label*>* labels() { return labels_; }
+
+ private:
+ ZoneList<Label*>* labels_;
+};
+
+
+class TryStatement: public Statement {
+ public:
+ explicit TryStatement(Block* try_block)
+ : try_block_(try_block), escaping_labels_(NULL) { }
+
+ void set_escaping_labels(ZoneList<Label*>* labels) {
+ escaping_labels_ = labels;
+ }
+
+ Block* try_block() const { return try_block_; }
+ ZoneList<Label*>* escaping_labels() const { return escaping_labels_; }
+
+ private:
+ Block* try_block_;
+ ZoneList<Label*>* escaping_labels_;
+};
+
+
+class TryCatch: public TryStatement {
+ public:
+ TryCatch(Block* try_block, Expression* catch_var, Block* catch_block)
+ : TryStatement(try_block),
+ catch_var_(catch_var),
+ catch_block_(catch_block) {
+ ASSERT(catch_var->AsVariableProxy() != NULL);
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* catch_var() const { return catch_var_; }
+ Block* catch_block() const { return catch_block_; }
+
+ private:
+ Expression* catch_var_;
+ Block* catch_block_;
+};
+
+
+class TryFinally: public TryStatement {
+ public:
+ TryFinally(Block* try_block, Expression* finally_var, Block* finally_block)
+ : TryStatement(try_block),
+ finally_var_(finally_var),
+ finally_block_(finally_block) { }
+
+ virtual void Accept(Visitor* v);
+
+ // If the finally block is non-trivial it may be problematic to have
+ // extra stuff on the expression stack while evaluating it. The
+ // finally variable is used to hold the state instead of storing it
+ // on the stack. It may be NULL in which case the state is stored on
+ // the stack.
+ Expression* finally_var() const { return finally_var_; }
+
+ Block* finally_block() const { return finally_block_; }
+
+ private:
+ Expression* finally_var_;
+ Block* finally_block_;
+};
+
+
+class DebuggerStatement: public Statement {
+ public:
+ virtual void Accept(Visitor* v);
+};
+
+
+class EmptyStatement: public Statement {
+ public:
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual EmptyStatement* AsEmptyStatement() { return this; }
+};
+
+
+class Literal: public Expression {
+ public:
+ explicit Literal(Handle<Object> handle) : handle_(handle) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual Literal* AsLiteral() { return this; }
+
+ // Check if this literal is identical to the other literal.
+ bool IsIdenticalTo(const Literal* other) const {
+ return handle_.is_identical_to(other->handle_);
+ }
+
+ // Identity testers.
+ bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
+ bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
+ bool IsFalse() const {
+ return handle_.is_identical_to(Factory::false_value());
+ }
+
+ Handle<Object> handle() const { return handle_; }
+
+ private:
+ Handle<Object> handle_;
+};
+
+
+// Base class for literals that need space in the corresponding JSFunction.
+class MaterializedLiteral: public Expression {
+ public:
+ explicit MaterializedLiteral(int literal_index)
+ : literal_index_(literal_index) {}
+ int literal_index() { return literal_index_; }
+ private:
+ int literal_index_;
+};
+
+
+// An object literal has a boilerplate object that is used
+// for minimizing the work when constructing it at runtime.
+class ObjectLiteral: public MaterializedLiteral {
+ public:
+ // Property is used for passing information
+ // about an object literal's properties from the parser
+ // to the code generator.
+ class Property: public ZoneObject {
+ public:
+
+ enum Kind {
+ CONSTANT, // Property with constant value (at compile time).
+ COMPUTED, // Property with computed value (at execution time).
+ GETTER, SETTER, // Property is an accessor function.
+ PROTOTYPE // Property is __proto__.
+ };
+
+ Property(Literal* key, Expression* value);
+ Property(bool is_getter, FunctionLiteral* value);
+
+ Literal* key() { return key_; }
+ Expression* value() { return value_; }
+ Kind kind() { return kind_; }
+
+ private:
+ Literal* key_;
+ Expression* value_;
+ Kind kind_;
+ };
+
+ ObjectLiteral(Handle<FixedArray> constant_properties,
+ Expression* result,
+ ZoneList<Property*>* properties,
+ int literal_index)
+ : MaterializedLiteral(literal_index),
+ constant_properties_(constant_properties),
+ result_(result),
+ properties_(properties) {
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Handle<FixedArray> constant_properties() const {
+ return constant_properties_;
+ }
+ Expression* result() const { return result_; }
+ ZoneList<Property*>* properties() const { return properties_; }
+
+ private:
+ Handle<FixedArray> constant_properties_;
+ Expression* result_;
+ ZoneList<Property*>* properties_;
+};
+
+
+// Node for capturing a regexp literal.
+class RegExpLiteral: public MaterializedLiteral {
+ public:
+ RegExpLiteral(Handle<String> pattern,
+ Handle<String> flags,
+ int literal_index)
+ : MaterializedLiteral(literal_index),
+ pattern_(pattern),
+ flags_(flags) {}
+
+ virtual void Accept(Visitor* v);
+
+ Handle<String> pattern() const { return pattern_; }
+ Handle<String> flags() const { return flags_; }
+
+ private:
+ Handle<String> pattern_;
+ Handle<String> flags_;
+};
+
+// An array literal has a literals object that is used
+// for minimizing the work when constructing it at runtime.
+class ArrayLiteral: public Expression {
+ public:
+ ArrayLiteral(Handle<FixedArray> literals,
+ Expression* result,
+ ZoneList<Expression*>* values)
+ : literals_(literals), result_(result), values_(values) {
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Handle<FixedArray> literals() const { return literals_; }
+ Expression* result() const { return result_; }
+ ZoneList<Expression*>* values() const { return values_; }
+
+ private:
+ Handle<FixedArray> literals_;
+ Expression* result_;
+ ZoneList<Expression*>* values_;
+};
+
+
+class VariableProxy: public Expression {
+ public:
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual Property* AsProperty() {
+ return var_ == NULL ? NULL : var_->AsProperty();
+ }
+ virtual VariableProxy* AsVariableProxy() { return this; }
+
+ Variable* AsVariable() {
+ return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
+ }
+ virtual bool IsValidLeftHandSide() {
+ return var_ == NULL ? true : var_->IsValidLeftHandSide();
+ }
+ bool IsVariable(Handle<String> n) {
+ return !is_this() && name().is_identical_to(n);
+ }
+
+ // If this assertion fails it means that some code has tried to
+ // treat the special "this" variable as an ordinary variable with
+ // the name "this".
+ Handle<String> name() const { return name_; }
+ Variable* var() const { return var_; }
+ UseCount* var_uses() { return &var_uses_; }
+ UseCount* obj_uses() { return &obj_uses_; }
+ bool is_this() const { return is_this_; }
+ bool inside_with() const { return inside_with_; }
+
+ // Bind this proxy to the variable var.
+ void BindTo(Variable* var);
+
+ protected:
+ Handle<String> name_;
+ Variable* var_; // resolved variable, or NULL
+ bool is_this_;
+ bool inside_with_;
+
+ // VariableProxy usage info.
+ UseCount var_uses_; // uses of the variable value
+ UseCount obj_uses_; // uses of the object the variable points to
+
+ VariableProxy(Handle<String> name, bool is_this, bool inside_with);
+ explicit VariableProxy(bool is_this);
+
+ friend class Scope;
+};
+
+
+class VariableProxySentinel: public VariableProxy {
+ public:
+ virtual bool IsValidLeftHandSide() { return !is_this(); }
+ static VariableProxySentinel* this_proxy() { return &this_proxy_; }
+ static VariableProxySentinel* identifier_proxy() {
+ return &identifier_proxy_;
+ }
+
+ private:
+ explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
+ static VariableProxySentinel this_proxy_;
+ static VariableProxySentinel identifier_proxy_;
+};
+
+
+class Slot: public Expression {
+ public:
+ enum Type {
+ // A slot in the parameter section on the stack. index() is
+ // the parameter index, counting left-to-right, starting at 0.
+ PARAMETER,
+
+ // A slot in the local section on the stack. index() is
+ // the variable index in the stack frame, starting at 0.
+ LOCAL,
+
+ // An indexed slot in a heap context. index() is the
+ // variable index in the context object on the heap,
+ // starting at 0. var()->scope() is the corresponding
+ // scope.
+ CONTEXT,
+
+ // A named slot in a heap context. var()->name() is the
+ // variable name in the context object on the heap,
+ // with lookup starting at the current context. index()
+ // is invalid.
+ LOOKUP,
+
+ // A property in the global object. var()->name() is
+ // the property name.
+ GLOBAL
+ };
+
+ Slot(Variable* var, Type type, int index)
+ : var_(var), type_(type), index_(index) {
+ ASSERT(var != NULL);
+ }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual Slot* AsSlot() { return this; }
+
+ // Accessors
+ Variable* var() const { return var_; }
+ Type type() const { return type_; }
+ int index() const { return index_; }
+
+ private:
+ Variable* var_;
+ Type type_;
+ int index_;
+};
+
+
+class Property: public Expression {
+ public:
+ Property(Expression* obj, Expression* key, int pos)
+ : obj_(obj), key_(key), pos_(pos) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual Property* AsProperty() { return this; }
+
+ virtual bool IsValidLeftHandSide() { return true; }
+
+ Expression* obj() const { return obj_; }
+ Expression* key() const { return key_; }
+ int position() const { return pos_; }
+
+ // Returns a singleton property access on 'this'. Used
+ // during preparsing.
+ static Property* this_property() { return &this_property_; }
+
+ private:
+ Expression* obj_;
+ Expression* key_;
+ int pos_;
+
+ // Dummy property used during preparsing
+ static Property this_property_;
+};
+
+
+class Call: public Expression {
+ public:
+ Call(Expression* expression,
+ ZoneList<Expression*>* arguments,
+ bool is_eval,
+ int pos)
+ : expression_(expression),
+ arguments_(arguments),
+ is_eval_(is_eval),
+ pos_(pos) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing and conversion.
+ virtual Call* AsCall() { return this; }
+
+ Expression* expression() const { return expression_; }
+ ZoneList<Expression*>* arguments() const { return arguments_; }
+ bool is_eval() { return is_eval_; }
+ int position() { return pos_; }
+
+ static Call* sentinel() { return &sentinel_; }
+
+ private:
+ Expression* expression_;
+ ZoneList<Expression*>* arguments_;
+ bool is_eval_;
+ int pos_;
+
+ static Call sentinel_;
+};
+
+
+class CallNew: public Call {
+ public:
+ CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+ : Call(expression, arguments, false, pos) { }
+
+ virtual void Accept(Visitor* v);
+};
+
+
+// The CallRuntime class does not represent any official JavaScript
+// language construct. Instead it is used to call a C or JS function
+// with a set of arguments. This is used from the builtins that are
+// implemented in JavaScript (see "v8natives.js").
+class CallRuntime: public Expression {
+ public:
+ CallRuntime(Handle<String> name,
+ Runtime::Function* function,
+ ZoneList<Expression*>* arguments)
+ : name_(name), function_(function), arguments_(arguments) { }
+
+ virtual void Accept(Visitor* v);
+
+ Handle<String> name() const { return name_; }
+ Runtime::Function* function() const { return function_; }
+ ZoneList<Expression*>* arguments() const { return arguments_; }
+
+ private:
+ Handle<String> name_;
+ Runtime::Function* function_;
+ ZoneList<Expression*>* arguments_;
+};
+
+
+class UnaryOperation: public Expression {
+ public:
+ UnaryOperation(Token::Value op, Expression* expression)
+ : op_(op), expression_(expression) {
+ ASSERT(Token::IsUnaryOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual UnaryOperation* AsUnaryOperation() { return this; }
+
+ Token::Value op() const { return op_; }
+ Expression* expression() const { return expression_; }
+
+ private:
+ Token::Value op_;
+ Expression* expression_;
+};
+
+
+class BinaryOperation: public Expression {
+ public:
+ BinaryOperation(Token::Value op, Expression* left, Expression* right)
+ : op_(op), left_(left), right_(right) {
+ ASSERT(Token::IsBinaryOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual BinaryOperation* AsBinaryOperation() { return this; }
+
+ // True iff the result can be safely overwritten (to avoid allocation).
+ // False for operations that can return one of their operands.
+ bool ResultOverwriteAllowed() {
+ switch (op_) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ return false;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ return true;
+ default:
+ UNREACHABLE();
+ }
+ return false;
+ }
+
+ Token::Value op() const { return op_; }
+ Expression* left() const { return left_; }
+ Expression* right() const { return right_; }
+
+ private:
+ Token::Value op_;
+ Expression* left_;
+ Expression* right_;
+};
+
+
+class CountOperation: public Expression {
+ public:
+ CountOperation(bool is_prefix, Token::Value op, Expression* expression)
+ : is_prefix_(is_prefix), op_(op), expression_(expression) {
+ ASSERT(Token::IsCountOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ bool is_prefix() const { return is_prefix_; }
+ bool is_postfix() const { return !is_prefix_; }
+ Token::Value op() const { return op_; }
+ Expression* expression() const { return expression_; }
+
+ virtual void MarkAsStatement() { is_prefix_ = true; }
+
+ private:
+ bool is_prefix_;
+ Token::Value op_;
+ Expression* expression_;
+};
+
+
+class CompareOperation: public Expression {
+ public:
+ CompareOperation(Token::Value op, Expression* left, Expression* right)
+ : op_(op), left_(left), right_(right) {
+ ASSERT(Token::IsCompareOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Token::Value op() const { return op_; }
+ Expression* left() const { return left_; }
+ Expression* right() const { return right_; }
+
+ private:
+ Token::Value op_;
+ Expression* left_;
+ Expression* right_;
+};
+
+
+class Conditional: public Expression {
+ public:
+ Conditional(Expression* condition,
+ Expression* then_expression,
+ Expression* else_expression)
+ : condition_(condition),
+ then_expression_(then_expression),
+ else_expression_(else_expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* condition() const { return condition_; }
+ Expression* then_expression() const { return then_expression_; }
+ Expression* else_expression() const { return else_expression_; }
+
+ private:
+ Expression* condition_;
+ Expression* then_expression_;
+ Expression* else_expression_;
+};
+
+
+class Assignment: public Expression {
+ public:
+ Assignment(Token::Value op, Expression* target, Expression* value, int pos)
+ : op_(op), target_(target), value_(value), pos_(pos) {
+ ASSERT(Token::IsAssignmentOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+ virtual Assignment* AsAssignment() { return this; }
+
+ Token::Value binary_op() const;
+
+ Token::Value op() const { return op_; }
+ Expression* target() const { return target_; }
+ Expression* value() const { return value_; }
+ int position() { return pos_; }
+
+ private:
+ Token::Value op_;
+ Expression* target_;
+ Expression* value_;
+ int pos_;
+};
+
+
+class Throw: public Expression {
+ public:
+ Throw(Expression* exception, int pos)
+ : exception_(exception), pos_(pos) {}
+
+ virtual void Accept(Visitor* v);
+ Expression* exception() const { return exception_; }
+ int position() const { return pos_; }
+
+ private:
+ Expression* exception_;
+ int pos_;
+};
+
+
+class FunctionLiteral: public Expression {
+ public:
+ FunctionLiteral(Handle<String> name,
+ Scope* scope,
+ ZoneList<Statement*>* body,
+ int materialized_literal_count,
+ int expected_property_count,
+ int num_parameters,
+ int start_position,
+ int end_position,
+ bool is_expression)
+ : name_(name),
+ scope_(scope),
+ body_(body),
+ materialized_literal_count_(materialized_literal_count),
+ expected_property_count_(expected_property_count),
+ num_parameters_(num_parameters),
+ start_position_(start_position),
+ end_position_(end_position),
+ is_expression_(is_expression),
+ function_token_position_(kNoPosition) {
+ }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual FunctionLiteral* AsFunctionLiteral() { return this; }
+
+ Handle<String> name() const { return name_; }
+ Scope* scope() const { return scope_; }
+ ZoneList<Statement*>* body() const { return body_; }
+ void set_function_token_position(int pos) { function_token_position_ = pos; }
+ int function_token_position() const { return function_token_position_; }
+ int start_position() const { return start_position_; }
+ int end_position() const { return end_position_; }
+ bool is_expression() const { return is_expression_; }
+
+ int materialized_literal_count() { return materialized_literal_count_; }
+ int expected_property_count() { return expected_property_count_; }
+ int num_parameters() { return num_parameters_; }
+
+ bool AllowsLazyCompilation();
+
+ private:
+ Handle<String> name_;
+ Scope* scope_;
+ ZoneList<Statement*>* body_;
+ int materialized_literal_count_;
+ int expected_property_count_;
+ int num_parameters_;
+ int start_position_;
+ int end_position_;
+ bool is_expression_;
+ int function_token_position_;
+};
+
+
+class FunctionBoilerplateLiteral: public Expression {
+ public:
+ explicit FunctionBoilerplateLiteral(Handle<JSFunction> boilerplate)
+ : boilerplate_(boilerplate) {
+ ASSERT(boilerplate->IsBoilerplate());
+ }
+
+ Handle<JSFunction> boilerplate() const { return boilerplate_; }
+
+ virtual void Accept(Visitor* v);
+
+ private:
+ Handle<JSFunction> boilerplate_;
+};
+
+
+class ThisFunction: public Expression {
+ public:
+ virtual void Accept(Visitor* v);
+};
+
+
+// ----------------------------------------------------------------------------
+// Basic visitor
+// - leaf node visitors are abstract.
+
+class Visitor BASE_EMBEDDED {
+ public:
+ Visitor() : stack_overflow_(false) { }
+ virtual ~Visitor() { }
+
+ // Dispatch
+ void Visit(Node* node) { node->Accept(this); }
+
+ // Iteration
+ virtual void VisitStatements(ZoneList<Statement*>* statements);
+ virtual void VisitExpressions(ZoneList<Expression*>* expressions);
+
+ // Stack overflow tracking support.
+ bool HasStackOverflow() const { return stack_overflow_; }
+ bool CheckStackOverflow() {
+ if (stack_overflow_) return true;
+ StackLimitCheck check;
+ if (!check.HasOverflowed()) return false;
+ return (stack_overflow_ = true);
+ }
+
+ // Individual nodes
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) = 0;
+ NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+ bool stack_overflow_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_AST_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "macro-assembler.h"
+#include "natives.h"
+
+namespace v8 { namespace internal {
+
+DEFINE_string(natives_file, NULL, "alternative natives file"); // for debugging
+DEFINE_bool(expose_gc, false, "expose gc extension"); // for debugging
+
+// A SourceCodeCache uses a FixedArray to store pairs of
+// (AsciiString*, JSFunction*), mapping names of native code files
+// (runtime.js, etc.) to precompiled functions. Instead of mapping
+// names to functions it might make sense to let the JS2C tool
+// generate an index for each native JS file.
+class SourceCodeCache BASE_EMBEDDED {
+ public:
+ explicit SourceCodeCache(ScriptType type): type_(type) { }
+
+ void Initialize(bool create_heap_objects) {
+ if (create_heap_objects) {
+ cache_ = Heap::empty_fixed_array();
+ } else {
+ cache_ = NULL;
+ }
+ }
+
+ void Iterate(ObjectVisitor* v) {
+ v->VisitPointer(reinterpret_cast<Object**>(&cache_));
+ }
+
+
+ bool Lookup(Vector<const char> name, Handle<JSFunction>* handle) {
+ for (int i = 0; i < cache_->length(); i+=2) {
+ AsciiString* str = AsciiString::cast(cache_->get(i));
+ if (str->IsEqualTo(name)) {
+ *handle = Handle<JSFunction>(JSFunction::cast(cache_->get(i + 1)));
+ return true;
+ }
+ }
+ return false;
+ }
+
+
+ void Add(Vector<const char> name, Handle<JSFunction> fun) {
+ ASSERT(fun->IsBoilerplate());
+ HandleScope scope;
+ int length = cache_->length();
+ Handle<FixedArray> new_array =
+ Factory::NewFixedArray(length + 2, TENURED);
+ cache_->CopyTo(0, *new_array, 0, cache_->length());
+ cache_ = *new_array;
+ Handle<String> str = Factory::NewStringFromAscii(name, TENURED);
+ cache_->set(length, *str);
+ cache_->set(length + 1, *fun);
+ Script::cast(fun->shared()->script())->set_type(Smi::FromInt(type_));
+ }
+
+ private:
+ ScriptType type_;
+ FixedArray* cache_;
+ DISALLOW_EVIL_CONSTRUCTORS(SourceCodeCache);
+};
+
+static SourceCodeCache natives_cache(SCRIPT_TYPE_NATIVE);
+static SourceCodeCache extensions_cache(SCRIPT_TYPE_EXTENSION);
+
+
+Handle<String> Bootstrapper::NativesSourceLookup(int index) {
+ ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
+ if (Heap::natives_source_cache()->get(index)->IsUndefined()) {
+ Handle<String> source_code =
+ Factory::NewStringFromAscii(Natives::GetScriptSource(index));
+ Heap::natives_source_cache()->set(index, *source_code);
+ }
+ Handle<Object> cached_source(Heap::natives_source_cache()->get(index));
+ return Handle<String>::cast(cached_source);
+}
+
+
+bool Bootstrapper::NativesCacheLookup(Vector<const char> name,
+ Handle<JSFunction>* handle) {
+ return natives_cache.Lookup(name, handle);
+}
+
+
+void Bootstrapper::NativesCacheAdd(Vector<const char> name,
+ Handle<JSFunction> fun) {
+ natives_cache.Add(name, fun);
+}
+
+
+void Bootstrapper::Initialize(bool create_heap_objects) {
+ natives_cache.Initialize(create_heap_objects);
+ extensions_cache.Initialize(create_heap_objects);
+}
+
+
+void Bootstrapper::TearDown() {
+ natives_cache.Initialize(false); // Yes, symmetrical
+ extensions_cache.Initialize(false);
+}
+
+
+// Pending fixups are code positions that refer to builtin code
+// objects that were not available at the time the code was generated.
+// The pending list is processed whenever an environment has been
+// created.
+class PendingFixups : public AllStatic {
+ public:
+ static void Add(Code* code, MacroAssembler* masm);
+ static bool Process(Handle<JSBuiltinsObject> builtins);
+
+ static void Iterate(ObjectVisitor* v);
+
+ private:
+ static List<Object*> code_;
+ static List<const char*> name_;
+ static List<int> pc_;
+ static List<uint32_t> flags_;
+
+ static void Clear();
+};
+
+
+List<Object*> PendingFixups::code_(0);
+List<const char*> PendingFixups::name_(0);
+List<int> PendingFixups::pc_(0);
+List<uint32_t> PendingFixups::flags_(0);
+
+
+void PendingFixups::Add(Code* code, MacroAssembler* masm) {
+ // Note this code is not only called during bootstrapping.
+ List<MacroAssembler::Unresolved>* unresolved = masm->unresolved();
+ int n = unresolved->length();
+ for (int i = 0; i < n; i++) {
+ const char* name = unresolved->at(i).name;
+ code_.Add(code);
+ name_.Add(name);
+ pc_.Add(unresolved->at(i).pc);
+ flags_.Add(unresolved->at(i).flags);
+ LOG(StringEvent("unresolved", name));
+ }
+}
+
+
+bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
+ HandleScope scope;
+ // NOTE: Extra fixups may be added to the list during the iteration
+ // due to lazy compilation of functions during the processing. Do not
+ // cache the result of getting the length of the code list.
+ for (int i = 0; i < code_.length(); i++) {
+ const char* name = name_[i];
+ uint32_t flags = flags_[i];
+ Handle<String> symbol = Factory::LookupAsciiSymbol(name);
+ Object* o = builtins->GetProperty(*symbol);
+#ifdef DEBUG
+ if (!o->IsJSFunction()) {
+ V8_Fatal(__FILE__, __LINE__, "Cannot resolve call to builtin %s", name);
+ }
+#endif
+ Handle<JSFunction> f = Handle<JSFunction>(JSFunction::cast(o));
+ // Make sure the number of parameters match the formal parameter count.
+ int argc = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
+ USE(argc);
+ ASSERT(f->shared()->formal_parameter_count() == argc);
+ if (!f->is_compiled()) {
+ // Do lazy compilation and check for stack overflows.
+ if (!CompileLazy(f, CLEAR_EXCEPTION)) {
+ Clear();
+ return false;
+ }
+ }
+ Code* code = Code::cast(code_[i]);
+ Address pc = code->instruction_start() + pc_[i];
+ bool is_pc_relative = Bootstrapper::FixupFlagsIsPCRelative::decode(flags);
+ if (is_pc_relative) {
+ Assembler::set_target_address_at(pc, f->code()->instruction_start());
+ } else {
+ *reinterpret_cast<Object**>(pc) = f->code();
+ }
+ LOG(StringEvent("resolved", name));
+ }
+ Clear();
+
+ // TODO(1240818): We should probably try to avoid doing this for all
+ // the V8 builtin JS files. It should only happen after running
+ // runtime.js - just like there shouldn't be any fixups left after
+ // that.
+ for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
+ Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
+ Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
+ JSFunction* function = JSFunction::cast(builtins->GetProperty(*name));
+ builtins->set_javascript_builtin(id, function);
+ }
+
+ return true;
+}
+
+
+void PendingFixups::Clear() {
+ code_.Clear();
+ name_.Clear();
+ pc_.Clear();
+ flags_.Clear();
+}
+
+
+void PendingFixups::Iterate(ObjectVisitor* v) {
+ if (!code_.is_empty()) {
+ v->VisitPointers(&code_[0], &code_[0] + code_.length());
+ }
+}
+
+
+class Genesis BASE_EMBEDDED {
+ public:
+ Genesis(Handle<Object> global_object,
+ v8::Handle<v8::ObjectTemplate> global_template,
+ v8::ExtensionConfiguration* extensions);
+ ~Genesis();
+
+ Handle<Context> result() { return result_; }
+
+ Genesis* previous() { return previous_; }
+ static Genesis* current() { return current_; }
+
+ private:
+ Handle<Context> global_context_;
+
+ // There may be more than one active genesis object: When GC is
+ // triggered during environment creation there may be weak handle
+ // processing callbacks which may create new environments.
+ Genesis* previous_;
+ static Genesis* current_;
+
+ Handle<Context> global_context() { return global_context_; }
+
+ void CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
+ Handle<Object> global_object);
+ void InstallNativeFunctions();
+ bool InstallNatives();
+ bool InstallExtensions(v8::ExtensionConfiguration* extensions);
+ bool InstallExtension(const char* name);
+ bool InstallExtension(v8::RegisteredExtension* current);
+ bool ConfigureGlobalObject(v8::Handle<v8::ObjectTemplate> global_template);
+
+ // Migrates all properties from the 'from' object to the 'to'
+ // object and overrides the prototype in 'to' with the one from
+ // 'from'.
+ void TransferObject(Handle<JSObject> from, Handle<JSObject> to);
+ void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
+ void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
+
+ Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
+ bool make_prototype_read_only,
+ bool make_prototype_enumerable = false);
+ void MakeFunctionInstancePrototypeWritable();
+
+ void AddSpecialFunction(Handle<JSObject> prototype,
+ const char* name,
+ Handle<Code> code,
+ int parameter_count);
+
+ void BuildSpecialFunctionTable();
+
+ static bool CompileBuiltin(int index);
+ static bool CompileNative(Vector<const char> name, Handle<String> source);
+ static bool CompileScriptCached(Vector<const char> name,
+ Handle<String> source,
+ SourceCodeCache* cache,
+ v8::Extension* extension,
+ bool use_runtime_context);
+
+ Handle<Context> result_;
+};
+
+Genesis* Genesis::current_ = NULL;
+
+
+void Bootstrapper::Iterate(ObjectVisitor* v) {
+ natives_cache.Iterate(v);
+ extensions_cache.Iterate(v);
+ PendingFixups::Iterate(v);
+}
+
+
+// While setting up the environment, we collect code positions that
+// need to be patched before we can run any code in the environment.
+void Bootstrapper::AddFixup(Code* code, MacroAssembler* masm) {
+ PendingFixups::Add(code, masm);
+}
+
+
+bool Bootstrapper::IsActive() {
+ return Genesis::current() != NULL;
+}
+
+
+Handle<Context> Bootstrapper::CreateEnvironment(
+ Handle<Object> global_object,
+ v8::Handle<v8::ObjectTemplate> global_template,
+ v8::ExtensionConfiguration* extensions) {
+ Genesis genesis(global_object, global_template, extensions);
+ return genesis.result();
+}
+
+
+Genesis::~Genesis() {
+ ASSERT(current_ == this);
+ current_ = previous_;
+}
+
+static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
+ const char* name,
+ InstanceType type,
+ int instance_size,
+ Handle<JSObject> prototype,
+ Builtins::Name call,
+ bool is_ecma_native) {
+ Handle<String> symbol = Factory::LookupAsciiSymbol(name);
+ Handle<Code> call_code = Handle<Code>(Builtins::builtin(call));
+ Handle<JSFunction> function =
+ Factory::NewFunctionWithPrototype(symbol,
+ type,
+ instance_size,
+ prototype,
+ call_code,
+ is_ecma_native);
+ SetProperty(target, symbol, function, DONT_ENUM);
+ if (is_ecma_native) {
+ function->shared()->set_instance_class_name(*symbol);
+ }
+ return function;
+}
+
+
+Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
+ bool make_prototype_read_only,
+ bool make_prototype_enumerable) {
+ Handle<DescriptorArray> result = Factory::empty_descriptor_array();
+
+ // Add prototype.
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(
+ (make_prototype_enumerable ? 0 : DONT_ENUM)
+ | DONT_DELETE
+ | (make_prototype_read_only ? READ_ONLY : 0));
+ result =
+ Factory::CopyAppendProxyDescriptor(
+ result,
+ Factory::prototype_symbol(),
+ Factory::NewProxy(&Accessors::FunctionPrototype),
+ attributes);
+
+ attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ // Add length.
+ result =
+ Factory::CopyAppendProxyDescriptor(
+ result,
+ Factory::length_symbol(),
+ Factory::NewProxy(&Accessors::FunctionLength),
+ attributes);
+
+ // Add name.
+ result =
+ Factory::CopyAppendProxyDescriptor(
+ result,
+ Factory::name_symbol(),
+ Factory::NewProxy(&Accessors::FunctionName),
+ attributes);
+
+ // Add arguments.
+ result =
+ Factory::CopyAppendProxyDescriptor(
+ result,
+ Factory::arguments_symbol(),
+ Factory::NewProxy(&Accessors::FunctionArguments),
+ attributes);
+
+ // Add caller.
+ result =
+ Factory::CopyAppendProxyDescriptor(
+ result,
+ Factory::caller_symbol(),
+ Factory::NewProxy(&Accessors::FunctionCaller),
+ attributes);
+
+ return result;
+}
+
+
+// Creates the initial object graph for a new global context: the global
+// context itself, the function maps, the core constructors (Object,
+// Function, Array, Number, Boolean, String, Date, RegExp), the arguments
+// boilerplate, and assorted bootstrapping support objects. The order of
+// the allocations below matters: later objects reference earlier ones.
+void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
+                          Handle<Object> global_object) {
+  HandleScope scope;
+  // Allocate the global context FixedArray first and then patch the
+  // closure and extension object later (we need the empty function
+  // and the global object, but in order to create those, we need the
+  // global context).
+  global_context_ =
+      Handle<Context>::cast(
+          GlobalHandles::Create(*Factory::NewGlobalContext()));
+  Top::set_security_context(*global_context());
+  Top::set_context(*global_context());
+
+  // Allocate the message listeners object.
+  v8::NeanderArray listeners;
+  global_context()->set_message_listeners(*listeners.value());
+
+  // Allocate the debug event listeners object.
+  v8::NeanderArray debug_event_listeners;
+  global_context()->set_debug_event_listeners(*debug_event_listeners.value());
+
+  // Allocate the map for function instances.
+  Handle<Map> fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  global_context()->set_function_instance_map(*fm);
+  // Please note that the prototype property for function instances must be
+  // writable.
+  Handle<DescriptorArray> function_map_descriptors =
+      ComputeFunctionInstanceDescriptor(false, true);
+  fm->set_instance_descriptors(*function_map_descriptors);
+
+  // Allocate the function map first and then patch the prototype later
+  fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  global_context()->set_function_map(*fm);
+  // Here the prototype is read-only (it is made writable again once
+  // bootstrapping is done, see MakeFunctionInstancePrototypeWritable).
+  function_map_descriptors = ComputeFunctionInstanceDescriptor(true);
+  fm->set_instance_descriptors(*function_map_descriptors);
+
+  Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+
+  { // --- O b j e c t ---
+    Handle<JSFunction> object_fun =
+        Factory::NewFunction(object_name, Factory::null_value());
+    Handle<Map> object_function_map =
+        Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+    object_fun->set_initial_map(*object_function_map);
+    object_function_map->set_constructor(*object_fun);
+
+    global_context()->set_object_function(*object_fun);
+
+    // Allocate a new prototype for the object function.
+    Handle<JSObject> prototype = Factory::NewJSObject(Top::object_function(),
+                                                      TENURED);
+
+    global_context()->set_initial_object_prototype(*prototype);
+    SetPrototype(object_fun, prototype);
+    object_function_map->set_instance_descriptors(
+        DescriptorArray::cast(Heap::empty_fixed_array()));
+  }
+
+  // Allocate the empty function as the prototype for function ECMAScript
+  // 262 15.3.4.
+  Handle<String> symbol = Factory::LookupAsciiSymbol("Empty");
+  Handle<JSFunction> empty_function =
+      Factory::NewFunction(symbol, Factory::null_value());
+
+  { // --- E m p t y ---
+    Handle<Code> call_code =
+        Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
+
+    empty_function->set_code(*call_code);
+    global_context()->function_map()->set_prototype(*empty_function);
+    global_context()->function_instance_map()->set_prototype(*empty_function);
+
+    // Allocate the function map first and then patch the prototype later
+    Handle<Map> empty_fm = Factory::CopyMap(fm);
+    empty_fm->set_prototype(global_context()->object_function()->prototype());
+    empty_function->set_map(*empty_fm);
+  }
+
+  { // --- G l o b a l ---
+    Handle<String> global_name = Factory::LookupAsciiSymbol("global");
+    Handle<JSFunction> global_function;
+
+    if (global_template.IsEmpty()) {
+      // No template supplied: create an anonymous hidden constructor for
+      // the global object.
+      Handle<String> name = Handle<String>(Heap::empty_symbol());
+      Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+      global_function = Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                                             JSGlobalObject::kSize, code, true);
+      // Change the constructor property of the prototype of the
+      // hidden global function to refer to the Object function.
+      Handle<JSObject> prototype =
+          Handle<JSObject>(
+              JSObject::cast(global_function->instance_prototype()));
+      SetProperty(prototype, Factory::constructor_symbol(),
+                  Top::object_function(), NONE);
+    } else {
+      // Derive the global constructor from the embedder's template.
+      Handle<ObjectTemplateInfo> data = v8::Utils::OpenHandle(*global_template);
+      Handle<FunctionTemplateInfo> global_constructor =
+          Handle<FunctionTemplateInfo>(
+              FunctionTemplateInfo::cast(data->constructor()));
+      global_function = Factory::CreateApiFunction(global_constructor, true);
+    }
+
+    SetExpectedNofProperties(global_function, 100);
+    global_function->shared()->set_instance_class_name(*global_name);
+    global_function->initial_map()->set_needs_access_check();
+
+    Handle<JSGlobalObject> object;
+    if (global_object.location() != NULL) {
+      // Reuse the global object supplied by the embedder.
+      ASSERT(global_object->IsJSGlobalObject());
+      object =
+          ReinitializeJSGlobalObject(
+              global_function,
+              Handle<JSGlobalObject>::cast(global_object));
+    } else {
+      object =
+          Handle<JSGlobalObject>::cast(Factory::NewJSObject(global_function,
+                                                            TENURED));
+    }
+
+    // Set the global context for the global object.
+    object->set_global_context(*global_context());
+
+    // Security setup. Set the security token of the global object to
+    // itself.
+    object->set_security_token(*object);
+
+    { // --- G l o b a l C o n t e x t ---
+      // use the empty function as closure (no scope info)
+      global_context()->set_closure(*empty_function);
+      global_context()->set_fcontext(*global_context());
+      global_context()->set_previous(NULL);
+
+      // set extension and global object
+      global_context()->set_extension(*object);
+      global_context()->set_global(*object);
+    }
+
+    Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+    SetProperty(global, object_name, Top::object_function(), DONT_ENUM);
+  }
+
+  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+
+  // Install global Function object
+  InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
+                  empty_function, Builtins::Illegal, true);  // ECMA native.
+
+  { // --- A r r a y ---
+    Handle<JSFunction> array_function =
+      InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
+                      Top::initial_object_prototype(), Builtins::ArrayCode,
+                      true);
+
+    // This seems a bit hackish, but we need to make sure Array.length
+    // is 1.
+    array_function->shared()->set_length(1);
+    Handle<DescriptorArray> array_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            Factory::empty_descriptor_array(),
+            Factory::length_symbol(),
+            Factory::NewProxy(&Accessors::ArrayLength),
+            static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
+
+    // Cache the fast JavaScript array map
+    global_context()->set_js_array_map(array_function->initial_map());
+    global_context()->js_array_map()->set_instance_descriptors(
+        *array_descriptors);
+    // array_function is used internally. JS code creating array object should
+    // search for the 'Array' property on the global object and use that one
+    // as the constructor. 'Array' property on a global object can be
+    // overwritten by JS code.
+    global_context()->set_array_function(*array_function);
+  }
+
+  { // --- N u m b e r ---
+    Handle<JSFunction> number_fun =
+        InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+    global_context()->set_number_function(*number_fun);
+  }
+
+  { // --- B o o l e a n ---
+    Handle<JSFunction> boolean_fun =
+        InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+    global_context()->set_boolean_function(*boolean_fun);
+  }
+
+  { // --- S t r i n g ---
+    Handle<JSFunction> string_fun =
+        InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+    global_context()->set_string_function(*string_fun);
+    // Add 'length' property to strings.
+    Handle<DescriptorArray> string_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            Factory::empty_descriptor_array(),
+            Factory::length_symbol(),
+            Factory::NewProxy(&Accessors::StringLength),
+            static_cast<PropertyAttributes>(DONT_ENUM |
+                                            DONT_DELETE |
+                                            READ_ONLY));
+
+    Handle<Map> string_map =
+        Handle<Map>(global_context()->string_function()->initial_map());
+    string_map->set_instance_descriptors(*string_descriptors);
+  }
+
+  { // --- D a t e ---
+    // Builtin functions for Date.prototype.
+    Handle<JSFunction> date_fun =
+        InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+
+    global_context()->set_date_function(*date_fun);
+  }
+
+
+  { // -- R e g E x p
+    // Builtin functions for RegExp.prototype.
+    Handle<JSFunction> regexp_fun =
+        InstallFunction(global, "RegExp", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+
+    global_context()->set_regexp_function(*regexp_fun);
+  }
+
+  { // --- arguments_boilerplate_
+    // Make sure we can recognize argument objects at runtime.
+    // This is done by introducing an anonymous function with
+    // class_name equals 'Arguments'.
+    Handle<String> symbol = Factory::LookupAsciiSymbol("Arguments");
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    Handle<JSObject> prototype =
+        Handle<JSObject>(
+            JSObject::cast(global_context()->object_function()->prototype()));
+    Handle<JSFunction> function =
+        Factory::NewFunctionWithPrototype(symbol, JS_OBJECT_TYPE,
+                                          JSObject::kHeaderSize, prototype,
+                                          code, true);
+    function->shared()->set_instance_class_name(*symbol);
+
+    Handle<JSObject> result = Factory::NewJSObject(function);
+
+    global_context()->set_arguments_boilerplate(*result);
+    // Note: callee must be added as the first property and
+    // length must be added as the second property.
+    SetProperty(result, Factory::callee_symbol(), Factory::undefined_value(),
+                DONT_ENUM);
+    SetProperty(result, Factory::length_symbol(), Factory::undefined_value(),
+                DONT_ENUM);
+
+    // Check the state of the object.
+    ASSERT(result->HasFastProperties());
+    ASSERT(result->HasFastElements());
+  }
+
+  { // --- context extension
+    // Create a function for the context extension objects.
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    Handle<JSFunction> context_extension_fun =
+        Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+                             JSObject::kHeaderSize, code, true);
+
+    Handle<String> name = Factory::LookupAsciiSymbol("context_extension");
+    context_extension_fun->shared()->set_instance_class_name(*name);
+    global_context()->set_context_extension_function(*context_extension_fun);
+  }
+
+  // Setup the call-as-function delegate.
+  Handle<Code> code =
+      Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsFunction));
+  Handle<JSFunction> delegate =
+      Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+                           JSObject::kHeaderSize, code, true);
+  global_context()->set_call_as_function_delegate(*delegate);
+
+  // The special function table starts out empty; AddSpecialFunction grows it.
+  global_context()->set_special_function_table(Heap::empty_fixed_array());
+
+  // Initialize the out of memory slot.
+  global_context()->set_out_of_memory(Heap::false_value());
+}
+
+
+bool Genesis::CompileBuiltin(int index) {
+  // Compiles and runs one of the bundled natives scripts, identified by
+  // its index in the natives table.
+  return CompileNative(Natives::GetScriptName(index),
+                       Bootstrapper::NativesSourceLookup(index));
+}
+
+
+bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
+  // Compiles and runs a natives script. The debugger is told that natives
+  // compilation is in progress for the duration of the call.
+  HandleScope scope;
+  Debugger::set_compiling_natives(true);
+  bool success =
+      CompileScriptCached(name, source, &natives_cache, NULL, true);
+  // Exactly one of success / pending exception must hold.
+  ASSERT(Top::has_pending_exception() != success);
+  if (!success) Top::clear_pending_exception();
+  Debugger::set_compiling_natives(false);
+  return success;
+}
+
+
+bool Genesis::CompileScriptCached(Vector<const char> name,
+                                  Handle<String> source,
+                                  SourceCodeCache* cache,
+                                  v8::Extension* extension,
+                                  bool use_runtime_context) {
+  // Compiles the given source (through a boilerplate cache) and runs it,
+  // either in the runtime (builtins) context or in the current context.
+  HandleScope scope;
+
+  // Reuse a previously compiled boilerplate when possible; otherwise
+  // compile now and remember the result for the next time.
+  Handle<JSFunction> boilerplate;
+  if (!cache->Lookup(name, &boilerplate)) {
+#ifdef DEBUG
+    ASSERT(source->IsAscii());
+#endif
+    Handle<String> script_name = Factory::NewStringFromUtf8(name);
+    boilerplate =
+        Compiler::Compile(source, script_name, 0, 0, extension, NULL);
+    if (boilerplate.is_null()) return false;
+    cache->Add(name, boilerplate);
+  }
+
+  // Setup the function context. Conceptually, we should clone the
+  // function before overwriting the context but since we're in a
+  // single-threaded environment it is not strictly necessary.
+  ASSERT(Top::context()->IsGlobalContext());
+  Handle<Context> context =
+      Handle<Context>(use_runtime_context
+                      ? Top::context()->runtime_context()
+                      : Top::context());
+  Handle<JSFunction> script_fun =
+      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+
+  // Run the script with no arguments. The receiver is the builtins
+  // object when running natives and the global object otherwise.
+  Handle<Object> receiver =
+      Handle<Object>(use_runtime_context
+                     ? Top::context()->builtins()
+                     : Top::context()->global());
+  bool has_pending_exception;
+  Handle<Object> call_result =
+      Execution::Call(script_fun, receiver, 0, NULL, &has_pending_exception);
+  if (has_pending_exception) return false;
+
+  // Process any code fixups registered while running the script.
+  return PendingFixups::Process(
+      Handle<JSBuiltinsObject>(Top::context()->builtins()));
+}
+
+
+// Looks up the property 'name' on the builtins object, casts it to 'Type'
+// and stores it in the 'var' slot of the global context. Each expansion
+// declares a local symbol handle named var##_name, so the macro can only
+// be used once per var within a scope.
+#define INSTALL_NATIVE(Type, name, var) \
+  Handle<String> var##_name = Factory::LookupAsciiSymbol(name); \
+  global_context()->set_##var(Type::cast(global_context()-> \
+                                           builtins()-> \
+                                             GetProperty(*var##_name)));
+
+// Caches references to the JavaScript-defined runtime support functions
+// (created by the natives scripts) in the global context so that C++
+// code can call them without a property lookup.
+void Genesis::InstallNativeFunctions() {
+  HandleScope scope;
+  INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
+  INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
+  INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
+  INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
+  INSTALL_NATIVE(JSFunction, "ToObject", to_object_fun);
+  INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
+  INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
+  INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
+  INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
+  INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
+  INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
+                 configure_instance_fun);
+  INSTALL_NATIVE(JSFunction, "MakeMessage", make_message_fun);
+  INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
+  INSTALL_NATIVE(JSObject, "functionCache", function_cache);
+}
+
+#undef INSTALL_NATIVE
+
+
+// Creates the builtins object and its context, installs the Script type,
+// then compiles and runs the natives scripts (from the bundled natives or
+// from --natives-file). Returns false if any script fails.
+bool Genesis::InstallNatives() {
+  HandleScope scope;
+
+  // Create a function for the builtins object. Allocate space for the
+  // JavaScript builtins, a reference to the builtins object
+  // (itself) and a reference to the global_context directly in the object.
+  Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+  Handle<JSFunction> builtins_fun =
+      Factory::NewFunction(Factory::empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
+                           JSBuiltinsObject::kSize, code, true);
+
+  Handle<String> name = Factory::LookupAsciiSymbol("builtins");
+  builtins_fun->shared()->set_instance_class_name(*name);
+  SetExpectedNofProperties(builtins_fun, 100);
+
+  // Allocate the builtins object.
+  Handle<JSBuiltinsObject> builtins =
+      Handle<JSBuiltinsObject>::cast(Factory::NewJSObject(builtins_fun,
+                                                          TENURED));
+  builtins->set_builtins(*builtins);
+  builtins->set_global_context(*global_context());
+
+  // Setup the 'global' properties of the builtins object. The
+  // 'global' property that refers to the global object is the only
+  // way to get from code running in the builtins context to the
+  // global object.
+  static const PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+  SetProperty(builtins, Factory::LookupAsciiSymbol("global"),
+              Handle<Object>(global_context()->global()), attributes);
+
+  // Setup the reference from the global object to the builtins object.
+  JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
+
+  // Create a bridge function that has context in the global context.
+  Handle<JSFunction> bridge =
+      Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value());
+  ASSERT(bridge->context() == *Top::global_context());
+
+  // Allocate the builtins context.
+  Handle<Context> context =
+      Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+  context->set_global(*builtins);  // override builtins global object
+
+  global_context()->set_runtime_context(*context);
+
+  { // -- S c r i p t
+    // Builtin functions for Script.
+    Handle<JSFunction> script_fun =
+        InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        false);
+    Handle<JSObject> prototype =
+        Factory::NewJSObject(Top::object_function(), TENURED);
+    SetPrototype(script_fun, prototype);
+    global_context()->set_script_function(*script_fun);
+
+    // Add 'source' and 'data' property to scripts.
+    PropertyAttributes common_attributes =
+        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+    Handle<Proxy> proxy_source = Factory::NewProxy(&Accessors::ScriptSource);
+    Handle<DescriptorArray> script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            Factory::empty_descriptor_array(),
+            Factory::LookupAsciiSymbol("source"),
+            proxy_source,
+            common_attributes);
+    Handle<Proxy> proxy_data = Factory::NewProxy(&Accessors::ScriptName);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("name"),
+            proxy_data,
+            common_attributes);
+    Handle<Proxy> proxy_line_offset =
+        Factory::NewProxy(&Accessors::ScriptLineOffset);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("line_offset"),
+            proxy_line_offset,
+            common_attributes);
+    Handle<Proxy> proxy_column_offset =
+        Factory::NewProxy(&Accessors::ScriptColumnOffset);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("column_offset"),
+            proxy_column_offset,
+            common_attributes);
+    Handle<Proxy> proxy_type = Factory::NewProxy(&Accessors::ScriptType);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("type"),
+            proxy_type,
+            common_attributes);
+
+    Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+    script_map->set_instance_descriptors(*script_descriptors);
+
+    // Allocate the empty script.
+    Handle<Script> script = Factory::NewScript(Factory::empty_string());
+    global_context()->set_empty_script(*script);
+  }
+
+  if (FLAG_natives_file == NULL) {
+    // Without natives file, install default natives.
+    for (int i = Natives::GetDelayCount();
+         i < Natives::GetBuiltinsCount();
+         i++) {
+      if (!CompileBuiltin(i)) return false;
+    }
+
+    // Setup natives with lazy loading.
+    SetupLazy(Handle<JSFunction>(global_context()->date_function()),
+              Natives::GetIndex("date"),
+              Top::global_context(),
+              Handle<Context>(Top::context()->runtime_context()),
+              Handle<Context>(Top::security_context()));
+    SetupLazy(Handle<JSFunction>(global_context()->regexp_function()),
+              Natives::GetIndex("regexp"),
+              Top::global_context(),
+              Handle<Context>(Top::context()->runtime_context()),
+              Handle<Context>(Top::security_context()));
+
+  } else if (strlen(FLAG_natives_file) != 0) {
+    // Otherwise install natives from natives file if file exists and
+    // compiles.
+    bool exists;
+    Vector<const char> source = ReadFile(FLAG_natives_file, &exists);
+    // Fail before allocating a string if the file could not be read or
+    // was empty. The original code created the string first and checked
+    // emptiness afterwards, and never consulted 'exists' at all.
+    if (!exists || source.is_empty()) return false;
+    // NOTE(review): the storage behind 'source' appears to be leaked here;
+    // confirm the ownership contract of ReadFile.
+    Handle<String> source_string = Factory::NewStringFromAscii(source);
+    bool result = CompileNative(CStrVector(FLAG_natives_file), source_string);
+    if (!result) return false;
+
+  } else {
+    // Empty natives file name - do not install any natives.
+    PrintF("Warning: Running without installed natives!\n");
+    return true;
+  }
+
+  InstallNativeFunctions();
+
+#ifndef USE_OLD_CALLING_CONVENTIONS
+  // TODO(1240778): Get rid of the JS implementation of
+  // Function.prototype.call and simply create a function with the
+  // faked formal parameter count (-1) and use the illegal builtin as
+  // the code for it.
+
+  // Find Function.prototype.call and set its number of formal
+  // parameters to -1 to let the arguments adaptor handle it
+  // specially.
+  { Handle<JSFunction> function =
+        Handle<JSFunction>::cast(GetProperty(Top::global(),
+                                             Factory::function_class_symbol()));
+    Handle<JSObject> proto =
+        Handle<JSObject>(JSObject::cast(function->instance_prototype()));
+    Handle<JSFunction> call =
+        Handle<JSFunction>::cast(GetProperty(proto, Factory::call_symbol()));
+    call->shared()->set_formal_parameter_count(-1);
+
+    // Make sure that Function.prototype.call appears to be compiled.
+    // The code will never be called, but inline caching for call will
+    // only work if it appears to be compiled.
+    call->shared()->set_code(Builtins::builtin(Builtins::Illegal));
+    ASSERT(call->is_compiled());
+
+    // Use the specialized builtin for Function.prototype.apply.
+    Handle<JSFunction> apply =
+        Handle<JSFunction>::cast(GetProperty(proto, Factory::apply_symbol()));
+    apply->shared()->set_code(Builtins::builtin(Builtins::FunctionApply));
+  }
+#endif
+
+  // Make sure that the builtins object has fast properties.
+  // If the ASSERT below fails, please increase the expected number of
+  // properties for the builtins object.
+  ASSERT(builtins->HasFastProperties());
+#ifdef DEBUG
+  builtins->Verify();
+#endif
+  return true;
+}
+
+
+bool Genesis::InstallExtensions(v8::ExtensionConfiguration* extensions) {
+  // Reset the traversal state of every registered extension so that the
+  // dependency walk in InstallExtension can detect cycles.
+  for (v8::RegisteredExtension* it =
+           v8::RegisteredExtension::first_extension();
+       it != NULL;
+       it = it->next()) {
+    it->set_state(v8::UNVISITED);
+  }
+
+  // Install every extension that asked to be enabled automatically.
+  for (v8::RegisteredExtension* it =
+           v8::RegisteredExtension::first_extension();
+       it != NULL;
+       it = it->next()) {
+    if (it->extension()->auto_enable()) InstallExtension(it);
+  }
+
+  if (FLAG_expose_gc) InstallExtension("v8/gc");
+
+  if (extensions == NULL) return true;
+  // Install the extensions the embedder explicitly requested; unlike the
+  // auto extensions above, a failure here aborts the whole installation.
+  int count = v8::ImplementationUtilities::GetNameCount(extensions);
+  const char** names = v8::ImplementationUtilities::GetNames(extensions);
+  for (int i = 0; i < count; i++) {
+    if (!InstallExtension(names[i])) return false;
+  }
+
+  return true;
+}
+
+
+// Installs the extension registered under the given name. The lookup is a
+// linear scan of the registration list, which is fine for the small number
+// of extensions we expect but does not scale to very many.
+bool Genesis::InstallExtension(const char* name) {
+  v8::RegisteredExtension* candidate =
+      v8::RegisteredExtension::first_extension();
+  while (candidate != NULL &&
+         strcmp(name, candidate->extension()->name()) != 0) {
+    candidate = candidate->next();
+  }
+  if (candidate == NULL) {
+    // Didn't find the extension; fail.
+    v8::Utils::ReportApiFailure(
+        "v8::Context::New()", "Cannot find required extension");
+    return false;
+  }
+  return InstallExtension(candidate);
+}
+
+
+// Installs a registered extension after recursively installing its
+// dependencies. Uses the extension's traversal state to detect dependency
+// cycles and to avoid installing the same extension twice.
+bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
+  HandleScope scope;
+
+  // Already installed on a previous visit: nothing to do.
+  if (current->state() == v8::INSTALLED) return true;
+  // The current node has already been visited so there must be a
+  // cycle in the dependency graph; fail.
+  if (current->state() == v8::VISITED) {
+    v8::Utils::ReportApiFailure(
+        "v8::Context::New()", "Circular extension dependency");
+    return false;
+  }
+  ASSERT(current->state() == v8::UNVISITED);
+  current->set_state(v8::VISITED);
+  v8::Extension* extension = current->extension();
+  // Install the extension's dependencies before the extension itself.
+  for (int i = 0; i < extension->dependency_count(); i++) {
+    if (!InstallExtension(extension->dependencies()[i])) return false;
+  }
+  Vector<const char> source = CStrVector(extension->source());
+  Handle<String> source_code = Factory::NewStringFromAscii(source);
+  bool result = CompileScriptCached(CStrVector(extension->name()),
+                                    source_code,
+                                    &extensions_cache, extension,
+                                    false);
+  ASSERT(Top::has_pending_exception() != result);
+  if (!result) {
+    Top::clear_pending_exception();
+    v8::Utils::ReportApiFailure(
+        "v8::Context::New()", "Error installing extension");
+  }
+  // Only mark the extension INSTALLED on success. The original code set
+  // INSTALLED unconditionally, so a later install attempt for a failed
+  // extension would hit the fast path above and incorrectly report
+  // success.
+  if (result) current->set_state(v8::INSTALLED);
+  return result;
+}
+
+
+bool Genesis::ConfigureGlobalObject(
+    v8::Handle<v8::ObjectTemplate> global_template) {
+  // Applies the embedder-supplied global template to the global object by
+  // instantiating the template and copying the result onto the global.
+  if (global_template.IsEmpty()) return true;  // Nothing to configure.
+
+  Handle<ObjectTemplateInfo> data = v8::Utils::OpenHandle(*global_template);
+  bool pending_exception = false;
+  Handle<JSObject> instantiated =
+      Execution::InstantiateObject(data, &pending_exception);
+  if (pending_exception) {
+    ASSERT(Top::has_pending_exception());
+    Top::clear_pending_exception();
+    return false;
+  }
+  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+  TransferObject(instantiated, global);
+  return true;
+}
+
+
+// Copies all named properties from one object to another, both for
+// objects with fast properties (descriptor-array based) and for
+// dictionary-mode objects. Properties already present on the target are
+// skipped in the CALLBACKS and dictionary cases.
+void Genesis::TransferNamedProperties(Handle<JSObject> from,
+                                      Handle<JSObject> to) {
+  if (from->HasFastProperties()) {
+    Handle<DescriptorArray> descs =
+        Handle<DescriptorArray>(from->map()->instance_descriptors());
+    int offset = 0;
+    while (true) {
+      // Iterating through the descriptors is not gc safe so we have to
+      // store the value in a handle and create a new stream for each entry.
+      DescriptorReader stream(*descs, offset);
+      if (stream.eos()) break;
+      // We have to read out the next offset before we do anything that may
+      // cause a gc, since the DescriptorReader is not gc safe.
+      offset = stream.next_position();
+      PropertyDetails details = stream.GetDetails();
+      switch (details.type()) {
+        case FIELD: {
+          // In-object/backing-store field: copy the current value.
+          HandleScope inner;
+          Handle<String> key = Handle<String>(stream.GetKey());
+          int index = stream.GetFieldIndex();
+          Handle<Object> value = Handle<Object>(from->properties()->get(index));
+          SetProperty(to, key, value, details.attributes());
+          break;
+        }
+        case CONSTANT_FUNCTION: {
+          // Constant function stored directly in the descriptor.
+          HandleScope inner;
+          Handle<String> key = Handle<String>(stream.GetKey());
+          Handle<JSFunction> fun =
+              Handle<JSFunction>(stream.GetConstantFunction());
+          SetProperty(to, key, fun, details.attributes());
+          break;
+        }
+        case CALLBACKS: {
+          LookupResult result;
+          to->LocalLookup(stream.GetKey(), &result);
+          // If the property is already there we skip it
+          if (result.IsValid()) continue;
+          HandleScope inner;
+          // Accessor callbacks are copied by appending a descriptor to the
+          // target's map rather than by setting a value.
+          Handle<DescriptorArray> inst_descs =
+              Handle<DescriptorArray>(to->map()->instance_descriptors());
+          Handle<String> key = Handle<String>(stream.GetKey());
+          Handle<Object> entry = Handle<Object>(stream.GetCallbacksObject());
+          inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs,
+                                                          key,
+                                                          entry,
+                                                          details.attributes());
+          to->map()->set_instance_descriptors(*inst_descs);
+          break;
+        }
+        case MAP_TRANSITION:
+        case CONSTANT_TRANSITION:
+          // Ignore map transitions.
+          break;
+        case NORMAL:
+          // Do not occur since the from object has fast properties.
+        case INTERCEPTOR:
+          // No element in instance descriptors have interceptor type.
+          UNREACHABLE();
+          break;
+      }
+    }
+  } else {
+    // Dictionary-mode source object: walk the property dictionary.
+    Handle<Dictionary> properties =
+        Handle<Dictionary>(from->property_dictionary());
+    int capacity = properties->Capacity();
+    for (int i = 0; i < capacity; i++) {
+      Object* raw_key(properties->KeyAt(i));
+      if (properties->IsKey(raw_key)) {
+        ASSERT(raw_key->IsString());
+        // If the property is already there we skip it.
+        LookupResult result;
+        to->LocalLookup(String::cast(raw_key), &result);
+        if (result.IsValid()) continue;
+        // Set the property.
+        Handle<String> key = Handle<String>(String::cast(raw_key));
+        Handle<Object> value = Handle<Object>(properties->ValueAt(i));
+        PropertyDetails details = properties->DetailsAt(i);
+        SetProperty(to, key, value, details.attributes());
+      }
+    }
+  }
+}
+
+
+void Genesis::TransferIndexedProperties(Handle<JSObject> from,
+                                        Handle<JSObject> to) {
+  // All indexed properties live in the elements array, so giving the
+  // target a copy of that array transfers them in one step.
+  Handle<FixedArray> source_elements =
+      Handle<FixedArray>(FixedArray::cast(from->elements()));
+  to->set_elements(*Factory::CopyFixedArray(source_elements));
+}
+
+
+void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
+  // Copies named properties, indexed properties and the prototype from
+  // one object to another. Arrays are excluded because their length and
+  // elements have special semantics that this simple scheme ignores.
+  HandleScope outer;
+
+  ASSERT(!from->IsJSArray());
+  ASSERT(!to->IsJSArray());
+
+  TransferNamedProperties(from, to);
+  TransferIndexedProperties(from, to);
+
+  // Transfer the prototype: maps are shared, so give the target a fresh
+  // copy of its map with the prototype slot patched.
+  Handle<Map> patched_map = Factory::CopyMap(Handle<Map>(to->map()));
+  patched_map->set_prototype(from->map()->prototype());
+  to->set_map(*patched_map);
+}
+
+
+void Genesis::MakeFunctionInstancePrototypeWritable() {
+  // Bootstrapping is done; from now on functions created by user code
+  // get a map whose 'prototype' property is writable (and still
+  // non-enumerable).
+  HandleScope scope;
+
+  Handle<DescriptorArray> writable_descriptors =
+      ComputeFunctionInstanceDescriptor(false);
+  Handle<Map> writable_map = Factory::CopyMap(Top::function_map());
+  writable_map->set_instance_descriptors(*writable_descriptors);
+  Top::context()->global_context()->set_function_map(*writable_map);
+}
+
+
+// Registers an optimized (builtin-code) replacement for the function
+// stored under 'name' on the given prototype by appending a
+// (prototype, original, optimized) triple to the special function table.
+void Genesis::AddSpecialFunction(Handle<JSObject> prototype,
+                                 const char* name,
+                                 Handle<Code> code,
+                                 int parameter_count) {
+  Handle<String> key = Factory::LookupAsciiSymbol(name);
+  Handle<Object> value = Handle<Object>(prototype->GetProperty(*key));
+  // Only register if the JavaScript implementation is still in place.
+  if (value->IsJSFunction()) {
+    Handle<JSFunction> optimized = Factory::NewFunction(key,
+                                                        JS_OBJECT_TYPE,
+                                                        JSObject::kHeaderSize,
+                                                        code,
+                                                        false);
+    optimized->shared()->set_formal_parameter_count(parameter_count);
+    // Grow the table by one triple, copying the existing entries.
+    Handle<FixedArray> table =
+        Handle<FixedArray>(global_context()->special_function_table());
+    int old_length = table->length();
+    Handle<FixedArray> extended = Factory::NewFixedArray(old_length + 3);
+    for (int i = 0; i < old_length; i++) {
+      extended->set(i, table->get(i));
+    }
+    extended->set(old_length, *prototype);
+    extended->set(old_length + 1, *value);
+    extended->set(old_length + 2, *optimized);
+    global_context()->set_special_function_table(*extended);
+  }
+}
+
+
+void Genesis::BuildSpecialFunctionTable() {
+  HandleScope scope;
+  // Register optimized replacements for Array.prototype.pop and
+  // Array.prototype.push.
+  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+  Handle<JSFunction> array_constructor =
+      Handle<JSFunction>(
+          JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
+  Handle<JSObject> array_prototype =
+      Handle<JSObject>(JSObject::cast(array_constructor->prototype()));
+  AddSpecialFunction(array_prototype, "pop",
+                     Handle<Code>(Builtins::builtin(Builtins::ArrayPop)),
+                     0);
+  AddSpecialFunction(array_prototype, "push",
+                     Handle<Code>(Builtins::builtin(Builtins::ArrayPush)),
+                     1);
+}
+
+
+// Creates a new environment: builds the roots, installs natives and
+// extensions, and applies the global template. On any failure result_
+// remains NULL; on success it holds the new global context.
+Genesis::Genesis(Handle<Object> global_object,
+                 v8::Handle<v8::ObjectTemplate> global_template,
+                 v8::ExtensionConfiguration* extensions) {
+  // Link this genesis object into the stacked genesis chain. This
+  // must be done before any early exits because the destructor
+  // will always do unlinking.
+  previous_ = current_;
+  current_  = this;
+  result_ = NULL;
+
+  // If V8 hasn't been and cannot be initialized, just return.
+  if (!V8::HasBeenSetup() && !V8::Initialize(NULL)) return;
+
+  // Before creating the roots we must save the context and restore it
+  // on all function exits.
+  HandleScope scope;
+  SaveContext context;
+
+  CreateRoots(global_template, global_object);
+  if (!InstallNatives()) return;
+
+  MakeFunctionInstancePrototypeWritable();
+  BuildSpecialFunctionTable();
+  if (!ConfigureGlobalObject(global_template)) return;
+
+  if (!InstallExtensions(extensions)) return;
+
+  result_ = global_context_;
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_BOOTSTRAPPER_H_
+#define V8_BOOTSTRAPPER_H_
+
+namespace v8 { namespace internal {
+
+// The Bootstrapper is the public interface for creating a JavaScript global
+// context.
+class Bootstrapper : public AllStatic {
+ public:
+ // Requires: Heap::Setup has been called.
+ static void Initialize(bool create_heap_objects);
+ static void TearDown();
+
+ // Creates a JavaScript Global Context with initial object graph.
+ // The returned value is a global handle casted to V8Environment*.
+ static Handle<Context> CreateEnvironment(
+ Handle<Object> global_object,
+ v8::Handle<v8::ObjectTemplate> global_template,
+ v8::ExtensionConfiguration* extensions);
+
+ // Traverses the pointers for memory management.
+ static void Iterate(ObjectVisitor* v);
+
+ // Accessors for the native scripts cache. Used in lazy loading.
+ static Handle<String> NativesSourceLookup(int index);
+ static bool NativesCacheLookup(Vector<const char> name,
+ Handle<JSFunction>* handle);
+ static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);
+
+ // Append code that needs fixup at the end of boot strapping.
+ static void AddFixup(Code* code, MacroAssembler* masm);
+
+ // Tells whether bootstrapping is active.
+ static bool IsActive();
+
+ // Encoding/decoding support for fixup flags.
+ // Bit 0 encodes whether the fixup is PC-relative; the remaining 31 bits
+ // encode the argument count.
+ class FixupFlagsIsPCRelative: public BitField<bool, 0, 1> {};
+ class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {};
+};
+
+}} // namespace v8::internal
+
+#endif // V8_BOOTSTRAPPER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 { namespace internal {
+
+
+#define __ masm->
+
+
+// Tail-jumps to the C++ builtin identified by |id|. |argc| is not used by
+// this ARM version.
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ int argc,
+ CFunctionId id) {
+ __ JumpToBuiltin(ExternalReference(id));
+}
+
+
+// Generates the JSConstructCall builtin for ARM: allocates the new receiver
+// via Runtime::kNewObject, re-pushes function, receiver and the incoming
+// arguments, calls the constructor's code, and then applies the ECMA-262
+// section 13.2.2 rule for choosing between the constructor's return value
+// and the allocated receiver. Also records the PC offset of the return site
+// in construct_call_pc_offset_ for later use.
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+ // r0: number of arguments
+
+ __ EnterJSFrame(0, 0);
+
+ // Allocate the new receiver object.
+ __ push(r0);
+ __ ldr(r0, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ push(r0); // empty TOS cache
+
+ // Push the function and the allocated receiver from the stack.
+ __ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(r1); // function
+ __ push(r0); // receiver
+
+ // Restore the arguments length from the stack.
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
+
+ // Setup pointer to last argument - receiver is not counted.
+ __ sub(r2, pp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r2, r2, Operand(kPointerSize));
+
+ // Copy arguments and receiver to the expression stack.
+ // r1 counts down from argc; the loop pushes arguments in reverse index
+ // order so that they end up in calling-convention order on the stack.
+ Label loop, entry;
+ __ mov(r1, Operand(r0));
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(r3, MemOperand(r2, r1, LSL, kPointerSizeLog2));
+ __ push(r3);
+ __ bind(&entry);
+ __ sub(r1, r1, Operand(1), SetCC);
+ __ b(ge, &loop);
+
+ // Get the function to call from the stack and get the code from it.
+ __ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Call the function.
+ Label return_site;
+ // NOTE(review): 'position' is not declared inside this function; it is
+ // presumably provided by a file-level definition outside this view --
+ // verify against the full source.
+ __ RecordPosition(position);
+ __ Call(r1);
+ __ bind(&return_site);
+
+ // Restore context from the frame and discard the function.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ add(sp, sp, Operand(kPointerSize));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // JS_OBJECT type, it is not an object in the ECMA sense.
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_OBJECT_TYPE));
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ __ ExitJSFrame(RETURN, 0);
+
+ // Compute the offset from the beginning of the JSConstructCall
+ // builtin code object to the return address after the call.
+ ASSERT(return_site.is_bound());
+ construct_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
+}
+
+
+// Shared body of the JS entry trampolines (ARM). Builds a trampoline frame,
+// sets up the context from the function, copies argv onto the stack
+// (dereferencing each handle), clears the JS callee-saved registers for the
+// GC, and invokes either the JSConstructCall builtin (is_construct) or the
+// code entry passed in r0.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from Generate_JS_Entry
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ // r5-r7, cp may be clobbered
+
+ // Enter the JS frame
+ // compute parameter pointer before making changes
+ __ mov(ip, Operand(sp)); // ip == caller_sp == new pp
+
+ __ mov(r5, Operand(0)); // spare slot to store caller code object during GC
+ __ mov(r6, Operand(0)); // no context
+ __ mov(r7, Operand(0)); // no incoming parameters
+ __ mov(r8, Operand(0)); // caller_pp == NULL for trampoline frames
+ ASSERT(cp.bit() == r8.bit()); // adjust the code otherwise
+
+ // push in reverse order:
+ // code (r5==0), context (r6==0), args_len (r7==0), caller_pp (r8==0),
+ // caller_fp, sp_on_exit (caller_sp), caller_pc
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit() |
+ fp.bit() | ip.bit() | lr.bit());
+ // Setup new frame pointer.
+ __ add(fp, sp, Operand(-StandardFrameConstants::kCodeOffset));
+ __ mov(pp, Operand(ip)); // setup new parameter pointer
+
+ // Setup the context from the function argument.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ mov(r5, Operand(r1)); // change save order: function above receiver
+ __ stm(db_w, sp, r2.bit() | r5.bit());
+
+ // Copy arguments to the stack in a loop.
+ // r3: argc
+ // r4: argv, i.e. points to first arg
+ Label loop, entry;
+ __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+ // r2 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
+ __ ldr(r1, MemOperand(r1)); // dereference handle
+ __ push(r1); // push parameter
+ __ bind(&entry);
+ __ cmp(r4, Operand(r2));
+ __ b(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ mov(r4, Operand(Factory::undefined_value()));
+ __ mov(r5, Operand(r4));
+ __ mov(r6, Operand(r4));
+ __ mov(r7, Operand(r4));
+ if (kR9Available == 1)
+ __ mov(r9, Operand(r4));
+
+ // Invoke the code and pass argc as r0.
+ if (is_construct) {
+ __ mov(r0, Operand(r3));
+ __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ code_target);
+ } else {
+ __ mov(ip, Operand(r0));
+ __ mov(r0, Operand(r3));
+ __ Call(ip);
+ }
+
+ // Exit the JS frame and remove the parameters (except function), and return.
+ // Respect ABI stack constraint.
+ __ add(sp, fp, Operand(StandardFrameConstants::kCallerFPOffset));
+ __ ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+
+ // r0: result
+ // pp: not restored, should not be used anymore
+}
+
+
+// Entry trampoline for a regular (non-construct) call into JS code.
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+// Entry trampoline for a construct ('new') call into JS code.
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+// Function.prototype.apply is not implemented on ARM yet; emit a trap so any
+// accidental invocation fails loudly.
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ // TODO(1233523): Implement. Unused for now.
+ __ int3();
+}
+
+
+// Arguments adaptor is not implemented on ARM yet; emit a trap, but still
+// record the return-site offset that other code expects to be set.
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // TODO(1233523): Implement. Unused for now.
+ __ int3();
+
+ Label return_site;
+ __ bind(&return_site);
+
+ // Compute the offset from the beginning of the ArgumentsAdaptorTrampoline
+ // builtin code object to the return address after the call.
+ ASSERT(return_site.is_bound());
+ arguments_adaptor_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
+}
+
+
+// Common code for the *_DebugBreak builtins (ARM). Saves all JS caller-saved
+// registers to memory, builds a fake JS frame on which the registers in
+// |pointer_regs| (the ones holding object pointers) are pushed so the GC can
+// update them, calls the debug-break runtime entry via CEntryDebugBreakStub,
+// restores everything, and resumes at the address stored at
+// Debug_Address::AfterBreakTarget().
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList pointer_regs) {
+ // Save the content of all general purpose registers in memory. This copy in
+ // memory is later pushed onto the JS expression stack for the fake JS frame
+ // generated and also to the C frame generated on top of that. In the JS
+ // frame ONLY the registers containing pointers will be pushed on the
+ // expression stack. This causes the GC to update these pointers so that
+ // they will have the correct value when returning from the debugger.
+ __ SaveRegistersToMemory(kJSCallerSaved);
+
+ // This is a direct call from a debug breakpoint. To build a fake JS frame
+ // with no parameters push a function and a receiver, keep the current
+ // return address in lr, and set r0 to zero.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r3, MemOperand(ip));
+ __ mov(r0, Operand(0)); // Null receiver and zero arguments.
+ __ stm(db_w, sp, r0.bit() | r3.bit()); // push function and receiver
+
+ // r0: number of arguments.
+ // What follows is an inlined version of EnterJSFrame(0, 0).
+ // It needs to be kept in sync if any calling conventions are changed.
+
+ // Compute parameter pointer before making changes
+ // ip = sp + kPointerSize*(args_len+1); // +1 for receiver, args_len == 0
+ __ add(ip, sp, Operand(kPointerSize));
+
+ __ mov(r3, Operand(0)); // args_len to be saved
+ __ mov(r2, Operand(cp)); // context to be saved
+
+ // push in reverse order: context (r2), args_len (r3), caller_pp, caller_fp,
+ // sp_on_exit (ip == pp), return address, prolog_pc
+ __ stm(db_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit() |
+ ip.bit() | lr.bit() | pc.bit());
+ // Setup new frame pointer.
+ __ add(fp, sp, Operand(-StandardFrameConstants::kContextOffset));
+ __ mov(pp, Operand(ip)); // setup new parameter pointer
+ // r0 is already set to 0 as spare slot to store caller code object during GC
+
+ // Inlined EnterJSFrame ends here.
+
+ // Empty top-of-stack cache (code pointer).
+ __ push(r0);
+
+ // Store the registers containing object pointers on the expression stack to
+ // make sure that these are correctly updated during GC.
+ // Use sp as base to push.
+ __ CopyRegistersFromMemoryToStack(sp, pointer_regs);
+
+ // Empty top-of-stack cache (fake receiver).
+ __ push(r0);
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ // r0 is already 0, no arguments
+ __ mov(r1, Operand(ExternalReference::debug_break()));
+
+ CEntryDebugBreakStub ceb;
+ __ CallStub(&ceb);
+
+ // Restore the register values containing object pointers from the expression
+ // stack in the reverse order as they were pushed.
+ // Use sp as base to pop.
+ __ CopyRegistersFromStackToMemory(sp, r3, pointer_regs);
+
+ // What follows is an inlined version of ExitJSFrame(0).
+ // It needs to be kept in sync if any calling conventions are changed.
+ // NOTE: loading the return address to lr and discarding the (fake) function
+ // is an addition to this inlined copy.
+
+ __ mov(sp, Operand(fp)); // respect ABI stack constraint
+ __ ldm(ia, sp, pp.bit() | fp.bit() | sp.bit() | lr.bit());
+ __ add(sp, sp, Operand(kPointerSize)); // discard fake function
+
+ // Inlined ExitJSFrame ends here.
+
+ // Finally restore all registers.
+ __ RestoreRegistersFromMemory(kJSCallerSaved);
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ __ mov(ip, Operand(ExternalReference(Debug_Address::AfterBreakTarget())));
+ __ ldr(ip, MemOperand(ip));
+ __ Jump(ip);
+}
+
+
+// Debug-break stub for load ICs: protects the object pointers in r0 and r2
+// across the break by routing through the common helper.
+void Builtins::Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC load (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers r0 and r2 contain objects that needs to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+}
+
+
+// Debug-break stub for store ICs: protects the object pointers in r0 and r2
+// across the break by routing through the common helper.
+void Builtins::Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-arm.cc).
+ // NOTE(review): this state comment is identical to the LoadIC one above
+ // (no value register is listed) -- confirm against ic-arm.cc.
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers r0 and r2 contain objects that needs to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+}
+
+
+// Intentionally empty: keyed load ICs do not exist on ARM yet, so there is
+// no break-point site to support.
+void Builtins::Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
+ // Keyed load IC not implemented on ARM.
+}
+
+
+// Intentionally empty: keyed store ICs do not exist on ARM yet, so there is
+// no break-point site to support.
+void Builtins::Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
+ // Keyed store IC not implemented on ARM.
+}
+
+
+// Debug-break stub for call ICs: only r1 (the receiver) is an object pointer
+// that must survive GC; r0 is a raw count and is saved via the C frame.
+void Builtins::Generate_CallIC_DebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC call (from ic-arm.cc)
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- r1: receiver
+ // -- lr: return address
+ // -----------------------------------
+ // Register r1 contains an object that needs to be pushed on the expression
+ // stack of the fake JS frame. r0 is the actual number of arguments not
+ // encoded as a smi, therefore it cannot be on the expression stack of the
+ // fake JS frame as it can easily be an invalid pointer (e.g. 1). r0 will be
+ // pushed on the stack of the C frame and restored from there.
+ Generate_DebugBreakCallHelper(masm, r1.bit());
+}
+
+
+// Debug-break stub for construct-call sites; protects r0 (assumed TOS object).
+void Builtins::Generate_ConstructCall_DebugBreak(MacroAssembler* masm) {
+ // In places other than IC call sites it is expected that r0 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, r0.bit());
+}
+
+
+// Debug-break stub for function-return sites; protects r0 (assumed TOS
+// object).
+void Builtins::Generate_Return_DebugBreak(MacroAssembler* masm) {
+ // In places other than IC call sites it is expected that r0 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, r0.bit());
+}
+
+
+// Intentionally empty on ARM (see comment below).
+void Builtins::Generate_Return_DebugBreakEntry(MacroAssembler* masm) {
+ // Generate nothing as this handling of debug break return is not done this
+ // way on ARM - yet.
+}
+
+// Intentionally empty on ARM (see comment below).
+void Builtins::Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
+ // Generate nothing as CodeStub CallFunction is not used on ARM.
+}
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 { namespace internal {
+
+
+#define __ masm->
+
+
+// Tail-jumps to the C++ builtin identified by |id|, passing the argument
+// count in eax and recording the called function (edi) in the
+// builtin_passed_function static variable for the runtime to pick up.
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ int argc,
+ CFunctionId id) {
+ __ mov(eax, argc);
+ __ mov(Operand::StaticVariable(ExternalReference::builtin_passed_function()),
+ edi);
+ __ JumpToBuiltin(ExternalReference(id));
+}
+
+
+// Controls whether Generate_JSConstructCall tries to allocate new objects in
+// generated code before falling back to the Runtime::kNewObject call.
+DEFINE_bool(inline_new, true, "use fast inline allocation");
+
+
+// Generates the JSConstructCall builtin for IA-32: optionally allocates the
+// new receiver inline in new space (guarded by FLAG_inline_new and several
+// validity checks on the constructor and its initial map), otherwise calls
+// Runtime::kNewObject; then pushes the arguments, invokes the constructor,
+// and applies the ECMA-262 section 13.2.2 rule for choosing between the
+// constructor's return value and the allocated receiver. Records the PC
+// offset of the return site in construct_call_pc_offset_.
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Store a smi-tagged arguments count on the stack.
+ __ shl(eax, kSmiTagSize);
+ __ push(eax);
+
+ // Push the function to invoke on the stack.
+ __ push(edi);
+
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ // Bail out to the runtime while the debugger is stepping, so step-in
+ // into the constructor works.
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address();
+ __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+ __ j(not_equal, &rt_call);
+ // Check that function is not a Smi.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &rt_call);
+ // Check that function is a JSFunction
+ __ mov(eax, FieldOperand(edi, JSFunction::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ __ cmp(eax, JS_FUNCTION_TYPE);
+ __ j(not_equal, &rt_call);
+
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // edi: constructor
+ __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ mov(ebx, FieldOperand(eax, JSFunction::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ebx, MAP_TYPE);
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc). In which case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(ebx, FieldOperand(eax, Map::kInstanceTypeOffset));
+ __ cmp(ebx, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ // NOTE(review): the byte-wide instance size is added directly to the
+ // allocation top; confirm Map::kInstanceSizeOffset stores the size in
+ // bytes rather than words.
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ // Make sure that the maximum heap object size will never cause us
+ // problem here, because it is always greater than the maximum
+ // instance size that can be represented in a byte.
+ ASSERT(Heap::MaxHeapObjectSize() >= (1 << kBitsPerByte));
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ __ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
+ __ add(edi, Operand(ebx)); // Calculate new top
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ __ cmp(edi, Operand::StaticVariable(new_space_allocation_limit));
+ __ j(greater_equal, &rt_call);
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ __ mov(Operand(ecx), Factory::empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ // Set extra fields in the newly allocated object.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ { Label loop, entry;
+ __ mov(Operand(edx), Factory::undefined_value());
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(ecx, 0), edx);
+ __ add(Operand(ecx), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(ecx, Operand(edi));
+ __ j(less, &loop);
+ }
+
+ // Mostly done with the JSObject. Add the heap tag and store the new top, so
+ // that we can continue and jump into the continuation code at any time from
+ // now on. Any failures need to undo the setting of the new top, so that the
+ // heap is in a consistent state and verifiable.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ or_(Operand(ebx), Immediate(kHeapObjectTag));
+ __ mov(Operand::StaticVariable(new_space_allocation_top), edi);
+
+ // Check if a properties array should be setup and allocate one if needed.
+ // Otherwise initialize the properties to the empty_fixed_array as well.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ test(edx, Operand(edx));
+ // Done if no unused properties are to be allocated.
+ __ j(zero, &allocated);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object (will be start of FixedArray)
+ // edx: number of elements in properties array
+ ASSERT(Heap::MaxHeapObjectSize() >
+ (FixedArray::kHeaderSize + 255*kPointerSize));
+ __ lea(ecx, Operand(edi, edx, times_4, FixedArray::kHeaderSize));
+ __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
+ __ j(greater_equal, &undo_allocation);
+ __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+
+ // Initialize the FixedArray.
+ // ebx: JSObject
+ // edi: FixedArray
+ // edx: number of elements
+ // ecx: start of next object
+ __ mov(eax, Factory::fixed_array_map());
+ __ mov(Operand(edi, JSObject::kMapOffset), eax); // setup the map
+ __ mov(Operand(edi, Array::kLengthOffset), edx); // and length
+
+ // Initialize the fields to undefined.
+ // ebx: JSObject
+ // edi: FixedArray
+ // ecx: start of next object
+ { Label loop, entry;
+ __ mov(Operand(edx), Factory::undefined_value());
+ __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(eax, 0), edx);
+ __ add(Operand(eax), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(eax, Operand(ecx));
+ __ j(less, &loop);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // ebx: JSObject
+ // edi: FixedArray
+ __ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag
+ __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // ebx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ xor_(Operand(ebx), Immediate(kHeapObjectTag)); // clear the heap tag
+ __ mov(Operand::StaticVariable(new_space_allocation_top), ebx);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ // edi: function (constructor)
+ __ bind(&rt_call);
+ // Must restore edi (constructor) before calling runtime.
+ __ mov(edi, Operand(esp, 0));
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(ebx, Operand(eax)); // store result in ebx
+
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(edi);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
+ __ shr(eax, kSmiTagSize);
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
+
+ // Setup pointer to last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, Operand(eax));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ Label return_site;
+ // NOTE(review): 'position' is not declared inside this function; it is
+ // presumably provided by a file-level definition outside this view --
+ // verify against the full source.
+ __ RecordPosition(position);
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ __ bind(&return_site);
+
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &use_receiver, not_taken);
+
+ // If the type of the result (stored in its map) is less than
+ // JS_OBJECT type, it is not an object in the ECMA sense.
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, JS_OBJECT_TYPE);
+ __ j(greater_equal, &exit, not_taken);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
+
+ // Restore the arguments count and exit the internal frame.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, kPointerSize)); // get arguments count
+ __ ExitFrame(StackFrame::INTERNAL);
+
+ // Remove caller arguments from the stack and return.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(ecx);
+ __ ret(0);
+
+ // Compute the offset from the beginning of the JSConstructCall
+ // builtin code object to the return address after the call.
+ ASSERT(return_site.is_bound());
+ construct_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
+}
+
+
+// Shared body of the JS entry trampolines (IA-32). Reads the C entry
+// arguments (function, receiver, argc, argv) through the caller's frame
+// pointer, copies argv onto the stack (dereferencing each handle), and
+// invokes either the JSConstructCall builtin (is_construct) or the function
+// directly.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Clear the context before we push it when entering the JS frame.
+ __ xor_(esi, Operand(esi)); // clear esi
+
+ // Enter an internal frame.
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Load the previous frame pointer (ebx) to access C arguments
+ __ mov(ebx, Operand(ebp, 0));
+
+ // Get the function from the frame and setup the context.
+ __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+ __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ push(ecx);
+ __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+
+ // Load the number of arguments and setup pointer to the arguments.
+ __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+ __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+
+ // Copy arguments to the stack in a loop.
+ Label loop, entry;
+ __ xor_(ecx, Operand(ecx)); // clear ecx
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
+ __ push(Operand(edx, 0)); // dereference handle
+ __ inc(Operand(ecx));
+ __ bind(&entry);
+ __ cmp(ecx, Operand(eax));
+ __ j(not_equal, &loop);
+
+ // Get the function from the stack and call it.
+ __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // +1 ~ receiver
+
+ // Invoke the code.
+ if (is_construct) {
+ __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ code_target);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ }
+
+ // Exit the JS frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
+ __ ExitFrame(StackFrame::INTERNAL);
+ __ ret(1 * kPointerSize); // remove receiver
+}
+
+
+// Entry trampoline for a regular (non-construct) call into JS code.
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+// Entry trampoline for a construct ('new') call into JS code.
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+// Implements Function.prototype.apply. The function being applied is the
+// receiver of the apply call itself (read from ebp + 4 * kPointerSize
+// below); the new receiver and the arguments object are the two apply
+// parameters (ebp + 3 and + 2 pointer sizes respectively).
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(Operand(ebp, 2 * kPointerSize)); // push arguments
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Eagerly check for stack-overflow before pushing all the arguments
+ // to the stack.
+ Label okay;
+ // NOTE(review): eax appears to hold the smi-tagged argument count here
+ // (it is scaled by kPointerSizeLog2 - kSmiTagSize below) — confirm
+ // against the APPLY_PREPARE builtin.
+ __ lea(ecx, Operand(esp, -3 * kPointerSize)); // receiver, limit, index
+ __ mov(edx, Operand(eax));
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ __ sub(ecx, Operand(edx));
+ ExternalReference stack_guard_limit_address =
+ ExternalReference::address_of_stack_guard_limit();
+ __ cmp(ecx, Operand::StaticVariable(stack_guard_limit_address));
+ __ j(greater, &okay, taken);
+
+ // Too bad: Out of stack space.
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(eax); // limit
+ __ push(Immediate(0)); // index
+
+ // Change context eagerly to get the right global object if
+ // necessary.
+ __ mov(edi, Operand(ebp, 4 * kPointerSize));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ mov(ebx, Operand(ebp, 3 * kPointerSize));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &call_to_object);
+ // null and undefined receivers are replaced by the global object.
+ __ cmp(ebx, Factory::null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, Factory::undefined_value());
+ __ j(equal, &use_global_receiver);
+
+ // If given receiver is already a JavaScript object then there's no
+ // reason for converting it.
+ __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, &call_to_object);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ j(less_equal, &push_receiver);
+
+ // Convert the receiver to an object.
+ __ bind(&call_to_object);
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, Operand(eax));
+ __ jmp(&push_receiver);
+
+ // Use the current global object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(ebx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ mov(eax, Operand(ebp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(ecx, Operand(ebp, 2 * kPointerSize)); // load arguments
+ __ push(ecx);
+ __ push(eax);
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, code_target);
+
+ // Remove IC arguments from the stack and push the nth argument.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(eax);
+
+ // Update the index on the stack and in register eax.
+ __ mov(eax, Operand(ebp, kIndexOffset));
+ __ add(Operand(eax), Immediate(1 << kSmiTagSize));
+ __ mov(Operand(ebp, kIndexOffset), eax);
+
+ __ bind(&entry);
+ __ cmp(eax, Operand(ebp, kLimitOffset));
+ __ j(not_equal, &loop);
+
+ // Invoke the function.
+ ParameterCount actual(eax);
+ __ shr(eax, kSmiTagSize);
+ __ mov(edi, Operand(ebp, 4 * kPointerSize));
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+
+ __ ExitFrame(StackFrame::INTERNAL);
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+}
+
+
+// Builds an arguments adaptor frame: saved ebp, the adaptor sentinel in
+// the context slot, the function (edi), and the smi-tagged actual
+// argument count.
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ push(ebp);
+ __ mov(ebp, Operand(esp));
+
+ // Store the arguments adaptor context sentinel.
+ __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+
+ // Push the function on the stack.
+ __ push(edi);
+
+ // Preserve the number of arguments on the stack. Must preserve both
+ // eax and ebx because these registers are used when copying the
+ // arguments and the receiver.
+ ASSERT(kSmiTagSize == 1);
+ // lea computes eax * 2 + kSmiTag, i.e. the smi encoding of the count,
+ // without clobbering eax itself.
+ __ lea(ecx, Operand(eax, eax, times_1, kSmiTag));
+ __ push(Operand(ecx));
+}
+
+
+// Tears down an arguments adaptor frame and removes the caller's
+// arguments (plus the receiver) from the stack, keeping the return
+// address on top.
+static void ExitArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // Retrieve the number of arguments from the stack.
+ __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Leave the frame.
+ __ leave();
+
+ // Remove caller arguments from the stack.
+ // ebx holds the smi-tagged count; because kSmiTagSize == 1, scaling it
+ // by times_2 yields count * kPointerSize on ia32.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(ecx);
+}
+
+
+// Adapts the actual number of arguments (eax) to the expected number
+// (ebx) before transferring control to the code entry in edx, and also
+// hosts the Function.prototype.call implementation (reached when the
+// expected argument count is the -1 sentinel).
+//
+// Fix: four label references below had been corrupted into the literal
+// character '©' (an HTML-entity mangling of '&copy;', the address of the
+// local 'Label copy'); restored to '&copy;' so the code compiles.
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : actual number of arguments
+ // -- ebx : expected number of arguments
+ // -- edx : code entry to call
+ // -----------------------------------
+
+ Label entry, invoke, function_prototype_call;
+ __ bind(&entry);
+ __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+ Label enough, too_few;
+ __ cmp(eax, Operand(ebx));
+ __ j(less, &too_few);
+ __ cmp(ebx, -1);
+ __ j(equal, &function_prototype_call);
+
+ { // Enough parameters: Actual >= expected.
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all expected arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(eax, Operand(ebp, eax, times_4, offset));
+ __ mov(ecx, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ inc(ecx);
+ __ push(Operand(eax, 0));
+ __ sub(Operand(eax), Immediate(kPointerSize));
+ __ cmp(ecx, Operand(ebx));
+ __ j(less, &copy);
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(edi, Operand(ebp, eax, times_4, offset));
+ __ mov(ecx, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ inc(ecx);
+ __ push(Operand(edi, 0));
+ __ sub(Operand(edi), Immediate(kPointerSize));
+ __ cmp(ecx, Operand(eax));
+ __ j(less, &copy);
+
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ bind(&fill);
+ __ inc(ecx);
+ __ push(Immediate(Factory::undefined_value()));
+ __ cmp(ecx, Operand(ebx));
+ __ j(less, &fill);
+
+ // Restore function pointer.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
+ // Mark the adaptor frame as special by overwriting the context slot
+ // in the stack with a sentinel.
+ Label return_site;
+ __ bind(&invoke);
+ __ call(Operand(edx));
+ __ bind(&return_site);
+
+ ExitArgumentsAdaptorFrame(masm);
+ __ ret(0);
+
+ // Compute the offset from the beginning of the ArgumentsAdaptorTrampoline
+ // builtin code object to the return address after the call. Used by
+ // Builtins::IsArgumentsAdaptorCall.
+ ASSERT(return_site.is_bound());
+ arguments_adaptor_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
+
+
+ // -------------------------------------------
+ // Function.prototype.call implementation.
+ // -------------------------------------------
+ __ bind(&function_prototype_call);
+
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &done, taken);
+ // Insert an undefined receiver below the return address.
+ __ pop(ebx);
+ __ push(Immediate(Factory::undefined_value()));
+ __ push(ebx);
+ __ inc(eax);
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call from the stack.
+ { Label done, non_function, function;
+ // +1 ~ return address.
+ __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &non_function, not_taken);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset)); // get the map
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, JS_FUNCTION_TYPE);
+ __ j(equal, &function, taken);
+
+ // Non-function called: Clear the function to force exception.
+ __ bind(&non_function);
+ __ xor_(edi, Operand(edi));
+ __ jmp(&done);
+
+ // Function called: Change context eagerly to get the right global object.
+ __ bind(&function);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ __ bind(&done);
+ }
+
+ // 3. Make sure first argument is an object; convert if necessary.
+ { Label call_to_object, use_global_receiver, patch_receiver, done;
+ __ mov(ebx, Operand(esp, eax, times_4, 0));
+
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &call_to_object);
+
+ __ cmp(ebx, Factory::null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, Factory::undefined_value());
+ __ j(equal, &use_global_receiver);
+
+ __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, &call_to_object);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ j(less_equal, &done);
+
+ __ bind(&call_to_object);
+ __ EnterFrame(StackFrame::INTERNAL); // preserves eax, ebx, edi
+
+ // Store the arguments count on the stack (smi tagged).
+ ASSERT(kSmiTag == 0);
+ __ shl(eax, kSmiTagSize);
+ __ push(eax);
+
+ __ push(edi); // save edi across the call
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(Operand(ebx), eax);
+ __ pop(edi); // restore edi after the call
+
+ // Get the arguments count and untag it.
+ __ pop(eax);
+ __ shr(eax, kSmiTagSize);
+
+ __ ExitFrame(StackFrame::INTERNAL);
+ __ jmp(&patch_receiver);
+
+ // Use the global object from the called function as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ mov(ebx, FieldOperand(esi, kGlobalIndex));
+
+ __ bind(&patch_receiver);
+ __ mov(Operand(esp, eax, times_4, 0), ebx);
+
+ __ bind(&done);
+ }
+
+ // 4. Shift stuff one slot down the stack.
+ { Label loop;
+ __ lea(ecx, Operand(eax, +1)); // +1 ~ copy receiver too
+ __ bind(&loop);
+ __ mov(ebx, Operand(esp, ecx, times_4, 0));
+ __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+ }
+
+ // 5. Remove TOS (copy of last arguments), but keep return address.
+ __ pop(ebx);
+ __ pop(ecx);
+ __ push(ebx);
+ __ dec(eax);
+
+ // 6. Check that function really was a function and get the code to
+ // call from the function and check that the number of expected
+ // arguments matches what we're providing.
+ { Label invoke;
+ __ test(edi, Operand(edi));
+ __ j(not_zero, &invoke, taken);
+ __ xor_(ebx, Operand(ebx));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ __ jmp(&enough);
+
+ __ bind(&invoke);
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ cmp(eax, Operand(ebx));
+ __ j(not_equal, &entry);
+ }
+
+ // 7. Jump (tail-call) to the code in register edx without checking arguments.
+ ParameterCount expected(0);
+ __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
+}
+
+
+// Common code for the *_DebugBreak builtins: saves all JS caller-saved
+// registers to memory, pushes those containing object pointers
+// ('pointer_regs') onto an internal frame so the GC can update them,
+// calls into the debugger via CEntryDebugBreakStub, restores the
+// registers, and resumes at the code address the debugger replaced.
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList pointer_regs,
+ bool convert_call_to_jmp) {
+ // Save the content of all general purpose registers in memory. This copy in
+ // memory is later pushed onto the JS expression stack for the fake JS frame
+ // generated and also to the C frame generated on top of that. In the JS
+ // frame ONLY the registers containing pointers will be pushed on the
+ // expression stack. This causes the GC to update these pointers so that
+ // they will have the correct value when returning from the debugger.
+ __ SaveRegistersToMemory(kJSCallerSaved);
+
+ // Enter an internal frame.
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Store the registers containing object pointers on the expression stack to
+ // make sure that these are correctly updated during GC.
+ __ PushRegistersFromMemory(pointer_regs);
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Set(eax, Immediate(0)); // no arguments
+ __ push(eax); // fake receiver - use NULL
+ __ mov(Operand(ebx), Immediate(ExternalReference::debug_break()));
+
+ CEntryDebugBreakStub ceb;
+ __ CallStub(&ceb);
+
+ // Restore the register values containing object pointers from the expression
+ // stack in the reverse order as they where pushed.
+ __ PopRegistersToMemory(pointer_regs);
+
+ // Get rid of the internal frame.
+ __ ExitFrame(StackFrame::INTERNAL);
+
+ // If this call did not replace a call but patched other code then there will
+ // be an unwanted return address left on the stack. Here we get rid of that.
+ if (convert_call_to_jmp) {
+ __ pop(eax);
+ }
+
+ // Finally restore all registers.
+ __ RestoreRegistersFromMemory(kJSCallerSaved);
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference(Debug_Address::AfterBreakTarget());
+ __ jmp(Operand::StaticVariable(after_break_target));
+}
+
+
+// Debug-break builtins: each forwards to Generate_DebugBreakCallHelper
+// with the set of registers that hold object pointers at the patched
+// site, so the GC can relocate them while the debugger is active.
+void Builtins::Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
+ // Register state for IC load call (from ic-ia32.cc).
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, ecx.bit(), false);
+}
+
+
+void Builtins::Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
+ // Register state for IC store call (from ic-ia32.cc).
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+}
+
+
+void Builtins::Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
+ // Register state for keyed IC load call (from ic-ia32.cc).
+ // ----------- S t a t e -------------
+ // No registers used on entry.
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Builtins::Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
+ // Register state for keyed IC store call (from ic-ia32.cc).
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -----------------------------------
+ // Register eax contains an object that needs to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, eax.bit(), false);
+}
+
+
+void Builtins::Generate_CallIC_DebugBreak(MacroAssembler* masm) {
+ // Register state for IC call (from ic-ia32.cc)
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Builtins::Generate_ConstructCall_DebugBreak(MacroAssembler* masm) {
+ // Register state just before return from JS function (from codegen-ia32.cc).
+ // eax is the actual number of arguments not encoded as a smi see comment
+ // above IC call.
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Builtins::Generate_Return_DebugBreak(MacroAssembler* masm) {
+ // Register state just before return from JS function (from codegen-ia32.cc).
+ // ----------- S t a t e -------------
+ // -- eax: return value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), true);
+}
+
+
+void Builtins::Generate_Return_DebugBreakEntry(MacroAssembler* masm) {
+ // OK to clobber ebx as we are returning from a JS function in the code
+ // generated by Ia32CodeGenerator::ExitJSFrame.
+ ExternalReference debug_break_return =
+ ExternalReference(Debug_Address::DebugBreakReturn());
+ __ mov(ebx, Operand::StaticVariable(debug_break_return));
+ // Skip the code object header to reach the first instruction.
+ __ add(Operand(ebx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(Operand(ebx));
+}
+
+
+void Builtins::Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
+ // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
+ // ----------- S t a t e -------------
+ // No registers used on entry.
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "bootstrapper.h"
+#include "builtins.h"
+#include "ic-inl.h"
+
+namespace v8 { namespace internal {
+
+// ----------------------------------------------------------------------------
+// Support macros for defining builtins in C.
+// ----------------------------------------------------------------------------
+//
+// A builtin function is defined by writing:
+//
+// BUILTIN_<n>(name, ...)
+// {
+// ...
+// }
+// BUILTIN_END
+//
+// where <n> is the number of arguments (not counting the receiver). The
+// names of the arguments must be listed after the name in the declaration.
+// In the body of the builtin function, the variables 'env' and 'receiver'
+// are visible. The arguments can be accessed through:
+//
+// BUILTIN_ARG(0): Receiver (also available as 'receiver')
+// BUILTIN_ARG(1): First argument
+// ...
+// BUILTIN_ARG(n): Last argument
+//
+// and they evaluate to undefined values if too few arguments were
+// passed to the builtin function invocation.
+// ----------------------------------------------------------------------------
+
+
+// TODO(1238487): This is not okay. We need to get rid of this macro
+// and start calling the builtins in a more direct way. Looking at the
+// stack frames for all builtin invocations comes with a pretty
+// significant performance penalty.
+//
+// BUILTIN_0 opens the definition of a C builtin taking no named
+// arguments. It walks the stack to determine whether the builtin was
+// invoked as a constructor ('is_construct') and, when the call went
+// through the arguments adaptor, recovers the actual argument count
+// (__argc__) and argument vector (__argv__). The matching BUILTIN_END
+// closes the function. No comments inside the macro body: '//' before
+// a '\' line continuation would swallow the continuation.
+#define BUILTIN_0(name) \
+ static Object* Builtin_##name(int __argc__, \
+ Object** __argv__) { \
+ Handle<Object> receiver(&__argv__[0]); \
+ bool is_construct = false; \
+ USE(__argc__); \
+ USE(__argv__); \
+ { StackFrameIterator it; \
+ ASSERT(it.frame()->is_exit()); \
+ it.Advance(); \
+ StackFrame::Type type = it.frame()->type(); \
+ if (type == StackFrame::INTERNAL) { \
+ InternalFrame* frame = InternalFrame::cast(it.frame()); \
+ is_construct = frame->is_construct_trampoline(); \
+ } else if (type == StackFrame::ARGUMENTS_ADAPTOR) { \
+ ArgumentsAdaptorFrame* frame = \
+ ArgumentsAdaptorFrame::cast(it.frame()); \
+ __argc__ = frame->GetProvidedParametersCount(); \
+ __argv__ = reinterpret_cast<Object**>(frame->pp()) - 1; \
+ it.Advance(); \
+ is_construct = \
+ it.frame()->is_internal() && \
+ InternalFrame::cast(it.frame())->is_construct_trampoline(); \
+ } \
+ }
+
+
+// BUILTIN_1/2/3 extend BUILTIN_0 with named argument variables bound
+// via BUILTIN_ARG; a variable evaluates to undefined when too few
+// arguments were passed.
+#define BUILTIN_1(name, a0) \
+ BUILTIN_0(name) \
+ Object* a0 = BUILTIN_ARG(1);
+
+
+#define BUILTIN_2(name, a0, a1) \
+ BUILTIN_1(name, a0) \
+ Object* a1 = BUILTIN_ARG(2);
+
+
+#define BUILTIN_3(name, a0, a1, a2) \
+ BUILTIN_2(name, a0, a1) \
+ Object* a2 = BUILTIN_ARG(3);
+
+
+// BUILTIN_VARARG binds the inclusive argument index range
+// [aidx0, aidxN] instead of naming the arguments individually.
+#define BUILTIN_VARARG(name, aidx0, aidxN) \
+ BUILTIN_0(name); \
+ int aidx0 = 1; \
+ int aidxN = __argc__; \
+
+
+// Use an inline function to avoid evaluating the index (n) more than
+// once in the BUILTIN_ARG macro. Arguments live below the receiver on
+// the stack, hence the negative indexing; out-of-range indices yield
+// undefined.
+static inline Object* __builtin_arg__(int n, int argc, Object** argv) {
+ ASSERT(n >= 0);
+ if (argc >= n) {
+ return argv[-n];
+ }
+ return Heap::undefined_value();
+}
+
+
+// NOTE: Argument 0 is the receiver. The first 'real' argument is
+// argument 1 - BUILTIN_ARG(1).
+#define BUILTIN_ARG(n) (__builtin_arg__(n, __argc__, __argv__))
+
+
+// Closes a BUILTIN_* definition; reaching the end of the body without
+// an explicit return yields undefined.
+#define BUILTIN_END \
+ return Heap::undefined_value(); \
+}
+
+
+// ----------------------------------------------------------------------------
+
+
+// Offsets (into the respective builtin code objects) of the return
+// address of the construct / arguments-adaptor call; written by the
+// code generators (see Generate_ArgumentsAdaptorTrampoline) and read
+// by the IsConstructCall / IsArgumentsAdaptorCall predicates below.
+int Builtins::construct_call_pc_offset_ = 0;
+int Builtins::arguments_adaptor_call_pc_offset_ = 0;
+
+
+// Check if the builtin was called in a 'new' call: true when 'pc' is
+// the recorded return site of the JSConstructCall builtin.
+bool Builtins::IsConstructCall(Address pc) {
+ ASSERT(construct_call_pc_offset_ > 0);
+ const int delta = pc - builtin(JSConstructCall)->address();
+ return delta == construct_call_pc_offset_;
+}
+
+
+// Check if the builtin was reached through the arguments adaptor: true
+// when 'pc' is the recorded return site of the adaptor trampoline.
+bool Builtins::IsArgumentsAdaptorCall(Address pc) {
+ ASSERT(arguments_adaptor_call_pc_offset_ > 0);
+ const int delta = pc - builtin(ArgumentsAdaptorTrampoline)->address();
+ return delta == arguments_adaptor_call_pc_offset_;
+}
+
+
+// Placeholder builtin that must never actually be invoked.
+BUILTIN_0(Illegal) {
+ UNREACHABLE();
+}
+BUILTIN_END
+
+
+// Builtin that does nothing; returns undefined via BUILTIN_END.
+BUILTIN_0(EmptyFunction) {
+}
+BUILTIN_END
+
+
+// Implements the Array constructor, both for 'new Array(...)' and for
+// plain 'Array(...)' calls (the latter allocates the JSArray here).
+BUILTIN_0(ArrayCode) {
+ JSArray* array;
+ if (is_construct) {
+ array = JSArray::cast(*receiver);
+ } else {
+ // Allocate the JS Array
+ JSFunction* constructor =
+ Top::context()->global_context()->array_function();
+ Object* obj = Heap::AllocateJSObject(constructor);
+ if (obj->IsFailure()) return obj;
+ array = JSArray::cast(obj);
+ }
+
+ // 'array' now contains the JSArray we should initialize.
+
+ // Optimize the case where there is one argument and the argument is a
+ // small smi.
+ if (__argc__ == 1) {
+ Object* obj = BUILTIN_ARG(1);
+ if (obj->IsSmi()) {
+ int len = Smi::cast(obj)->value();
+ if (len >= 0 && len < JSObject::kMaxFastElementsLength) {
+ // NOTE(review): this inner 'obj' shadows the one declared above.
+ Object* obj = Heap::AllocateFixedArrayWithHoles(len);
+ if (obj->IsFailure()) return obj;
+ array->SetContent(FixedArray::cast(obj));
+ return array;
+ }
+ }
+ // Take the argument as the length.
+ obj = array->Initialize(0);
+ if (obj->IsFailure()) return obj;
+ // NOTE(review): this condition is always true inside the enclosing
+ // '__argc__ == 1' branch.
+ if (__argc__ == 1) return array->SetElementsLength(BUILTIN_ARG(1));
+ }
+
+ // Optimize the case where there are no parameters passed.
+ if (__argc__ == 0) return array->Initialize(4);
+
+ // Take the arguments as elements.
+ int len = Smi::FromInt(__argc__)->value();
+ Object* obj = Heap::AllocateFixedArrayWithHoles(len);
+ if (obj->IsFailure()) return obj;
+ FixedArray* elms = FixedArray::cast(obj);
+ FixedArray::WriteBarrierMode mode = elms->GetWriteBarrierMode();
+ // Fill in the content
+ for (int index = 0; index < __argc__; index++) {
+ elms->set(index, BUILTIN_ARG(index+1), mode);
+ }
+
+ // Set length and elements on the array.
+ array->set_elements(FixedArray::cast(obj));
+ array->set_length(Smi::FromInt(__argc__));
+
+ return array;
+}
+BUILTIN_END
+
+
+// Implements Array.prototype.push for arrays with fast (FixedArray)
+// elements; appends all passed arguments and returns the new length.
+BUILTIN_0(ArrayPush) {
+ JSArray* array = JSArray::cast(*receiver);
+ ASSERT(array->HasFastElements());
+
+ // Make sure we have space for the elements.
+ int len = Smi::cast(array->length())->value();
+
+ // Set new length.
+ int new_length = len + __argc__;
+ FixedArray* elms = FixedArray::cast(array->elements());
+
+ if (new_length <= elms->length()) {
+ // Backing storage has extra space for the provided values.
+ for (int index = 0; index < __argc__; index++) {
+ elms->set(index + len, BUILTIN_ARG(index+1));
+ }
+ } else {
+ // New backing storage is needed; grow by ~1.5x plus slack to
+ // amortize repeated pushes.
+ int capacity = new_length + (new_length >> 1) + 16;
+ Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ if (obj->IsFailure()) return obj;
+ FixedArray* new_elms = FixedArray::cast(obj);
+ FixedArray::WriteBarrierMode mode = new_elms->GetWriteBarrierMode();
+ // Fill out the new array with old elements.
+ for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
+ // Add the provided values.
+ for (int index = 0; index < __argc__; index++) {
+ new_elms->set(index + len, BUILTIN_ARG(index+1), mode);
+ }
+ // Set the new backing storage.
+ array->set_elements(new_elms);
+ }
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return array->length();
+}
+BUILTIN_END
+
+
+// Implements Array.prototype.pop for arrays with fast elements;
+// removes and returns the last element, falling back to the Array
+// prototype chain when the slot holds a hole.
+BUILTIN_0(ArrayPop) {
+ JSArray* array = JSArray::cast(*receiver);
+ ASSERT(array->HasFastElements());
+ Object* undefined = Heap::undefined_value();
+
+ int len = Smi::cast(array->length())->value();
+ if (len == 0) return undefined;
+
+ // Get top element
+ FixedArray* elms = FixedArray::cast(array->elements());
+ Object* top = elms->get(len - 1);
+
+ // Set the length.
+ array->set_length(Smi::FromInt(len - 1));
+
+ if (!top->IsTheHole()) {
+ // Delete the top element.
+ elms->set_the_hole(len - 1);
+
+ return top;
+ }
+
+ // Remember to check the prototype chain.
+ JSFunction* array_function =
+ Top::context()->global_context()->array_function();
+ JSObject* prototype = JSObject::cast(array_function->prototype());
+ top = prototype->GetElement(len - 1);
+
+ return top;
+}
+BUILTIN_END
+
+
+// -----------------------------------------------------------------------------
+//
+
+
+// Returns the holder JSObject if the function can legally be called
+// with this receiver. Returns Heap::null_value() if the call is
+// illegal. Any argument that doesn't fit the expected type is
+// overwritten with undefined; arguments that do fit are overwritten
+// with the object in the prototype chain that actually has that type.
+static inline Object* TypeCheck(int argc,
+ Object** argv,
+ FunctionTemplateInfo* info) {
+ Object* recv = argv[0];
+ Object* sig_obj = info->signature();
+ // No signature means any receiver/arguments are acceptable.
+ if (sig_obj->IsUndefined()) return recv;
+ SignatureInfo* sig = SignatureInfo::cast(sig_obj);
+ // If necessary, check the receiver
+ Object* recv_type = sig->receiver();
+
+ Object* holder = recv;
+ if (!recv_type->IsUndefined()) {
+ // Walk the prototype chain looking for an object created from the
+ // required template.
+ for (; holder != Heap::null_value(); holder = holder->GetPrototype()) {
+ if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
+ break;
+ }
+ }
+ if (holder == Heap::null_value()) return holder;
+ }
+ Object* args_obj = sig->args();
+ // If there is no argument signature we're done
+ if (args_obj->IsUndefined()) return holder;
+ FixedArray* args = FixedArray::cast(args_obj);
+ int length = args->length();
+ if (argc < length) length = argc;
+ for (int i = 0; i < length; i++) {
+ Object* argtype = args->get(i);
+ if (argtype->IsUndefined()) continue;
+ // Arguments grow downwards from the receiver at argv[0].
+ Object** arg = &argv[-1 - i];
+ Object* current = *arg;
+ for (; current != Heap::null_value(); current = current->GetPrototype()) {
+ if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
+ *arg = current;
+ break;
+ }
+ }
+ if (current == Heap::null_value()) *arg = Heap::undefined_value();
+ }
+ return holder;
+}
+
+
+// Dispatches a call to a function created through the V8 API: type
+// checks the receiver and arguments against the function template's
+// signature, then invokes the registered C++ invocation callback.
+BUILTIN_0(HandleApiCall) {
+ HandleScope scope;
+
+ // TODO(1238487): This is not nice. We need to get rid of this
+ // awkward behavior and start handling API calls in a more direct
+ // way - maybe compile specialized stubs lazily?.
+#ifdef USE_OLD_CALLING_CONVENTIONS
+ Handle<JSFunction> function =
+ Handle<JSFunction>(JSFunction::cast(__argv__[1]));
+#else
+ Handle<JSFunction> function =
+ Handle<JSFunction>(JSFunction::cast(Builtins::builtin_passed_function));
+#endif
+
+ if (is_construct) {
+ // For construct calls the receiver is the freshly created instance;
+ // configure it from the function template first.
+ Handle<FunctionTemplateInfo> desc =
+ Handle<FunctionTemplateInfo>(
+ FunctionTemplateInfo::cast(function->shared()->function_data()));
+ bool pending_exception = false;
+ Factory::ConfigureInstance(desc, Handle<JSObject>::cast(receiver),
+ &pending_exception);
+ ASSERT(Top::has_pending_exception() == pending_exception);
+ if (pending_exception) return Failure::Exception();
+ }
+
+ FunctionTemplateInfo* fun_data =
+ FunctionTemplateInfo::cast(function->shared()->function_data());
+ Object* raw_holder = TypeCheck(__argc__, __argv__, fun_data);
+
+ if (raw_holder->IsNull()) {
+ // This function cannot be called with the given receiver. Abort!
+ Handle<Object> obj =
+ Factory::NewTypeError("illegal_invocation", HandleVector(&function, 1));
+ return Top::Throw(*obj);
+ }
+
+ Object* raw_call_data = fun_data->call_code();
+ if (!raw_call_data->IsUndefined()) {
+ CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
+ Object* callback_obj = call_data->callback();
+ v8::InvocationCallback callback =
+ v8::ToCData<v8::InvocationCallback>(callback_obj);
+ Object* data_obj = call_data->data();
+ Object* result;
+
+ v8::Local<v8::Object> self =
+ v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
+ Handle<Object> data_handle(data_obj);
+ v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
+ ASSERT(raw_holder->IsJSObject());
+ v8::Local<v8::Function> callee = v8::Utils::ToLocal(function);
+ Handle<JSObject> holder_handle(JSObject::cast(raw_holder));
+ v8::Local<v8::Object> holder = v8::Utils::ToLocal(holder_handle);
+ LOG(ApiObjectAccess("call", JSObject::cast(*receiver)));
+ v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+ data,
+ holder,
+ callee,
+ is_construct,
+ reinterpret_cast<void**>(__argv__ - 1),
+ __argc__);
+
+ v8::Handle<v8::Value> value;
+ {
+ // Leaving JavaScript.
+ VMState state(OTHER);
+ value = callback(args);
+ }
+ if (value.IsEmpty()) {
+ result = Heap::undefined_value();
+ } else {
+ result = *reinterpret_cast<Object**>(*value);
+ }
+
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ // Construct calls must return an object; otherwise keep the receiver.
+ if (!is_construct || result->IsJSObject()) return result;
+ }
+
+ return *receiver;
+}
+BUILTIN_END
+
+
+// Handle calls to non-function objects created through the API that
+// support calls: looks up the instance call handler registered on the
+// object's function template and invokes its callback.
+BUILTIN_0(HandleApiCallAsFunction) {
+ // Non-functions are never called as constructors.
+ ASSERT(!is_construct);
+
+ // Get the object called.
+ JSObject* obj = JSObject::cast(*receiver);
+
+ // Get the invocation callback from the function descriptor that was
+ // used to create the called object.
+ ASSERT(obj->map()->has_instance_call_handler());
+ JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
+ Object* template_info = constructor->shared()->function_data();
+ Object* handler =
+ FunctionTemplateInfo::cast(template_info)->instance_call_handler();
+ ASSERT(!handler->IsUndefined());
+ CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
+ Object* callback_obj = call_data->callback();
+ v8::InvocationCallback callback =
+ v8::ToCData<v8::InvocationCallback>(callback_obj);
+
+ // Get the data for the call and perform the callback.
+ Object* data_obj = call_data->data();
+ Object* result;
+ { HandleScope scope;
+ v8::Local<v8::Object> self =
+ v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
+ Handle<Object> data_handle(data_obj);
+ v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
+ Handle<JSFunction> callee_handle(constructor);
+ v8::Local<v8::Function> callee = v8::Utils::ToLocal(callee_handle);
+ LOG(ApiObjectAccess("call non-function", JSObject::cast(*receiver)));
+ v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+ data,
+ self,
+ callee,
+ is_construct,
+ reinterpret_cast<void**>(__argv__ - 1),
+ __argc__);
+ v8::Handle<v8::Value> value;
+ {
+ // Leaving JavaScript.
+ VMState state(OTHER);
+ value = callback(args);
+ }
+ if (value.IsEmpty()) {
+ result = Heap::undefined_value();
+ } else {
+ result = *reinterpret_cast<Object**>(*value);
+ }
+ }
+ // Check for exceptions and return result.
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ return result;
+}
+BUILTIN_END
+
+
+// TODO(1238487): This is a nasty hack. We need to improve the way we
+// call builtins considerably to get rid of this and the hairy macros
+// in builtins.cc.
+// Function most recently passed into a builtin call; read by
+// HandleApiCall above. NOTE(review): the assignment site is not
+// visible in this part of the file — confirm where it is written.
+Object* Builtins::builtin_passed_function;
+
+
+
+// ----------------------------------------------------------------------------
+// Trampolines binding entries of the builtins code-generation table to
+// the corresponding inline-cache generators.
+// ----------------------------------------------------------------------------
+static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
+ LoadIC::GenerateArrayLength(masm);
+}
+
+
+static void Generate_LoadIC_ShortStringLength(MacroAssembler* masm) {
+ LoadIC::GenerateShortStringLength(masm);
+}
+
+
+static void Generate_LoadIC_MediumStringLength(MacroAssembler* masm) {
+ LoadIC::GenerateMediumStringLength(masm);
+}
+
+
+static void Generate_LoadIC_LongStringLength(MacroAssembler* masm) {
+ LoadIC::GenerateLongStringLength(masm);
+}
+
+
+static void Generate_LoadIC_FunctionPrototype(MacroAssembler* masm) {
+ LoadIC::GenerateFunctionPrototype(masm);
+}
+
+
+static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
+ LoadIC::GenerateInitialize(masm);
+}
+
+
+static void Generate_LoadIC_PreMonomorphic(MacroAssembler* masm) {
+ LoadIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_LoadIC_Miss(MacroAssembler* masm) {
+ LoadIC::GenerateMiss(masm);
+}
+
+
+static void Generate_LoadIC_Megamorphic(MacroAssembler* masm) {
+ LoadIC::GenerateMegamorphic(masm);
+}
+
+
+static void Generate_LoadIC_Normal(MacroAssembler* masm) {
+ LoadIC::GenerateNormal(masm);
+}
+
+
+static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateInitialize(masm);
+}
+
+
+static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateMiss(masm);
+}
+
+
+static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateGeneric(masm);
+}
+
+
+static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
+ KeyedLoadIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
+ StoreIC::GenerateInitialize(masm);
+}
+
+
+static void Generate_StoreIC_Miss(MacroAssembler* masm) {
+ StoreIC::GenerateMiss(masm);
+}
+
+
+static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
+ StoreIC::GenerateMegamorphic(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateGeneric(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateInitialize(masm);
+}
+
+
+// Storage for the generated builtin code objects and their names; filled
+// in by Setup (or by deserialization via IterateBuiltins).
+Object* Builtins::builtins_[builtin_count] = { NULL, };
+const char* Builtins::names_[builtin_count] = { NULL, };
+
+// Addresses of the C implementations, indexed by CFunctionId.
+#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
+  Address Builtins::c_functions_[cfunction_count] = {
+    BUILTIN_LIST_C(DEF_ENUM_C)
+  };
+#undef DEF_ENUM_C
+
+// Names and argument counts of the JavaScript builtins, indexed by the
+// JavaScript enum.
+#define DEF_JS_NAME(name, ignore) #name,
+#define DEF_JS_ARGC(ignore, argc) argc,
+const char* Builtins::javascript_names_[id_count] = {
+  BUILTINS_LIST_JS(DEF_JS_NAME)
+};
+
+int Builtins::javascript_argc_[id_count] = {
+  BUILTINS_LIST_JS(DEF_JS_ARGC)
+};
+#undef DEF_JS_NAME
+#undef DEF_JS_ARGC
+
+static bool is_initialized = false;
+// Generate code objects for all builtins and record their names.  When
+// create_heap_objects is false (deserializing a snapshot) the code slots
+// are cleared and later filled in through IterateBuiltins.  Must be
+// called exactly once during VM initialization.
+void Builtins::Setup(bool create_heap_objects) {
+  ASSERT(!is_initialized);
+
+  // Create a scope for the handles in the builtins.
+  HandleScope scope;
+
+  struct BuiltinDesc {
+    byte* generator;
+    byte* c_code;
+    const char* s_name;  // name is only used for generating log information.
+    int name;
+    Code::Flags flags;
+    int argc;
+  };
+
+#define DEF_FUNCTION_PTR_C(name, argc) \
+  { FUNCTION_ADDR(Generate_Adaptor), \
+    FUNCTION_ADDR(Builtin_##name), \
+    #name, \
+    c_##name, \
+    Code::ComputeFlags(Code::BUILTIN), \
+    argc \
+  },
+
+#define DEF_FUNCTION_PTR_A(name, kind, state) \
+  { FUNCTION_ADDR(Generate_##name), \
+    NULL, \
+    #name, \
+    name, \
+    Code::ComputeFlags(Code::kind, state), \
+    -1 \
+  },
+
+  // Define array of pointers to generators and C builtin functions.
+  static BuiltinDesc functions[] = {
+      BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
+      BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+      // Terminator:
+      { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0), -1}
+  };
+
+#undef DEF_FUNCTION_PTR_C
+#undef DEF_FUNCTION_PTR_A
+
+  // For now we generate builtin adaptor code into a stack-allocated
+  // buffer, before copying it into individual code objects.
+  byte buffer[4*KB];
+
+  // Traverse the list of builtins and generate an adaptor in a
+  // separate code object for each one.
+  for (int i = 0; i < builtin_count; i++) {
+    if (create_heap_objects) {
+      MacroAssembler masm(buffer, sizeof buffer);
+      // Generate the code/adaptor.
+      typedef void (*Generator)(MacroAssembler*, int, int);
+      Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
+      // We pass all arguments to the generator, but it may not use all of
+      // them.  This works because the first arguments are on top of the
+      // stack.
+      g(&masm, functions[i].argc, functions[i].name);
+      // Move the code into the object heap.
+      CodeDesc desc;
+      masm.GetCode(&desc);
+      Code::Flags flags =  functions[i].flags;
+      Object* code = Heap::CreateCode(desc, NULL, flags);
+      if (code->IsRetryAfterGC()) {
+        // Allocation failed; force a GC for the requested amount of space
+        // and retry once.  CHECK aborts if the collection cannot help.
+        CHECK(Heap::CollectGarbage(Failure::cast(code)->requested(),
+                                   Failure::cast(code)->allocation_space()));
+        code = Heap::CreateCode(desc, NULL, flags);
+      }
+      // Add any unresolved jumps or calls to the fixup list in the
+      // bootstrapper.
+      Bootstrapper::AddFixup(Code::cast(code), &masm);
+      // Log the event and add the code to the builtins array.
+      LOG(CodeCreateEvent("Builtin", Code::cast(code), functions[i].s_name));
+      builtins_[i] = code;
+    } else {
+      // Deserializing. The values will be filled in during IterateBuiltins.
+      builtins_[i] = NULL;
+    }
+    names_[i] = functions[i].s_name;
+  }
+
+  // Mark as initialized.
+  is_initialized = true;
+}
+
+
+// Mark the builtins as uninitialized again so Setup may run on a
+// subsequent VM initialization.  The code objects themselves are heap
+// managed and need no explicit release here.
+void Builtins::TearDown() {
+  is_initialized = false;
+}
+
+
+// Garbage collection support: expose the builtin code slots to the
+// visitor so the collector can update them when code objects move (and
+// the deserializer can fill them in).
+void Builtins::IterateBuiltins(ObjectVisitor* v) {
+  v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
+}
+
+
+// Return the name of the builtin whose code object contains the given
+// pc, or NULL when no builtin matches.  May be invoked before Setup has
+// run (e.g. from the disassembler), in which case it always reports no
+// match.
+const char* Builtins::Lookup(byte* pc) {
+  // Bail out early while the builtins table has not been populated.
+  if (!is_initialized) return NULL;
+  for (int index = 0; index < builtin_count; index++) {
+    Code* code_object = Code::cast(builtins_[index]);
+    if (code_object->contains(pc)) return names_[index];
+  }
+  return NULL;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_BUILTINS_H_
+#define V8_BUILTINS_H_
+
+
+
+namespace v8 { namespace internal {
+
+
+// Define list of builtins implemented in C.  Each entry is
+// V(name, argc): the builtin's identifier and its declared argument
+// count.  The list is expanded both into the Name/CFunctionId enums and
+// into the function tables in builtins.cc, so entries must stay in one
+// consistent order.
+#define BUILTIN_LIST_C(V) \
+  V(Illegal, 0) \
+  \
+  V(EmptyFunction, 0) \
+  \
+  V(ArrayCode, 0) \
+  \
+  V(ArrayPush, 1) \
+  V(ArrayPop, 0) \
+  \
+  V(HandleApiCall, 0) \
+  V(HandleApiCallAsFunction, 0)
+
+
+// Define list of builtins implemented in assembly.  Each entry is
+// V(name, kind, state): the builtin's identifier, its Code kind, and its
+// inline-cache state.  Entry order defines the Name enum values, so it
+// must match the tables built in builtins.cc.
+#define BUILTIN_LIST_A(V) \
+  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
+  V(JSConstructCall, BUILTIN, UNINITIALIZED) \
+  V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
+  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
+  \
+  V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \
+  V(Return_DebugBreakEntry, BUILTIN, DEBUG_BREAK) \
+  V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK) \
+  V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK) \
+  \
+  V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \
+  V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \
+  V(StoreIC_Miss, BUILTIN, UNINITIALIZED) \
+  V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED) \
+  \
+  V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED) \
+  V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC) \
+  V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
+  V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC) \
+  V(LoadIC_ShortStringLength, LOAD_IC, MONOMORPHIC) \
+  V(LoadIC_MediumStringLength, LOAD_IC, MONOMORPHIC) \
+  V(LoadIC_LongStringLength, LOAD_IC, MONOMORPHIC) \
+  V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC) \
+  V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC) \
+  V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \
+  \
+  V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \
+  V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
+  V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
+  V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
+  \
+  V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
+  V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
+  V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
+  \
+  V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
+  V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \
+  \
+  /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
+  V(FunctionApply, BUILTIN, UNINITIALIZED)
+
+
+// Define list of builtins implemented in JavaScript.  Each entry is
+// V(name, argc): the builtin's identifier and its argument count as
+// reported by GetArgumentsCount.  Entry order defines the JavaScript
+// enum values and the name/argc tables in builtins.cc.
+#define BUILTINS_LIST_JS(V) \
+  V(EQUALS, 1) \
+  V(STRICT_EQUALS, 1) \
+  V(COMPARE, 2) \
+  V(ADD, 1) \
+  V(SUB, 1) \
+  V(MUL, 1) \
+  V(MULNEG, 1) \
+  V(DIV, 1) \
+  V(MOD, 1) \
+  V(INC, 0) \
+  V(DEC, 0) \
+  V(BIT_OR, 1) \
+  V(BIT_AND, 1) \
+  V(BIT_XOR, 1) \
+  V(UNARY_MINUS, 0) \
+  V(BIT_NOT, 0) \
+  V(SHL, 1) \
+  V(SAR, 1) \
+  V(SHR, 1) \
+  V(DELETE, 1) \
+  V(IN, 1) \
+  V(INSTANCE_OF, 1) \
+  V(GET_KEYS, 0) \
+  V(FILTER_KEY, 1) \
+  V(CALL_NON_FUNCTION, 0) \
+  V(TO_OBJECT, 0) \
+  V(TO_NUMBER, 0) \
+  V(TO_STRING, 0) \
+  V(APPLY_PREPARE, 1) \
+  V(APPLY_OVERFLOW, 1)
+
+
+class ObjectVisitor;
+
+
+// Static access to the builtin code objects generated during VM
+// initialization, the C functions that back them, and the metadata of
+// the builtins implemented in JavaScript.
+class Builtins : public AllStatic {
+ public:
+  // Generate all builtin code objects. Should be called once during
+  // VM initialization.
+  static void Setup(bool create_heap_objects);
+  static void TearDown();
+
+  // Garbage collection support.
+  static void IterateBuiltins(ObjectVisitor* v);
+
+  // Disassembler support.
+  static const char* Lookup(byte* pc);
+
+  // One enumerator per builtin (C and assembly), in list order.
+  enum Name {
+#define DEF_ENUM_C(name, ignore) name,
+#define DEF_ENUM_A(name, kind, state) name,
+    BUILTIN_LIST_C(DEF_ENUM_C)
+    BUILTIN_LIST_A(DEF_ENUM_A)
+#undef DEF_ENUM_C
+#undef DEF_ENUM_A
+    builtin_count
+  };
+
+  // One enumerator per C-implemented builtin, prefixed c_.
+  enum CFunctionId {
+#define DEF_ENUM_C(name, ignore) c_##name,
+    BUILTIN_LIST_C(DEF_ENUM_C)
+#undef DEF_ENUM_C
+    cfunction_count
+  };
+
+  // One enumerator per JavaScript-implemented builtin.
+  enum JavaScript {
+#define DEF_ENUM(name, ignore) name,
+    BUILTINS_LIST_JS(DEF_ENUM)
+#undef DEF_ENUM
+    id_count
+  };
+
+  static bool IsConstructCall(Address pc);
+  static bool IsArgumentsAdaptorCall(Address pc);
+
+  static Code* builtin(Name name) {
+    // Code::cast cannot be used here since we access builtins
+    // during the marking phase of mark sweep. See IC::Clear.
+    return reinterpret_cast<Code*>(builtins_[name]);
+  }
+
+  static Address builtin_address(Name name) {
+    return reinterpret_cast<Address>(&builtins_[name]);
+  }
+
+  static Address c_function_address(CFunctionId id) {
+    return c_functions_[id];
+  }
+
+  static const char* GetName(JavaScript id) { return javascript_names_[id]; }
+  static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
+  static int NumberOfJavaScriptBuiltins() { return id_count; }
+
+  // Called from stub-cache.cc.
+  static void Generate_CallIC_DebugBreak(MacroAssembler* masm);
+
+  // Scratch slot used to hand a JSFunction to the C builtins; see the
+  // TODO(1238487) in builtins.cc.
+  static Object* builtin_passed_function;
+
+ private:
+  // The external C++ functions called from the code.
+  static Address c_functions_[cfunction_count];
+
+  // Note: These are always Code objects, but to conform with
+  // IterateBuiltins() above which assumes Object**'s for the callback
+  // function f, we use an Object* array here.
+  static Object* builtins_[builtin_count];
+  static const char* names_[builtin_count];
+
+  static const char* javascript_names_[id_count];
+  static int javascript_argc_[id_count];
+
+  // The offset from the beginning of the JSConstructCall builtin code
+  // object to the return address after the call. Used for determining
+  // if a call is a constructor invocation.
+  static int construct_call_pc_offset_;
+  static int arguments_adaptor_call_pc_offset_;
+
+  static void Generate_Adaptor(MacroAssembler* masm,
+                               int argc,
+                               CFunctionId id);
+  static void Generate_JSConstructCall(MacroAssembler* masm);
+  static void Generate_JSEntryTrampoline(MacroAssembler* masm);
+  static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
+  static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
+  static void Generate_FunctionApply(MacroAssembler* masm);
+
+  static void Generate_LoadIC_DebugBreak(MacroAssembler* masm);
+  static void Generate_StoreIC_DebugBreak(MacroAssembler* masm);
+  static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm);
+  static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm);
+  static void Generate_ConstructCall_DebugBreak(MacroAssembler* masm);
+  static void Generate_Return_DebugBreak(MacroAssembler* masm);
+  static void Generate_Return_DebugBreakEntry(MacroAssembler* masm);
+  static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_BUILTINS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CHAR_PREDICATES_INL_H_
+#define V8_CHAR_PREDICATES_INL_H_
+
+#include "char-predicates.h"
+
+namespace v8 { namespace internal {
+
+
+// True for the carriage-return character (U+000D).
+inline bool IsCarriageReturn(uc32 c) {
+  return c == '\r';
+}
+
+
+// True for the line-feed character (U+000A).
+inline bool IsLineFeed(uc32 c) {
+  return c == '\n';
+}
+
+
+// True for the decimal digits '0'..'9'.
+// ECMA-262, 3rd, 7.8.3 (p 16)
+inline bool IsDecimalDigit(uc32 c) {
+  if (c < '0') return false;
+  return c <= '9';
+}
+
+
+// True for the hexadecimal digits '0'..'9', 'a'..'f' and 'A'..'F'.
+// ECMA-262, 3rd, 7.6 (p 15)
+inline bool IsHexDigit(uc32 c) {
+  if ('0' <= c && c <= '9') return true;
+  // Fold the ASCII upper-case letters onto lower case before the range
+  // test; only 'A'..'F' and 'a'..'f' fold into the accepted range.
+  uc32 folded = c | 0x20;
+  return 'a' <= folded && folded <= 'f';
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_CHAR_PREDICATES_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CHAR_PREDICATES_H_
+#define V8_CHAR_PREDICATES_H_
+
+namespace v8 { namespace internal {
+
+// Unicode character predicates as defined by ECMA-262, 3rd,
+// used for lexical analysis.
+
+inline bool IsCarriageReturn(uc32 c);
+inline bool IsLineFeed(uc32 c);
+inline bool IsDecimalDigit(uc32 c);
+inline bool IsHexDigit(uc32 c);
+
+// Predicate for characters that may begin an identifier: '$', '_',
+// any Unicode letter, and '\\' -- the backslash is accepted here,
+// presumably so the scanner can handle unicode escape sequences in
+// identifiers; confirm against the scanner before relying on it.
+struct IdentifierStart {
+  static inline bool Is(uc32 c) {
+    switch (c) {
+      case '$': case '_': case '\\': return true;
+      default: return unibrow::Letter::Is(c);
+    }
+  }
+};
+
+
+// Predicate for characters that may continue an identifier: anything
+// that may start one, plus Unicode numbers, combining marks and
+// connector punctuation.
+struct IdentifierPart {
+  static inline bool Is(uc32 c) {
+    return IdentifierStart::Is(c)
+        || unibrow::Number::Is(c)
+        || unibrow::CombiningMark::Is(c)
+        || unibrow::ConnectorPunctuation::Is(c);
+  }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_CHAR_PREDICATES_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "platform.h"
+#include "top.h"
+
+using namespace v8::internal;
+
+// Printing a stack trace on abort helps locate the failing check; it can
+// be disabled when the trace itself is the problem (e.g. corrupt stack).
+DEFINE_bool(stack_trace_on_abort, true,
+            "print a stack trace if an assertion failure occurs");
+
+#ifdef DEBUG
+DEFINE_bool(enable_slow_asserts, false,
+            "enable asserts that are slow to execute");
+#endif
+
+// Depth of nested V8_Fatal invocations; used below to progressively
+// disable message and stack printing when the handler itself faults.
+static int fatal_error_handler_nesting_depth = 0;
+
+// Contains protection against recursive calls (faults while handling faults).
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+  fatal_error_handler_nesting_depth++;
+  // First time we try to print an error message
+  if (fatal_error_handler_nesting_depth < 2) {
+    OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrintError(format, arguments);
+    va_end(arguments);
+    OS::PrintError("\n#\n\n");
+  }
+  // First two times we may try to print a stack dump.
+  if (fatal_error_handler_nesting_depth < 3) {
+    if (FLAG_stack_trace_on_abort) {
+      // Call this one twice on double fault
+      Top::PrintStack();
+    }
+  }
+  // Always terminate, even when printing had to be suppressed.
+  OS::Abort();
+}
+
+
+// CHECK_EQ helper for v8::Handle<v8::Value> arguments: compares with the
+// API's Equals and reports both values as ASCII strings on mismatch.
+void CheckEqualsHelper(const char* file,
+                       int line,
+                       const char* expected_source,
+                       v8::Handle<v8::Value> expected,
+                       const char* value_source,
+                       v8::Handle<v8::Value> value) {
+  if (!expected->Equals(value)) {
+    v8::String::AsciiValue value_str(value);
+    v8::String::AsciiValue expected_str(expected);
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
+             expected_source, value_source, *expected_str, *value_str);
+  }
+}
+
+
+// CHECK_NE helper for v8::Handle<v8::Value> arguments: fails when the
+// two handles compare equal via the API's Equals.
+void CheckNonEqualsHelper(const char* file,
+                          int line,
+                          const char* unexpected_source,
+                          v8::Handle<v8::Value> unexpected,
+                          const char* value_source,
+                          v8::Handle<v8::Value> value) {
+  if (unexpected->Equals(value)) {
+    v8::String::AsciiValue value_str(value);
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
+             unexpected_source, value_source, *value_str);
+  }
+}
+
+
+// Report a fatal error raised through the public API and terminate.
+// Unlike V8_Fatal this has no recursion guard and never prints a stack
+// trace; it identifies the failing API entry point instead of file/line.
+void API_Fatal(const char* location, const char* format, ...) {
+  OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
+  va_list arguments;
+  va_start(arguments, format);
+  OS::VPrintError(format, arguments);
+  va_end(arguments);
+  OS::PrintError("\n#\n\n");
+  OS::Abort();
+}
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CHECKS_H_
+#define V8_CHECKS_H_
+
+#include <string.h>
+
+#include "flags.h"
+
+#ifdef DEBUG
+DECLARE_bool(enable_slow_asserts);
+#endif // DEBUG
+
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+void API_Fatal(const char* location, const char* format, ...);
+
+// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
+// development, but they should not be relied on in the final product.
+#define FATAL(msg) \
+ V8_Fatal(__FILE__, __LINE__, "%s", (msg))
+
+#define UNREACHABLE() \
+ V8_Fatal(__FILE__, __LINE__, "unreachable code")
+
+#define UNIMPLEMENTED() \
+ V8_Fatal(__FILE__, __LINE__, "unimplemented code")
+
+
+// Used by the CHECK macro -- should not be called directly.
+// Used by the CHECK macro -- should not be called directly.
+static inline void CheckHelper(const char* file,
+                               int line,
+                               const char* source,
+                               bool condition) {
+  // Nothing to report when the checked condition holds.
+  if (condition) return;
+  V8_Fatal(file, line, "CHECK(%s) failed", source);
+}
+
+
+// The CHECK macro checks that the given condition is true; if not, it
+// prints a message to stderr and aborts.
+#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
+
+
+// Helper function used by the CHECK_EQ function when given int
+// arguments. Should not be called directly.
+// Helper function used by the CHECK_EQ macro when given int
+// arguments. Should not be called directly.
+static inline void CheckEqualsHelper(const char* file, int line,
+                                     const char* expected_source, int expected,
+                                     const char* value_source, int value) {
+  if (expected == value) return;
+  V8_Fatal(file, line,
+           "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
+           expected_source, value_source, expected, value);
+}
+
+
+// Helper function used by the CHECK_NE function when given int
+// arguments. Should not be called directly.
+// Helper function used by the CHECK_NE macro when given int
+// arguments. Should not be called directly.
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* unexpected_source,
+                                        int unexpected,
+                                        const char* value_source,
+                                        int value) {
+  if (unexpected != value) return;
+  V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
+           unexpected_source, value_source, value);
+}
+
+
+// Helper function used by the CHECK function when given string
+// arguments. Should not be called directly.
+// Helper function used by the CHECK_EQ macro when given string
+// arguments. Should not be called directly.
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     const char* expected,
+                                     const char* value_source,
+                                     const char* value) {
+  // Guard against NULL before calling strcmp (undefined behavior on
+  // NULL): two NULLs compare equal, a single NULL is a mismatch.  This
+  // matches the NULL handling in the string CheckNonEqualsHelper.
+  if ((expected == NULL && value == NULL) ||
+      (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
+    return;
+  }
+  V8_Fatal(file, line,
+           "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
+           expected_source, value_source, expected, value);
+}
+
+
+// Helper function used by the CHECK_NE macro when given string
+// arguments. Should not be called directly.
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* expected_source,
+                                        const char* expected,
+                                        const char* value_source,
+                                        const char* value) {
+  // The strings count as equal when they are the same pointer (which
+  // covers two NULLs) or have identical contents.
+  bool identical = (expected == value);
+  if (!identical && expected != NULL && value != NULL) {
+    identical = (strcmp(expected, value) == 0);
+  }
+  if (!identical) return;
+  V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
+           expected_source, value_source, value);
+}
+
+
+// Helper function used by the CHECK function when given pointer
+// arguments. Should not be called directly.
+// Helper function used by the CHECK_EQ macro when given pointer
+// arguments. Should not be called directly.
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     void* expected,
+                                     const char* value_source,
+                                     void* value) {
+  if (expected != value) {
+    // Print the pointers with %p instead of forcing them through
+    // reinterpret_cast<int>, which truncates on 64-bit platforms.
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+// Helper function used by the CHECK_NE macro when given pointer
+// arguments. Should not be called directly.
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* expected_source,
+                                        void* expected,
+                                        const char* value_source,
+                                        void* value) {
+  if (expected == value) {
+    // Print the pointer with %p instead of forcing it through
+    // reinterpret_cast<int>, which truncates on 64-bit platforms.
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
+             expected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK function when given floating
+// point arguments. Should not be called directly.
+// Helper function used by the CHECK_EQ macro when given floating
+// point arguments. Should not be called directly.
+// NOTE(review): uses exact != comparison, so CHECK_EQ on two NaN values
+// always fails and values equal only up to rounding also fail --
+// presumably intentional for a debug check; confirm before relying on
+// it for computed doubles.
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     double expected,
+                                     const char* value_source,
+                                     double value) {
+  if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %f\n#   Found: %f",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+namespace v8 {
+ class Value;
+ template <class T> class Handle;
+}
+
+
+void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* unexpected_source,
+ v8::Handle<v8::Value> unexpected,
+ const char* value_source,
+ v8::Handle<v8::Value> value);
+
+
+void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ v8::Handle<v8::Value> expected,
+ const char* value_source,
+ v8::Handle<v8::Value> value);
+
+
+#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
+ #expected, expected, #value, value)
+
+
+#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
+ #unexpected, unexpected, #value, value)
+
+
+#define CHECK_GT(a, b) CHECK((a) > (b))
+#define CHECK_GE(a, b) CHECK((a) >= (b))
+
+
+// This is inspired by the static assertion facility in boost. This
+// is pretty magical. If it causes you trouble on a platform you may
+// find a fix in the boost code.
+template <bool> class StaticAssertion;
+template <> class StaticAssertion<true> { };
+// This macro joins two tokens. If one of the tokens is a macro the
+// helper call causes it to be resolved before joining.
+#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
+#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
+// Causes an error during compilation if the condition is not
+// statically known to be true. It is formulated as a typedef so that
+// it can be used wherever a typedef can be used. Beware that this
+// actually causes each use to introduce a new defined type with a
+// name depending on the source line.
+template <int> class StaticAssertionHelper { };
+#define STATIC_CHECK(test) \
+  typedef \
+    StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>(test)>)> \
+    SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
+
+
+// The ASSERT macro is equivalent to CHECK except that it only
+// generates code in debug builds. Ditto STATIC_ASSERT.
+#ifdef DEBUG
+#define ASSERT(condition) CHECK(condition)
+#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
+#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
+#define STATIC_ASSERT(test) STATIC_CHECK(test)
+#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
+#else
+#define ASSERT(condition) ((void) 0)
+#define ASSERT_EQ(v1, v2) ((void) 0)
+#define ASSERT_NE(v1, v2) ((void) 0)
+#define STATIC_ASSERT(test) ((void) 0)
+#define SLOW_ASSERT(condition) ((void) 0)
+#endif
+
+
+#define ASSERT_TAG_ALIGNED(address) \
+ ASSERT((reinterpret_cast<int>(address) & kHeapObjectTagMask) == 0)
+
+#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & kHeapObjectTagMask) == 0)
+
+#endif // V8_CHECKS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+DEFINE_bool(print_code_stubs, false, "print code stubs");
+#endif
+
+// Return the code object for this stub, generating and caching it in
+// the Heap::code_stubs dictionary (keyed by GetKey()) on first use.
+Handle<Code> CodeStub::GetCode() {
+  uint32_t key = GetKey();
+  int index = Heap::code_stubs()->FindNumberEntry(key);
+  if (index == -1) {
+    HandleScope scope;
+
+    // Update the static counter each time a new code stub is generated.
+    Counters::code_stubs.Increment();
+
+    // Generate the new code.
+    MacroAssembler masm(NULL, 256);
+
+    bool needs_check_for_stub_calls = !AllowsStubCalls();
+    if (needs_check_for_stub_calls) {
+      // Nested stubs are not allowed for leafs.
+      ASSERT(!masm.generating_stub());
+      masm.set_generating_stub(true);
+    }
+
+    // Generate the code for the stub.
+    Generate(&masm);
+
+    if (needs_check_for_stub_calls) masm.set_generating_stub(false);
+
+    // Create the code object.
+    CodeDesc desc;
+    masm.GetCode(&desc);
+
+    // Copy the generated code into a heap object.
+    // TODO(1238541): Simplify this somewhat complicated encoding.
+    CodeStub::Major major = MajorKey();
+    // Lower three bits in state field.
+    InlineCacheState state = static_cast<InlineCacheState>(major & 0x07);
+    // Upper two bits in type field.
+    PropertyType type = static_cast<PropertyType>((major >> 3) & 0x03);
+    // Compute flags with state and type used to hold major key.
+    Code::Flags flags = Code::ComputeFlags(Code::STUB, state, type);
+
+    Handle<Code> code = Factory::NewCode(desc, NULL, flags);
+
+    // Add unresolved entries in the code to the fixup list.
+    Bootstrapper::AddFixup(*code, &masm);
+
+    LOG(CodeCreateEvent(GetName(), *code, ""));
+    Counters::total_stubs_code_size.Increment(code->instruction_size());
+
+#ifdef DEBUG
+    if (FLAG_print_code_stubs) {
+      Print();
+      code->Print();
+      PrintF("\n");
+    }
+#endif
+
+    // Update the dictionary and the root in Heap.
+    Handle<Dictionary> dict =
+        Factory::DictionaryAtNumberPut(Handle<Dictionary>(Heap::code_stubs()),
+                                       key,
+                                       code);
+    Heap::set_code_stubs(*dict);
+    // Re-lookup: the put above may have reallocated the dictionary.
+    index = Heap::code_stubs()->FindNumberEntry(key);
+  }
+  ASSERT(index != -1);
+
+  return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+}
+
+
+// Returns the printable name for a major stub key.
+// The table below must stay in the exact declaration order of the
+// CodeStub::Major enum (CallFunction ... JSExit).
+const char* CodeStub::MajorName(CodeStub::Major major_key) {
+  static const char* const kMajorNames[] = {
+    "CallFunction",
+    "InlinedGenericOp",
+    "SmiOp",
+    "Compare",
+    "RecordWrite",
+    "GenericOp",
+    "StackCheck",
+    "UnarySub",
+    "RevertToNumber",
+    "CounterOp",
+    "ArgumentsAccess",
+    "Runtime",
+    "CEntry",
+    "JSEntry",
+    "GetProperty",
+    "SetProperty",
+    "InvokeBuiltin",
+    "JSExit"
+  };
+  if (major_key < 0 || major_key >= NUMBER_OF_IDS) {
+    UNREACHABLE();
+    return NULL;
+  }
+  return kMajorNames[major_key];
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODE_STUBS_H_
+#define V8_CODE_STUBS_H_
+
+namespace v8 { namespace internal {
+
+
+// CodeStub is the base class of all code stubs: shared pieces of
+// generated code cached in the heap under a (major, minor) key.
+class CodeStub BASE_EMBEDDED {
+ public:
+ enum Major {
+ CallFunction,
+ InlinedGenericOp,
+ SmiOp,
+ Compare,
+ RecordWrite, // Last stub that allows stub calls inside.
+ GenericOp,
+ StackCheck,
+ UnarySub,
+ RevertToNumber,
+ CounterOp,
+ ArgumentsAccess,
+ Runtime,
+ CEntry,
+ JSEntry,
+ GetProperty, // ARM only
+ SetProperty, // ARM only
+ InvokeBuiltin, // ARM only
+ JSExit, // ARM only
+ NUMBER_OF_IDS
+ };
+
+ // Retrieve the code for the stub. Generate the code if needed.
+ Handle<Code> GetCode();
+
+ // Decode the major/minor parts of a combined stub key.
+ static Major MajorKeyFromKey(uint32_t key) {
+ return static_cast<Major>(MajorKeyBits::decode(key));
+ }
+ static int MinorKeyFromKey(uint32_t key) {
+ return MinorKeyBits::decode(key);
+ }
+ static const char* MajorName(Major major_key);
+
+ virtual ~CodeStub() {}
+
+ private:
+ // Generates the assembler code for the stub.
+ virtual void Generate(MacroAssembler* masm) = 0;
+
+ // Returns information for computing the number key.
+ virtual Major MajorKey() = 0;
+ virtual int MinorKey() = 0;
+
+ // Returns a name for logging/debugging purposes.
+ virtual const char* GetName() = 0;
+
+#ifdef DEBUG
+ virtual void Print() { PrintF("%s\n", GetName()); }
+#endif
+
+ // Computes the key based on major and minor.
+ uint32_t GetKey() {
+ ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
+ return MinorKeyBits::encode(MinorKey()) |
+ MajorKeyBits::encode(MajorKey());
+ }
+
+ // Stubs up to and including RecordWrite may themselves call other stubs.
+ bool AllowsStubCalls() { return MajorKey() <= RecordWrite; }
+
+ static const int kMajorBits = 5;
+ static const int kMinorBits = kBitsPerPointer - kMajorBits - kSmiTagSize;
+
+ class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
+ class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
+
+ friend class BreakPointIterator;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_CODE_STUBS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODE_H_
+#define V8_CODE_H_
+
+namespace v8 { namespace internal {
+
+
+// Wrapper class for passing expected and actual parameter counts as
+// either registers or immediate values. It lets callee-invocation code
+// verify that exactly the expected number of parameters is supplied.
+class ParameterCount BASE_EMBEDDED {
+ public:
+  explicit ParameterCount(Register reg) : register_(reg), value_(0) { }
+  explicit ParameterCount(int immediate)
+      : register_(no_reg), value_(immediate) { }
+
+  // A count is an immediate exactly when no register was supplied.
+  bool is_immediate() const { return register_.is(no_reg); }
+  bool is_reg() const { return !is_immediate(); }
+
+  Register reg() const {
+    ASSERT(is_reg());
+    return register_;
+  }
+  int immediate() const {
+    ASSERT(is_immediate());
+    return value_;
+  }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
+
+  const Register register_;  // no_reg when the count is an immediate
+  const int value_;          // 0 when the count lives in a register
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_CODE_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "prettyprinter.h"
+#include "scopeinfo.h"
+#include "scopes.h"
+#include "runtime.h"
+
+namespace v8 { namespace internal {
+
+// Code generator flags. DEFINE_* introduces the flag in this compilation
+// unit; DECLARE_* refers to a flag defined elsewhere.
+// NOTE(review): DECLARE_bool(optimize_locals) below repeats the
+// DEFINE_bool above — looks redundant; confirm against the flag macros.
+DEFINE_bool(optimize_locals, true,
+ "optimize locals by allocating them in registers");
+DEFINE_bool(trace, false, "trace function calls");
+DECLARE_bool(debug_info);
+DECLARE_bool(debug_code);
+DECLARE_bool(optimize_locals);
+
+#ifdef DEBUG
+DECLARE_bool(gc_greedy);
+DEFINE_bool(trace_codegen, false,
+ "print name of functions for which code is generated");
+DEFINE_bool(print_code, false, "print generated code");
+DEFINE_bool(print_builtin_code, false, "print generated code for builtins");
+DEFINE_bool(print_source, false, "pretty print source code");
+DEFINE_bool(print_builtin_source, false,
+ "pretty print source code for builtins");
+DEFINE_bool(print_ast, false, "print source AST");
+DEFINE_bool(print_builtin_ast, false, "print source AST for builtins");
+DEFINE_bool(trace_calls, false, "trace calls");
+DEFINE_bool(trace_builtin_calls, false, "trace builtins calls");
+DEFINE_string(stop_at, "", "function name where to insert a breakpoint");
+#endif // DEBUG
+
+
+DEFINE_bool(check_stack, true,
+ "check stack for overflow, interrupt, breakpoint");
+
+
+class ArmCodeGenerator;
+
+
+// -----------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+class Reference BASE_EMBEDDED {
+ public:
+ // The enum values double as the number of stack slots the reference
+ // occupies (EMPTY = 0, NAMED = 1, KEYED = 2); see size() below.
+ enum Type { ILLEGAL = -1, EMPTY = 0, NAMED = 1, KEYED = 2 };
+ // Loads the reference via the code generator; unloaded in the dtor.
+ Reference(ArmCodeGenerator* cgen, Expression* expression);
+ ~Reference();
+
+ Expression* expression() const { return expression_; }
+ Type type() const { return type_; }
+ // The type may be set exactly once, from ILLEGAL to its final value.
+ void set_type(Type value) {
+ ASSERT(type_ == ILLEGAL);
+ type_ = value;
+ }
+ // Number of execution-stack slots used by this reference.
+ int size() const { return type_; }
+
+ bool is_illegal() const { return type_ == ILLEGAL; }
+
+ private:
+ ArmCodeGenerator* cgen_;
+ Expression* expression_;
+ Type type_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Code generation state
+
+// Immutable bundle of per-expression code generation state: the kind of
+// access being compiled, the active reference (if any), and the branch
+// targets used when an expression is compiled into control flow.
+class CodeGenState BASE_EMBEDDED {
+ public:
+ enum AccessType {
+ UNDEFINED,
+ LOAD,
+ LOAD_TYPEOF_EXPR,
+ STORE,
+ INIT_CONST
+ };
+
+ // Default state: no access in progress, no reference, no targets.
+ CodeGenState()
+ : access_(UNDEFINED),
+ ref_(NULL),
+ true_target_(NULL),
+ false_target_(NULL) {
+ }
+
+ CodeGenState(AccessType access,
+ Reference* ref,
+ Label* true_target,
+ Label* false_target)
+ : access_(access),
+ ref_(ref),
+ true_target_(true_target),
+ false_target_(false_target) {
+ }
+
+ AccessType access() const { return access_; }
+ Reference* ref() const { return ref_; }
+ Label* true_target() const { return true_target_; }
+ Label* false_target() const { return false_target_; }
+
+ private:
+ AccessType access_;
+ Reference* ref_;
+ Label* true_target_;   // jump target when the condition is true
+ Label* false_target_;  // jump target when the condition is false
+};
+
+
+// -----------------------------------------------------------------------------
+// ArmCodeGenerator
+
+// ARM-specific code generator: walks the AST of a function literal and
+// emits machine code via the macro assembler. Entry point is MakeCode.
+class ArmCodeGenerator: public CodeGenerator {
+ public:
+ static Handle<Code> MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval);
+
+ MacroAssembler* masm() { return masm_; }
+
+ private:
+ // Assembler
+ MacroAssembler* masm_; // to generate code
+
+ // Code generation state
+ Scope* scope_;
+ Condition cc_reg_;     // condition of the last comparison; al when none
+ CodeGenState* state_;
+ RegList reg_locals_; // the list of registers used to hold locals
+ int num_reg_locals_; // the number of registers holding locals
+ int break_stack_height_;
+
+ // Labels
+ Label function_return_;
+
+ // Construction/destruction
+ ArmCodeGenerator(int buffer_size,
+ Handle<Script> script,
+ bool is_eval);
+
+ virtual ~ArmCodeGenerator() { delete masm_; }
+
+ // Main code generation function
+ void GenCode(FunctionLiteral* fun);
+
+ // The following are used by class Reference.
+ void LoadReference(Reference* ref);
+ void UnloadReference(Reference* ref);
+ friend class Reference;
+
+ // State
+ bool has_cc() const { return cc_reg_ != al; }
+ CodeGenState::AccessType access() const { return state_->access(); }
+ Reference* ref() const { return state_->ref(); }
+ bool is_referenced() const { return state_->ref() != NULL; }
+ Label* true_target() const { return state_->true_target(); }
+ Label* false_target() const { return state_->false_target(); }
+
+
+ // Expressions
+ MemOperand GlobalObject() const {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+ }
+
+ MemOperand ContextOperand(Register context, int index) const {
+ return MemOperand(context, Context::SlotOffset(index));
+ }
+
+ MemOperand ParameterOperand(int index) const {
+ // index -2 corresponds to the activated closure, -1 corresponds
+ // to the receiver
+ ASSERT(-2 <= index && index < scope_->num_parameters());
+ int offset = JavaScriptFrameConstants::kParam0Offset - index * kPointerSize;
+ return MemOperand(pp, offset);
+ }
+
+ MemOperand FunctionOperand() const { return ParameterOperand(-2); }
+
+ Register SlotRegister(int slot_index);
+ MemOperand SlotOperand(Slot* slot, Register tmp);
+
+ void LoadCondition(Expression* x, CodeGenState::AccessType access,
+ Label* true_target, Label* false_target, bool force_cc);
+ void Load(Expression* x,
+ CodeGenState::AccessType access = CodeGenState::LOAD);
+ void LoadGlobal();
+
+ // Special code for typeof expressions: Unfortunately, we must
+ // be careful when loading the expression in 'typeof'
+ // expressions. We are not allowed to throw reference errors for
+ // non-existing properties of the global object, so we must make it
+ // look like an explicit property access, instead of an access
+ // through the context chain.
+ void LoadTypeofExpression(Expression* x);
+
+ // References
+ void AccessReference(Reference* ref, CodeGenState::AccessType access);
+
+ void GetValue(Reference* ref) { AccessReference(ref, CodeGenState::LOAD); }
+ void SetValue(Reference* ref) { AccessReference(ref, CodeGenState::STORE); }
+ void InitConst(Reference* ref) {
+ AccessReference(ref, CodeGenState::INIT_CONST);
+ }
+
+ // Converts the value in 'reg' to a boolean and branches to the
+ // appropriate target.
+ void ToBoolean(Register reg, Label* true_target, Label* false_target);
+
+
+ // Access property from the reference (must be at the TOS).
+ void AccessReferenceProperty(Expression* key,
+ CodeGenState::AccessType access);
+
+ void GenericOperation(Token::Value op);
+ void Comparison(Condition cc, bool strict = false);
+
+ void SmiOperation(Token::Value op, Handle<Object> value, bool reversed);
+
+ void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+ // Declare global variables and functions in the given array of
+ // name/value pairs.
+ virtual void DeclareGlobals(Handle<FixedArray> pairs);
+
+ // Instantiate the function boilerplate.
+ void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+ // Control flow
+ void Branch(bool if_true, Label* L);
+ void CheckStack();
+ void CleanStack(int num_bytes);
+
+ // Node visitors
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node);
+ NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ void RecordStatementPosition(Node* node);
+
+ // Activation frames
+ void EnterJSFrame(int argc, RegList callee_saved); // preserves r1
+ void ExitJSFrame(RegList callee_saved,
+ ExitJSFlag flag = RETURN); // preserves r0-r2
+
+ // Inlined runtime functions, dispatched by name from generic call code.
+ virtual void GenerateShiftDownAndTailCall(ZoneList<Expression*>* args);
+ virtual void GenerateSetThisFunction(ZoneList<Expression*>* args);
+ virtual void GenerateGetThisFunction(ZoneList<Expression*>* args);
+ virtual void GenerateSetThis(ZoneList<Expression*>* args);
+ virtual void GenerateGetArgumentsLength(ZoneList<Expression*>* args);
+ virtual void GenerateSetArgumentsLength(ZoneList<Expression*>* args);
+ virtual void GenerateTailCallWithArguments(ZoneList<Expression*>* args);
+ virtual void GenerateSetArgument(ZoneList<Expression*>* args);
+ virtual void GenerateSquashFrame(ZoneList<Expression*>* args);
+ virtual void GenerateExpandFrame(ZoneList<Expression*>* args);
+ virtual void GenerateIsSmi(ZoneList<Expression*>* args);
+ virtual void GenerateIsArray(ZoneList<Expression*>* args);
+
+ virtual void GenerateArgumentsLength(ZoneList<Expression*>* args);
+ virtual void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+ virtual void GenerateValueOf(ZoneList<Expression*>* args);
+ virtual void GenerateSetValueOf(ZoneList<Expression*>* args);
+};
+
+
+// -----------------------------------------------------------------------------
+// ArmCodeGenerator implementation
+
+#define __ masm_->
+
+
+// Compiles the given function literal to ARM code. Returns a null handle
+// and signals a pending exception via Top::StackOverflow if code
+// generation overflows the stack.
+Handle<Code> ArmCodeGenerator::MakeCode(FunctionLiteral* flit,
+ Handle<Script> script,
+ bool is_eval) {
+#ifdef DEBUG
+ bool print_source = false;
+ bool print_ast = false;
+ bool print_code = false;
+ const char* ftype;
+
+ // Builtins and user code are controlled by separate print flags.
+ if (Bootstrapper::IsActive()) {
+ print_source = FLAG_print_builtin_source;
+ print_ast = FLAG_print_builtin_ast;
+ print_code = FLAG_print_builtin_code;
+ ftype = "builtin";
+ } else {
+ print_source = FLAG_print_source;
+ print_ast = FLAG_print_ast;
+ print_code = FLAG_print_code;
+ ftype = "user-defined";
+ }
+
+ if (FLAG_trace_codegen || print_source || print_ast) {
+ PrintF("*** Generate code for %s function: ", ftype);
+ flit->name()->ShortPrint();
+ PrintF(" ***\n");
+ }
+
+ if (print_source) {
+ PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
+ }
+
+ if (print_ast) {
+ PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
+ }
+#endif // DEBUG
+
+ // Generate code.
+ const int initial_buffer_size = 4 * KB;
+ ArmCodeGenerator cgen(initial_buffer_size, script, is_eval);
+ cgen.GenCode(flit);
+ if (cgen.HasStackOverflow()) {
+ Top::StackOverflow();
+ return Handle<Code>::null();
+ }
+
+ // Process any deferred code.
+ cgen.ProcessDeferred();
+
+ // Allocate and install the code.
+ CodeDesc desc;
+ cgen.masm()->GetCode(&desc);
+ ScopeInfo<> sinfo(flit->scope());
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
+ Handle<Code> code = Factory::NewCode(desc, &sinfo, flags);
+
+ // Add unresolved entries in the code to the fixup list.
+ Bootstrapper::AddFixup(*code, cgen.masm());
+
+#ifdef DEBUG
+ if (print_code) {
+ // Print the source code if available.
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ StringInputBuffer stream(String::cast(script->source()));
+ stream.Seek(flit->start_position());
+ // flit->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len = flit->end_position() - flit->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.has_more()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
+ }
+ PrintF("--- Code ---\n");
+ code->Print();
+ }
+#endif // DEBUG
+
+ return code;
+}
+
+
+// Constructs the generator with a fresh macro assembler over an internal
+// buffer of the given size. All per-function state starts out cleared;
+// GenCode fills in scope_, reg_locals_ and num_reg_locals_.
+// NOTE: reg_locals_ and num_reg_locals_ were previously left
+// uninitialized here; initialize them (in member declaration order) so
+// the object is never observed in an undefined state.
+ArmCodeGenerator::ArmCodeGenerator(int buffer_size,
+ Handle<Script> script,
+ bool is_eval)
+ : CodeGenerator(is_eval, script),
+ masm_(new MacroAssembler(NULL, buffer_size)),
+ scope_(NULL),
+ cc_reg_(al),
+ state_(NULL),
+ reg_locals_(0),
+ num_reg_locals_(0),
+ break_stack_height_(0) {
+}
+
+
+// Calling conventions:
+
+// r0: always contains top-of-stack (TOS), but in case of a call it's
+// the number of arguments
+// fp: frame pointer
+// sp: stack pointer
+// pp: caller's parameter pointer
+// cp: callee's context
+
+// Emits the complete body of a function: frame entry, local/context
+// setup, declarations, the statement body, and the shared return
+// sequence. The emission order below is significant throughout.
+void ArmCodeGenerator::GenCode(FunctionLiteral* fun) {
+ Scope* scope = fun->scope();
+ ZoneList<Statement*>* body = fun->body();
+
+ // Initialize state.
+ { CodeGenState state;
+ state_ = &state;
+ scope_ = scope;
+ cc_reg_ = al;
+ // Allocate up to kNumJSCalleeSaved locals in callee-saved registers;
+ // the remainder live in stack slots.
+ if (FLAG_optimize_locals) {
+ num_reg_locals_ = scope->num_stack_slots() < kNumJSCalleeSaved
+ ? scope->num_stack_slots()
+ : kNumJSCalleeSaved;
+ reg_locals_ = JSCalleeSavedList(num_reg_locals_);
+ } else {
+ num_reg_locals_ = 0;
+ reg_locals_ = 0;
+ }
+
+ // Entry
+ // stack: function, receiver, arguments, return address
+ // r0: number of arguments
+ // sp: stack pointer
+ // fp: frame pointer
+ // pp: caller's parameter pointer
+ // cp: callee's context
+
+ { Comment cmnt(masm_, "[ enter JS frame");
+ EnterJSFrame(scope->num_parameters(), reg_locals_);
+ }
+ // tos: code slot
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ bkpt(0); // not supported before v5, but illegal instruction works too
+ }
+#endif
+
+ // Allocate space for locals and initialize them.
+ if (scope->num_stack_slots() > num_reg_locals_) {
+ Comment cmnt(masm_, "[ allocate space for locals");
+ // Pushing the first local materializes the code slot on the stack
+ // (formerly stored in tos register r0).
+ __ Push(Operand(Factory::undefined_value()));
+ // The remaining locals are pushed using the fact that r0 (tos)
+ // already contains the undefined value.
+ for (int i = scope->num_stack_slots(); i-- > num_reg_locals_ + 1;) {
+ __ push(r0);
+ }
+ }
+ // Initialize locals allocated in registers
+ if (num_reg_locals_ > 0) {
+ if (scope->num_stack_slots() > num_reg_locals_) {
+ // r0 contains 'undefined'
+ __ mov(SlotRegister(0), Operand(r0));
+ } else {
+ __ mov(SlotRegister(0), Operand(Factory::undefined_value()));
+ }
+ for (int i = num_reg_locals_ - 1; i > 0; i--) {
+ __ mov(SlotRegister(i), Operand(SlotRegister(0)));
+ }
+ }
+
+ if (scope->num_heap_slots() > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ Push(FunctionOperand());
+ __ CallRuntime(Runtime::kNewContext, 2);
+ // Update context local.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+
+ // TODO(1241774): Improve this code!!!
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope->num_parameters(); i++) {
+ Variable* par = scope->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ ASSERT(!scope->is_global_scope()); // no parameters in global scope
+ int parameter_offset =
+ JavaScriptFrameConstants::kParam0Offset - i * kPointerSize;
+ __ ldr(r1, MemOperand(pp, parameter_offset));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(slot_offset));
+ __ RecordWrite(r2, r3, r1);
+ }
+ }
+ }
+
+ // Store the arguments object.
+ // This must happen after context initialization because
+ // the arguments array may be stored in the context!
+ if (scope->arguments() != NULL) {
+ ASSERT(scope->arguments_shadow() != NULL);
+ Comment cmnt(masm_, "[ allocate arguments object");
+ {
+ Reference target(this, scope->arguments());
+ __ Push(FunctionOperand());
+ __ CallRuntime(Runtime::kNewArguments, 1);
+ SetValue(&target);
+ }
+ // The value of arguments must also be stored in .arguments.
+ // TODO(1241813): This code can probably be improved by fusing it with
+ // the code that stores the arguments object above.
+ {
+ Reference target(this, scope->arguments_shadow());
+ Load(scope->arguments());
+ SetValue(&target);
+ }
+ }
+
+ // Generate code to 'execute' declarations and initialize
+ // functions (source elements). In case of an illegal
+ // redeclaration we need to handle that instead of processing the
+ // declarations.
+ if (scope->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ illegal redeclarations");
+ scope->VisitIllegalRedeclaration(this);
+ } else {
+ Comment cmnt(masm_, "[ declarations");
+ ProcessDeclarations(scope->declarations());
+ }
+
+ if (FLAG_trace) __ CallRuntime(Runtime::kTraceEnter, 1);
+ CheckStack();
+
+ // Compile the body of the function in a vanilla state. Don't
+ // bother compiling all the code if the scope has an illegal
+ // redeclaration.
+ if (!scope->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+ bool is_builtin = Bootstrapper::IsActive();
+ bool should_trace =
+ is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+ if (should_trace) __ CallRuntime(Runtime::kDebugTrace, 1);
+#endif
+ VisitStatements(body);
+ }
+
+ state_ = NULL;
+ }
+
+ // exit
+ // r0: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // pp: parameter pointer
+ // cp: callee's context
+ // Falling off the end of the body returns 'undefined'; explicit
+ // returns jump to function_return_ with their value already in r0.
+ __ Push(Operand(Factory::undefined_value()));
+ __ bind(&function_return_);
+ if (FLAG_trace) __ CallRuntime(Runtime::kTraceExit, 1);
+ ExitJSFrame(reg_locals_);
+
+ // Code generation state must be reset.
+ scope_ = NULL;
+ ASSERT(!has_cc());
+ ASSERT(state_ == NULL);
+}
+
+
+// Returns the callee-saved register assigned to the given register-
+// allocated local slot.
+Register ArmCodeGenerator::SlotRegister(int slot_index) {
+  Register result;
+  result.code_ = JSCalleeSavedCode(slot_index);
+  return result;
+}
+
+
+// Returns a memory operand addressing the given slot. For CONTEXT slots
+// this emits code that walks the context chain, clobbering 'tmp'.
+MemOperand ArmCodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return ParameterOperand(index);
+
+ case Slot::LOCAL: {
+ // Register-allocated locals must not reach this path.
+ ASSERT(0 <= index &&
+ index < scope_->num_stack_slots() &&
+ index >= num_reg_locals_);
+ int local_offset = JavaScriptFrameConstants::kLocal0Offset -
+ (index - num_reg_locals_) * kPointerSize;
+ return MemOperand(fp, local_offset);
+ }
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(cp)); // do not overwrite context register
+ Register context = cp;
+ int chain_length = scope_->ContextChainLength(slot->var()->scope());
+ for (int i = chain_length; i-- > 0;) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+ // (In fact this mov may never be needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return MemOperand(r0, 0);
+ }
+}
+
+
+// Loads a value on TOS. If it is a boolean value, the result may have been
+// (partially) translated into branches, or it may have set the condition code
+// register. If force_cc is set, the value is forced to set the condition code
+// register and no value is pushed. If the condition code register was set,
+// has_cc() is true and cc_reg_ contains the condition to test for 'true'.
+void ArmCodeGenerator::LoadCondition(Expression* x,
+ CodeGenState::AccessType access,
+ Label* true_target,
+ Label* false_target,
+ bool force_cc) {
+ ASSERT(access == CodeGenState::LOAD ||
+ access == CodeGenState::LOAD_TYPEOF_EXPR);
+ ASSERT(!has_cc() && !is_referenced());
+
+ // Visit the expression under a state carrying the branch targets;
+ // the previous state is restored before any fix-up code is emitted.
+ CodeGenState* old_state = state_;
+ CodeGenState new_state(access, NULL, true_target, false_target);
+ state_ = &new_state;
+ Visit(x);
+ state_ = old_state;
+ if (force_cc && !has_cc()) {
+ // Pop the TOS from the stack and convert it to a boolean in the
+ // condition code register.
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ ToBoolean(r1, true_target, false_target);
+ }
+ ASSERT(has_cc() || !force_cc);
+}
+
+
+// Loads the value of an expression onto TOS, materializing any boolean
+// result that LoadCondition left in the condition codes or as branches.
+void ArmCodeGenerator::Load(Expression* x, CodeGenState::AccessType access) {
+ ASSERT(access == CodeGenState::LOAD ||
+ access == CodeGenState::LOAD_TYPEOF_EXPR);
+
+ Label true_target;
+ Label false_target;
+ LoadCondition(x, access, &true_target, &false_target, false);
+
+ if (has_cc()) {
+ // convert cc_reg_ into a bool
+ Label loaded, materialize_true;
+ __ b(cc_reg_, &materialize_true);
+ __ Push(Operand(Factory::false_value()));
+ __ b(&loaded);
+ __ bind(&materialize_true);
+ __ Push(Operand(Factory::true_value()));
+ __ bind(&loaded);
+ cc_reg_ = al;
+ }
+
+ if (true_target.is_linked() || false_target.is_linked()) {
+ // we have at least one condition value
+ // that has been "translated" into a branch,
+ // thus it needs to be loaded explicitly again
+ Label loaded;
+ __ b(&loaded); // don't lose current TOS
+ bool both = true_target.is_linked() && false_target.is_linked();
+ // reincarnate "true", if necessary
+ if (true_target.is_linked()) {
+ __ bind(&true_target);
+ __ Push(Operand(Factory::true_value()));
+ }
+ // if both "true" and "false" need to be reincarnated,
+ // jump across code for "false"
+ if (both)
+ __ b(&loaded);
+ // reincarnate "false", if necessary
+ if (false_target.is_linked()) {
+ __ bind(&false_target);
+ __ Push(Operand(Factory::false_value()));
+ }
+ // everything is loaded at this point
+ __ bind(&loaded);
+ }
+ ASSERT(!has_cc());
+}
+
+
+// Push the global object onto the stack.
+void ArmCodeGenerator::LoadGlobal() {
+ __ Push(GlobalObject());
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the LOAD_TYPEOF_EXPR access type. => Need to handle
+// global variables w/o reference errors elsewhere.
+// Load the operand of a typeof expression. Global variables are loaded
+// as a property of the global object so that an unresolvable name yields
+// 'undefined' rather than a reference error.
+void ArmCodeGenerator::LoadTypeofExpression(Expression* x) {
+ Variable* variable = x->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // NOTE: This is somewhat nasty. We force the compiler to load
+ // the variable as if through '<global>.<variable>' to make sure we
+ // do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ // TODO(1241834): Fetch the position from the variable instead of using
+ // no position.
+ Property property(&global, &key, kNoPosition);
+ Load(&property);
+ } else {
+ Load(x, CodeGenState::LOAD_TYPEOF_EXPR);
+ }
+}
+
+
+// Constructing a Reference immediately emits the code that loads the
+// reference's components (receiver and, for keyed access, the key).
+Reference::Reference(ArmCodeGenerator* cgen, Expression* expression)
+ : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+ cgen->LoadReference(this);
+}
+
+
+// The destructor pops whatever the reference left on the stack.
+Reference::~Reference() {
+ cgen_->UnloadReference(this);
+}
+
+
+// Emit code to load the components of the given reference and classify it
+// as NAMED, KEYED, or EMPTY (local variable; nothing on the stack).
+void ArmCodeGenerator::LoadReference(Reference* ref) {
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
+
+ if (property != NULL) {
+ Load(property->obj());
+ // Use a named reference if the key is a literal symbol.
+ // We don't use a named reference if the key is a string that can be
+ // legally parsed as an integer. This is because, otherwise, we don't
+ // get into the slow case code that handles [] on String objects.
+ Literal* literal = property->key()->AsLiteral();
+ uint32_t dummy;
+ if (literal != NULL && literal->handle()->IsSymbol() &&
+ !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ ref->set_type(Reference::NAMED);
+ } else {
+ Load(property->key());
+ ref->set_type(Reference::KEYED);
+ }
+ } else if (var != NULL) {
+ if (var->is_global()) {
+ // global variable
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ // local variable
+ ref->set_type(Reference::EMPTY);
+ }
+ } else {
+ // Anything else is an illegal reference target: throw at runtime.
+ Load(e);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+}
+
+
+// Pop the components left on the stack by LoadReference, if any.
+void ArmCodeGenerator::UnloadReference(Reference* ref) {
+ int size = ref->size();
+ if (size <= 0) {
+ // Do nothing. No popping is necessary.
+ } else {
+ __ add(sp, sp, Operand(size * kPointerSize));
+ }
+}
+
+
+// Re-visit the reference's expression with the given access type (load or
+// store) installed in a temporary code generation state.
+void ArmCodeGenerator::AccessReference(Reference* ref,
+ CodeGenState::AccessType access) {
+ ASSERT(!has_cc());
+ ASSERT(ref->type() != Reference::ILLEGAL);
+ CodeGenState* old_state = state_;
+ CodeGenState new_state(access, ref, true_target(), false_target());
+ state_ = &new_state;
+ Visit(ref->expression());
+ state_ = old_state;
+}
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
+// register to a boolean in the condition code register. The code
+// may jump to 'false_target' in case the register converts to 'false'.
+void ArmCodeGenerator::ToBoolean(Register reg,
+ Label* true_target,
+ Label* false_target) {
+ // Note: The generated code snippet cannot change 'reg'.
+ // Only the condition code should be set.
+
+ // Fast case checks
+
+ // Check if reg is 'false'.
+ __ cmp(reg, Operand(Factory::false_value()));
+ __ b(eq, false_target);
+
+ // Check if reg is 'true'.
+ __ cmp(reg, Operand(Factory::true_value()));
+ __ b(eq, true_target);
+
+ // Check if reg is 'undefined'.
+ __ cmp(reg, Operand(Factory::undefined_value()));
+ __ b(eq, false_target);
+
+ // Check if reg is a smi: smi zero is false, any other smi is true.
+ __ cmp(reg, Operand(Smi::FromInt(0)));
+ __ b(eq, false_target);
+ __ tst(reg, Operand(kSmiTagMask));
+ __ b(eq, true_target);
+
+ // Slow case: call the runtime. r0 is preserved across the call; the
+ // operand is moved into r0 first unless it is already there.
+ __ push(r0);
+ if (!r0.is(reg)) {
+ __ mov(r0, Operand(reg));
+ }
+ __ CallRuntime(Runtime::kToBool, 1);
+ // Convert result (r0) to condition code: 'ne' means the value is true.
+ __ cmp(r0, Operand(Factory::false_value()));
+ __ pop(r0);
+
+ cc_reg_ = ne;
+}
+
+
+#undef __
+#define __ masm->
+
+
+// Stub for generic keyed property load: the key is in r0 (TOS) and the
+// receiver object is the next value on the stack.
+class GetPropertyStub : public CodeStub {
+ public:
+ GetPropertyStub() { }
+
+ private:
+ Major MajorKey() { return GetProperty; }
+ int MinorKey() { return 0; }
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "GetPropertyStub"; }
+};
+
+
+// Fast path for obj[key] where key is a smi index and obj is a JS object
+// with fast (non-dictionary) elements; otherwise tail-call the
+// Runtime::kGetProperty routine.
+void GetPropertyStub::Generate(MacroAssembler* masm) {
+ Label slow, fast;
+ // Get the object from the stack.
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); // 1 ~ key
+ // Check that the key is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ // Check that the object isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow);
+ // Check that the object is some kind of JS object.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_OBJECT_TYPE));
+ __ b(lt, &slow);
+
+ // Check if the object is a value-wrapper object. In that case we
+ // enter the runtime system to make sure that indexing into string
+ // objects work as intended.
+ __ cmp(r2, Operand(JS_VALUE_TYPE));
+ __ b(eq, &slow);
+
+ // Get the elements array of the object.
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Factory::hash_table_map()));
+ __ b(eq, &slow);
+ // Check that the key (index) is within bounds.
+ __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
+ __ cmp(r0, Operand(r3));
+ __ b(lo, &fast);
+
+ // Slow case: Push extra copies of the arguments (2).
+ __ bind(&slow);
+ __ ldm(ia, sp, r0.bit() | r1.bit());
+ __ stm(db_w, sp, r0.bit() | r1.bit());
+ // Do tail-call to runtime routine.
+ __ mov(r0, Operand(1)); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(Runtime::kGetProperty));
+
+ // Fast case: Do the load.
+ __ bind(&fast);
+ __ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
+ __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+ __ cmp(r0, Operand(Factory::the_hole_value()));
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ b(eq, &slow);
+
+ masm->StubReturn(1);
+}
+
+
+// Stub for generic keyed property store: the value is in r0 (TOS) with
+// the key and the receiver object below it on the stack.
+class SetPropertyStub : public CodeStub {
+ public:
+ SetPropertyStub() { }
+
+ private:
+ Major MajorKey() { return SetProperty; }
+ int MinorKey() { return 0; }
+ void Generate(MacroAssembler* masm);
+
+ // The name must identify this stub; it previously read
+ // "GetPropertyStub" due to a copy-paste error.
+ const char* GetName() { return "SetPropertyStub"; }
+};
+
+
+// Fast path for obj[key] = value on JS objects and arrays with fast
+// elements, including growing a JS array by one element when storing at
+// array[array.length]; otherwise tail-call Runtime::kSetProperty.
+void SetPropertyStub::Generate(MacroAssembler* masm) {
+ Label slow, fast, array, extra, exit;
+ // Get the key and the object from the stack.
+ __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
+ // Check that the key is a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that the object isn't a smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &slow);
+ // Get the type of the object from its map.
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ // Check if the object is a JS array or not.
+ __ cmp(r2, Operand(JS_ARRAY_TYPE));
+ __ b(eq, &array);
+ // Check that the object is some kind of JS object.
+ __ cmp(r2, Operand(JS_OBJECT_TYPE));
+ __ b(lt, &slow);
+
+
+ // Object case: Check key against length in the elements array.
+ __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r2, Operand(Factory::hash_table_map()));
+ __ b(eq, &slow);
+ // Untag the key (for checking against untagged length in the fixed array).
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+ // Compute address to store into and check array bounds.
+ __ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
+ __ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(lo, &fast);
+
+
+ // Slow case: Push extra copies of the arguments (3).
+ // r0 == value
+ __ bind(&slow);
+ __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+ // Do tail-call to runtime routine.
+ __ mov(r0, Operand(2)); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(Runtime::kSetProperty));
+
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ // r0 == value, r1 == key, r2 == elements, r3 == object
+ __ bind(&extra);
+ __ b(ne, &slow); // do not leave holes in the array
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag
+ __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(hs, &slow);
+ __ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag
+ __ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment
+ __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ mov(r3, Operand(r2));
+ // NOTE: Computing the address to store into must take the fact
+ // that the key has been incremented into account.
+ int displacement = Array::kHeaderSize - kHeapObjectTag -
+ ((1 << kSmiTagSize) * 2);
+ __ add(r2, r2, Operand(displacement));
+ __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ b(&fast);
+
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode; if it is the
+ // length is always a smi.
+ // r0 == value, r3 == object
+ __ bind(&array);
+ __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
+ __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Factory::hash_table_map()));
+ __ b(eq, &slow);
+
+ // Check the key against the length in the array, compute the
+ // address to store into and fall through to fast case.
+ __ ldr(r1, MemOperand(sp));
+ // r0 == value, r1 == key, r2 == elements, r3 == object.
+ __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(hs, &extra);
+ __ mov(r3, Operand(r2));
+ __ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+
+ // Fast case: Do the store.
+ // r0 == value, r2 == address to store into, r3 == elements
+ __ bind(&fast);
+ __ str(r0, MemOperand(r2));
+ // Skip write barrier if the written value is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+ // Update write barrier for the elements array address.
+ __ sub(r1, r2, Operand(r3));
+ __ RecordWrite(r3, r1, r2);
+ __ bind(&exit);
+ masm->StubReturn(1);
+}
+
+
+// Generic binary operation stub for ADD, SUB, and MUL: y is in r0 (TOS),
+// x is the next value on the stack. Smi operands are handled inline with
+// an optimistic computation; anything else falls back to the matching JS
+// builtin.
+void GenericOpStub::Generate(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD: {
+ Label slow, exit;
+ // fast path
+ // Get x (y is on TOS, i.e., r0).
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ orr(r2, r1, Operand(r0)); // r2 = x | y;
+ __ add(r0, r1, Operand(r0), SetCC); // add y optimistically
+ // go slow-path in case of overflow
+ __ b(vs, &slow);
+ // go slow-path in case of non-smi operands
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+ // slow path
+ __ bind(&slow);
+ __ sub(r0, r0, Operand(r1)); // revert optimistic add
+ __ push(r0);
+ __ mov(r0, Operand(1)); // set number of arguments
+ __ InvokeBuiltin("ADD", 1, JUMP_JS);
+ // done
+ __ bind(&exit);
+ break;
+ }
+
+ case Token::SUB: {
+ Label slow, exit;
+ // fast path
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // get x
+ __ orr(r2, r1, Operand(r0)); // r2 = x | y;
+ __ sub(r3, r1, Operand(r0), SetCC); // subtract y optimistically
+ // go slow-path in case of overflow
+ __ b(vs, &slow);
+ // go slow-path in case of non-smi operands
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ mov(r0, Operand(r3), LeaveCC, eq); // conditionally set r0 to result
+ __ b(eq, &exit);
+ // slow path
+ __ bind(&slow);
+ __ push(r0);
+ __ mov(r0, Operand(1)); // set number of arguments
+ __ InvokeBuiltin("SUB", 1, JUMP_JS);
+ // done
+ __ bind(&exit);
+ break;
+ }
+
+ case Token::MUL: {
+ Label slow, exit;
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // get x
+ // tag check
+ __ orr(r2, r1, Operand(r0)); // r2 = x | y;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ // remove tag from one operand (but keep sign), so that result is smi
+ __ mov(ip, Operand(r0, ASR, kSmiTagSize));
+ // do multiplication
+ __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1
+ // go slow on overflows (overflow bit is not set)
+ __ mov(ip, Operand(r3, ASR, 31));
+ __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
+ __ b(ne, &slow);
+ // go slow on zero result to handle -0
+ __ tst(r3, Operand(r3));
+ __ mov(r0, Operand(r3), LeaveCC, ne);
+ __ b(ne, &exit);
+ // slow case
+ __ bind(&slow);
+ __ push(r0);
+ __ mov(r0, Operand(1)); // set number of arguments
+ __ InvokeBuiltin("MUL", 1, JUMP_JS);
+ // done
+ __ bind(&exit);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ masm->StubReturn(2);
+}
+
+
+// Stub for the slow path of an inlined smi ADD/SUB with one constant
+// operand; 'reversed' records which operand order was used so the
+// builtin sees the operands in evaluation order.
+class SmiOpStub : public CodeStub {
+ public:
+ SmiOpStub(Token::Value op, bool reversed)
+ : op_(op), reversed_(reversed) {}
+
+ private:
+ Token::Value op_;
+ bool reversed_;
+
+ Major MajorKey() { return SmiOp; }
+ int MinorKey() {
+ // Encode both the operation and the operand order in the cache key.
+ return (op_ == Token::ADD ? 2 : 0) | (reversed_ ? 1 : 0);
+ }
+ void Generate(MacroAssembler* masm);
+ void GenerateShared(MacroAssembler* masm);
+
+ const char* GetName() { return "SmiOpStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("SmiOpStub (token %s), (reversed %s)\n",
+ Token::String(op_), reversed_ ? "true" : "false");
+ }
+#endif
+};
+
+
+// Slow path for inlined smi ADD/SUB: undo any optimistic computation,
+// push the operands in evaluation order, and call the JS builtin.
+void SmiOpStub::Generate(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD: {
+ // The inlined fast path added optimistically; revert before
+ // calling out. (Common to both operand orders.)
+ __ sub(r0, r0, Operand(r1)); // revert optimistic add
+ if (!reversed_) {
+ __ push(r0);
+ __ push(r1);
+ } else {
+ __ push(r1); // reversed
+ __ push(r0);
+ }
+ __ mov(r0, Operand(1)); // set number of arguments
+ __ InvokeBuiltin("ADD", 1, JUMP_JS);
+ break;
+ }
+ case Token::SUB: {
+ // Nothing to revert for SUB; just push the operands.
+ if (!reversed_) {
+ __ push(r0);
+ __ push(r1);
+ } else {
+ __ push(r1);
+ __ push(r0);
+ }
+ __ mov(r0, Operand(1)); // set number of arguments
+ __ InvokeBuiltin("SUB", 1, JUMP_JS);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+}
+
+// Compare sp against the stack guard limit and call Runtime::kStackGuard
+// if the limit has been reached (used for interrupt/preemption checks).
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ Label within_limit;
+ __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit()));
+ __ ldr(ip, MemOperand(ip));
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &within_limit);
+ // Do tail-call to runtime routine.
+ __ push(r0);
+ __ mov(r0, Operand(0)); // not counting receiver (i.e. flushed TOS)
+ __ JumpToBuiltin(ExternalReference(Runtime::kStackGuard));
+ __ bind(&within_limit);
+
+ masm->StubReturn(1);
+}
+
+
+// Unary minus on the value in r0: negate a smi inline, falling back to
+// the UNARY_MINUS JS builtin for non-smis, for zero (to produce -0
+// correctly), and when the negation overflows the smi range.
+void UnarySubStub::Generate(MacroAssembler* masm) {
+ Label slow;
+ Label done;
+
+ // Enter runtime system if the value is not a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+
+ // Enter runtime system if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &slow);
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r1, r0, Operand(0), SetCC);
+ __ b(vs, &slow); // overflow: fall back to the runtime
+
+ // If result is a smi we are done.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ mov(r0, Operand(r1), LeaveCC, eq); // conditionally set r0 to result
+ __ b(eq, &done);
+
+ // Enter runtime system.
+ __ bind(&slow);
+ __ push(r0);
+ __ mov(r0, Operand(0)); // set number of arguments
+ __ InvokeBuiltin("UNARY_MINUS", 0, JUMP_JS);
+
+ __ bind(&done);
+ masm->StubReturn(1);
+}
+
+
+// Stub that invokes one of the INC/DEC/TO_NUMBER JS builtins on the
+// value in r0 and pops argc arguments on return.
+class InvokeBuiltinStub : public CodeStub {
+ public:
+ enum Kind { Inc, Dec, ToNumber };
+ InvokeBuiltinStub(Kind kind, int argc) : kind_(kind), argc_(argc) { }
+
+ private:
+ Kind kind_;
+ int argc_;
+
+ Major MajorKey() { return InvokeBuiltin; }
+ int MinorKey() { return (argc_ << 3) | static_cast<int>(kind_); }
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "InvokeBuiltinStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ // Format fixed: was "argc, %d", which printed a stray comma.
+ PrintF("InvokeBuiltinStub (kind %d, argc %d)\n",
+ static_cast<int>(kind_),
+ argc_);
+ }
+#endif
+};
+
+
+// Push the TOS value and tail-call the selected builtin; StubReturn pops
+// argc_ arguments afterwards.
+void InvokeBuiltinStub::Generate(MacroAssembler* masm) {
+ __ push(r0);
+ __ mov(r0, Operand(0)); // set number of arguments
+ switch (kind_) {
+ case ToNumber: __ InvokeBuiltin("TO_NUMBER", 0, JUMP_JS); break;
+ case Inc: __ InvokeBuiltin("INC", 0, JUMP_JS); break;
+ case Dec: __ InvokeBuiltin("DEC", 0, JUMP_JS); break;
+ default: UNREACHABLE();
+ }
+ masm->StubReturn(argc_);
+}
+
+
+// Stub that exits a JS frame, restoring the given callee-saved registers.
+class JSExitStub : public CodeStub {
+ public:
+ // NOTE(review): this Kind enum appears to be copied from
+ // InvokeBuiltinStub and is unused in this class — confirm and remove.
+ enum Kind { Inc, Dec, ToNumber };
+
+ JSExitStub(int num_callee_saved, RegList callee_saved, ExitJSFlag flag)
+ : num_callee_saved_(num_callee_saved),
+ callee_saved_(callee_saved),
+ flag_(flag) { }
+
+ private:
+ int num_callee_saved_;
+ RegList callee_saved_;
+ ExitJSFlag flag_;
+
+ Major MajorKey() { return JSExit; }
+ // NOTE(review): the key encodes only the register count, not which
+ // registers are in callee_saved_ — verify two stubs with the same count
+ // but different register sets cannot be cached under the same key.
+ int MinorKey() { return (num_callee_saved_ << 3) | static_cast<int>(flag_); }
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "JSExitStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("JSExitStub (num_callee_saved %d, flag %d)\n",
+ num_callee_saved_,
+ static_cast<int>(flag_));
+ }
+#endif
+};
+
+
+// Emit the frame-exit sequence and return.
+void JSExitStub::Generate(MacroAssembler* masm) {
+ __ ExitJSFrame(flag_, callee_saved_);
+ masm->StubReturn(1);
+}
+
+
+
+// Unwind to the topmost stack handler and resume execution there with the
+// exception in r0.
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // r0 holds exception
+ ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
+ if (FLAG_optimize_locals) {
+ // Locals are allocated in callee-saved registers, so we need to restore
+ // saved callee-saved registers by unwinding the stack
+ static JSCalleeSavedBuffer regs;
+ intptr_t arg0 = reinterpret_cast<intptr_t>(&regs);
+ __ push(r0);
+ __ mov(r0, Operand(arg0)); // exception in r0 (TOS) is pushed, r0 == arg0
+ // Do not push a second C entry frame, but call directly
+ __ Call(FUNCTION_ADDR(StackFrameIterator::RestoreCalleeSavedForTopHandler),
+ runtime_entry); // passing r0
+ // Frame::RestoreJSCalleeSaved returns arg0 (TOS)
+ __ mov(r1, Operand(r0));
+ __ pop(r0); // r1 holds arg0, r0 holds exception
+ __ ldm(ia, r1, kJSCalleeSaved); // restore callee-saved registers
+ }
+ // Load the top handler address and unlink it from the handler chain.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+ __ pop(r2); // pop next in chain
+ __ str(r2, MemOperand(r3));
+ // restore parameter- and frame-pointer and pop state.
+ __ ldm(ia_w, sp, r3.bit() | pp.bit() | fp.bit());
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ cmp(fp, Operand(0));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+ if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
+ __ pop(pc);
+}
+
+
+// Unwind all handlers up to (and including) the innermost ENTRY handler,
+// record the out-of-memory failure as the pending exception, and return
+// to the JS entry frame's handler.
+void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
+ // Fetch top stack handler.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(r3, MemOperand(r3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
+ StackHandlerConstants::kStateOffset;
+ __ ldr(r2, MemOperand(r3, kStateOffset));
+ __ cmp(r2, Operand(StackHandler::ENTRY));
+ __ b(eq, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kAddressDisplacement +
+ StackHandlerConstants::kNextOffset;
+ __ ldr(r3, MemOperand(r3, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ __ ldr(r0, MemOperand(r3, kNextOffset));
+ __ mov(r2, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r0, MemOperand(r2));
+
+ // Set external caught exception to false.
+ __ mov(r0, Operand(false));
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(r2, Operand(external_caught));
+ __ str(r0, MemOperand(r2));
+
+ // Set pending exception and TOS to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r0, MemOperand(r2));
+
+ // Restore the stack to the address of the ENTRY handler
+ __ mov(sp, Operand(r3));
+
+ // restore parameter- and frame-pointer and pop state.
+ __ ldm(ia_w, sp, r3.bit() | pp.bit() | fp.bit());
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ cmp(fp, Operand(0));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+ if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
+ __ pop(pc);
+}
+
+
+// Emit the core of the C entry: optionally run a GC, call the C builtin
+// in r5, then either return the result, fall through to 'retry' (for
+// RETRY_AFTER_GC failures), or jump to one of the throw labels.
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool do_restore) {
+ // r0: result parameter for PerformGC, if any
+ // r4: number of arguments (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+
+ if (do_gc) {
+ __ Call(FUNCTION_ADDR(Runtime::PerformGC), runtime_entry); // passing r0
+ }
+
+ // call C built-in
+ __ mov(r0, Operand(r4)); // a0 = argc
+ __ add(r1, fp, Operand(r4, LSL, kPointerSizeLog2));
+ __ add(r1, r1, Operand(ExitFrameConstants::kPPDisplacement)); // a1 = argv
+
+ // TODO(1242173): To let the GC traverse the return address of the exit
+ // frames, we need to know where the return address is. Right now,
+ // we push it on the stack to be able to find it again, but we never
+ // restore from it in case of changes, which makes it impossible to
+ // support moving the C entry code stub. This should be fixed, but currently
+ // this is OK because the CEntryStub gets generated so early in the V8 boot
+ // sequence that it is not moving ever.
+ __ add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
+ __ push(lr);
+#if !defined(__arm__)
+ // Notify the simulator of the transition to C code.
+ __ swi(assembler::arm::call_rt_r5);
+#else /* !defined(__arm__) */
+ __ mov(pc, Operand(r5));
+#endif /* !defined(__arm__) */
+ // result is in r0 or r0:r1 - do not destroy these registers!
+
+ // check for failure result
+ Label failure_returned;
+ ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ // Lower 2 bits of r2 are 0 iff r0 has failure tag.
+ __ add(r2, r0, Operand(1));
+ __ tst(r2, Operand(kFailureTagMask));
+ __ b(eq, &failure_returned);
+
+ // clear top frame
+ __ mov(r3, Operand(0));
+ __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ str(r3, MemOperand(ip));
+
+ // Restore the memory copy of the registers by digging them out from
+ // the stack.
+ if (do_restore) {
+ // Ok to clobber r2 and r3.
+ const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+ const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ __ add(r3, fp, Operand(kOffset));
+ __ CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
+ }
+
+ // Exit C frame and return
+ // r0:r1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // pp: caller's parameter pointer pp (restored as C callee-saved)
+
+ // Restore current context from top and clear it in debug mode.
+ __ mov(r3, Operand(Top::context_address()));
+ __ ldr(cp, MemOperand(r3));
+ __ mov(sp, Operand(fp)); // respect ABI stack constraint
+ __ ldm(ia, sp, kJSCalleeSaved | pp.bit() | fp.bit() | sp.bit() | pc.bit());
+
+ // check if we should retry or throw exception
+ Label retry;
+ __ bind(&failure_returned);
+ ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ b(eq, &retry);
+
+ Label continue_exception;
+ // If the returned failure is EXCEPTION then promote Top::pending_exception().
+ __ cmp(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(ne, &continue_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ __ mov(ip, Operand(Factory::the_hole_value().location()));
+ __ ldr(r3, MemOperand(ip));
+ __ mov(ip, Operand(Top::pending_exception_address()));
+ __ ldr(r0, MemOperand(ip));
+ __ str(r3, MemOperand(ip));
+
+ __ bind(&continue_exception);
+ // Special handling of out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ b(eq, throw_out_of_memory_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
+}
+
+
+// Build the C entry frame and emit two GenerateCore attempts: the first
+// without a GC (or with one under --gc-greedy in DEBUG builds) and, when
+// that falls through at its 'retry' label, a second attempt with a GC.
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // r0: number of arguments
+ // r1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's pp after C call)
+ // cp: current context (C callee-saved)
+ // pp: caller's parameter pointer pp (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter C frame
+ // Compute parameter pointer before making changes and save it as ip register
+ // so that it is restored as sp register on exit, thereby popping the args.
+ // ip = sp + kPointerSize*(args_len+1); // +1 for receiver
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ add(ip, ip, Operand(kPointerSize));
+
+ // all JS callee-saved are saved and traversed by GC; push in reverse order:
+ // JS callee-saved, caller_pp, caller_fp, sp_on_exit (ip==pp), caller_pc
+ __ stm(db_w, sp, kJSCalleeSaved | pp.bit() | fp.bit() | ip.bit() | lr.bit());
+ __ mov(fp, Operand(sp)); // setup new frame pointer
+
+ // Store the current context in top.
+ __ mov(ip, Operand(Top::context_address()));
+ __ str(cp, MemOperand(ip));
+
+ // remember top frame
+ __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ str(fp, MemOperand(ip));
+
+ // Push debug marker.
+ __ mov(ip, Operand(is_debug_break ? 1 : 0));
+ __ push(ip);
+
+ if (is_debug_break) {
+ // Save the state of all registers to the stack from the memory location.
+ // Use sp as base to push.
+ __ CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
+ }
+
+ // move number of arguments (argc) into callee-saved register
+ __ mov(r4, Operand(r0));
+
+ // move pointer to builtin function into callee-saved register
+ __ mov(r5, Operand(r1));
+
+ // r0: result parameter for PerformGC, if any (setup below)
+ // r4: number of arguments
+ // r5: pointer to builtin function (C callee-saved)
+
+ Label entry;
+ __ bind(&entry);
+
+ Label throw_out_of_memory_exception;
+ Label throw_normal_exception;
+
+#ifdef DEBUG
+ if (FLAG_gc_greedy) {
+ Failure* failure = Failure::RetryAfterGC(0, NEW_SPACE);
+ __ mov(r0, Operand(reinterpret_cast<intptr_t>(failure)));
+ }
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ FLAG_gc_greedy,
+ is_debug_break);
+#else
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ false,
+ is_debug_break);
+#endif
+ // Second attempt, reached when the first attempt returned
+ // RETRY_AFTER_GC: run a GC (do_gc == true) and call again.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ true,
+ is_debug_break);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowOutOfMemory(masm);
+ // control flow for the generated code above does not return.
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+// Entry point from C into JavaScript: sets up an entry frame, installs a
+// faked try-handler so exceptions unwind back here, and calls through the
+// JSEntryTrampoline (or JSConstructEntryTrampoline) builtin.
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // [sp+0]: argv
+
+ Label invoke, exit;
+
+ // Called from C, so do not pop argc and args on exit (preserve sp)
+ // No need to save register-passed args
+ // Save callee-saved registers (incl. cp, pp, and fp), sp, and lr
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, kCalleeSaved | ip.bit() | lr.bit());
+
+ // Setup frame pointer
+ __ mov(fp, Operand(sp));
+
+ // Add constructor mark.
+ __ mov(ip, Operand(is_construct ? 1 : 0));
+ __ push(ip);
+
+ // Move arguments into registers expected by Builtins::JSEntryTrampoline
+ // preserve r0-r3, set r4, r5-r7 may be clobbered
+
+ // Get address of argv, see stm above.
+ __ add(r4, sp, Operand((kNumCalleeSaved + 3)*kPointerSize));
+ __ ldr(r4, MemOperand(r4)); // argv
+
+ // Save copies of the top frame descriptors on the stack.
+ __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ ldr(r6, MemOperand(ip));
+ __ stm(db_w, sp, r6.bit());
+
+ // Call a faked try-block that does the invoke.
+ __ bl(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ __ mov(ip, Operand(Top::pending_exception_address()));
+ __ str(r0, MemOperand(ip));
+ __ mov(r0, Operand(Handle<Failure>(Failure::Exception())));
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r0-r3, r5-r7 are available.
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler returns
+ // control to the code after the bl(&invoke) above, which restores all
+ // kCalleeSaved registers (including cp, pp and fp) to their saved values
+ // before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r5, MemOperand(ip));
+ __ mov(ip, Operand(Top::pending_exception_address()));
+ __ str(r5, MemOperand(ip));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(ip, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(ip, Operand(entry));
+ }
+ __ ldr(ip, MemOperand(ip)); // deref address
+
+ // Branch and link to JSEntryTrampoline
+ __ mov(lr, Operand(pc));
+ __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Unlink this frame from the handler chain. When reading the
+ // address of the next handler, there is no need to use the address
+ // displacement since the current stack pointer (sp) points directly
+ // to the stack handler.
+ __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
+ __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r3, MemOperand(ip));
+ // No need to restore registers
+ __ add(sp, sp, Operand(StackHandlerConstants::kSize));
+
+ __ bind(&exit); // r0 holds result
+ // Restore the top frame descriptors from the stack.
+ __ ldm(ia_w, sp, r3.bit());
+ __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ str(r3, MemOperand(ip));
+
+ // Remove constructor mark.
+ __ add(sp, sp, Operand(kPointerSize));
+
+ // Restore callee-saved registers, sp, and return.
+#ifdef DEBUG
+ if (FLAG_debug_code) __ mov(lr, Operand(pc));
+#endif
+ __ ldm(ia, sp, kCalleeSaved | sp.bit() | pc.bit());
+}
+
+
+// Code stub for accessing the arguments object of the currently
+// executing JavaScript frame: either its length (is_length == true)
+// or, for the keyed variant, the argument element whose (smi) key is
+// in r0 (is_length == false).
+class ArgumentsAccessStub: public CodeStub {
+ public:
+ explicit ArgumentsAccessStub(bool is_length) : is_length_(is_length) { }
+
+ private:
+ // Selects between the length variant and the keyed-element variant.
+ bool is_length_;
+
+ Major MajorKey() { return ArgumentsAccess; }
+ // The minor key distinguishes the two variants so each gets its own
+ // cached code object.
+ int MinorKey() { return is_length_ ? 1 : 0; }
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "ArgumentsAccessStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("ArgumentsAccessStub (is_length %s)\n",
+ is_length_ ? "true" : "false");
+ }
+#endif
+};
+
+
+// Emit the stub body. For the length variant: load the argument count
+// from the frame and return it as a smi in r0. For the keyed variant:
+// r0 holds the key; do a smi check and an unsigned bounds check, then
+// load the argument directly from the caller's stack, falling back to
+// the runtime for non-smi or out-of-bounds keys.
+void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
+ if (is_length_) {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
+ // Tag the raw count as a smi before returning.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ Ret();
+ } else {
+ // Check that the key is a smi.
+ Label slow;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+
+ // Get the actual number of arguments passed and do bounds
+ // check. Use unsigned comparison to get negative check for free.
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
+ __ cmp(r0, Operand(r1, LSL, kSmiTagSize));
+ __ b(hs, &slow);
+
+ // Load the argument directly from the stack and return.
+ // The key in r0 is a smi, so the shift converts it to a byte offset
+ // relative to the parameter pointer (pp).
+ __ sub(r1, pp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r1, JavaScriptFrameConstants::kParam0Offset));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(r0);
+ __ mov(r0, Operand(0)); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(Runtime::kGetArgumentsProperty));
+ }
+}
+
+
+#undef __
+#define __ masm_->
+
+
+// Generate code for a load from or store to a named or keyed property
+// reference. For loads the result ends up in the TOS register (r0);
+// for stores the value to store is expected in r0.
+void ArmCodeGenerator::AccessReferenceProperty(
+ Expression* key,
+ CodeGenState::AccessType access) {
+ Reference::Type type = ref()->type();
+ ASSERT(type != Reference::ILLEGAL);
+
+ // TODO(1241834): Make sure that this is sufficient. If there is a chance
+ // that reference errors can be thrown below, we must distinguish
+ // between the 2 kinds of loads (typeof expression loads must not
+ // throw a reference errror).
+ bool is_load = (access == CodeGenState::LOAD ||
+ access == CodeGenState::LOAD_TYPEOF_EXPR);
+
+ if (type == Reference::NAMED) {
+ // Compute the name of the property.
+ // NOTE(review): assumes a named reference always has a literal
+ // string key — guaranteed by the parser, not visible here.
+ Literal* literal = key->AsLiteral();
+ Handle<String> name(String::cast(*literal->handle()));
+
+ // Loading adds a value to the stack; push the TOS to prepare.
+ if (is_load) __ push(r0);
+
+ // Setup the name register.
+ __ mov(r2, Operand(name));
+
+ // Call the appropriate IC code.
+ if (is_load) {
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Variable* var = ref()->expression()->AsVariableProxy()->AsVariable();
+ if (var != NULL) {
+ // Global variable loads are marked with a context-dependent
+ // call target so the IC can find the global context.
+ ASSERT(var->is_global());
+ __ Call(ic, code_target_context);
+ } else {
+ __ Call(ic, code_target);
+ }
+ } else {
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, code_target);
+ }
+ return;
+ }
+
+ // Access keyed property.
+ ASSERT(type == Reference::KEYED);
+
+ if (is_load) {
+ __ push(r0); // empty tos
+ // TODO(1224671): Implement inline caching for keyed loads as on ia32.
+ GetPropertyStub stub;
+ __ CallStub(&stub);
+ } else {
+ SetPropertyStub stub;
+ __ CallStub(&stub);
+ }
+}
+
+
+// Generate code for a generic (non-smi-specialized) binary operation.
+// The left operand is on the stack and the right operand is in the TOS
+// register (r0); the result is left in r0. Add/sub/mul go through a
+// stub, div/mod call JS builtins, and the bitwise and shift operators
+// have inline smi fast paths with builtin fallbacks.
+void ArmCodeGenerator::GenericOperation(Token::Value op) {
+ // Stub is entered with a call: 'return address' is in lr.
+ switch (op) {
+ case Token::ADD: // fall through.
+ case Token::SUB: // fall through.
+ case Token::MUL: {
+ GenericOpStub stub(op);
+ __ CallStub(&stub);
+ break;
+ }
+
+ case Token::DIV: {
+ __ push(r0);
+ __ mov(r0, Operand(1)); // set number of arguments
+ __ InvokeBuiltin("DIV", 1, CALL_JS);
+ break;
+ }
+
+ case Token::MOD: {
+ __ push(r0);
+ __ mov(r0, Operand(1)); // set number of arguments
+ __ InvokeBuiltin("MOD", 1, CALL_JS);
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR: {
+ Label slow, exit;
+ __ pop(r1); // get x
+ // tag check
+ // If either operand is a heap object, the OR has the tag bit set
+ // and we take the slow path.
+ __ orr(r2, r1, Operand(r0)); // r2 = x | y;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ // Smis can be combined directly; the tag bits combine to a valid
+ // smi tag under or/and/xor.
+ switch (op) {
+ case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
+ case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
+ case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
+ default: UNREACHABLE();
+ }
+ __ b(&exit);
+ __ bind(&slow);
+ __ push(r1); // restore stack
+ __ push(r0);
+ __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
+ switch (op) {
+ case Token::BIT_OR: __ InvokeBuiltin("BIT_OR", 1, CALL_JS); break;
+ case Token::BIT_AND: __ InvokeBuiltin("BIT_AND", 1, CALL_JS); break;
+ case Token::BIT_XOR: __ InvokeBuiltin("BIT_XOR", 1, CALL_JS); break;
+ default: UNREACHABLE();
+ }
+ __ bind(&exit);
+ break;
+ }
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ Label slow, exit;
+ __ mov(r1, Operand(r0)); // get y
+ __ pop(r0); // get x
+ // tag check
+ __ orr(r2, r1, Operand(r0)); // r2 = x | y;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ // get copies of operands
+ __ mov(r3, Operand(r0));
+ __ mov(r2, Operand(r1));
+ // remove tags from operands (but keep sign)
+ __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+ __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+ // use only the 5 least significant bits of the shift count
+ __ and_(r2, r2, Operand(0x1f));
+ // perform operation
+ switch (op) {
+ case Token::SAR:
+ __ mov(r3, Operand(r3, ASR, r2));
+ // no checks of result necessary
+ break;
+
+ case Token::SHR:
+ __ mov(r3, Operand(r3, LSR, r2));
+ // check that the *unsigned* result fits in a smi
+ // neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging
+ // - 0x40000000: this number would convert to negative when
+ // smi tagging these two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi
+ __ and_(r2, r3, Operand(0xc0000000), SetCC);
+ __ b(ne, &slow);
+ break;
+
+ case Token::SHL:
+ __ mov(r3, Operand(r3, LSL, r2));
+ // check that the *signed* result fits in a smi
+ __ add(r2, r3, Operand(0x40000000), SetCC);
+ __ b(mi, &slow);
+ break;
+
+ default: UNREACHABLE();
+ }
+ // tag result and store it in TOS (r0)
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ b(&exit);
+ // slow case
+ __ bind(&slow);
+ // NOTE(review): Push(Operand(...)) appears to push the current TOS
+ // and load the operand into r0 — confirm against the MacroAssembler.
+ __ push(r0); // restore stack
+ __ mov(r0, Operand(r1));
+ __ Push(Operand(1)); // 1 argument (not counting receiver).
+ switch (op) {
+ case Token::SAR: __ InvokeBuiltin("SAR", 1, CALL_JS); break;
+ case Token::SHR: __ InvokeBuiltin("SHR", 1, CALL_JS); break;
+ case Token::SHL: __ InvokeBuiltin("SHL", 1, CALL_JS); break;
+ default: UNREACHABLE();
+ }
+ __ bind(&exit);
+ break;
+ }
+
+ case Token::COMMA:
+ // simply discard left value
+ __ add(sp, sp, Operand(kPointerSize));
+ break;
+
+ default:
+ // Other cases should have been handled before this point.
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+
+
+// Generate code for a binary operation where one operand is a literal
+// smi ('value'). The other operand is in the TOS register (r0); the
+// result is left in r0. 'reversed' indicates the literal is the left
+// operand (value op r0) rather than the right (r0 op value).
+void ArmCodeGenerator::SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed) {
+ // NOTE: This is an attempt to inline (a bit) more of the code for
+ // some possible smi operations (like + and -) when (at least) one
+ // of the operands is a literal smi. With this optimization, the
+ // performance of the system is increased by ~15%, and the generated
+ // code size is increased by ~1% (measured on a combination of
+ // different benchmarks).
+
+ ASSERT(value->IsSmi());
+
+ Label exit;
+
+ switch (op) {
+ case Token::ADD: {
+ Label slow;
+
+ __ mov(r1, Operand(value));
+ // Overflow (vs) means the operands were smis but the sum is not
+ // representable; a set tag bit means r0 was a heap object.
+ __ add(r0, r0, Operand(r1), SetCC);
+ __ b(vs, &slow);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+ __ bind(&slow);
+
+ // Addition is commutative, so 'reversed' only matters for the
+ // stub's operand-restoration, not for the fast path above.
+ SmiOpStub stub(Token::ADD, reversed);
+ __ CallStub(&stub);
+ break;
+ }
+
+ case Token::SUB: {
+ Label slow;
+
+ __ mov(r1, Operand(value));
+ if (!reversed) {
+ __ sub(r2, r0, Operand(r1), SetCC);
+ } else {
+ // Reversed operands: compute value - r0.
+ __ rsb(r2, r0, Operand(r1), SetCC);
+ }
+ __ b(vs, &slow);
+ __ tst(r2, Operand(kSmiTagMask));
+ __ mov(r0, Operand(r2), LeaveCC, eq); // conditionally set r0 to result
+ __ b(eq, &exit);
+
+ __ bind(&slow);
+
+ SmiOpStub stub(Token::SUB, reversed);
+ __ CallStub(&stub);
+ break;
+ }
+
+ default:
+ // All other operations fall back to the generic two-operand code;
+ // arrange the operands on the stack / in r0 as it expects.
+ if (!reversed) {
+ __ Push(Operand(value));
+ } else {
+ __ mov(ip, Operand(value));
+ __ push(ip);
+ }
+ GenericOperation(op);
+ break;
+ }
+
+ __ bind(&exit);
+}
+
+
+// Generate code comparing the value on the stack (left) with the TOS
+// register r0 (right). Smi pairs are compared inline by pointer value;
+// everything else goes through a JS native. The result is communicated
+// through the condition codes: afterwards cc_reg_ holds the condition
+// under which the comparison is true.
+void ArmCodeGenerator::Comparison(Condition cc, bool strict) {
+ // Strict only makes sense for equality comparisons.
+ ASSERT(!strict || cc == eq);
+
+ Label exit, smi;
+ __ pop(r1);
+ // Fast path only if both operands are smis (OR of the tags is 0).
+ __ orr(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, &smi);
+
+ // Perform non-smi comparison by runtime call.
+ __ push(r1);
+
+ // Figure out which native to call and setup the arguments.
+ const char* native;
+ int argc;
+ if (cc == eq) {
+ native = strict ? "STRICT_EQUALS" : "EQUALS";
+ argc = 1;
+ } else {
+ native = "COMPARE";
+ // COMPARE takes an extra argument telling it what to answer when
+ // one operand is NaN, so that the caller's branch goes the right
+ // (i.e. false) way.
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ Push(Operand(Smi::FromInt(ncr)));
+ argc = 2;
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ Push(Operand(argc));
+ __ InvokeBuiltin(native, argc, CALL_JS);
+ // Compare against 0 so the caller's condition (lt/gt/eq/...) applies
+ // directly to the native's -1/0/1 result.
+ __ cmp(r0, Operand(0));
+ __ b(&exit);
+
+ // test smi equality by pointer comparison.
+ __ bind(&smi);
+ __ cmp(r1, Operand(r0));
+
+ __ bind(&exit);
+ __ pop(r0); // be careful not to destroy the cc register
+ cc_reg_ = cc;
+}
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void ArmCodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ int position) {
+ Label fast, slow, exit;
+
+ // Push the arguments ("left-to-right") on the stack.
+ for (int i = 0; i < args->length(); i++) Load(args->at(i));
+
+ // Push the number of arguments.
+ __ Push(Operand(args->length()));
+
+ // Get the function to call from the stack.
+ // +1 ~ receiver.
+ __ ldr(r1, MemOperand(sp, (args->length() + 1) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); // get the map
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(eq, &fast);
+
+ __ RecordPosition(position);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ __ InvokeBuiltin("CALL_NON_FUNCTION", 0, CALL_JS);
+ __ b(&exit);
+
+ // Fast-case: Get the code from the function, call the first
+ // instruction in it, and pop function.
+ __ bind(&fast);
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, MemOperand(r1, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r1);
+
+ // Restore context and pop function from the stack.
+ __ bind(&exit);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ add(sp, sp, Operand(kPointerSize)); // discard
+}
+
+
+// Emit a conditional branch to L based on the pending condition code
+// left by a previous comparison: branch when the condition matches
+// if_true. Consumes (resets) the cached condition.
+void ArmCodeGenerator::Branch(bool if_true, Label* L) {
+ ASSERT(has_cc());
+ Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+ __ b(cc, L);
+ // al (always) marks "no condition pending".
+ cc_reg_ = al;
+}
+
+
+// Emit a stack-overflow / interrupt check via the StackCheckStub,
+// guarded by the --check-stack flag.
+void ArmCodeGenerator::CheckStack() {
+ if (FLAG_check_stack) {
+ Comment cmnt(masm_, "[ check stack");
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+}
+
+
+// Generate code for a statement block: record the break-stack height
+// so 'break' can clean up correctly, emit the statements, and bind the
+// break target after them.
+void ArmCodeGenerator::VisitBlock(Block* node) {
+ Comment cmnt(masm_, "[ Block");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ node->set_break_stack_height(break_stack_height_);
+ VisitStatements(node->statements());
+ __ bind(node->break_target());
+}
+
+
+// Generate a runtime call declaring the given (name, function) pairs
+// as globals. 'pairs' and the current context are passed along with an
+// is-eval flag; the runtime return value is discarded.
+void ArmCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ __ Push(Operand(pairs));
+ __ Push(Operand(cp));
+ __ Push(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+
+ // Get rid of return value.
+ __ pop(r0);
+}
+
+
+// Generate code for a variable/function declaration. Lookup-slot
+// (dynamic) variables are declared via a runtime call; locally
+// allocated variables are initialized in place when they have an
+// initial value (a function or, for const, the hole).
+void ArmCodeGenerator::VisitDeclaration(Declaration* node) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = node->proxy()->var();
+ ASSERT(var != NULL); // must have been resolved
+ Slot* slot = var->slot();
+
+ // If it was not possible to allocate the variable at compile time,
+ // we need to "declare" it at runtime to make sure it actually
+ // exists in the local context.
+ if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Variables with a "LOOKUP" slot were introduced as non-locals
+ // during variable resolution and must have mode DYNAMIC.
+ ASSERT(var->mode() == Variable::DYNAMIC);
+ // For now, just do a runtime call.
+ __ Push(Operand(cp));
+ __ Push(Operand(var->name()));
+ // Declaration nodes are always declared in only two modes.
+ ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+ PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+ __ Push(Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (node->mode() == Variable::CONST) {
+ __ Push(Operand(Factory::the_hole_value()));
+ } else if (node->fun() != NULL) {
+ Load(node->fun());
+ } else {
+ __ Push(Operand(0)); // no initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 5);
+ // DeclareContextSlot pops the assigned value by accepting an
+ // extra argument and returning the TOS; no need to explicitly pop
+ // here.
+ return;
+ }
+
+ ASSERT(!var->is_global());
+
+ // If we have a function or a constant, we need to initialize the variable.
+ Expression* val = NULL;
+ if (node->mode() == Variable::CONST) {
+ // Const variables start out holding the hole value.
+ val = new Literal(Factory::the_hole_value());
+ } else {
+ val = node->fun(); // NULL if we don't have a function
+ }
+
+ if (val != NULL) {
+ // Set initial value.
+ Reference target(this, node->proxy());
+ Load(val);
+ SetValue(&target);
+ // Get rid of the assigned value (declarations are statements).
+ __ pop(r0); // Pop(no_reg);
+ }
+}
+
+
+// Generate code for an expression statement: evaluate the expression
+// and discard its value.
+void ArmCodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ Expression* expression = node->expression();
+ // Mark as a statement so the expression code can skip producing a
+ // value where that matters downstream.
+ expression->MarkAsStatement();
+ Load(expression);
+ __ pop(r0); // __ Pop(no_reg)
+}
+
+
+// An empty statement generates no code; only an assembler comment is
+// emitted for readability of the disassembly.
+void ArmCodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+ Comment cmnt(masm_, "// EmptyStatement");
+ // nothing to do
+}
+
+
+// Generate code for an if statement. Four shapes are emitted depending
+// on which of the then/else parts are present, so that no dead jumps
+// or labels are generated for the missing parts.
+void ArmCodeGenerator::VisitIfStatement(IfStatement* node) {
+ Comment cmnt(masm_, "[ IfStatement");
+ // Generate different code depending on which
+ // parts of the if statement are present or not.
+ bool has_then_stm = node->HasThenStatement();
+ bool has_else_stm = node->HasElseStatement();
+
+ if (FLAG_debug_info) RecordStatementPosition(node);
+
+ Label exit;
+ if (has_then_stm && has_else_stm) {
+ Label then;
+ Label else_;
+ // if (cond)
+ LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
+ Branch(false, &else_);
+ // then
+ __ bind(&then);
+ Visit(node->then_statement());
+ __ b(&exit);
+ // else
+ __ bind(&else_);
+ Visit(node->else_statement());
+
+ } else if (has_then_stm) {
+ ASSERT(!has_else_stm);
+ Label then;
+ // if (cond)
+ LoadCondition(node->condition(), CodeGenState::LOAD, &then, &exit, true);
+ Branch(false, &exit);
+ // then
+ __ bind(&then);
+ Visit(node->then_statement());
+
+ } else if (has_else_stm) {
+ ASSERT(!has_then_stm);
+ Label else_;
+ // if (!cond)
+ LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &else_, true);
+ Branch(true, &exit);
+ // else
+ __ bind(&else_);
+ Visit(node->else_statement());
+
+ } else {
+ ASSERT(!has_then_stm && !has_else_stm);
+ // if (cond)
+ // Condition is still evaluated for its side effects; its result is
+ // then dropped — either by clearing the cached condition code or by
+ // popping the materialized value.
+ LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &exit, false);
+ if (has_cc()) {
+ cc_reg_ = al;
+ } else {
+ __ pop(r0); // __ Pop(no_reg)
+ }
+ }
+
+ // end
+ __ bind(&exit);
+}
+
+
+// Remove num_bytes from the stack while preserving the TOS register
+// caching scheme: the topmost remaining value must end up in r0, so
+// the adjustment is split into an add and a final pop.
+void ArmCodeGenerator::CleanStack(int num_bytes) {
+ ASSERT(num_bytes >= 0);
+ if (num_bytes > 0) {
+ __ add(sp, sp, Operand(num_bytes - kPointerSize));
+ __ pop(r0);
+ }
+}
+
+
+// Generate code for 'continue': unwind any stack values pushed between
+// the loop and this point, then jump to the loop's continue target.
+void ArmCodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ Comment cmnt(masm_, "[ ContinueStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ CleanStack(break_stack_height_ - node->target()->break_stack_height());
+ __ b(node->target()->continue_target());
+}
+
+
+// Generate code for 'break': unwind any stack values pushed between
+// the target statement and this point, then jump to its break target.
+void ArmCodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ Comment cmnt(masm_, "[ BreakStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ CleanStack(break_stack_height_ - node->target()->break_stack_height());
+ __ b(node->target()->break_target());
+}
+
+
+// Generate code for 'return': evaluate the return expression into the
+// TOS register and jump to the shared function-return sequence.
+void ArmCodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ Load(node->expression());
+ __ b(&function_return_);
+}
+
+
+// Generate code entering a 'with' scope: evaluate the object, push a
+// new context via the runtime, and store the new context into the
+// frame's context slot.
+void ArmCodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ Load(node->expression());
+ // NOTE(review): kPushContext is invoked with argc 2 although only the
+ // object was loaded here — presumably the TOS caching accounts for
+ // the second slot; confirm against the runtime's calling convention.
+ __ CallRuntime(Runtime::kPushContext, 2);
+ // Update context local.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+// Generate code leaving a 'with' scope: restore the previous context
+// from the context chain and update the frame's context slot.
+void ArmCodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+ Comment cmnt(masm_, "[ WithExitStatement");
+ // Pop context.
+ __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
+ // Update context local.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+// Generate code for a switch statement as a linear sequence of
+// strict-equality comparisons against the tag value kept on the stack.
+// Case bodies are chained through 'fall_through'; a default clause, if
+// present, is branched to after all comparisons fail.
+void ArmCodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ node->set_break_stack_height(break_stack_height_);
+
+ // The tag stays on the stack for the duration of the comparisons.
+ Load(node->tag());
+
+ Label next, fall_through, default_case;
+ ZoneList<CaseClause*>* cases = node->cases();
+ int length = cases->length();
+
+ for (int i = 0; i < length; i++) {
+ CaseClause* clause = cases->at(i);
+
+ Comment cmnt(masm_, "[ case clause");
+
+ if (clause->is_default()) {
+ // Bind the default case label, so we can branch to it when we
+ // have compared against all other cases.
+ ASSERT(default_case.is_unused()); // at most one default clause
+
+ // If the default case is the first (but not only) case, we have
+ // to jump past it for now. Once we're done with the remaining
+ // clauses, we'll branch back here. If it isn't the first case,
+ // we jump past it by avoiding to chain it into the next chain.
+ if (length > 1) {
+ if (i == 0) __ b(&next);
+ __ bind(&default_case);
+ }
+
+ } else {
+ __ bind(&next);
+ // Labels are single-use; Unuse() lets the same Label object be
+ // re-bound for the following clause.
+ next.Unuse();
+ __ push(r0); // duplicate TOS
+ Load(clause->label());
+ Comparison(eq, true);
+ Branch(false, &next);
+ __ pop(r0); // __ Pop(no_reg)
+ }
+
+ // Generate code for the body.
+ __ bind(&fall_through);
+ fall_through.Unuse();
+ VisitStatements(clause->statements());
+ __ b(&fall_through);
+ }
+
+ // All comparisons failed: discard the tag and go to the default
+ // clause if there is one.
+ __ bind(&next);
+ __ pop(r0); // __ Pop(no_reg)
+ if (default_case.is_bound()) __ b(&default_case);
+
+ __ bind(&fall_through);
+ __ bind(node->break_target());
+}
+
+
+// Generate code for do/while/for loops. A simple static analysis of
+// the condition (always true / always false / unknown) is used to omit
+// the entry jump and the condition test when they are not needed.
+void ArmCodeGenerator::VisitLoopStatement(LoopStatement* node) {
+ Comment cmnt(masm_, "[ LoopStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ node->set_break_stack_height(break_stack_height_);
+
+ // simple condition analysis
+ enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+ if (node->cond() == NULL) {
+ // Only a for loop may omit the condition entirely: for (;;).
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ info = ALWAYS_TRUE;
+ } else {
+ Literal* lit = node->cond()->AsLiteral();
+ if (lit != NULL) {
+ if (lit->IsTrue()) {
+ info = ALWAYS_TRUE;
+ } else if (lit->IsFalse()) {
+ info = ALWAYS_FALSE;
+ }
+ }
+ }
+
+ Label loop, entry;
+
+ // init
+ if (node->init() != NULL) {
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ Visit(node->init());
+ }
+ // do-while runs the body first; other loops test the condition
+ // before the first iteration unless it is statically true.
+ if (node->type() != LoopStatement::DO_LOOP && info != ALWAYS_TRUE) {
+ __ b(&entry);
+ }
+
+ // body
+ __ bind(&loop);
+ Visit(node->body());
+
+ // next
+ __ bind(node->continue_target());
+ if (node->next() != NULL) {
+ // Record source position of the statement as this code which is after the
+ // code for the body actually belongs to the loop statement and not the
+ // body.
+ if (FLAG_debug_info) __ RecordPosition(node->statement_pos());
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ Visit(node->next());
+ }
+
+ // cond
+ __ bind(&entry);
+ switch (info) {
+ case ALWAYS_TRUE:
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ __ b(&loop);
+ break;
+ case ALWAYS_FALSE:
+ // Statically false condition: the back edge is never taken.
+ break;
+ case DONT_KNOW:
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ LoadCondition(node->cond(),
+ CodeGenState::LOAD,
+ &loop,
+ node->break_target(),
+ true);
+ Branch(true, &loop);
+ break;
+ }
+
+ // exit
+ __ bind(node->break_target());
+}
+
+
+// Generate code for a for-in statement. The enumerable is converted to
+// a JSObject, its property names are fetched (as a Map-backed enum
+// cache when possible, otherwise a FixedArray), and the body is run
+// once per key. Keys that no longer exist on the object by the time
+// they come up are filtered out via the FILTER_KEY builtin.
+void ArmCodeGenerator::VisitForInStatement(ForInStatement* node) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+
+ // We keep stuff on the stack while the body is executing.
+ // Record it, so that a break/continue crossing this statement
+ // can restore the stack.
+ const int kForInStackSize = 5 * kPointerSize;
+ break_stack_height_ += kForInStackSize;
+ node->set_break_stack_height(break_stack_height_);
+
+ Label loop, next, entry, cleanup, exit, primitive, jsobject;
+ Label filter_key, end_del_check, fixed_array, non_string;
+
+ // Get the object to enumerate over (converted to JSObject).
+ Load(node->enumerable());
+
+ // Both SpiderMonkey and kjs ignore null and undefined in contrast
+ // to the specification. 12.6.4 mandates a call to ToObject.
+ __ cmp(r0, Operand(Factory::undefined_value()));
+ __ b(eq, &exit);
+ __ cmp(r0, Operand(Factory::null_value()));
+ __ b(eq, &exit);
+
+ // Stack layout in body:
+ // [iteration counter (Smi)]
+ // [length of array]
+ // [FixedArray]
+ // [Map or 0]
+ // [Object]
+
+ // Check if enumerable is already a JSObject
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &primitive);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(JS_OBJECT_TYPE));
+ __ b(hs, &jsobject);
+
+ // Primitive value: convert it to an object first.
+ __ bind(&primitive);
+ __ Push(Operand(0));
+ __ InvokeBuiltin("TO_OBJECT", 0, CALL_JS);
+
+
+ __ bind(&jsobject);
+
+ // Get the set of properties (as a FixedArray or Map).
+ __ push(r0); // duplicate the object being enumerated
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a Map, we can do a fast modification check.
+ // Otherwise, we got a FixedArray, and we have to do a slow check.
+ __ mov(r2, Operand(r0));
+ __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ // Only Maps themselves have meta_map as their map.
+ __ cmp(r1, Operand(Factory::meta_map()));
+ __ b(ne, &fixed_array);
+
+ // Get enum cache
+ __ mov(r1, Operand(r0));
+ __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+ __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
+ __ ldr(r2,
+ FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Push cache array, its length (smi-tagged), and a zero counter.
+ __ Push(Operand(r2));
+ __ Push(FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ Push(Operand(Smi::FromInt(0)));
+ __ b(&entry);
+
+
+ __ bind(&fixed_array);
+
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ __ push(r1); // insert 0 in place of Map
+
+ // Push the length of the array and the initial index onto the stack.
+ __ Push(FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ Push(Operand(Smi::FromInt(0)));
+ __ b(&entry);
+
+ // Body.
+ __ bind(&loop);
+ Visit(node->body());
+
+ // Next.
+ // The counter lives in r0 as a smi; smi addition of 1 advances it.
+ __ bind(node->continue_target());
+ __ bind(&next);
+ __ add(r0, r0, Operand(Smi::FromInt(1)));
+
+ // Condition.
+ __ bind(&entry);
+
+ // Compare counter (r0) against the array length on the stack;
+ // unsigned hs also catches any negative counter.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ cmp(r0, Operand(ip));
+ __ b(hs, &cleanup);
+
+ // Get the i'th entry of the array.
+ __ ldr(r2, MemOperand(sp, kPointerSize));
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Get Map or 0.
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ // Check if this (still) matches the map of the enumerable.
+ // If not, we have to filter the key.
+ __ ldr(r1, MemOperand(sp, 3 * kPointerSize));
+ __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(r2));
+ __ b(eq, &end_del_check);
+
+ // Convert the entry to a string (or null if it isn't a property anymore).
+ __ Push(MemOperand(sp, 4 * kPointerSize)); // push enumerable
+ __ Push(Operand(r3)); // push entry
+ __ Push(Operand(1));
+ __ InvokeBuiltin("FILTER_KEY", 1, CALL_JS);
+ __ mov(r3, Operand(r0));
+ __ pop(r0);
+
+ // If the property has been removed while iterating, we just skip it.
+ __ cmp(r3, Operand(Factory::null_value()));
+ __ b(eq, &next);
+
+
+ __ bind(&end_del_check);
+
+ // Store the entry in the 'each' expression and take another spin in the loop.
+ __ Push(Operand(r3));
+ { Reference each(this, node->each());
+ if (!each.is_illegal()) {
+ // References of size > 0 keep extra state on the stack; duplicate
+ // the value above it, assign, then drop the duplicate.
+ if (each.size() > 0) __ Push(MemOperand(sp, kPointerSize * each.size()));
+ SetValue(&each);
+ if (each.size() > 0) __ pop(r0);
+ }
+ }
+ __ pop(r0);
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ __ jmp(&loop);
+
+ // Cleanup.
+ __ bind(&cleanup);
+ __ bind(node->break_target());
+ __ add(sp, sp, Operand(4 * kPointerSize));
+
+ // Exit.
+ __ bind(&exit);
+ __ pop(r0);
+
+ break_stack_height_ -= kForInStackSize;
+}
+
+
+// Generate code for try/catch. A bl into the try block pushes a
+// return-to-catch address, the try body runs under a pushed handler,
+// and all escapes from the try block (returns, breaks, continues) are
+// shadowed so the handler can be unlinked before control leaves.
+void ArmCodeGenerator::VisitTryCatch(TryCatch* node) {
+ Comment cmnt(masm_, "[ TryCatch");
+
+ Label try_block, exit;
+
+ __ push(r0);
+ // bl leaves the address after it in lr; falling through from the
+ // handler lands here, at the catch block.
+ __ bl(&try_block);
+
+
+ // --- Catch block ---
+
+ // Store the caught exception in the catch variable.
+ { Reference ref(this, node->catch_var());
+ // Load the exception to the top of the stack.
+ __ Push(MemOperand(sp, ref.size() * kPointerSize));
+ SetValue(&ref);
+ }
+
+ // Remove the exception from the stack.
+ __ add(sp, sp, Operand(kPointerSize));
+
+ // Restore TOS register caching.
+ __ pop(r0);
+
+ VisitStatements(node->catch_block()->statements());
+ __ b(&exit);
+
+
+ // --- Try block ---
+ __ bind(&try_block);
+
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+
+ // Introduce shadow labels for all escapes from the try block,
+ // including returns. We should probably try to unify the escaping
+ // labels and the return label.
+ int nof_escapes = node->escaping_labels()->length();
+ List<LabelShadow*> shadows(1 + nof_escapes);
+ shadows.Add(new LabelShadow(&function_return_));
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatements(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ int nof_unlinks = 0;
+ for (int i = 0; i <= nof_escapes; i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+
+ // Unlink from try chain.
+ // TOS contains code slot
+ const int kNextOffset = StackHandlerConstants::kNextOffset +
+ StackHandlerConstants::kAddressDisplacement;
+ __ ldr(r1, MemOperand(sp, kNextOffset)); // read next_sp
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r1, MemOperand(r3));
+ ASSERT(StackHandlerConstants::kCodeOffset == 0); // first field is code
+ __ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ // Code slot popped.
+ __ pop(r0); // restore TOS
+ if (nof_unlinks > 0) __ b(&exit);
+
+ // Generate unlink code for all used shadow labels.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_linked()) {
+ // Unlink from try chain; be careful not to destroy the TOS.
+ __ bind(shadows[i]);
+
+ bool is_return = (shadows[i]->shadowed() == &function_return_);
+ if (!is_return) {
+ // Break/continue case. TOS is the code slot of the handler.
+ __ push(r0); // flush TOS
+ }
+
+ // Reload sp from the top handler, because some statements that we
+ // break from (eg, for...in) may have left stuff on the stack.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+
+ __ ldr(r1, MemOperand(sp, kNextOffset));
+ __ str(r1, MemOperand(r3));
+ ASSERT(StackHandlerConstants::kCodeOffset == 0); // first field is code
+ __ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ // Code slot popped.
+
+ if (!is_return) {
+ __ pop(r0); // restore TOS
+ }
+
+ // Resume at the statement the escape originally targeted.
+ __ b(shadows[i]->shadowed());
+ }
+ }
+
+ __ bind(&exit);
+}
+
+
+// Generate code for try/finally. Control may reach the finally block
+// by falling off the try block (FALLING), by a thrown exception
+// (THROWING), or via an escaping jump such as return/break/continue
+// (JUMPING + shadow index). The reason is carried in r2 as a smi and
+// dispatched on after the finally body runs.
+void ArmCodeGenerator::VisitTryFinally(TryFinally* node) {
+ Comment cmnt(masm_, "[ TryFinally");
+
+ // State: Used to keep track of reason for entering the finally
+ // block. Should probably be extended to hold information for
+ // break/continue from within the try block.
+ enum { FALLING, THROWING, JUMPING };
+
+ Label exit, unlink, try_block, finally_block;
+
+ __ push(r0);
+ __ bl(&try_block);
+
+ // In case of thrown exceptions, this is where we continue.
+ __ mov(r2, Operand(Smi::FromInt(THROWING)));
+ __ b(&finally_block);
+
+
+ // --- Try block ---
+ __ bind(&try_block);
+
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+
+ // Introduce shadow labels for all escapes from the try block,
+ // including returns. We should probably try to unify the escaping
+ // labels and the return label.
+ int nof_escapes = node->escaping_labels()->length();
+ List<LabelShadow*> shadows(1 + nof_escapes);
+ shadows.Add(new LabelShadow(&function_return_));
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatements(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required
+ // unlinks.
+ int nof_unlinks = 0;
+ for (int i = 0; i <= nof_escapes; i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+
+ // Set the state on the stack to FALLING.
+ __ Push(Operand(Factory::undefined_value())); // fake TOS
+ __ mov(r2, Operand(Smi::FromInt(FALLING)));
+ if (nof_unlinks > 0) __ b(&unlink);
+
+ // Generate code that sets the state for all used shadow labels.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_linked()) {
+ __ bind(shadows[i]);
+ if (shadows[i]->shadowed() != &function_return_) {
+ // Fake TOS for break and continue (not return).
+ // Return keeps its real value in r0 through the finally block.
+ __ Push(Operand(Factory::undefined_value()));
+ }
+ // Encode which escape was taken in the state value.
+ __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
+ __ b(&unlink);
+ }
+ }
+
+ // Unlink from try chain; be careful not to destroy the TOS.
+ __ bind(&unlink);
+
+ // Reload sp from the top handler, because some statements that we
+ // break from (eg, for...in) may have left stuff on the stack.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+ const int kNextOffset = StackHandlerConstants::kNextOffset +
+ StackHandlerConstants::kAddressDisplacement;
+ __ ldr(r1, MemOperand(sp, kNextOffset));
+ __ str(r1, MemOperand(r3));
+ ASSERT(StackHandlerConstants::kCodeOffset == 0); // first field is code
+ __ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ // Code slot popped.
+
+
+ // --- Finally block ---
+ __ bind(&finally_block);
+
+ // Push the state on the stack. If necessary move the state to a
+ // local variable to avoid having extra values on the stack while
+ // evaluating the finally block.
+ __ Push(Operand(r2));
+ if (node->finally_var() != NULL) {
+ Reference target(this, node->finally_var());
+ SetValue(&target);
+ ASSERT(target.size() == 0); // no extra stuff on the stack
+ __ pop(r0);
+ }
+
+ // Generate code for the statements in the finally block.
+ VisitStatements(node->finally_block()->statements());
+
+ // Get the state from the stack - or the local variable - and
+ // restore the TOS register.
+ if (node->finally_var() != NULL) {
+ Reference target(this, node->finally_var());
+ GetValue(&target);
+ }
+ __ Pop(r2);
+
+ // Generate code that jumps to the right destination for all used
+ // shadow labels.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_bound()) {
+ __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
+ if (shadows[i]->shadowed() != &function_return_) {
+ Label next;
+ __ b(ne, &next);
+ __ pop(r0); // pop faked TOS
+ __ b(shadows[i]->shadowed());
+ __ bind(&next);
+ } else {
+ // Return path: r0 already holds the return value.
+ __ b(eq, shadows[i]->shadowed());
+ }
+ }
+ }
+
+ // Check if we need to rethrow the exception.
+ __ cmp(r2, Operand(Smi::FromInt(THROWING)));
+ __ b(ne, &exit);
+
+ // Rethrow exception.
+ __ CallRuntime(Runtime::kReThrow, 1);
+
+ // Done.
+ __ bind(&exit);
+ __ pop(r0); // restore TOS caching.
+}
+
+
+// Emit a breakpoint for a JavaScript 'debugger' statement: record the
+// statement position (when --debug_info is on) and call the kDebugBreak
+// runtime entry, which hands control to the debugger.
+// Fix: the disassembly comment string was misspelled "DebuggerStatament".
+void ArmCodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ __ CallRuntime(Runtime::kDebugBreak, 1);
+}
+
+
+// Emit code that creates a closure from a compiled function boilerplate.
+// Pushes the boilerplate and the current context (cp) as the two runtime
+// arguments; Runtime::kNewClosure leaves the new closure as its result.
+void ArmCodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Push the boilerplate on the stack.
+ __ Push(Operand(boilerplate));
+
+ // Create a new closure.
+ __ Push(Operand(cp));
+ __ CallRuntime(Runtime::kNewClosure, 2);
+}
+
+
+// Compile a function literal: build the function's boilerplate (compiling
+// its body) and emit code that instantiates a closure from it at runtime.
+void ArmCodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ InstantiateBoilerplate(boilerplate);
+}
+
+
+// Compile a literal whose boilerplate was already built elsewhere: just
+// emit the closure-instantiation code for the existing boilerplate.
+void ArmCodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+ InstantiateBoilerplate(node->boilerplate());
+}
+
+
+// Compile the ternary operator 'cond ? a : b'. The condition is loaded as
+// a condition code (with then/else jump targets); only the selected branch
+// leaves its value, so both arms are loaded with the same access mode.
+void ArmCodeGenerator::VisitConditional(Conditional* node) {
+ Comment cmnt(masm_, "[ Conditional");
+ Label then, else_, exit;
+ LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
+ Branch(false, &else_);
+ __ bind(&then);
+ Load(node->then_expression(), access());
+ __ b(&exit);
+ __ bind(&else_);
+ Load(node->else_expression(), access());
+ __ bind(&exit);
+}
+
+
+// Compile access to a variable slot. Dynamic (LOOKUP) slots go through the
+// runtime; parameter/local/context slots are accessed directly, with a
+// register fast path for locals kept in registers (--optimize_locals).
+// The operation performed (load / store / const-init) is selected by the
+// current code-generation state's access() mode.
+void ArmCodeGenerator::VisitSlot(Slot* node) {
+ Comment cmnt(masm_, "[ Slot");
+
+ if (node->type() == Slot::LOOKUP) {
+ ASSERT(node->var()->mode() == Variable::DYNAMIC);
+
+ // For now, just do a runtime call.
+ __ Push(Operand(cp));
+ __ Push(Operand(node->var()->name()));
+
+ switch (access()) {
+ case CodeGenState::UNDEFINED:
+ UNREACHABLE();
+ break;
+
+ case CodeGenState::LOAD:
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ // result (TOS) is the value that was loaded
+ break;
+
+ case CodeGenState::LOAD_TYPEOF_EXPR:
+ // Like LOAD but must not throw a reference error for undeclared
+ // variables ('typeof x' on an undeclared x yields "undefined").
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ // result (TOS) is the value that was loaded
+ break;
+
+ case CodeGenState::STORE:
+ // Storing a variable must keep the (new) value on the stack. This
+ // is necessary for compiling assignment expressions.
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ // result (TOS) is the value that was stored
+ break;
+
+ case CodeGenState::INIT_CONST:
+ // Same as STORE but ignores attribute (e.g. READ_ONLY) of
+ // context slot so that we can initialize const properties
+ // (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top
+ // context.
+ //
+ // Note that we must declare the foo upon entry of eval(),
+ // via a context slot declaration, but we cannot initialize
+ // it at the same time, because the const declaration may
+ // be at the end of the eval code (sigh...) and the const
+ // variable may have been used before (where its value is
+ // 'undefined'). Thus, we can only do the initialization
+ // when we actually encounter the expression and when the
+ // expression operands are defined and valid, and thus we
+ // need the split into 2 operations: declaration of the
+ // context slot followed by initialization.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+
+ } else {
+ // Note: We would like to keep the assert below, but it fires because
+ // of some nasty code in LoadTypeofExpression() which should be removed...
+ // ASSERT(node->var()->mode() != Variable::DYNAMIC);
+
+ switch (access()) {
+ case CodeGenState::UNDEFINED:
+ UNREACHABLE();
+ break;
+
+ case CodeGenState::LOAD: // fall through
+ case CodeGenState::LOAD_TYPEOF_EXPR:
+ // Special handling for locals allocated in registers.
+ if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
+ node->index() < num_reg_locals_) {
+ __ Push(Operand(SlotRegister(node->index())));
+ } else {
+ __ Push(SlotOperand(node, r2));
+ }
+ if (node->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't
+ // been initialized yet) which needs to be converted into the
+ // 'undefined' value.
+ Comment cmnt(masm_, "[ Unhole const");
+ __ cmp(r0, Operand(Factory::the_hole_value()));
+ // Conditional move: only replace with undefined on equality.
+ __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
+ }
+ break;
+
+ case CodeGenState::INIT_CONST: {
+ ASSERT(node->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ { Comment cmnt(masm_, "[ Init const");
+ Label L;
+ if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
+ node->index() < num_reg_locals_) {
+ __ mov(r2, Operand(SlotRegister(node->index())));
+ } else {
+ __ ldr(r2, SlotOperand(node, r2));
+ }
+ __ cmp(r2, Operand(Factory::the_hole_value()));
+ __ b(ne, &L);
+ // We must execute the store.
+ if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
+ node->index() < num_reg_locals_) {
+ __ mov(SlotRegister(node->index()), Operand(r0));
+ } else {
+ // r2 may be loaded with context; used below in RecordWrite.
+ __ str(r0, SlotOperand(node, r2));
+ }
+ if (node->type() == Slot::CONTEXT) {
+ // Skip write barrier if the written value is a smi.
+ Label exit;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+ // r2 is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + node->index() * kPointerSize;
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, r1);
+ __ bind(&exit);
+ }
+ __ bind(&L);
+ }
+ break;
+ }
+
+ case CodeGenState::STORE: {
+ // Storing a variable must keep the (new) value on the stack. This
+ // is necessary for compiling assignment expressions.
+ // Special handling for locals allocated in registers.
+ //
+ // Note: We will reach here even with node->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end
+ // up calling this code.
+ if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
+ node->index() < num_reg_locals_) {
+ __ mov(SlotRegister(node->index()), Operand(r0));
+ } else {
+ // r2 may be loaded with context; used below in RecordWrite.
+ __ str(r0, SlotOperand(node, r2));
+ }
+ if (node->type() == Slot::CONTEXT) {
+ // Skip write barrier if the written value is a smi.
+ Label exit;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+ // r2 is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + node->index() * kPointerSize;
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, r1);
+ __ bind(&exit);
+ }
+ break;
+ }
+ }
+ }
+}
+
+
+// Compile a variable reference. Rewritten variables (parameters, locals,
+// context slots) delegate to their rewrite expression; otherwise the
+// variable must be global and is accessed as a named property of the
+// global object, either directly (when referenced) or via a Reference.
+void ArmCodeGenerator::VisitVariableProxy(VariableProxy* proxy_node) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ Variable* node = proxy_node->var();
+
+ Expression* x = node->rewrite();
+ if (x != NULL) {
+ Visit(x);
+ return;
+ }
+
+ ASSERT(node->is_global());
+ if (is_referenced()) {
+ if (node->AsProperty() != NULL) {
+ __ RecordPosition(node->AsProperty()->position());
+ }
+ // Access the global as a named property using the variable's name.
+ AccessReferenceProperty(new Literal(node->name()), access());
+
+ } else {
+ // All stores are through references.
+ ASSERT(access() != CodeGenState::STORE);
+ Reference property(this, proxy_node);
+ GetValue(&property);
+ }
+}
+
+
+// Compile a literal expression: push the literal's handle as the value.
+void ArmCodeGenerator::VisitLiteral(Literal* node) {
+ Comment cmnt(masm_, "[ Literal");
+ __ Push(Operand(node->handle()));
+}
+
+
+// Compile a regexp literal. The compiled regexp object is cached in the
+// function's literals array at the literal's index; if the cached entry is
+// still undefined, call the runtime to materialize it, then push it.
+// Fix: the disassembly comment string was misspelled "RexExp Literal".
+void ArmCodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ Comment cmnt(masm_, "[ RegExp Literal");
+
+ // Retrieve the literal array and check the allocated entry.
+
+ // Load the function of this activation.
+ __ ldr(r1, MemOperand(pp, 0));
+
+ // Load the literals array of the function.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+ // Load the literal at the ast saved index.
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+ Label done;
+ __ cmp(r2, Operand(Factory::undefined_value()));
+ __ b(ne, &done);
+
+ // If the entry is undefined we call the runtime system to computed
+ // the literal.
+ __ Push(Operand(r1)); // literal array (0)
+ __ Push(Operand(Smi::FromInt(node->literal_index()))); // literal index (1)
+ __ Push(Operand(node->pattern())); // RegExp pattern (2)
+ __ Push(Operand(node->flags())); // RegExp flags (3)
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ Pop(r2);
+ __ bind(&done);
+
+ // Push the literal.
+ __ Push(Operand(r2));
+}
+
+
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateObjectLiteral.
+// Each created boilerplate is stored in the JSFunction and they are
+// therefore context dependent.
+class ObjectLiteralDeferred: public DeferredCode {
+ public:
+ // Captures the ObjectLiteral node so Generate() can emit the runtime
+ // call with the node's literal index and constant properties.
+ ObjectLiteralDeferred(CodeGenerator* generator, ObjectLiteral* node)
+ : DeferredCode(generator), node_(node) {
+ set_comment("[ ObjectLiteralDeferred");
+ }
+ // Emits the out-of-line boilerplate-creation code (see definition below).
+ virtual void Generate();
+ private:
+ ObjectLiteral* node_;
+};
+
+
+// Slow path for VisitObjectLiteral: create the object literal boilerplate
+// via the runtime. On entry r1 holds the function's literals array (set up
+// by the fast path); the created boilerplate is left in r2, matching what
+// the fast path loads from the literals array.
+void ObjectLiteralDeferred::Generate() {
+ // If the entry is undefined we call the runtime system to computed
+ // the literal.
+
+ // Literal array (0).
+ __ Push(Operand(r1));
+ // Literal index (1).
+ __ Push(Operand(Smi::FromInt(node_->literal_index())));
+ // Constant properties (2).
+ __ Push(Operand(node_->constant_properties()));
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ Pop(r2);
+}
+
+
+// Compile an object literal. The boilerplate object is cached in the
+// function's literals array (materialized via deferred code on first use),
+// then cloned; non-constant properties are installed on the clone one by
+// one via runtime calls.
+void ArmCodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
+
+ // Retrieve the literal array and check the allocated entry.
+
+ // Load the function of this activation.
+ __ ldr(r1, MemOperand(pp, 0));
+
+ // Load the literals array of the function.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+ // Load the literal at the ast saved index.
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+ // Check whether we need to materialize the object literal boilerplate.
+ // If so, jump to the deferred code.
+ __ cmp(r2, Operand(Factory::undefined_value()));
+ __ b(eq, deferred->enter());
+ __ bind(deferred->exit());
+
+ // Push the object literal boilerplate.
+ __ Push(Operand(r2));
+ // Clone the boilerplate object.
+ __ CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
+
+ // The clone (the literal's value) is now in r0/TOS; add the properties.
+ for (int i = 0; i < node->properties()->length(); i++) {
+ ObjectLiteral::Property* property = node->properties()->at(i);
+ Literal* key = property->key();
+ Expression* value = property->value();
+ switch (property->kind()) {
+ // Constant properties are already present in the boilerplate.
+ case ObjectLiteral::Property::CONSTANT: break;
+ case ObjectLiteral::Property::COMPUTED: // fall through
+ case ObjectLiteral::Property::PROTOTYPE: {
+ // Save a copy of the resulting object on the stack.
+ __ push(r0);
+ Load(key);
+ Load(value);
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ // Restore the result object from the stack.
+ __ pop(r0);
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ __ push(r0);
+ Load(key);
+ // Smi 1 selects the setter slot for kDefineAccessor.
+ __ Push(Operand(Smi::FromInt(1)));
+ Load(value);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ pop(r0);
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ __ push(r0);
+ Load(key);
+ // Smi 0 selects the getter slot for kDefineAccessor.
+ __ Push(Operand(Smi::FromInt(0)));
+ Load(value);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ pop(r0);
+ break;
+ }
+ }
+ }
+}
+
+
+// Compile an array literal. The result object (a boilerplate clone built by
+// node->result()) is loaded first; then every non-literal element value is
+// evaluated and written directly into the array's elements backing store,
+// updating the write barrier for each store.
+void ArmCodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ // Load the resulting object.
+ Load(node->result());
+ for (int i = 0; i < node->values()->length(); i++) {
+ Expression* value = node->values()->at(i);
+
+ // If value is literal the property value is already
+ // set in the boilerplate object.
+ if (value->AsLiteral() == NULL) {
+ // The property must be set by generated code.
+ Load(value);
+
+ // Fetch the object literal
+ __ ldr(r1, MemOperand(sp, 0));
+ // Get the elements array.
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+
+ // Write to the indexed properties array.
+ int offset = i * kPointerSize + Array::kHeaderSize;
+ __ str(r0, FieldMemOperand(r1, offset));
+
+ // Update the write barrier for the array address.
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r1, r3, r2);
+
+ // Pop the element value; the array object stays as TOS.
+ __ pop(r0);
+ }
+ }
+}
+
+
+// Compile an assignment. Plain/init assignments just load the RHS;
+// compound assignments (+=, etc.) load the target's current value first and
+// combine it with the RHS (with a smi fast path for literal smi operands).
+// Assignments to const variables are ignored except for INIT_* ops.
+void ArmCodeGenerator::VisitAssignment(Assignment* node) {
+ Comment cmnt(masm_, "[ Assignment");
+
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ Reference target(this, node->target());
+ if (target.is_illegal()) return;
+
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ Load(node->value());
+
+ } else {
+ // Compound assignment: current value OP rhs.
+ GetValue(&target);
+ Literal* literal = node->value()->AsLiteral();
+ if (literal != NULL && literal->handle()->IsSmi()) {
+ SmiOperation(node->binary_op(), literal->handle(), false);
+ } else {
+ Load(node->value());
+ GenericOperation(node->binary_op());
+ }
+ }
+
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ if (var != NULL &&
+ (var->mode() == Variable::CONST) &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
+ } else {
+ __ RecordPosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ InitConst(&target);
+ } else {
+ SetValue(&target);
+ }
+ }
+}
+
+
+// Compile a throw statement: evaluate the exception value and call the
+// kThrow runtime entry, which unwinds to the nearest handler.
+void ArmCodeGenerator::VisitThrow(Throw* node) {
+ Comment cmnt(masm_, "[ Throw");
+
+ Load(node->exception());
+ __ RecordPosition(node->position());
+ __ CallRuntime(Runtime::kThrow, 1);
+}
+
+
+// Compile a property access (obj.key / obj[key]). When part of a larger
+// reference the access is emitted directly with the current access mode;
+// otherwise the value is loaded through a Reference.
+void ArmCodeGenerator::VisitProperty(Property* node) {
+ Comment cmnt(masm_, "[ Property");
+ if (is_referenced()) {
+ __ RecordPosition(node->position());
+ AccessReferenceProperty(node->key(), access());
+ } else {
+ // All stores are through references.
+ ASSERT(access() != CodeGenState::STORE);
+ Reference property(this, node);
+ __ RecordPosition(node->position());
+ GetValue(&property);
+ }
+}
+
+
+// Compile a function call. Four cases: a global variable call (via call
+// IC), a with/eval-scoped lookup call (via runtime), a property call
+// (named via call IC, keyed via a Reference), and the generic case where
+// the callee expression is evaluated and invoked with the global receiver.
+void ArmCodeGenerator::VisitCall(Call* node) {
+ Comment cmnt(masm_, "[ Call");
+
+ ZoneList<Expression*>* args = node->arguments();
+
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ // Standard function call.
+
+ // Check if the function is a variable or a property.
+ Expression* function = node->expression();
+ Variable* var = function->AsVariableProxy()->AsVariable();
+ Property* property = function->AsProperty();
+
+ // ------------------------------------------------------------------------
+ // Fast-case: Use inline caching.
+ // ---
+ // According to ECMA-262, section 11.2.3, page 44, the function to call
+ // must be resolved after the arguments have been evaluated. The IC code
+ // automatically handles this by loading the arguments before the function
+ // is resolved in cache misses (this also holds for megamorphic calls).
+ // ------------------------------------------------------------------------
+
+ if (var != NULL && !var->is_this() && var->is_global()) {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is global
+ // ----------------------------------
+
+ // Push the name of the function and the receiver onto the stack.
+ __ Push(Operand(var->name()));
+ LoadGlobal();
+
+ // Load the arguments.
+ for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ __ Push(Operand(args->length()));
+
+ // Setup the receiver register and call the IC initialization code.
+ Handle<Code> stub = ComputeCallInitialize(args->length());
+ __ ldr(r1, GlobalObject());
+ __ RecordPosition(node->position());
+ __ Call(stub, code_target_context);
+ // Restore the context register, which the call may have changed.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // Remove the function from the stack.
+ __ add(sp, sp, Operand(kPointerSize));
+
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // ----------------------------------
+ // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
+ // ----------------------------------
+
+ // Load the function
+ __ Push(Operand(cp));
+ __ Push(Operand(var->name()));
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ // r0: slot value; r1: receiver
+
+ // Load the receiver.
+ __ push(r0);
+ __ mov(r0, Operand(r1));
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+
+ } else if (property != NULL) {
+ // Check if the key is a literal string.
+ Literal* literal = property->key()->AsLiteral();
+
+ if (literal != NULL && literal->handle()->IsSymbol()) {
+ // ------------------------------------------------------------------
+ // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+ // ------------------------------------------------------------------
+
+ // Push the name of the function and the receiver onto the stack.
+ __ Push(Operand(literal->handle()));
+ Load(property->obj());
+
+ // Load the arguments.
+ for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ __ Push(Operand(args->length()));
+
+ // Set the receiver register and call the IC initialization code.
+ Handle<Code> stub = ComputeCallInitialize(args->length());
+ __ ldr(r1, MemOperand(sp, args->length() * kPointerSize));
+ __ RecordPosition(node->position());
+ __ Call(stub, code_target);
+ // Restore the context register, which the call may have changed.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // Remove the function from the stack.
+ __ add(sp, sp, Operand(kPointerSize));
+
+ } else {
+ // -------------------------------------------
+ // JavaScript example: 'array[index](1, 2, 3)'
+ // -------------------------------------------
+
+ // Load the function to call from the property through a reference.
+ Reference ref(this, property);
+ GetValue(&ref);
+
+ // Pass receiver to called function.
+ __ Push(MemOperand(sp, ref.size() * kPointerSize));
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ }
+
+ } else {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is not global
+ // ----------------------------------
+
+ // Load the function.
+ Load(function);
+
+ // Pass the global object as the receiver.
+ LoadGlobal();
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ }
+}
+
+
+// Compile a 'new' expression: evaluate the constructor, push the global
+// object as the receiver and the arguments plus their count, then call the
+// JSConstructCall builtin, which performs allocation and invocation.
+// Fix: RecordPosition was called with an undeclared name 'position';
+// every sibling call site uses node->position().
+void ArmCodeGenerator::VisitCallNew(CallNew* node) {
+ Comment cmnt(masm_, "[ CallNew");
+
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments. This is different from ordinary calls, where the
+ // actual function to call is resolved after the arguments have been
+ // evaluated.
+
+ // Compute function to call and use the global object as the
+ // receiver.
+ Load(node->expression());
+ LoadGlobal();
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = node->arguments();
+ for (int i = 0; i < args->length(); i++) Load(args->at(i));
+
+ // Push the number of arguments.
+ __ Push(Operand(args->length()));
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ __ RecordPosition(node->position());
+ __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ js_construct_call);
+ __ add(sp, sp, Operand(kPointerSize)); // discard
+}
+
+
+// Inline runtime %SetThisFunction: store the single argument into the
+// current frame's function slot (via the parameter pointer pp).
+void ArmCodeGenerator::GenerateSetThisFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ __ str(r0, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+// Inline runtime %GetThisFunction: push the current frame's function.
+void ArmCodeGenerator::GenerateGetThisFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ __ Push(MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+// Inline runtime %SetThis: store the single argument into the current
+// frame's receiver slot.
+void ArmCodeGenerator::GenerateSetThis(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ __ str(r0, MemOperand(pp, JavaScriptFrameConstants::kReceiverOffset));
+}
+
+
+// Inline runtime %SetArgumentsLength: untag the smi argument and store it
+// in the frame's args-length slot; leaves smi 0 in r0 as the result value.
+void ArmCodeGenerator::GenerateSetArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ // Untag: shift out the smi tag to get the raw integer count.
+ __ mov(r0, Operand(r0, LSR, kSmiTagSize));
+ __ str(r0, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
+ __ mov(r0, Operand(Smi::FromInt(0)));
+}
+
+
+// Inline runtime %GetArgumentsLength: push the frame's args-length slot as
+// a smi (spills the cached TOS first, then produces the result in r0).
+// NOTE(review): asserts one argument but never evaluates it — presumably a
+// placeholder argument at the call site; confirm against callers.
+void ArmCodeGenerator::GenerateGetArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
+ // Tag the raw count as a smi.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+}
+
+
+// Inline runtime %_ValueOf: if the argument is a JSValue wrapper, replace
+// it with its wrapped value; smis and non-JSValue objects pass through.
+void ArmCodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Label leave;
+ Load(args->at(0));
+ // r0 contains object.
+ // if (object->IsSmi()) return TOS.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &leave);
+ // It is a heap object - get map.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ // if (!object->IsJSValue()) return TOS.
+ __ cmp(r1, Operand(JS_VALUE_TYPE));
+ __ b(ne, &leave);
+ // Load the value.
+ __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ bind(&leave);
+}
+
+
+// Inline runtime %_SetValueOf(object, value): if object is a JSValue
+// wrapper, store value into it (with write barrier). The value remains
+// in r0 as the expression result in all cases.
+void ArmCodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ Label leave;
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
+ __ pop(r1);
+ // r0 contains value.
+ // r1 contains object.
+ // if (object->IsSmi()) return object.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &leave);
+ // It is a heap object - get map.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ // if (!object->IsJSValue()) return object.
+ __ cmp(r2, Operand(JS_VALUE_TYPE));
+ __ b(ne, &leave);
+ // Store the value.
+ __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+ // Update the write barrier.
+ __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
+ __ RecordWrite(r1, r2, r3);
+ // Leave.
+ __ bind(&leave);
+}
+
+
+// Inline runtime: tail-call the function stored in the current frame's
+// function slot, reusing this frame's arguments. Tears down the current
+// JS frame (without returning) and jumps to the callee's code entry.
+void ArmCodeGenerator::GenerateTailCallWithArguments(
+ ZoneList<Expression*>* args) {
+ // r0 = number of arguments (smi)
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ __ mov(r0, Operand(r0, LSR, kSmiTagSize));
+
+ // r1 = new function (previously written to stack)
+ __ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
+
+ // Reset parameter pointer and frame pointer to previous frame
+ ExitJSFrame(reg_locals_, DO_NOT_RETURN);
+
+ // Jump (tail-call) to the function in register r1.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+ // Branch to the code object's first instruction (skip the header).
+ __ add(pc, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ return;
+}
+
+
+// Inline runtime %SetArgument(i, value, ...): store value into parameter
+// slot i of the current frame. The index is checked (debug builds) to be a
+// smi and used as a scaled negative offset from the first parameter slot.
+void ArmCodeGenerator::GenerateSetArgument(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 3);
+ // r1 = args[i]
+ Comment cmnt(masm_, "[ GenerateSetArgument");
+ Load(args->at(1));
+ __ mov(r1, Operand(r0));
+ // r0 = i
+ Load(args->at(0));
+#if defined(DEBUG)
+ { Label L;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &L);
+ __ stop("SMI expected");
+ __ bind(&L);
+ }
+#endif // defined(DEBUG)
+ __ add(r2, pp, Operand(JavaScriptFrameConstants::kParam0Offset));
+ // Scaled index: smi i becomes a byte offset, subtracted from param 0.
+ __ str(r1,
+ MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize, NegOffset));
+ __ pop(r0);
+}
+
+
+// Inline runtime %SquashFrame(old_count, new_count): shrink the current
+// frame when the argument count drops, sliding the frame contents down by
+// (old - new) words and moving fp and sp accordingly.
+void ArmCodeGenerator::GenerateSquashFrame(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ // Load r1 with old number of arguments, r0 with new number, r1 > r0.
+ Load(args->at(0));
+ __ mov(r1, Operand(r0, LSR, kSmiTagSize));
+ Load(args->at(1));
+ __ mov(r0, Operand(r0, LSR, kSmiTagSize));
+ // r1 = number of words to move stack.
+ __ sub(r1, r1, Operand(r0));
+ // r2 is source.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerPCOffset));
+ // Move down frame pointer fp.
+ __ add(fp, fp, Operand(r1, LSL, kPointerSizeLog2));
+ // r1 is destination.
+ __ add(r1, fp, Operand(StandardFrameConstants::kCallerPCOffset));
+
+ // Copy words from r2 down to r1 until the source reaches sp.
+ Label move;
+ __ bind(&move);
+ __ ldr(r3, MemOperand(r2, -kPointerSize, PostIndex));
+ __ str(r3, MemOperand(r1, -kPointerSize, PostIndex));
+ __ cmp(r2, Operand(sp));
+ __ b(ne, &move);
+ // Copy the final word at the stack pointer itself.
+ __ ldr(r3, MemOperand(r2));
+ __ str(r3, MemOperand(r1));
+
+ // Move down stack pointer esp.
+ __ mov(sp, Operand(r1));
+ // Balance stack and put something GC-able in r0.
+ __ pop(r0);
+}
+
+
+// Inline runtime %ExpandFrame(old_count, new_count): grow the current frame
+// when the argument count increases, sliding the frame contents up by
+// (new - old) words. Checks the stack guard first (under --check_stack) and
+// leaves true/false in r0 to report success.
+// Fix: two label references were encoding-mangled ("¬_too_big"); restored
+// to '&not_too_big', matching the 'Label not_too_big' declared above and
+// the '&label' convention used throughout this file.
+void ArmCodeGenerator::GenerateExpandFrame(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ // Load r1 with new number of arguments, r0 with old number (as Smi), r1 > r0.
+ Load(args->at(1));
+ __ mov(r1, Operand(r0, LSR, kSmiTagSize));
+ Load(args->at(0));
+ // r1 = number of words to move stack.
+ __ sub(r1, r1, Operand(r0, LSR, kSmiTagSize));
+ Label end_of_expand_frame;
+ if (FLAG_check_stack) {
+ // Fail (return false) instead of growing past the stack guard limit.
+ Label not_too_big;
+ __ sub(r2, sp, Operand(r1, LSL, kPointerSizeLog2));
+ __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit()));
+ __ ldr(ip, MemOperand(ip));
+ __ cmp(r2, Operand(ip));
+ __ b(gt, &not_too_big);
+ __ pop(r0);
+ __ mov(r0, Operand(Factory::false_value()));
+ __ b(&end_of_expand_frame);
+ __ bind(&not_too_big);
+ }
+ // r3 is source.
+ __ mov(r3, Operand(sp));
+ // r0 is copy limit + 1 word
+ __ add(r0, fp,
+ Operand(StandardFrameConstants::kCallerPCOffset + kPointerSize));
+ // Move up frame pointer fp.
+ __ sub(fp, fp, Operand(r1, LSL, kPointerSizeLog2));
+ // Move up stack pointer sp.
+ __ sub(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
+ // r1 is destination (r1 = source - r1).
+ __ mov(r2, Operand(0));
+ __ sub(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
+ __ add(r1, r3, Operand(r2));
+
+ // Copy words upward from r3 to r1 until the source reaches the limit.
+ Label move;
+ __ bind(&move);
+ __ ldr(r2, MemOperand(r3, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r1, kPointerSize, PostIndex));
+ __ cmp(r3, Operand(r0));
+ __ b(ne, &move);
+
+ // Balance stack and put success value in top of stack
+ __ pop(r0);
+ __ mov(r0, Operand(Factory::true_value()));
+ __ bind(&end_of_expand_frame);
+}
+
+
+// Inline runtime %_IsSmi: test the argument's smi tag and leave the answer
+// in the condition-code register (eq when the value is a smi).
+void ArmCodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ __ tst(r0, Operand(kSmiTagMask));
+ __ pop(r0);
+ cc_reg_ = eq;
+}
+
+
+// This is used in the implementation of apply on ia32 but it is not
+// used on ARM yet.
+// Deliberately unimplemented: emits a breakpoint trap if ever reached.
+void ArmCodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ __ int3();
+ cc_reg_ = eq;
+}
+
+
+// Inline runtime %_ArgumentsLength: produce the actual argument count via
+// the shared ArgumentsAccessStub, falling back to the formal parameter
+// count when no arguments adaptor frame exists below the current frame.
+void ArmCodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ // Flush the TOS cache and seed the result with the formal
+ // parameters count, which will be used in case no arguments adaptor
+ // frame is found below the current frame.
+ __ push(r0);
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+
+ // Call the shared stub to get to the arguments.length.
+ ArgumentsAccessStub stub(true);
+ __ CallStub(&stub);
+}
+
+
+// Inline runtime %_Arguments(key): load arguments[key] via the shared
+// ArgumentsAccessStub, passing the formal parameter count in r1.
+void ArmCodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the key onto the stack and set register r1 to the formal
+ // parameters count for the currently executing function.
+ Load(args->at(0));
+ __ mov(r1, Operand(Smi::FromInt(scope_->num_parameters())));
+
+ // Call the shared stub to get to arguments[key].
+ ArgumentsAccessStub stub(false);
+ __ CallStub(&stub);
+}
+
+
+// Inline runtime: shift the arguments one slot down the stack (dropping the
+// first), then tail-call the current frame's function with the remaining
+// arguments. Exits the JS frame without returning and jumps to the callee.
+void ArmCodeGenerator::GenerateShiftDownAndTailCall(
+ ZoneList<Expression*>* args) {
+ // r0 = number of arguments
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ __ mov(r0, Operand(r0, LSR, kSmiTagSize));
+
+ // Get the 'this' function and exit the frame without returning.
+ __ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
+ ExitJSFrame(reg_locals_, DO_NOT_RETURN);
+ // return address in lr
+
+ // Move arguments one element down the stack.
+ Label move;
+ Label moved;
+ // Sets the condition codes; skip the loop entirely when count is zero.
+ __ sub(r2, r0, Operand(0), SetCC);
+ __ b(eq, &moved);
+ __ bind(&move);
+ __ sub(ip, r2, Operand(1));
+ __ ldr(r3, MemOperand(sp, ip, LSL, kPointerSizeLog2));
+ __ str(r3, MemOperand(sp, r2, LSL, kPointerSizeLog2));
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(ne, &move);
+ __ bind(&moved);
+
+ // Remove the TOS (copy of last argument)
+ __ add(sp, sp, Operand(kPointerSize));
+
+ // Jump (tail-call) to the function in register r1.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+ // Branch to the code object's first instruction (skip the header).
+ __ add(pc, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ return;
+}
+
+
+// Compile a runtime call (%Foo(...)). Recognized inline runtime functions
+// are expanded in place; C runtime functions are called directly; unknown
+// names are treated as JS builtins looked up on the builtins object and
+// invoked through the call IC.
+void ArmCodeGenerator::VisitCallRuntime(CallRuntime* node) {
+ if (CheckForInlineRuntimeCall(node))
+ return;
+
+ ZoneList<Expression*>* args = node->arguments();
+ Comment cmnt(masm_, "[ CallRuntime");
+ Runtime::Function* function = node->function();
+
+ if (function == NULL) {
+ // Prepare stack for calling JS runtime function.
+ __ Push(Operand(node->name()));
+ // Push the builtins object found in the current global object.
+ __ ldr(r1, GlobalObject());
+ __ Push(FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+ }
+
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < args->length(); i++) Load(args->at(i));
+
+ if (function != NULL) {
+ // Call the C runtime function.
+ __ CallRuntime(function, args->length());
+ } else {
+ // Call the JS runtime function.
+ __ Push(Operand(args->length()));
+ // Load the receiver (the builtins object) into r1 for the IC.
+ __ ldr(r1, MemOperand(sp, args->length() * kPointerSize));
+ Handle<Code> stub = ComputeCallInitialize(args->length());
+ __ Call(stub, code_target);
+ // Restore the context register, which the call may have changed.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the pushed function name.
+ __ add(sp, sp, Operand(kPointerSize));
+ }
+}
+
+
+// Compile a unary operation. NOT is handled as a condition-code negation;
+// DELETE dispatches on the operand kind (property, global, lookup slot, or
+// other expression); TYPEOF uses the no-reference-error load; the remaining
+// operators (SUB, BIT_NOT, VOID, ADD) operate on the loaded value.
+void ArmCodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ Comment cmnt(masm_, "[ UnaryOperation");
+
+ Token::Value op = node->op();
+
+ if (op == Token::NOT) {
+ // Load as a condition with swapped targets, then negate the cc.
+ LoadCondition(node->expression(),
+ CodeGenState::LOAD,
+ false_target(),
+ true_target(),
+ true);
+ cc_reg_ = NegateCondition(cc_reg_);
+
+ } else if (op == Token::DELETE) {
+ Property* property = node->expression()->AsProperty();
+ if (property != NULL) {
+ Load(property->obj());
+ Load(property->key());
+ __ Push(Operand(1)); // not counting receiver
+ __ InvokeBuiltin("DELETE", 1, CALL_JS);
+ return;
+ }
+
+ Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+ if (variable != NULL) {
+ Slot* slot = variable->slot();
+ if (variable->is_global()) {
+ // Delete a named property of the global object.
+ LoadGlobal();
+ __ Push(Operand(variable->name()));
+ __ Push(Operand(1)); // not counting receiver
+ __ InvokeBuiltin("DELETE", 1, CALL_JS);
+ return;
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // lookup the context holding the named variable
+ __ Push(Operand(cp));
+ __ Push(Operand(variable->name()));
+ __ CallRuntime(Runtime::kLookupContext, 2);
+ // r0: context
+ __ Push(Operand(variable->name()));
+ __ Push(Operand(1)); // not counting receiver
+ __ InvokeBuiltin("DELETE", 1, CALL_JS);
+ return;
+ }
+
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ __ Push(Operand(Factory::false_value()));
+
+ } else {
+ // Default: Result of deleting expressions is true.
+ Load(node->expression()); // may have side-effects
+ __ mov(r0, Operand(Factory::true_value()));
+ }
+
+ } else if (op == Token::TYPEOF) {
+ // Special case for loading the typeof expression; see comment on
+ // LoadTypeofExpression().
+ LoadTypeofExpression(node->expression());
+ __ CallRuntime(Runtime::kTypeof, 1);
+
+ } else {
+ Load(node->expression());
+ switch (op) {
+ case Token::NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ UNREACHABLE(); // handled above
+ break;
+
+ case Token::SUB: {
+ UnarySubStub stub;
+ __ CallStub(&stub);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ // smi check
+ Label smi_label;
+ Label continue_label;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &smi_label);
+
+ // Non-smi: go through the generic BIT_NOT builtin.
+ __ Push(Operand(0)); // not counting receiver
+ __ InvokeBuiltin("BIT_NOT", 0, CALL_JS);
+
+ __ b(&continue_label);
+ __ bind(&smi_label);
+ __ mvn(r0, Operand(r0));
+ __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
+ __ bind(&continue_label);
+ break;
+ }
+
+ case Token::VOID:
+ // since the stack top is cached in r0, popping and then
+ // pushing a value can be done by just writing to r0.
+ __ mov(r0, Operand(Factory::undefined_value()));
+ break;
+
+ case Token::ADD:
+ // Unary plus coerces the operand to a number.
+ __ Push(Operand(0)); // not counting receiver
+ __ InvokeBuiltin("TO_NUMBER", 0, CALL_JS);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+
// Emits ARM code for ++/-- (prefix and postfix). Fast path: assume a
// smi operand, optimistically add/subtract 1 and check for overflow;
// on failure revert the arithmetic and fall back to the Inc/Dec
// JavaScript natives via stubs.
void ArmCodeGenerator::VisitCountOperation(CountOperation* node) {
  Comment cmnt(masm_, "[ CountOperation");

  bool is_postfix = node->is_postfix();
  bool is_increment = node->op() == Token::INC;

  // Const variables must not be written back (stores are skipped below).
  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
  bool is_const = (var != NULL && var->mode() == Variable::CONST);

  // Postfix: Make room for the result.
  if (is_postfix) __ Push(Operand(0));

  { Reference target(this, node->expression());
    if (target.is_illegal()) return;
    GetValue(&target);

    Label slow, exit;

    // Load the value (1) into register r1.
    __ mov(r1, Operand(Smi::FromInt(1)));

    // Check for smi operand (ne: tag bits set, i.e. not a smi).
    __ tst(r0, Operand(kSmiTagMask));
    __ b(ne, &slow);

    // Postfix: Store the old value as the result. The slot was
    // reserved above, target.size() values deep below the TOS.
    if (is_postfix) __ str(r0, MemOperand(sp, target.size() * kPointerSize));

    // Perform optimistic increment/decrement; SetCC so the overflow
    // flag is updated.
    if (is_increment) {
      __ add(r0, r0, Operand(r1), SetCC);
    } else {
      __ sub(r0, r0, Operand(r1), SetCC);
    }

    // If the increment/decrement didn't overflow, we're done.
    __ b(vc, &exit);

    // Revert optimistic increment/decrement.
    if (is_increment) {
      __ sub(r0, r0, Operand(r1));
    } else {
      __ add(r0, r0, Operand(r1));
    }

    // Slow case: Convert to number.
    __ bind(&slow);

    // Postfix: Convert the operand to a number and store it as the result.
    if (is_postfix) {
      InvokeBuiltinStub stub(InvokeBuiltinStub::ToNumber, 2);
      __ CallStub(&stub);
      // Store to result (on the stack).
      __ str(r0, MemOperand(sp, target.size() * kPointerSize));
    }

    // Compute the new value by calling the right JavaScript native.
    if (is_increment) {
      InvokeBuiltinStub stub(InvokeBuiltinStub::Inc, 1);
      __ CallStub(&stub);
    } else {
      InvokeBuiltinStub stub(InvokeBuiltinStub::Dec, 1);
      __ CallStub(&stub);
    }

    // Store the new value in the target if not const.
    __ bind(&exit);
    if (!is_const) SetValue(&target);
  }

  // Postfix: Discard the new value and use the old.
  if (is_postfix) __ pop(r0);
}
+
+
// Emits ARM code for binary operations. && and || are compiled with
// short-circuit control flow; all other operators either take the
// smi-literal fast path (SmiOperation) or the generic two-operand
// path (GenericOperation).
void ArmCodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
  Comment cmnt(masm_, "[ BinaryOperation");
  Token::Value op = node->op();

  // According to ECMA-262 section 11.11, page 58, the binary logical
  // operators must yield the result of one of the two expressions
  // before any ToBoolean() conversions. This means that the value
  // produced by a && or || operator is not necessarily a boolean.

  // NOTE: If the left hand side produces a materialized value (not in
  // the CC register), we force the right hand side to do the
  // same. This is necessary because we may have to branch to the exit
  // after evaluating the left hand side (due to the shortcut
  // semantics), but the compiler must (statically) know if the result
  // of compiling the binary operation is materialized or not.

  if (op == Token::AND) {
    Label is_true;
    LoadCondition(node->left(),
                  CodeGenState::LOAD,
                  &is_true,
                  false_target(),
                  false);
    if (has_cc()) {
      // Left side ended in the condition register: a false left side
      // short-circuits straight to the false target.
      Branch(false, false_target());

      // Evaluate right side expression.
      __ bind(&is_true);
      LoadCondition(node->right(),
                    CodeGenState::LOAD,
                    true_target(),
                    false_target(),
                    false);

    } else {
      // Left side was materialized; the right side must be too.
      Label pop_and_continue, exit;

      // Avoid popping the result if it converts to 'false' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(r0, &pop_and_continue, &exit);
      Branch(false, &exit);

      // Pop the result of evaluating the first part.
      __ bind(&pop_and_continue);
      __ pop(r0);

      // Evaluate right side expression.
      __ bind(&is_true);
      Load(node->right());

      // Exit (always with a materialized value).
      __ bind(&exit);
    }

  } else if (op == Token::OR) {
    // Mirror image of the && case above: a true left side
    // short-circuits to the true target / keeps the left value.
    Label is_false;
    LoadCondition(node->left(),
                  CodeGenState::LOAD,
                  true_target(),
                  &is_false,
                  false);
    if (has_cc()) {
      Branch(true, true_target());

      // Evaluate right side expression.
      __ bind(&is_false);
      LoadCondition(node->right(),
                    CodeGenState::LOAD,
                    true_target(),
                    false_target(),
                    false);

    } else {
      Label pop_and_continue, exit;

      // Avoid popping the result if it converts to 'true' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(r0, &exit, &pop_and_continue);
      Branch(true, &exit);

      // Pop the result of evaluating the first part.
      __ bind(&pop_and_continue);
      __ pop(r0);

      // Evaluate right side expression.
      __ bind(&is_false);
      Load(node->right());

      // Exit (always with a materialized value).
      __ bind(&exit);
    }

  } else {
    // Optimize for the case where (at least) one of the expressions
    // is a literal small integer.
    Literal* lliteral = node->left()->AsLiteral();
    Literal* rliteral = node->right()->AsLiteral();

    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
      Load(node->left());
      SmiOperation(node->op(), rliteral->handle(), false);

    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
      // Literal on the left: SmiOperation is told the operands are
      // reversed so non-commutative operators still work.
      Load(node->right());
      SmiOperation(node->op(), lliteral->handle(), true);

    } else {
      Load(node->left());
      Load(node->right());
      GenericOperation(node->op());
    }
  }
}
+
+
// Pushes the function of the current frame (read from the standard
// frame slot via FunctionOperand) onto the expression stack.
void ArmCodeGenerator::VisitThisFunction(ThisFunction* node) {
  __ Push(FunctionOperand());
}
+
+
// Emits ARM code for comparison operators. Two fast paths are
// generated before the generic case: (1) comparisons against the
// literal 'null', and (2) 'typeof <expr> == <string literal>' tests.
// Everything else loads both operands and dispatches on the operator.
void ArmCodeGenerator::VisitCompareOperation(CompareOperation* node) {
  Comment cmnt(masm_, "[ CompareOperation");

  // Get the expressions from the node.
  Expression* left = node->left();
  Expression* right = node->right();
  Token::Value op = node->op();

  // NOTE: To make null checks efficient, we check if either left or
  // right is the literal 'null'. If so, we optimize the code by
  // inlining a null check instead of calling the (very) general
  // runtime routine for checking equality.

  bool left_is_null =
      left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
  bool right_is_null =
      right->AsLiteral() != NULL && right->AsLiteral()->IsNull();

  if (op == Token::EQ || op == Token::EQ_STRICT) {
    // The 'null' value is only equal to 'null' or 'undefined'.
    if (left_is_null || right_is_null) {
      // Load the non-null operand; r0 holds it after the load.
      Load(left_is_null ? right : left);
      Label exit, undetectable;
      __ cmp(r0, Operand(Factory::null_value()));

      // The 'null' value is only equal to 'undefined' if using
      // non-strict comparisons.
      if (op != Token::EQ_STRICT) {
        __ b(eq, &exit);
        __ cmp(r0, Operand(Factory::undefined_value()));

        // NOTE: it can be undetectable object.
        __ b(eq, &exit);
        __ tst(r0, Operand(kSmiTagMask));

        // Smis (tag bits clear) are never equal to null; everything
        // else is checked for the undetectable map bit below.
        __ b(ne, &undetectable);
        __ pop(r0);
        __ b(false_target());

        __ bind(&undetectable);
        __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
        __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
        __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
        __ cmp(r2, Operand(1 << Map::kIsUndetectable));
      }

      // Discard the compared value and deliver the result as a
      // condition code.
      __ bind(&exit);
      __ pop(r0);

      cc_reg_ = eq;
      return;
    }
  }


  // NOTE: To make typeof testing for natives implemented in
  // JavaScript really efficient, we generate special code for
  // expressions of the form: 'typeof <expression> == <string>'.

  UnaryOperation* operation = left->AsUnaryOperation();
  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
      (operation != NULL && operation->op() == Token::TYPEOF) &&
      (right->AsLiteral() != NULL &&
       right->AsLiteral()->handle()->IsString())) {
    Handle<String> check(String::cast(*right->AsLiteral()->handle()));

    // Load the operand, move it to register r1, and restore TOS.
    LoadTypeofExpression(operation->expression());
    __ mov(r1, Operand(r0));
    __ pop(r0);

    if (check->Equals(Heap::number_symbol())) {
      // Numbers are smis or heap numbers.
      __ tst(r1, Operand(kSmiTagMask));
      __ b(eq, true_target());
      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
      __ cmp(r1, Operand(Factory::heap_number_map()));
      cc_reg_ = eq;

    } else if (check->Equals(Heap::string_symbol())) {
      __ tst(r1, Operand(kSmiTagMask));
      __ b(eq, false_target());

      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));

      // NOTE: it might be an undetectable string object
      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
      __ cmp(r2, Operand(1 << Map::kIsUndetectable));
      __ b(eq, false_target());

      // String iff instance type is below FIRST_NONSTRING_TYPE.
      __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
      __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
      cc_reg_ = lt;

    } else if (check->Equals(Heap::boolean_symbol())) {
      // Only the true and false oddballs have typeof 'boolean'.
      __ cmp(r1, Operand(Factory::true_value()));
      __ b(eq, true_target());
      __ cmp(r1, Operand(Factory::false_value()));
      cc_reg_ = eq;

    } else if (check->Equals(Heap::undefined_symbol())) {
      __ cmp(r1, Operand(Factory::undefined_value()));
      __ b(eq, true_target());

      __ tst(r1, Operand(kSmiTagMask));
      __ b(eq, false_target());

      // NOTE: it can be undetectable object.
      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
      __ cmp(r2, Operand(1 << Map::kIsUndetectable));

      cc_reg_ = eq;

    } else if (check->Equals(Heap::function_symbol())) {
      __ tst(r1, Operand(kSmiTagMask));
      __ b(eq, false_target());
      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
      __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
      __ cmp(r1, Operand(JS_FUNCTION_TYPE));
      cc_reg_ = eq;

    } else if (check->Equals(Heap::object_symbol())) {
      __ tst(r1, Operand(kSmiTagMask));
      __ b(eq, false_target());

      // typeof null is 'object'.
      __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
      __ cmp(r1, Operand(Factory::null_value()));
      __ b(eq, true_target());

      // NOTE: it might be an undetectable object.
      __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
      __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
      __ cmp(r1, Operand(1 << Map::kIsUndetectable));
      __ b(eq, false_target());

      // Instance type must be within the JS-object range.
      __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
      __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
      __ b(lt, false_target());
      __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
      cc_reg_ = le;

    } else {
      // Uncommon case: Typeof testing against a string literal that
      // is never returned from the typeof operator.
      __ b(false_target());
    }
    return;
  }

  // Generic case: evaluate both operands and dispatch on the operator.
  Load(left);
  Load(right);
  switch (op) {
    case Token::EQ:
      Comparison(eq, false);
      break;

    case Token::LT:
      Comparison(lt);
      break;

    case Token::GT:
      Comparison(gt);
      break;

    case Token::LTE:
      Comparison(le);
      break;

    case Token::GTE:
      Comparison(ge);
      break;

    case Token::EQ_STRICT:
      Comparison(eq, true);
      break;

    case Token::IN:
      __ Push(Operand(1));  // not counting receiver
      __ InvokeBuiltin("IN", 1, CALL_JS);
      break;

    case Token::INSTANCEOF:
      __ Push(Operand(1));  // not counting receiver
      __ InvokeBuiltin("INSTANCE_OF", 1, CALL_JS);
      break;

    default:
      UNREACHABLE();
  }
}
+
+
+void ArmCodeGenerator::RecordStatementPosition(Node* node) {
+ if (FLAG_debug_info) {
+ int statement_pos = node->statement_pos();
+ if (statement_pos == kNoPosition) return;
+ __ RecordStatementPosition(statement_pos);
+ }
+}
+
+
// Emits the JavaScript frame entry sequence; delegates directly to
// the macro assembler.
void ArmCodeGenerator::EnterJSFrame(int argc, RegList callee_saved) {
  __ EnterJSFrame(argc, callee_saved);
}
+
+
// Emits the JavaScript frame exit sequence via a stub call.
void ArmCodeGenerator::ExitJSFrame(RegList callee_saved, ExitJSFlag flag) {
  // The JavaScript debugger expects ExitJSFrame to be implemented as a stub,
  // so that a breakpoint can be inserted at the end of a function.
  int num_callee_saved = NumRegs(callee_saved);

  // We support a fixed number of register variable configurations:
  // the callee-saved set must be exactly the canonical list for its size.
  ASSERT(num_callee_saved <= 5 &&
         JSCalleeSavedList(num_callee_saved) == callee_saved);

  JSExitStub stub(num_callee_saved, callee_saved, flag);
  __ CallJSExitStub(&stub);
}
+
+
+#undef __
+
+
+// -----------------------------------------------------------------------------
+// CodeGenerator interface
+
+// MakeCode() is just a wrapper for CodeGenerator::MakeCode()
+// so we don't have to expose the entire CodeGenerator class in
+// the .h file.
+Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval) {
+ Handle<Code> code = ArmCodeGenerator::MakeCode(fun, script, is_eval);
+ if (!code.is_null()) {
+ Counters::total_compiled_code_size.Increment(code->instruction_size());
+ }
+ return code;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "prettyprinter.h"
+#include "scopeinfo.h"
+#include "scopes.h"
+#include "runtime.h"
+
+namespace v8 { namespace internal {
+
// Code-generator command-line flags. DEFINE_bool/DEFINE_string create
// a flag in this compilation unit; DECLARE_bool references a flag
// defined elsewhere.
DEFINE_bool(trace, false, "trace function calls");
DEFINE_bool(defer_negation, true, "defer negation operation");
DECLARE_bool(debug_info);
DECLARE_bool(debug_code);

// Debug-only tracing and printing flags.
#ifdef DEBUG
DECLARE_bool(gc_greedy);
DEFINE_bool(trace_codegen, false,
            "print name of functions for which code is generated");
DEFINE_bool(print_code, false, "print generated code");
DEFINE_bool(print_builtin_code, false, "print generated code for builtins");
DEFINE_bool(print_source, false, "pretty print source code");
DEFINE_bool(print_builtin_source, false,
            "pretty print source code for builtins");
DEFINE_bool(print_ast, false, "print source AST");
DEFINE_bool(print_builtin_ast, false, "print source AST for builtins");
DEFINE_bool(trace_calls, false, "trace calls");
DEFINE_bool(trace_builtin_calls, false, "trace builtins calls");
DEFINE_string(stop_at, "", "function name where to insert a breakpoint");
#endif  // DEBUG


DEFINE_bool(check_stack, true,
            "check stack for overflow, interrupt, breakpoint");

// Operand designating the value at the top of the machine stack.
#define TOS (Operand(esp, 0))


class Ia32CodeGenerator;

// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
+
+// -----------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
class Reference BASE_EMBEDDED {
 public:
  // The enum values double as the number of stack slots the reference
  // occupies on the execution stack (see size() below).
  enum Type { ILLEGAL = -1, EMPTY = 0, NAMED = 1, KEYED = 2 };
  // Loads the reference for 'expression' via the code generator;
  // the destructor unloads it when the Reference goes out of scope.
  Reference(Ia32CodeGenerator* cgen, Expression* expression);
  ~Reference();

  Expression* expression() const { return expression_; }
  Type type() const { return type_; }
  // The type may be set exactly once, from ILLEGAL.
  void set_type(Type value) {
    ASSERT(type_ == ILLEGAL);
    type_ = value;
  }
  // Number of values kept on the execution stack for this reference.
  int size() const { return type_; }

  bool is_illegal() const { return type_ == ILLEGAL; }

 private:
  Ia32CodeGenerator* cgen_;
  Expression* expression_;
  Type type_;
};
+
+
+// -----------------------------------------------------------------------------
+// Code generation state
+
// Immutable snapshot of the code generator's current expression
// context: how a value is being accessed, the active reference (if
// any), and the control-flow targets for condition codes.
class CodeGenState BASE_EMBEDDED {
 public:
  enum AccessType {
    UNDEFINED,
    LOAD,
    LOAD_TYPEOF_EXPR,
    STORE,
    INIT_CONST
  };

  // Default state: no access in progress, no reference, no targets.
  CodeGenState()
      : access_(UNDEFINED),
        ref_(NULL),
        true_target_(NULL),
        false_target_(NULL) {
  }

  // State for a specific access with explicit branch targets.
  CodeGenState(AccessType access,
               Reference* ref,
               Label* true_target,
               Label* false_target)
      : access_(access),
        ref_(ref),
        true_target_(true_target),
        false_target_(false_target) {
  }

  AccessType access() const { return access_; }
  Reference* ref() const { return ref_; }
  Label* true_target() const { return true_target_; }
  Label* false_target() const { return false_target_; }

 private:
  AccessType access_;
  Reference* ref_;
  Label* true_target_;
  Label* false_target_;
};
+
+
+// -----------------------------------------------------------------------------
+// Ia32CodeGenerator
+
// IA-32 specialization of the code generator. Walks the AST via the
// visitor methods below and emits machine code through masm_.
class Ia32CodeGenerator: public CodeGenerator {
 public:
  // Compiles 'fun' and returns the generated code object, or a null
  // handle on stack overflow during compilation.
  static Handle<Code> MakeCode(FunctionLiteral* fun,
                               Handle<Script> script,
                               bool is_eval);

  MacroAssembler* masm() { return masm_; }

 private:
  // Assembler
  MacroAssembler* masm_;  // to generate code

  // Code generation state
  Scope* scope_;          // scope of the function being compiled
  Condition cc_reg_;      // condition holding the current boolean result
  CodeGenState* state_;   // current expression context (see CodeGenState)
  bool is_inside_try_;
  int break_stack_height_;

  // Labels
  Label function_return_;

  // Construction/destruction
  Ia32CodeGenerator(int buffer_size,
                    Handle<Script> script,
                    bool is_eval);
  virtual ~Ia32CodeGenerator() { delete masm_; }

  // Main code generation function
  void GenCode(FunctionLiteral* fun);

  // The following are used by class Reference.
  void LoadReference(Reference* ref);
  void UnloadReference(Reference* ref);
  friend class Reference;

  bool TryDeferNegate(Expression* x);

  // State accessors. has_cc() is true while the current value lives in
  // the condition register rather than being materialized.
  bool has_cc() const { return cc_reg_ >= 0; }
  CodeGenState::AccessType access() const { return state_->access(); }
  Reference* ref() const { return state_->ref(); }
  bool is_referenced() const { return state_->ref() != NULL; }
  Label* true_target() const { return state_->true_target(); }
  Label* false_target() const { return state_->false_target(); }

  // Expressions
  Operand GlobalObject() const {
    return ContextOperand(esi, Context::GLOBAL_INDEX);
  }

  // Support functions for accessing parameters.
  Operand ParameterOperand(int index) const {
    // Index -2 and -1 address the function and the receiver.
    ASSERT(-2 <= index && index < scope_->num_parameters());
    return Operand(ebp, (1 + scope_->num_parameters() - index) * kPointerSize);
  }

  Operand ReceiverOperand() const { return ParameterOperand(-1); }
  Operand FunctionOperand() const {
    return Operand(ebp, JavaScriptFrameConstants::kFunctionOffset);
  }

  Operand ContextOperand(Register context, int index) const {
    return Operand(context, Context::SlotOffset(index));
  }

  Operand SlotOperand(Slot* slot, Register tmp);

  void LoadCondition(Expression* x,
                     CodeGenState::AccessType access,
                     Label* true_target,
                     Label* false_target,
                     bool force_cc);
  void Load(Expression* x,
            CodeGenState::AccessType access = CodeGenState::LOAD);
  void LoadGlobal();

  // Special code for typeof expressions: Unfortunately, we must
  // be careful when loading the expression in 'typeof'
  // expressions. We are not allowed to throw reference errors for
  // non-existing properties of the global object, so we must make it
  // look like an explicit property access, instead of an access
  // through the context chain.
  void LoadTypeofExpression(Expression* x);

  // References
  void AccessReference(Reference* ref, CodeGenState::AccessType access);

  void GetValue(Reference* ref) { AccessReference(ref, CodeGenState::LOAD); }
  void SetValue(Reference* ref) { AccessReference(ref, CodeGenState::STORE); }
  void InitConst(Reference* ref) {
    AccessReference(ref, CodeGenState::INIT_CONST);
  }

  // Branches to one of the targets depending on the ToBoolean
  // conversion of the value on top of the stack.
  void ToBoolean(Label* true_target, Label* false_target);


  // Access property from the reference (must be at the TOS).
  void AccessReferenceProperty(Expression* key,
                               CodeGenState::AccessType access);

  void GenericOperation(Token::Value op,
                        OverwriteMode overwrite_mode = NO_OVERWRITE);

  bool InlinedGenericOperation(
      Token::Value op,
      const OverwriteMode overwrite_mode = NO_OVERWRITE,
      bool negate_result = false);
  void Comparison(Condition cc, bool strict = false);

  void SmiComparison(Condition cc, Handle<Object> value, bool strict = false);

  // Fast path for binary operations where one operand is a smi literal.
  void SmiOperation(Token::Value op,
                    Handle<Object> value,
                    bool reversed,
                    OverwriteMode overwrite_mode);

  void CallWithArguments(ZoneList<Expression*>* arguments, int position);

  // Declare global variables and functions in the given array of
  // name/value pairs.
  virtual void DeclareGlobals(Handle<FixedArray> pairs);

  // Instantiate the function boilerplate.
  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);

  // Control flow
  void Branch(bool if_true, Label* L);
  void CheckStack();
  void CleanStack(int num_bytes);

  // Node visitors: one Visit method per AST node type.
#define DEF_VISIT(type) \
  virtual void Visit##type(type* node);
  NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

  void RecordStatementPosition(Node* node);

  // Activation frames.
  void EnterJSFrame();
  void ExitJSFrame();

  // Generators for inlined runtime calls.
  virtual void GenerateShiftDownAndTailCall(ZoneList<Expression*>* args);
  virtual void GenerateSetThisFunction(ZoneList<Expression*>* args);
  virtual void GenerateGetThisFunction(ZoneList<Expression*>* args);
  virtual void GenerateSetThis(ZoneList<Expression*>* args);
  virtual void GenerateGetArgumentsLength(ZoneList<Expression*>* args);
  virtual void GenerateSetArgumentsLength(ZoneList<Expression*>* args);
  virtual void GenerateTailCallWithArguments(ZoneList<Expression*>* args);
  virtual void GenerateSetArgument(ZoneList<Expression*>* args);
  virtual void GenerateSquashFrame(ZoneList<Expression*>* args);
  virtual void GenerateExpandFrame(ZoneList<Expression*>* args);
  virtual void GenerateIsSmi(ZoneList<Expression*>* args);
  virtual void GenerateIsArray(ZoneList<Expression*>* args);

  virtual void GenerateArgumentsLength(ZoneList<Expression*>* args);
  virtual void GenerateArgumentsAccess(ZoneList<Expression*>* args);

  virtual void GenerateValueOf(ZoneList<Expression*>* args);
  virtual void GenerateSetValueOf(ZoneList<Expression*>* args);
};
+
+
+// -----------------------------------------------------------------------------
+// Ia32CodeGenerator implementation
+
+#define __ masm_->
+
+
// Compiles the function literal into an IA-32 code object. Returns a
// null handle if a stack overflow occurred during code generation. In
// DEBUG builds also handles the various printing/tracing flags.
Handle<Code> Ia32CodeGenerator::MakeCode(FunctionLiteral* flit,
                                         Handle<Script> script,
                                         bool is_eval) {
#ifdef DEBUG
  bool print_source = false;
  bool print_ast = false;
  bool print_code = false;
  const char* ftype;

  // Builtins compiled during bootstrapping use separate print flags.
  if (Bootstrapper::IsActive()) {
    print_source = FLAG_print_builtin_source;
    print_ast = FLAG_print_builtin_ast;
    print_code = FLAG_print_builtin_code;
    ftype = "builtin";
  } else {
    print_source = FLAG_print_source;
    print_ast = FLAG_print_ast;
    print_code = FLAG_print_code;
    ftype = "user-defined";
  }

  if (FLAG_trace_codegen || print_source || print_ast) {
    PrintF("*** Generate code for %s function: ", ftype);
    flit->name()->ShortPrint();
    PrintF(" ***\n");
  }

  if (print_source) {
    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
  }

  if (print_ast) {
    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
  }
#endif  // DEBUG

  // Generate code.
  const int initial_buffer_size = 4 * KB;
  Ia32CodeGenerator cgen(initial_buffer_size, script, is_eval);
  cgen.GenCode(flit);
  if (cgen.HasStackOverflow()) {
    // Report the overflow and signal failure with a null handle.
    Top::StackOverflow();
    return Handle<Code>::null();
  }

  // Process any deferred code.
  cgen.ProcessDeferred();

  // Allocate and install the code.
  CodeDesc desc;
  cgen.masm()->GetCode(&desc);
  ScopeInfo<> sinfo(flit->scope());
  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
  Handle<Code> code = Factory::NewCode(desc, &sinfo, flags);

  // Add unresolved entries in the code to the fixup list.
  Bootstrapper::AddFixup(*code, cgen.masm());

#ifdef DEBUG
  if (print_code) {
    // Print the source code if available.
    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
      PrintF("--- Raw source ---\n");
      StringInputBuffer stream(String::cast(script->source()));
      stream.Seek(flit->start_position());
      // flit->end_position() points to the last character in the stream. We
      // need to compensate by adding one to calculate the length.
      int source_len = flit->end_position() - flit->start_position() + 1;
      for (int i = 0; i < source_len; i++) {
        if (stream.has_more()) PrintF("%c", stream.GetNext());
      }
      PrintF("\n\n");
    }
    PrintF("--- Code ---\n");
    code->Print();
  }
#endif  // DEBUG

  return code;
}
+
+
// Constructs the generator with a fresh macro assembler over an
// internally-allocated buffer of 'buffer_size' bytes (freed by the
// destructor).
Ia32CodeGenerator::Ia32CodeGenerator(int buffer_size,
                                     Handle<Script> script,
                                     bool is_eval)
    : CodeGenerator(is_eval, script),
      masm_(new MacroAssembler(NULL, buffer_size)),
      scope_(NULL),
      cc_reg_(no_condition),
      state_(NULL),
      is_inside_try_(false),
      break_stack_height_(0) {
}
+
+
+// Calling conventions:
+// ebp: frame pointer
+// esp: stack pointer
+// edi: caller's parameter pointer
+// esi: callee's context
+
+
// Emits the complete body of a function: frame entry, arguments
// object allocation, local/context allocation, parameter copying into
// the context, declarations, the statement body, and an implicit
// return if the body doesn't end in one.
void Ia32CodeGenerator::GenCode(FunctionLiteral* fun) {
  // Record the position for debugging purposes.
  __ RecordPosition(fun->start_position());

  Scope* scope = fun->scope();
  ZoneList<Statement*>* body = fun->body();

  // Initialize state.
  { CodeGenState state;
    state_ = &state;
    scope_ = scope;
    cc_reg_ = no_condition;

    // Entry
    // stack: function, receiver, arguments, return address
    // esp: stack pointer
    // ebp: frame pointer
    // edi: caller's parameter pointer
    // esi: callee's context

    { Comment cmnt(masm_, "[ enter JS frame");
      EnterJSFrame();
    }
    // tos: code slot
#ifdef DEBUG
    // Break into the debugger when entering the function named by
    // --stop_at.
    if (strlen(FLAG_stop_at) > 0 &&
        fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // This section now only allocates and copies the formals into the
    // arguments object. It saves the address in ecx, which is saved
    // at any point before either garbage collection or ecx is
    // overwritten. The flag arguments_array_allocated communicates
    // with the store into the arguments variable and guards the lazy
    // pushes of ecx to TOS. The flag arguments_array_saved notes
    // when the push has happened.
    bool arguments_object_allocated = false;
    bool arguments_object_saved = false;

    // Allocate arguments object.
    // The arguments object pointer needs to be saved in ecx, since we need
    // to store arguments into the context.
    if (scope->arguments() != NULL) {
      ASSERT(scope->arguments_shadow() != NULL);
      Comment cmnt(masm_, "[ allocate arguments object");
      __ push(FunctionOperand());
      __ CallRuntime(Runtime::kNewArguments, 1);
      __ mov(ecx, Operand(eax));
      arguments_object_allocated = true;
    }

    // Allocate space for locals and initialize them.
    if (scope->num_stack_slots() > 0) {
      Comment cmnt(masm_, "[ allocate space for locals");
      __ Set(eax, Immediate(Factory::undefined_value()));
      for (int i = scope->num_stack_slots(); i-- > 0; ) __ push(eax);
    }

    if (scope->num_heap_slots() > 0) {
      Comment cmnt(masm_, "[ allocate local context");
      // Save the arguments object pointer, if any.
      if (arguments_object_allocated && !arguments_object_saved) {
        __ push(Operand(ecx));
        arguments_object_saved = true;
      }
      // Allocate local context.
      // Get outer context and create a new context based on it.
      __ push(FunctionOperand());
      // NOTE(review): only one value is pushed above, but the runtime
      // call declares 2 arguments — verify against Runtime::kNewContext's
      // expected argument count.
      __ CallRuntime(Runtime::kNewContext, 2);
      __ push(eax);
      // Update context local.
      // NOTE(review): this stores esi into the frame's context slot;
      // presumably the runtime call has already placed the new context
      // in esi — confirm.
      __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
      // Restore the arguments array pointer, if any.
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");

      // Note that iteration order is relevant here! If we have the same
      // parameter twice (e.g., function (x, y, x)), and that parameter
      // needs to be copied into the context, it must be the last argument
      // passed to the parameter that needs to be copied. This is a rare
      // case so we don't check for it, instead we rely on the copying
      // order: such a parameter is copied repeatedly into the same
      // context location and thus the last value is what is seen inside
      // the function.
      for (int i = 0; i < scope->num_parameters(); i++) {
        Variable* par = scope->parameter(i);
        Slot* slot = par->slot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          // Save the arguments object pointer, if any.
          if (arguments_object_allocated && !arguments_object_saved) {
            __ push(Operand(ecx));
            arguments_object_saved = true;
          }
          ASSERT(!scope->is_global_scope());  // no parameters in global scope
          __ mov(eax, ParameterOperand(i));
          // Loads ecx with context; used below in RecordWrite.
          __ mov(SlotOperand(slot, ecx), eax);
          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
          // Notify the write barrier about the store into the context.
          __ RecordWrite(ecx, offset, eax, ebx);
        }
      }
    }

    // This section stores the pointer to the arguments object that
    // was allocated and copied into above. If the address was not
    // saved to TOS, we push ecx onto the stack.

    // Store the arguments object.
    // This must happen after context initialization because
    // the arguments object may be stored in the context
    if (arguments_object_allocated) {
      ASSERT(scope->arguments() != NULL);
      ASSERT(scope->arguments_shadow() != NULL);
      Comment cmnt(masm_, "[ store arguments object");
      {
        Reference target(this, scope->arguments());
        if (!arguments_object_saved) {
          __ push(Operand(ecx));
        }
        SetValue(&target);
      }
      // The value of arguments must also be stored in .arguments.
      // TODO(1241813): This code can probably be improved by fusing it with
      // the code that stores the arguments object above.
      {
        Reference target(this, scope->arguments_shadow());
        Load(scope->arguments());
        SetValue(&target);
      }
    }

    // Generate code to 'execute' declarations and initialize
    // functions (source elements). In case of an illegal
    // redeclaration we need to handle that instead of processing the
    // declarations.
    if (scope->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope->declarations());
    }

    if (FLAG_trace) {
      __ CallRuntime(Runtime::kTraceEnter, 1);
      __ push(eax);
    }
    CheckStack();

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        __ CallRuntime(Runtime::kDebugTrace, 1);
        __ push(eax);
      }
#endif
      VisitStatements(body);

      // Generate a return statement if necessary: functions that don't
      // end in an explicit return implicitly return undefined.
      if (body->is_empty() || body->last()->AsReturnStatement() == NULL) {
        Literal undefined(Factory::undefined_value());
        ReturnStatement statement(&undefined);
        statement.set_statement_pos(fun->end_position());
        VisitReturnStatement(&statement);
      }
    }

    state_ = NULL;
  }

  // Code generation state must be reset.
  scope_ = NULL;
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
}
+
+
// Return an operand addressing the given slot.  For PARAMETER and LOCAL
// slots this is a frame-relative operand; for CONTEXT slots code is
// emitted that walks the context chain, clobbering 'tmp' in the process.
Operand Ia32CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER: return ParameterOperand(index);

    case Slot::LOCAL: {
      ASSERT(0 <= index && index < scope_->num_stack_slots());
      const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
      return Operand(ebp, kLocal0Offset - index * kPointerSize);
    }

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(esi));  // do not overwrite context register
      Register context = esi;
      int chain_length = scope_->ContextChainLength(slot->var()->scope());
      for (int i = chain_length; i-- > 0;) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference be-
      // cause the function context of a function context is itself. Before
      // deleting this mov we should try to create a counter-example first,
      // though...)
      __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      // Neither PARAMETER, LOCAL, nor CONTEXT: this slot type is not
      // expected here; return a dummy operand to satisfy the compiler.
      UNREACHABLE();
      return Operand(eax);
  }
}
+
+
+// Loads a value on TOS. If it is a boolean value, the result may have been
+// (partially) translated into branches, or it may have set the condition code
+// register. If force_cc is set, the value is forced to set the condition code
+// register and no value is pushed. If the condition code register was set,
+// has_cc() is true and cc_reg_ contains the condition to test for 'true'.
+void Ia32CodeGenerator::LoadCondition(Expression* x,
+ CodeGenState::AccessType access,
+ Label* true_target,
+ Label* false_target,
+ bool force_cc) {
+ ASSERT(access == CodeGenState::LOAD ||
+ access == CodeGenState::LOAD_TYPEOF_EXPR);
+ ASSERT(!has_cc() && !is_referenced());
+
+ CodeGenState* old_state = state_;
+ CodeGenState new_state(access, NULL, true_target, false_target);
+ state_ = &new_state;
+ Visit(x);
+ state_ = old_state;
+ if (force_cc && !has_cc()) {
+ ToBoolean(true_target, false_target);
+ }
+ ASSERT(has_cc() || !force_cc);
+}
+
+
// Emit code to evaluate expression 'x' and leave its value on TOS.
// Undoes any branch/condition-code translation performed by
// LoadCondition by materializing true/false values on the stack.
void Ia32CodeGenerator::Load(Expression* x, CodeGenState::AccessType access) {
  ASSERT(access == CodeGenState::LOAD ||
         access == CodeGenState::LOAD_TYPEOF_EXPR);

  Label true_target;
  Label false_target;
  LoadCondition(x, access, &true_target, &false_target, false);

  if (has_cc()) {
    // convert cc_reg_ into a bool
    // (push false on the fall-through path, true on the taken path)

    Label loaded, materialize_true;
    __ j(cc_reg_, &materialize_true);
    __ push(Immediate(Factory::false_value()));
    __ jmp(&loaded);
    __ bind(&materialize_true);
    __ push(Immediate(Factory::true_value()));
    __ bind(&loaded);
    cc_reg_ = no_condition;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // we have at least one condition value
    // that has been "translated" into a branch,
    // thus it needs to be loaded explicitly again
    Label loaded;
    __ jmp(&loaded);  // don't lose current TOS
    bool both = true_target.is_linked() && false_target.is_linked();
    // reincarnate "true", if necessary
    if (true_target.is_linked()) {
      __ bind(&true_target);
      __ push(Immediate(Factory::true_value()));
    }
    // if both "true" and "false" need to be reincarnated,
    // jump across code for "false"
    if (both)
      __ jmp(&loaded);
    // reincarnate "false", if necessary
    if (false_target.is_linked()) {
      __ bind(&false_target);
      __ push(Immediate(Factory::false_value()));
    }
    // everything is loaded at this point
    __ bind(&loaded);
  }
  ASSERT(!has_cc());
}
+
+
// Push the global object onto the stack.
void Ia32CodeGenerator::LoadGlobal() {
  __ push(GlobalObject());
}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the LOAD_TYPEOF_EXPR access type. => Need to handle
+// global variables w/o reference errors elsewhere.
+void Ia32CodeGenerator::LoadTypeofExpression(Expression* x) {
+ Variable* variable = x->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // NOTE: This is somewhat nasty. We force the compiler to load
+ // the variable as if through '<global>.<variable>' to make sure we
+ // do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ // TODO(1241834): Fetch the position from the variable instead of using
+ // no position.
+ Property property(&global, &key, kNoPosition);
+ Load(&property);
+ } else {
+ Load(x, CodeGenState::LOAD_TYPEOF_EXPR);
+ }
+}
+
+
// Construct a reference for 'expression' and immediately emit the code
// that loads its components (receiver/key) via LoadReference.
Reference::Reference(Ia32CodeGenerator* cgen, Expression* expression)
    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
  cgen->LoadReference(this);
}
+
+
// Emit the code that pops this reference's components off the stack
// when the reference goes out of scope.
Reference::~Reference() {
  cgen_->UnloadReference(this);
}
+
+
// Emit code to load the components of the reference onto the stack and
// classify the reference as NAMED, KEYED, or EMPTY.  Non-references
// cause a reference error to be thrown at runtime.
void Ia32CodeGenerator::LoadReference(Reference* ref) {
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    Load(property->obj());
    // Use a named reference if the key is a literal symbol.
    // We don't use a named reference if the key is a string that can be
    // legally parsed as an integer. This is because, otherwise we don't
    // get into the slow case code that handles [] on String objects.
    Literal* literal = property->key()->AsLiteral();
    uint32_t dummy;
    if (literal != NULL && literal->handle()->IsSymbol() &&
        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    if (var->is_global()) {
      // global variable
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      // local variable
      ref->set_type(Reference::EMPTY);
    }
  } else {
    // Not a valid reference target: evaluate the expression and throw
    // a reference error at runtime.
    Load(e);
    __ CallRuntime(Runtime::kThrowReferenceError, 1);
    __ push(eax);
  }
}
+
+
+void Ia32CodeGenerator::UnloadReference(Reference* ref) {
+ // Pop n references on the stack while preserving TOS
+ Comment cmnt(masm_, "[ UnloadReference");
+ int size = ref->size();
+ if (size <= 0) {
+ // Do nothing. No popping is necessary.
+ } else if (size == 1) {
+ __ pop(eax);
+ __ mov(TOS, eax);
+ } else {
+ __ pop(eax);
+ __ add(Operand(esp), Immediate(size * kPointerSize));
+ __ push(eax);
+ }
+}
+
+
+void Ia32CodeGenerator::AccessReference(Reference* ref,
+ CodeGenState::AccessType access) {
+ ASSERT(!has_cc());
+ ASSERT(ref->type() != Reference::ILLEGAL);
+ CodeGenState* old_state = state_;
+ CodeGenState new_state(access, ref, true_target(), false_target());
+ state_ = &new_state;
+ Visit(ref->expression());
+ state_ = old_state;
+}
+
+
// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void Ia32CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
  // Note: The generated code snippet cannot change 'reg'.
  // Only the condition code should be set.

  Comment cmnt(masm_, "[ ToBoolean");

  // the value to convert should be popped from the stack
  __ pop(eax);

  // Fast case checks

  // Check if value is 'false'.
  __ cmp(eax, Factory::false_value());
  __ j(equal, false_target);

  // Check if value is 'true'.
  __ cmp(eax, Factory::true_value());
  __ j(equal, true_target);

  // Check if reg is 'undefined'.
  __ cmp(eax, Factory::undefined_value());
  __ j(equal, false_target);

  // Check if reg is 'null'.
  __ cmp(eax, Factory::null_value());
  __ j(equal, false_target);

  // Check if value is a Smi.
  // The Smi 0 is falsy; any other Smi is truthy.
  __ cmp(eax, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
  __ j(equal, false_target);
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, true_target, taken);

  // Slow case: call the runtime.
  __ push(eax);  // undo the pop(eax) from above
  __ CallRuntime(Runtime::kToBool, 1);
  // Convert result (eax) to condition code: not_equal means 'true'.
  __ cmp(eax, Factory::false_value());

  ASSERT(not_equal == not_zero);
  cc_reg_ = not_equal;
}
+
+
// Emit an inline-cache call for a named or keyed property access (load
// or store) on the current reference.  The IC leaves its result in eax,
// which is pushed onto the stack at the end.
void Ia32CodeGenerator::AccessReferenceProperty(
    Expression* key,
    CodeGenState::AccessType access) {
  Reference::Type type = ref()->type();
  ASSERT(type != Reference::ILLEGAL);

  // TODO(1241834): Make sure that this is sufficient. If there is a chance
  // that reference errors can be thrown below, we must distinguish
  // between the 2 kinds of loads (typeof expression loads must not
  // throw a reference error).
  bool is_load = (access == CodeGenState::LOAD ||
                  access == CodeGenState::LOAD_TYPEOF_EXPR);

  if (type == Reference::NAMED) {
    // Compute the name of the property.
    Literal* literal = key->AsLiteral();
    Handle<String> name(String::cast(*literal->handle()));

    // Call the appropriate IC code.
    if (is_load) {
      Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
      Variable* var = ref()->expression()->AsVariableProxy()->AsVariable();
      // Setup the name register.
      __ Set(ecx, Immediate(name));
      if (var != NULL) {
        // Global variable accesses use a context-aware call site.
        ASSERT(var->is_global());
        __ call(ic, code_target_context);
      } else {
        __ call(ic, code_target);
      }
    } else {
      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
      // TODO(1222589): Make the IC grab the values from the stack.
      __ pop(eax);
      // Setup the name register.
      __ Set(ecx, Immediate(name));
      __ call(ic, code_target);
    }
  } else {
    // Access keyed property.
    ASSERT(type == Reference::KEYED);

    if (is_load) {
      // Call IC code.
      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
      Variable* var = ref()->expression()->AsVariableProxy()->AsVariable();
      if (var != NULL) {
        // Global variable accesses use a context-aware call site.
        ASSERT(var->is_global());
        __ call(ic, code_target_context);
      } else {
        __ call(ic, code_target);
      }
    } else {
      // Call IC code.
      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
      // TODO(1222589): Make the IC grab the values from the stack.
      __ pop(eax);
      __ call(ic, code_target);
    }
  }
  __ push(eax);  // IC call leaves result in eax, push it out
}
+
+
+#undef __
+#define __ masm->
+
+
// Static helpers shared by the stubs below for working with floating
// point (heap number) and Smi operands.
class FloatingPointHelper : public AllStatic {
 public:
  // Code pattern for loading floating point values. Input values must
  // be either Smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
  // floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
  // Test if operands are Smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);
  // Allocate a heap number in new space with undefined value.
  // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
  static void AllocateHeapNumber(MacroAssembler* masm,
                                 Label* need_gc,
                                 Register scratch1,
                                 Register scratch2);
};
+
+
// Code stub for generic binary operations (arithmetic and bit ops) with
// inlined Smi and heap-number fast paths.  'mode' controls whether an
// operand may be overwritten with the result; 'negate_result' requests
// that the result be negated (only supported for MUL).
class InlinedGenericOpStub: public CodeStub {
 public:
  InlinedGenericOpStub(Token::Value op, OverwriteMode mode, bool negate_result)
      : op_(op), mode_(mode), negate_result_(negate_result) { }

 private:
  Token::Value op_;       // the binary operation to perform
  OverwriteMode mode_;    // which operand (if any) may hold the result
  bool negate_result_;    // negate the result (MUL only)

  const char* GetName();

#ifdef DEBUG
  void Print() {
    PrintF("InlinedGenericOpStub (op %s), (mode %d), (negate_result %s)\n",
           Token::String(op_),
           static_cast<int>(mode_),
           negate_result_ ? "true" : "false");
  }
#endif

  // Minor key encoding in 16 bits OOOOOOOOOOOOOMMN.
  class NegateBits: public BitField<bool, 0, 1> {};
  class ModeBits: public BitField<OverwriteMode, 1, 2> {};
  class OpBits: public BitField<Token::Value, 3, 13> {};

  Major MajorKey() { return InlinedGenericOp; }
  int MinorKey() {
    // Encode the three parameters in a unique 16 bit value.
    return NegateBits::encode(negate_result_) |
           OpBits::encode(op_) |
           ModeBits::encode(mode_);
  }
  void Generate(MacroAssembler* masm);
};
+
+
+const char* InlinedGenericOpStub::GetName() {
+ switch (op_) {
+ case Token::ADD: return "InlinedGenericOpStub_ADD";
+ case Token::SUB: return "InlinedGenericOpStub_SUB";
+ case Token::MUL: return "InlinedGenericOpStub_MUL";
+ case Token::DIV: return "InlinedGenericOpStub_DIV";
+ case Token::BIT_OR: return "InlinedGenericOpStub_BIT_OR";
+ case Token::BIT_AND: return "InlinedGenericOpStub_BIT_AND";
+ case Token::BIT_XOR: return "InlinedGenericOpStub_BIT_XOR";
+ case Token::SAR: return "InlinedGenericOpStub_SAR";
+ case Token::SHL: return "InlinedGenericOpStub_SHL";
+ case Token::SHR: return "InlinedGenericOpStub_SHR";
+ default: return "InlinedGenericOpStub";
+ }
+}
+
+
// Generate code for a generic binary operation.  On entry the operands
// are on the stack: x at esp+2*kPointerSize, y at esp+1*kPointerSize.
// Fast paths handle Smi operands inline, then heap-number (double)
// operands on the x87 FPU; everything else falls through to the
// JavaScript builtins via call_runtime.
void InlinedGenericOpStub::Generate(MacroAssembler* masm) {
  Label call_runtime;

  __ mov(eax, Operand(esp, 1 * kPointerSize));  // Get y.
  __ mov(edx, Operand(esp, 2 * kPointerSize));  // Get x.
  switch (op_) {
    case Token::ADD: {
      // eax: y.
      // edx: x.
      if (negate_result_) UNIMPLEMENTED();
      Label revert;
      __ mov(ecx, Operand(eax));
      __ or_(ecx, Operand(edx));  // ecx = x | y.
      __ add(eax, Operand(edx));  // Add y optimistically.
      // Go slow-path in case of overflow.
      __ j(overflow, &revert, not_taken);
      // Go slow-path in case of non-Smi operands.
      ASSERT(kSmiTag == 0);  // adjust code below
      __ test(ecx, Immediate(kSmiTagMask));
      __ j(not_zero, &revert, not_taken);
      __ ret(2 * kPointerSize);  // Remove all operands.

      // Revert optimistic add.
      __ bind(&revert);
      __ sub(eax, Operand(edx));
      break;
    }

    case Token::SUB: {
      // eax: y.
      // edx: x.
      if (negate_result_) UNIMPLEMENTED();
      Label revert;
      __ mov(ecx, Operand(edx));
      __ or_(ecx, Operand(eax));  // ecx = x | y.
      __ sub(edx, Operand(eax));  // Subtract y optimistically.
      // Go slow-path in case of overflow.
      __ j(overflow, &revert, not_taken);
      // Go slow-path in case of non-Smi operands.
      ASSERT(kSmiTag == 0);  // adjust code below
      __ test(ecx, Immediate(kSmiTagMask));
      __ j(not_zero, &revert, not_taken);
      __ mov(eax, Operand(edx));
      __ ret(2 * kPointerSize);  // Remove all operands.

      // Revert optimistic sub.
      __ bind(&revert);
      __ add(edx, Operand(eax));
      break;
    }

    case Token::MUL: {
      // eax: y
      // edx: x
      // a) both operands SMI and result fits into a SMI -> return.
      // b) at least one of operands non-SMI -> non_smi_operands.
      // c) result does not fit in a SMI -> non_smi_result.
      Label non_smi_operands, non_smi_result;
      // Tag check.
      __ mov(ecx, Operand(edx));
      __ or_(ecx, Operand(eax));  // ecx = x | y.
      ASSERT(kSmiTag == 0);  // Adjust code below.
      __ test(ecx, Immediate(kSmiTagMask));
      // Jump if not both Smi; check if float numbers.
      __ j(not_zero, &non_smi_operands, not_taken);

      // Get copies of operands.
      __ mov(ebx, Operand(eax));
      __ mov(ecx, Operand(edx));
      // If the smi tag is 0 we can just leave the tag on one operand.
      ASSERT(kSmiTag == 0);  // adjust code below
      // Remove tag from one of the operands (but keep sign).
      __ sar(ecx, kSmiTagSize);
      // Do multiplication.
      __ imul(eax, Operand(ecx));  // Multiplication of Smis; result in eax.
      // Go slow on overflows.
      __ j(overflow, &non_smi_result, not_taken);
      // ...but operands OK for float arithmetic.

      if (negate_result_) {
        // Negate via 0 - eax so that overflow (negating the most
        // negative int) can be detected.
        __ xor_(ecx, Operand(ecx));
        __ sub(ecx, Operand(eax));
        // Go slow on overflows.
        __ j(overflow, &non_smi_result, not_taken);
        __ mov(eax, Operand(ecx));
      }
      // If the result is +0 we may need to check if the result should
      // really be -0. Welcome to the -0 fan club.
      __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result);

      __ ret(2 * kPointerSize);

      __ bind(&non_smi_result);
      // TODO(1243132): Do not check float operands here.
      __ bind(&non_smi_operands);
      // Reload the original (untouched) operands from the stack.
      __ mov(eax, Operand(esp, 1 * kPointerSize));
      __ mov(edx, Operand(esp, 2 * kPointerSize));
      break;
    }

    case Token::DIV: {
      // eax: y
      // edx: x
      if (negate_result_) UNIMPLEMENTED();
      Label non_smi_operands, non_smi_result, division_by_zero;
      __ mov(ebx, Operand(eax));  // Get y
      __ mov(eax, Operand(edx));  // Get x

      __ cdq();  // Sign extend eax into edx:eax.
      // Tag check.
      __ mov(ecx, Operand(ebx));
      __ or_(ecx, Operand(eax));  // ecx = x | y.
      ASSERT(kSmiTag == 0);  // Adjust code below.
      __ test(ecx, Immediate(kSmiTagMask));
      // Jump if not both Smi; check if float numbers.
      __ j(not_zero, &non_smi_operands, not_taken);
      __ test(ebx, Operand(ebx));  // Check for 0 divisor.
      __ j(zero, &division_by_zero, not_taken);

      __ idiv(ebx);
      // Check for the corner case of dividing the most negative smi by -1.
      // (We cannot use the overflow flag, since it is not set by idiv.)
      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      __ cmp(eax, 0x40000000);
      __ j(equal, &non_smi_result);
      // If the result is +0 we may need to check if the result should
      // really be -0. Welcome to the -0 fan club.
      __ NegativeZeroTest(eax, ecx, &non_smi_result);  // Use ecx = x | y.
      __ test(edx, Operand(edx));
      // Use floats if there's a remainder.
      __ j(not_zero, &non_smi_result, not_taken);
      // Re-tag the quotient as a Smi.
      __ shl(eax, kSmiTagSize);
      __ ret(2 * kPointerSize);  // Remove all operands.

      __ bind(&division_by_zero);
      __ mov(eax, Operand(esp, 1 * kPointerSize));
      __ mov(edx, Operand(esp, 2 * kPointerSize));
      __ jmp(&call_runtime);  // Division by zero must go through runtime.

      __ bind(&non_smi_result);
      // TODO(1243132): Do not check float operands here.
      __ bind(&non_smi_operands);
      // Reload the original (untouched) operands from the stack.
      __ mov(eax, Operand(esp, 1 * kPointerSize));
      __ mov(edx, Operand(esp, 2 * kPointerSize));
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      // Smi-case for bitops should already have been inlined.
      break;
    }

    default: {
      UNREACHABLE();
    }
  }

  // eax: y
  // edx: x
  FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);

  // Fast-case: Both operands are numbers.

  // Allocate a heap number, if needed.
  // Bitops allocate _after_ computation to allow for smi results.
  if (!Token::IsBitOp(op_)) {
    Label skip_allocation;
    switch (mode_) {
      case OVERWRITE_LEFT:
        __ mov(eax, Operand(edx));
        // Fall through!
      case OVERWRITE_RIGHT:
        // If the argument in eax is already an object, we skip the
        // allocation of a heap number.
        __ test(eax, Immediate(kSmiTagMask));
        __ j(not_zero, &skip_allocation, not_taken);
        // Fall through!
      case NO_OVERWRITE:
        FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime, ecx, edx);
        __ bind(&skip_allocation);
        break;
      default: UNREACHABLE();
    }
  }

  FloatingPointHelper::LoadFloatOperands(masm, ecx);

  switch (op_) {
    case Token::ADD: {
      __ faddp(1);
      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
      __ ret(2 * kPointerSize);
      break;
    }

    case Token::SUB: {
      __ fsubp(1);
      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
      __ ret(2 * kPointerSize);
      break;
    }

    case Token::MUL: {
      __ fmulp(1);
      if (negate_result_) __ fchs();
      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
      __ ret(2 * kPointerSize);
      break;
    }

    case Token::DIV: {
      __ fdivp(1);
      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
      __ ret(2 * kPointerSize);
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      Label non_int32_operands, non_smi_result, skip_allocation;
      // Reserve space for converted numbers.
      __ sub(Operand(esp), Immediate(2 * kPointerSize));

      // Check if right operand is int32.
      // Round-trip through int32 and compare with the original; the
      // x87 status word is moved into EFLAGS via fnstsw/sahf.
      __ fist_s(Operand(esp, 1 * kPointerSize));
      __ fild_s(Operand(esp, 1 * kPointerSize));
      __ fucompp();
      __ fnstsw_ax();
      __ sahf();
      __ j(not_zero, &non_int32_operands);
      __ j(parity_even, &non_int32_operands);

      // Check if left operand is int32.
      __ fist_s(Operand(esp, 0 * kPointerSize));
      __ fild_s(Operand(esp, 0 * kPointerSize));
      __ fucompp();
      __ fnstsw_ax();
      __ sahf();
      __ j(not_zero, &non_int32_operands);
      __ j(parity_even, &non_int32_operands);

      // Get int32 operands and perform bitop.
      __ pop(eax);
      __ pop(ecx);
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
        case Token::SAR: __ sar(eax); break;
        case Token::SHL: __ shl(eax); break;
        case Token::SHR: __ shr(eax); break;
        default: UNREACHABLE();
      }

      // Check if result is non-negative and fits in a smi.
      __ test(eax, Immediate(0xc0000000));
      __ j(not_zero, &non_smi_result);

      // Tag smi result and return.
      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
      __ lea(eax, Operand(eax, times_2, kSmiTag));
      __ ret(2 * kPointerSize);

      // All ops except SHR return a signed int32 that we load in a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, Operand(eax));  // ebx: result
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ test(eax, Immediate(kSmiTagMask));
            __ j(not_zero, &skip_allocation, not_taken);
            // Fall through!
          case NO_OVERWRITE:
            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
                                                    ecx, edx);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        __ mov(Operand(esp, 1 * kPointerSize), ebx);
        __ fild_s(Operand(esp, 1 * kPointerSize));
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(2 * kPointerSize);
      }
      __ bind(&non_int32_operands);
      // Restore stacks and operands before calling runtime.
      __ ffree(0);
      __ add(Operand(esp), Immediate(2 * kPointerSize));

      // SHR should return uint32 - go to runtime for non-smi/negative result.
      if (op_ == Token::SHR) __ bind(&non_smi_result);
      __ mov(eax, Operand(esp, 1 * kPointerSize));
      __ mov(edx, Operand(esp, 2 * kPointerSize));
      break;
    }

    default: UNREACHABLE(); break;
  }

  // Slow-case: Use the runtime system to get the right result.
  __ bind(&call_runtime);
  if (negate_result_) {
    switch (op_) {
      case Token::MUL:
        __ InvokeBuiltin(Builtins::MULNEG, JUMP_FUNCTION);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    switch (op_) {
      case Token::ADD:
        __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
        break;
      case Token::SUB:
        __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
        break;
      case Token::MUL:
        __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
        break;
      case Token::DIV:
        __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
        break;
      case Token::BIT_OR:
        __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
        break;
      case Token::BIT_AND:
        __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
        break;
      case Token::BIT_XOR:
        __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
        break;
      case Token::SAR:
        __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
        break;
      case Token::SHL:
        __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
        break;
      case Token::SHR:
        __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
        break;

      default:
        UNREACHABLE();
    }
  }
}
+
+
// Allocate a heap number by bumping the new-space allocation top.
// On success the tagged pointer is left in eax; scratch1 and scratch2
// are clobbered.  Jumps to 'need_gc' if new space is full.
void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
                                             Label* need_gc,
                                             Register scratch1,
                                             Register scratch2) {
  ExternalReference allocation_top =
      ExternalReference::new_space_allocation_top_address();
  ExternalReference allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  __ mov(Operand(scratch1), Immediate(allocation_top));
  __ mov(eax, Operand(scratch1, 0));
  __ lea(scratch2, Operand(eax, HeapNumber::kSize));  // scratch2: new top
  __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
  __ j(above, need_gc, not_taken);  // allocation would exceed the limit

  __ mov(Operand(scratch1, 0), scratch2);  // store new top
  __ mov(Operand(eax, HeapObject::kMapOffset),
         Immediate(Factory::heap_number_map()));
  // Tag old top and use as result.
  __ add(Operand(eax), Immediate(kHeapObjectTag));
}
+
+
// Load the two operands at TOS+2 and TOS+1 onto the FPU stack.  Each
// operand may be a Smi (converted via a push/fild_s round trip) or a
// heap number (loaded directly from its value field).
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                            Register scratch) {
  Label load_smi_1, load_smi_2, done_load_1, done;
  // First operand (x, at TOS+2).
  __ mov(scratch, Operand(esp, 2 * kPointerSize));
  __ test(scratch, Immediate(kSmiTagMask));
  __ j(zero, &load_smi_1, not_taken);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ bind(&done_load_1);

  // Second operand (y, at TOS+1).
  __ mov(scratch, Operand(esp, 1 * kPointerSize));
  __ test(scratch, Immediate(kSmiTagMask));
  __ j(zero, &load_smi_2, not_taken);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ jmp(&done);

  // Smi case: untag, then load onto the FPU stack via memory.
  __ bind(&load_smi_1);
  __ sar(scratch, kSmiTagSize);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);
  __ jmp(&done_load_1);

  __ bind(&load_smi_2);
  __ sar(scratch, kSmiTagSize);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);

  __ bind(&done);
}
+
+
// Verify that both operands (edx and eax) are Smis or heap numbers.
// Falls through when both are numbers; jumps to 'non_float' otherwise.
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                             Label* non_float,
                                             Register scratch) {
  Label test_other, done;
  // test if both operands are floats or Smi -> scratch=k_is_float;
  // otherwise scratch=k_not_float
  __ test(edx, Immediate(kSmiTagMask));
  __ j(zero, &test_other, not_taken);  // argument in edx is OK (Smi)
  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
  __ cmp(scratch, Factory::heap_number_map());
  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN

  __ bind(&test_other);
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &done);  // argument in eax is OK (Smi)
  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(scratch, Factory::heap_number_map());
  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN

  // Fall-through: Both operands are numbers.
  __ bind(&done);
}
+
+
+#undef __
+#define __ masm->
+
+
// Generate code for unary minus.  The operand is in eax; non-Smi
// operands, zero (to preserve -0 semantics) and overflow (negating the
// most negative Smi) are handled by the UNARY_MINUS builtin.
void UnarySubStub::Generate(MacroAssembler* masm) {
  Label undo;
  Label slow;
  Label done;

  // Enter runtime system if the value is not a smi.
  __ test(eax, Immediate(kSmiTagMask));
  __ j(not_zero, &slow, not_taken);

  // Enter runtime system if the value of the expression is zero
  // to make sure that we switch between 0 and -0.
  __ test(eax, Operand(eax));
  __ j(zero, &slow, not_taken);

  // The value of the expression is a smi that is not zero. Try
  // optimistic subtraction '0 - value'.
  __ mov(edx, Operand(eax));
  __ Set(eax, Immediate(0));
  __ sub(eax, Operand(edx));
  __ j(overflow, &undo, not_taken);

  // If result is a smi we are done.
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &done, taken);

  // Undo optimistic sub and enter runtime system.
  __ bind(&undo);
  __ mov(eax, Operand(edx));

  // Enter runtime system.
  // Re-push the operand below the return address for the builtin call.
  __ bind(&slow);
  __ pop(ecx);  // pop return address
  __ push(eax);
  __ push(ecx);  // push return address
  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);

  __ bind(&done);

  masm->StubReturn(1);
}
+
+
// TODO(1217800): Implement MOD like ADD/SUB/MUL/DIV
// and get rid of GenericOpStub.
// Generate code for MOD.  On entry y is in eax and x is on the stack
// (at esp+1*kPointerSize).  The Smi fast path uses idiv and returns the
// remainder; non-Smi operands or a zero divisor fall back to the MOD
// builtin.
void GenericOpStub::Generate(MacroAssembler* masm) {
  switch (op_) {
    case Token::MOD: {
      Label fast, slow;
      __ mov(ebx, Operand(eax));  // get y
      __ mov(eax, Operand(esp, 1 * kPointerSize));  // get x
      __ cdq();  // sign extend eax into edx:eax
      // tag check
      __ mov(ecx, Operand(ebx));
      __ or_(ecx, Operand(eax));  // ecx = x | y;
      ASSERT(kSmiTag == 0);  // adjust code below
      __ test(ecx, Immediate(kSmiTagMask));
      __ j(not_zero, &slow, not_taken);
      __ test(ebx, Operand(ebx));  // test for y == 0
      __ j(not_zero, &fast, taken);

      // Slow case: Call native operator implementation.
      // Re-push y below the return address for the builtin call.
      __ bind(&slow);
      __ pop(ecx);  // pop return address
      __ push(ebx);
      __ push(ecx);  // push return address
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);

      // Fast case: Do integer division and use remainder.
      __ bind(&fast);
      __ idiv(ebx);
      // A zero remainder may need to be -0; go slow in that case.
      __ NegativeZeroTest(edx, ecx, &slow);  // use ecx = x | y
      __ mov(eax, Operand(edx));
      break;
    }

    default: UNREACHABLE();
  }
  masm->StubReturn(2);
}
+
+
// Code stub for accessing the arguments object: either its length
// (is_length == true) or an element by index (is_length == false).
// Handles both normal frames and arguments adaptor frames.
class ArgumentsAccessStub: public CodeStub {
 public:
  explicit ArgumentsAccessStub(bool is_length) : is_length_(is_length) { }

 private:
  bool is_length_;  // true: read arguments.length; false: read an element

  Major MajorKey() { return ArgumentsAccess; }
  int MinorKey() { return is_length_ ? 1 : 0; }
  void Generate(MacroAssembler* masm);

  const char* GetName() { return "ArgumentsAccessStub"; }

#ifdef DEBUG
  void Print() {
    PrintF("ArgumentsAccessStub (is_length %s)\n",
           is_length_ ? "true" : "false");
  }
#endif
};
+
+
// Generate code to read the arguments length (already in eax for the
// normal-frame case) or an argument by key (key at esp+1*kPointerSize).
// If the calling frame is an arguments adaptor frame, the actual count
// and arguments are read from that frame instead.
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
  // Check that the key is a smi for non-length access.
  Label slow;
  if (!is_length_) {
    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // skip return address
    __ test(ebx, Immediate(kSmiTagMask));
    __ j(not_zero, &slow, not_taken);
  }

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
  __ j(equal, &adaptor);

  // The displacement is used for skipping the return address on the
  // stack. It is the offset of the last parameter (if any) relative
  // to the frame pointer.
  static const int kDisplacement = 1 * kPointerSize;
  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this

  if (is_length_) {
    // Do nothing. The length is already in register eax.
  } else {
    // Check index against formal parameters count limit passed in
    // through register eax. Use unsigned comparison to get negative
    // check for free.
    __ cmp(ebx, Operand(eax));
    __ j(above_equal, &slow, not_taken);

    // Read the argument from the stack.
    // Both the count (eax) and the key (ebx) are Smis, i.e. already
    // value*2, so times_2 scaling yields pointer-sized offsets.
    __ lea(edx, Operand(ebp, eax, times_2, 0));
    __ neg(ebx);
    __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
  }

  // Return the length or the argument.
  __ ret(0);

  // Arguments adaptor case: Find the length or the actual argument in
  // the calling frame.
  __ bind(&adaptor);
  if (is_length_) {
    // Read the arguments length from the adaptor frame.
    __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  } else {
    // Check index against actual arguments limit found in the
    // arguments adaptor frame. Use unsigned comparison to get
    // negative check for free.
    __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ cmp(ebx, Operand(ecx));
    __ j(above_equal, &slow, not_taken);

    // Read the argument from the stack.
    __ lea(edx, Operand(edx, ecx, times_2, 0));
    __ neg(ebx);
    __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
  }

  // Return the length or the argument.
  __ ret(0);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  if (!is_length_) {
    __ bind(&slow);
    __ Set(eax, Immediate(0));  // not counting receiver
    __ JumpToBuiltin(ExternalReference(Runtime::kGetArgumentsProperty));
  }
}
+
+
+#undef __
+#define __ masm_->
+
+
+// Return true if code was generated for operation 'type'.
+// NOTE: The code below assumes that the slow cases (calls to runtime)
+// never return a constant/immutable object.
+// TODO(1217800): MOD is not yet implemented.
+bool Ia32CodeGenerator::InlinedGenericOperation(
+ Token::Value op,
+ const OverwriteMode overwrite_mode,
+ bool negate_result) {
+ const char* comment = NULL;
+ if (negate_result) {
+ switch (op) {
+ case Token::ADD: comment = "[ GenericOpCode Token::ADDNEG"; break;
+ case Token::SUB: comment = "[ GenericOpCode Token::SUBNEG"; break;
+ case Token::MUL: comment = "[ GenericOpCode Token::MULNEG"; break;
+ case Token::DIV: comment = "[ GenericOpCode Token::DIVNEG"; break;
+ default: return false;
+ }
+ } else {
+ switch (op) {
+ case Token::ADD: comment = "[ GenericOpCode Token::ADD"; break;
+ case Token::SUB: comment = "[ GenericOpCode Token::SUB"; break;
+ case Token::MUL: comment = "[ GenericOpCode Token::MUL"; break;
+ case Token::DIV: comment = "[ GenericOpCode Token::DIV"; break;
+ default: return false;
+ }
+ }
+ Comment cmnt(masm_, comment);
+ InlinedGenericOpStub stub(op, overwrite_mode, negate_result);
+ __ CallStub(&stub);
+ __ push(eax);
+ return true;
+}
+
+
// Emit code for a generic binary operation with both operands on the
// stack (left below right).  MOD is delegated to GenericOpStub; the
// bitwise and shift operations get an inlined smi fast path with a stub
// call as the slow case; COMMA simply discards the left operand.  The
// result is left on top of the stack.
void Ia32CodeGenerator::GenericOperation(Token::Value op,
                                         OverwriteMode overwrite_mode) {
  // Stub is entered with a call: 'return address' is on stack.
  switch (op) {
    case Token::MOD: {
      GenericOpStub stub(op);
      __ pop(eax);
      __ CallStub(&stub);
      __ push(eax);
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR: {
      Label slow, exit;
      __ pop(eax);  // get y
      __ pop(edx);  // get x
      __ mov(ecx, Operand(edx));  // prepare smi check
      // tag check
      __ or_(ecx, Operand(eax));  // ecx = x | y;
      ASSERT(kSmiTag == 0);  // adjust code below
      __ test(ecx, Immediate(kSmiTagMask));
      // NOTE(review): the 'taken' hint here is inconsistent with the
      // not_taken hints used for the other smi slow-case jumps in this
      // function; it only affects branch prediction, not correctness.
      __ j(not_zero, &slow, taken);
      // For two smis the bitwise operations preserve the tag (tag is 0),
      // so the result needs no re-tagging.
      switch (op) {
        case Token::BIT_OR:  __ or_(eax, Operand(edx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(edx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(edx)); break;
        default: UNREACHABLE();
      }
      __ jmp(&exit);
      __ bind(&slow);
      __ push(edx);  // restore stack slots
      __ push(eax);
      InlinedGenericOpStub stub(op, overwrite_mode, false);
      __ CallStub(&stub);
      __ bind(&exit);
      __ push(eax);  // push the result to the stack
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      Label slow, exit;
      __ pop(edx);  // get y
      __ pop(eax);  // get x
      // tag check
      __ mov(ecx, Operand(edx));
      __ or_(ecx, Operand(eax));  // ecx = x | y;
      ASSERT(kSmiTag == 0);  // adjust code below
      __ test(ecx, Immediate(kSmiTagMask));
      __ j(not_zero, &slow, not_taken);
      // get copies of operands
      __ mov(ebx, Operand(eax));
      __ mov(ecx, Operand(edx));
      // remove tags from operands (but keep sign)
      __ sar(ebx, kSmiTagSize);
      __ sar(ecx, kSmiTagSize);
      // perform operation (shift count is in cl)
      switch (op) {
        case Token::SAR:
          __ sar(ebx);
          // no checks of result necessary
          break;

        case Token::SHR:
          __ shr(ebx);
          // check that the *unsigned* result fits in a smi
          // neither of the two high-order bits can be set:
          // - 0x80000000: high bit would be lost when smi tagging
          // - 0x40000000: this number would convert to negative when
          // smi tagging these two cases can only happen with shifts
          // by 0 or 1 when handed a valid smi
          __ test(ebx, Immediate(0xc0000000));
          __ j(not_zero, &slow, not_taken);
          break;

        case Token::SHL:
          __ shl(ebx);
          // check that the *signed* result fits in a smi
          // (adding 0x40000000 maps the valid smi range onto values with
          // bit 31 clear)
          __ lea(ecx, Operand(ebx, 0x40000000));
          __ test(ecx, Immediate(0x80000000));
          __ j(not_zero, &slow, not_taken);
          break;

        default: UNREACHABLE();
      }
      // tag result and store it in TOS (eax)
      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
      __ lea(eax, Operand(ebx, times_2, kSmiTag));
      __ jmp(&exit);
      // slow case
      __ bind(&slow);
      __ push(eax);  // restore stack
      __ push(edx);
      InlinedGenericOpStub stub(op, overwrite_mode, false);
      __ CallStub(&stub);
      __ bind(&exit);
      __ push(eax);
      break;
    }

    case Token::COMMA: {
      // simply discard left value
      __ pop(eax);
      __ add(Operand(esp), Immediate(kPointerSize));
      __ push(eax);
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}
+
+
// Deferred (slow-case) code for a binary operation whose left operand
// is in eax and whose right operand is a smi constant.  Pushes both
// operands and calls the shared stub; the result comes back in eax.
class DeferredInlinedSmiOperation: public DeferredCode {
 public:
  DeferredInlinedSmiOperation(CodeGenerator* generator,
                              Token::Value op, int value,
                              OverwriteMode overwrite_mode) :
      DeferredCode(generator), op_(op), value_(value),
      overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiOperation");
  }
  virtual void Generate() {
    // Left operand (eax) first, then the smi constant.
    __ push(eax);
    __ push(Immediate(Smi::FromInt(value_)));
    InlinedGenericOpStub igostub(op_, overwrite_mode_, false);
    __ CallStub(&igostub);
  }

 private:
  Token::Value op_;               // the binary operation
  int value_;                     // untagged literal operand
  OverwriteMode overwrite_mode_;  // passed through to the stub
};
+
+
// Like DeferredInlinedSmiOperation, but the smi constant is the LEFT
// operand, so the push order is reversed.
class DeferredInlinedSmiOperationReversed: public DeferredCode {
 public:
  DeferredInlinedSmiOperationReversed(CodeGenerator* generator,
                                      Token::Value op, int value,
                                      OverwriteMode overwrite_mode) :
      DeferredCode(generator), op_(op), value_(value),
      overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiOperationReversed");
  }
  virtual void Generate() {
    // Smi constant first (left operand), then the value in eax.
    __ push(Immediate(Smi::FromInt(value_)));
    __ push(eax);
    InlinedGenericOpStub igostub(op_, overwrite_mode_, false);
    __ CallStub(&igostub);
  }

 private:
  Token::Value op_;               // the binary operation
  int value_;                     // untagged literal operand
  OverwriteMode overwrite_mode_;  // passed through to the stub
};
+
+
// Slow case for '<expr> + <smi constant>'.  The fast path has already
// optimistically added the constant into eax, so this code first undoes
// the addition before calling the shared stub.
class DeferredInlinedSmiAdd: public DeferredCode {
 public:
  DeferredInlinedSmiAdd(CodeGenerator* generator, int value,
                        OverwriteMode overwrite_mode) :
      DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiAdd");
  }

  virtual void Generate() {
    // Undo the optimistic add operation and call the shared stub.
    Immediate immediate(Smi::FromInt(value_));
    __ sub(Operand(eax), immediate);
    __ push(eax);
    __ push(immediate);
    InlinedGenericOpStub igostub(Token::ADD, overwrite_mode_, false);
    __ CallStub(&igostub);
  }

 private:
  int value_;                     // untagged literal operand
  OverwriteMode overwrite_mode_;  // passed through to the stub
};
+
+
// Slow case for '<smi constant> + <expr>'.  Identical to
// DeferredInlinedSmiAdd except that the operands are pushed in the
// reversed (constant-first) order.
class DeferredInlinedSmiAddReversed: public DeferredCode {
 public:
  DeferredInlinedSmiAddReversed(CodeGenerator* generator, int value,
                                OverwriteMode overwrite_mode) :
      DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiAddReversed");
  }

  virtual void Generate() {
    // Undo the optimistic add operation and call the shared stub.
    Immediate immediate(Smi::FromInt(value_));
    __ sub(Operand(eax), immediate);
    __ push(immediate);
    __ push(eax);
    InlinedGenericOpStub igostub(Token::ADD, overwrite_mode_, false);
    __ CallStub(&igostub);
  }

 private:
  int value_;                     // untagged literal operand
  OverwriteMode overwrite_mode_;  // passed through to the stub
};
+
+
// Slow case for '<expr> - <smi constant>'.  The fast path has already
// optimistically subtracted the constant from eax, so this code adds it
// back before calling the shared stub.
class DeferredInlinedSmiSub: public DeferredCode {
 public:
  DeferredInlinedSmiSub(CodeGenerator* generator, int value,
                        OverwriteMode overwrite_mode) :
      DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiSub");
  }

  virtual void Generate() {
    // Undo the optimistic sub operation and call the shared stub.
    Immediate immediate(Smi::FromInt(value_));
    __ add(Operand(eax), immediate);
    __ push(eax);
    __ push(immediate);
    InlinedGenericOpStub igostub(Token::SUB, overwrite_mode_, false);
    __ CallStub(&igostub);
  }

 private:
  int value_;                     // untagged literal operand
  OverwriteMode overwrite_mode_;  // passed through to the stub
};
+
+
// Slow case for '<smi constant> - <expr>'.  The fast path computed
// eax = constant - tos with the original TOS value saved in tos_reg, so
// adding tos_reg back reconstructs the constant in eax.
class DeferredInlinedSmiSubReversed: public DeferredCode {
 public:
  // tos_reg is used to save the TOS value before reversing the operands
  // eax will contain the immediate value after undoing the optimistic sub.
  DeferredInlinedSmiSubReversed(CodeGenerator* generator, Register tos_reg,
                                OverwriteMode overwrite_mode) :
      DeferredCode(generator), tos_reg_(tos_reg),
      overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiSubReversed");
  }

  virtual void Generate() {
    // Undo the optimistic sub operation and call the shared stub.
    __ add(eax, Operand(tos_reg_));
    __ push(eax);
    __ push(Operand(tos_reg_));
    InlinedGenericOpStub igostub(Token::SUB, overwrite_mode_, false);
    __ CallStub(&igostub);
  }

 private:
  Register tos_reg_;              // holds the saved right operand
  OverwriteMode overwrite_mode_;  // passed through to the stub
};
+
+
// Emit code for a binary operation where one operand is the smi literal
// 'value'.  The other operand is on the stack; 'reversed' means the
// literal is the LEFT operand.  The result is pushed on the stack.
void Ia32CodeGenerator::SmiOperation(Token::Value op,
                                     Handle<Object> value,
                                     bool reversed,
                                     OverwriteMode overwrite_mode) {
  // NOTE: This is an attempt to inline (a bit) more of the code for
  // some possible smi operations (like + and -) when (at least) one
  // of the operands is a literal smi. With this optimization, the
  // performance of the system is increased by ~15%, and the generated
  // code size is increased by ~1% (measured on a combination of
  // different benchmarks).

  // TODO(1217802): Optimize some special cases of operations
  // involving a smi literal (multiply by 2, shift by 0, etc.).

  // Get the literal value.
  int int_value = Smi::cast(*value)->value();

  switch (op) {
    case Token::ADD: {
      // Optimistically add the (tagged) literal; the deferred code
      // undoes the addition on overflow or non-smi operand.  Addition
      // is commutative, so only the deferred push order differs.
      DeferredCode* deferred = NULL;
      if (!reversed) {
        deferred = new DeferredInlinedSmiAdd(this, int_value, overwrite_mode);
      } else {
        deferred = new DeferredInlinedSmiAddReversed(this, int_value,
                                                     overwrite_mode);
      }
      __ pop(eax);
      __ add(Operand(eax), Immediate(value));
      __ j(overflow, deferred->enter(), not_taken);
      __ test(eax, Immediate(kSmiTagMask));
      __ j(not_zero, deferred->enter(), not_taken);
      __ bind(deferred->exit());
      __ push(eax);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred = NULL;
      __ pop(eax);
      if (!reversed) {
        // eax = tos - value (optimistic).
        deferred = new DeferredInlinedSmiSub(this, int_value, overwrite_mode);
        __ sub(Operand(eax), Immediate(value));
      } else {
        // eax = value - tos; the original TOS is saved in edx for the
        // deferred code to undo the subtraction.
        deferred = new DeferredInlinedSmiSubReversed(this, edx, overwrite_mode);
        __ mov(edx, Operand(eax));
        __ mov(Operand(eax), Immediate(value));
        __ sub(eax, Operand(edx));
      }
      __ j(overflow, deferred->enter(), not_taken);
      __ test(eax, Immediate(kSmiTagMask));
      __ j(not_zero, deferred->enter(), not_taken);
      __ bind(deferred->exit());
      __ push(eax);
      break;
    }

    case Token::SAR: {
      if (reversed) {
        // Literal on the left: no inlined fast path; fall back to the
        // generic code with the operands pushed in order.
        __ pop(eax);
        __ push(Immediate(value));
        __ push(eax);
        GenericOperation(op);
      } else {
        int shift_value = int_value & 0x1f;  // only least significant 5 bits
        DeferredCode* deferred =
            new DeferredInlinedSmiOperation(this, Token::SAR, shift_value,
                                            overwrite_mode);
        __ pop(eax);
        __ test(eax, Immediate(kSmiTagMask));
        __ j(not_zero, deferred->enter(), not_taken);
        // Shift the tagged value directly and clear the tag bits the
        // arithmetic shift may have dragged in; the result is a smi.
        __ sar(eax, shift_value);
        __ and_(eax, ~kSmiTagMask);
        __ bind(deferred->exit());
        __ push(eax);
      }
      break;
    }

    case Token::SHR: {
      if (reversed) {
        // Literal on the left: fall back to the generic code.
        __ pop(eax);
        __ push(Immediate(value));
        __ push(eax);
        GenericOperation(op);
      } else {
        int shift_value = int_value & 0x1f;  // only least significant 5 bits
        DeferredCode* deferred =
            new DeferredInlinedSmiOperation(this, Token::SHR, shift_value,
                                            overwrite_mode);
        __ pop(eax);
        __ test(eax, Immediate(kSmiTagMask));
        __ mov(ebx, Operand(eax));  // mov does not affect the flags
        __ j(not_zero, deferred->enter(), not_taken);
        // Untag, shift, and check that the *unsigned* result fits in a
        // smi (neither of the two high-order bits may be set; compare
        // the SHR case in GenericOperation).
        __ sar(ebx, kSmiTagSize);
        __ shr(ebx, shift_value);
        __ test(ebx, Immediate(0xc0000000));
        __ j(not_zero, deferred->enter(), not_taken);
        // tag result and store it in TOS (eax)
        ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
        __ lea(eax, Operand(ebx, times_2, kSmiTag));
        __ bind(deferred->exit());
        __ push(eax);
      }
      break;
    }

    case Token::SHL: {
      if (reversed) {
        // Literal on the left: fall back to the generic code.
        __ pop(eax);
        __ push(Immediate(value));
        __ push(eax);
        GenericOperation(op);
      } else {
        int shift_value = int_value & 0x1f;  // only least significant 5 bits
        DeferredCode* deferred =
            new DeferredInlinedSmiOperation(this, Token::SHL, shift_value,
                                            overwrite_mode);
        __ pop(eax);
        __ test(eax, Immediate(kSmiTagMask));
        __ mov(ebx, Operand(eax));  // mov does not affect the flags
        __ j(not_zero, deferred->enter(), not_taken);
        // Untag, shift, and check that the *signed* result fits in a
        // smi (compare the SHL case in GenericOperation).
        __ sar(ebx, kSmiTagSize);
        __ shl(ebx, shift_value);
        __ lea(ecx, Operand(ebx, 0x40000000));
        __ test(ecx, Immediate(0x80000000));
        __ j(not_zero, deferred->enter(), not_taken);
        // tag result and store it in TOS (eax)
        ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
        __ lea(eax, Operand(ebx, times_2, kSmiTag));
        __ bind(deferred->exit());
        __ push(eax);
      }
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      // Bitwise ops on a smi and a tagged smi constant preserve the tag
      // (tag is 0), so the operation can be applied directly; only the
      // deferred push order depends on 'reversed'.
      DeferredCode* deferred = NULL;
      if (!reversed) {
        deferred = new DeferredInlinedSmiOperation(this, op, int_value,
                                                   overwrite_mode);
      } else {
        deferred = new DeferredInlinedSmiOperationReversed(this, op, int_value,
                                                           overwrite_mode);
      }
      __ pop(eax);
      __ test(eax, Immediate(kSmiTagMask));
      __ j(not_zero, deferred->enter(), not_taken);
      if (op == Token::BIT_AND) {
        __ and_(Operand(eax), Immediate(value));
      } else if (op == Token::BIT_XOR) {
        __ xor_(Operand(eax), Immediate(value));
      } else {
        ASSERT(op == Token::BIT_OR);
        __ or_(Operand(eax), Immediate(value));
      }
      __ bind(deferred->exit());
      __ push(eax);
      break;
    }

    default: {
      // No inlined fast path: push the literal in the proper operand
      // position and dispatch to the generic paths.
      if (!reversed) {
        __ push(Immediate(value));
      } else {
        __ pop(eax);
        __ push(Immediate(value));
        __ push(eax);
      }
      bool done = InlinedGenericOperation(op, overwrite_mode,
                                          false /*negate_result*/);
      if (!done) GenericOperation(op);
      break;
    }
  }
}
+
+
+#undef __
+#define __ masm->
+
// Code stub for the generic (non-smi) comparison of the values in eax
// and edx.  Returns -1/0/1 (tagged) in eax via an inlined floating
// point compare or a builtin call; see Generate below.
class CompareStub: public CodeStub {
 public:
  CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }

  void Generate(MacroAssembler* masm);

 private:
  Condition cc_;  // condition the caller will test
  bool strict_;   // true for === / !== (only valid with cc_ == equal)

  Major MajorKey() { return Compare; }

  int MinorKey() {
    // Encode the two parameters in a unique 16 bit value.
    ASSERT(static_cast<int>(cc_) < (1 << 15));
    return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
  }

  const char* GetName() { return "CompareStub"; }

#ifdef DEBUG
  void Print() {
    PrintF("CompareStub (cc %d), (strict %s)\n",
           static_cast<int>(cc_),
           strict_ ? "true" : "false");
  }
#endif
};
+
+
// Compare the values in eax and edx.  Tries an inlined floating point
// comparison first and falls back to the EQUALS/STRICT_EQUALS/COMPARE
// builtins for non-number operands or NaN.  Returns -1 (less), 0
// (equal) or 1 (greater) in eax.
void CompareStub::Generate(MacroAssembler* masm) {
  Label call_builtin, done;
  // Save the return address (and get it off the stack).
  __ pop(ecx);

  // Push arguments.
  __ push(eax);
  __ push(edx);
  __ push(ecx);

  // Inlined floating point compare.
  // Call builtin if operands are not floating point or SMI.
  FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
  FloatingPointHelper::LoadFloatOperands(masm, ecx);
  __ FCmp();

  // Jump to builtin for NaN.
  __ j(parity_even, &call_builtin, not_taken);

  // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
  Label below_lbl, above_lbl;
  // use edx, eax to convert unsigned to signed comparision
  __ j(below, &below_lbl, not_taken);
  __ j(above, &above_lbl, not_taken);

  __ xor_(eax, Operand(eax));  // equal
  __ ret(2 * kPointerSize);

  __ bind(&below_lbl);
  __ mov(eax, -1);
  __ ret(2 * kPointerSize);

  __ bind(&above_lbl);
  __ mov(eax, 1);
  __ ret(2 * kPointerSize);  // eax, edx were pushed

  __ bind(&call_builtin);
  // must swap argument order
  __ pop(ecx);  // return address
  __ pop(edx);
  __ pop(eax);
  __ push(edx);
  __ push(eax);

  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript builtin;
  if (cc_ == equal) {
    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    builtin = Builtins::COMPARE;
    // Tell COMPARE what to return for a NaN operand: NaN comparisons
    // are always false, so the result must fail the caller's condition.
    int ncr;  // NaN compare result
    if (cc_ == less || cc_ == less_equal) {
      ncr = GREATER;
    } else {
      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
      ncr = LESS;
    }
    __ push(Immediate(Smi::FromInt(ncr)));
  }

  // Restore return address on the stack.
  __ push(ecx);

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
}
+
+
// Tail-call the StackGuard runtime routine, which handles stack
// overflow and interrupt requests.
void StackCheckStub::Generate(MacroAssembler* masm) {
  // Because builtins always remove the receiver from the stack, we
  // have to fake one to avoid underflowing the stack. The receiver
  // must be inserted below the return address on the stack so we
  // temporarily store that in a register.
  __ pop(eax);
  __ push(Immediate(Smi::FromInt(0)));
  __ push(eax);

  // Do tail-call to runtime routine.
  __ Set(eax, Immediate(0));  // not counting receiver
  __ JumpToBuiltin(ExternalReference(Runtime::kStackGuard));
}
+
+
+#undef __
+#define __ masm_->
+
+
// Deferred (slow-case) code for a generic comparison: used when one of
// the operands in edx/eax is not a smi.
class ComparisonDeferred: public DeferredCode {
 public:
  ComparisonDeferred(CodeGenerator* generator, Condition cc, bool strict) :
      DeferredCode(generator), cc_(cc), strict_(strict) {
    set_comment("[ ComparisonDeferred");
  }
  virtual void Generate();

 private:
  Condition cc_;  // condition the calling code will test
  bool strict_;   // true for === / !== comparisons
};
+
+
// Call the generic compare stub and translate its -1/0/1 result in eax
// into processor flags by comparing against zero.
void ComparisonDeferred::Generate() {
  CompareStub stub(cc_, strict_);
  // "parameters" setup by calling code in edx and eax
  __ CallStub(&stub);
  __ cmp(eax, 0);
  // "result" is returned in the flags
}
+
+
// Emit code comparing the two values on top of the stack.  Two smis are
// compared directly by pointer comparison; anything else goes through
// the deferred CompareStub.  The result is left in the condition flags
// and recorded in cc_reg_.
void Ia32CodeGenerator::Comparison(Condition cc, bool strict) {
  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == equal);

  ComparisonDeferred* deferred = new ComparisonDeferred(this, cc, strict);
  __ pop(eax);  // right operand
  __ pop(edx);  // left operand
  __ mov(ecx, Operand(eax));
  __ or_(ecx, Operand(edx));  // combined tag check: ecx = left | right
  __ test(ecx, Immediate(kSmiTagMask));
  __ j(not_zero, deferred->enter(), not_taken);
  // Test smi equality by pointer comparison.
  __ cmp(edx, Operand(eax));
  __ bind(deferred->exit());
  cc_reg_ = cc;
}
+
+
+class SmiComparisonDeferred: public DeferredCode {
+ public:
+ SmiComparisonDeferred(CodeGenerator* generator,
+ Condition cc,
+ bool strict,
+ int value)
+ : DeferredCode(generator), cc_(cc), strict_(strict), value_(value) {
+ set_comment("[ ComparisonDeferred");
+ }
+ virtual void Generate();
+
+ private:
+ Condition cc_;
+ bool strict_;
+ int value_;
+};
+
+
// Move the operands into the CompareStub's calling convention (left in
// edx, right constant in eax), call it, and translate its -1/0/1 result
// into processor flags.
void SmiComparisonDeferred::Generate() {
  CompareStub stub(cc_, strict_);
  // Setup parameters and call stub.
  __ mov(edx, Operand(eax));
  __ mov(Operand(eax), Immediate(Smi::FromInt(value_)));
  __ CallStub(&stub);
  __ cmp(eax, 0);
  // "result" is returned in the flags
}
+
+
// Emit code comparing the value on top of the stack against the smi
// literal 'value'.  A smi operand is compared directly; anything else
// goes through the deferred CompareStub.  The result is left in the
// condition flags and recorded in cc_reg_.
void Ia32CodeGenerator::SmiComparison(Condition cc,
                                      Handle<Object> value,
                                      bool strict) {
  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == equal);

  SmiComparisonDeferred* deferred =
      new SmiComparisonDeferred(this, cc, strict, Smi::cast(*value)->value());
  __ pop(eax);
  __ test(eax, Immediate(kSmiTagMask));
  __ j(not_zero, deferred->enter(), not_taken);
  // Test smi equality by pointer comparison.
  __ cmp(Operand(eax), Immediate(value));
  __ bind(deferred->exit());
  cc_reg_ = cc;
}
+
+
// Code stub for calling a (possibly non-function) value with a fixed
// number of arguments; see Generate for the calling convention.
class CallFunctionStub: public CodeStub {
 public:
  explicit CallFunctionStub(int argc) : argc_(argc) { }

  void Generate(MacroAssembler* masm);

 private:
  int argc_;  // number of arguments (not counting the receiver)

  const char* GetName() { return "CallFunctionStub"; }

#ifdef DEBUG
  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
#endif

  Major MajorKey() { return CallFunction; }
  // One stub per argument count.
  int MinorKey() { return argc_; }
};
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow, fast;
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ masm->mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ masm->test(edi, Immediate(kSmiTagMask));
+ masm->j(zero, &slow, not_taken);
+ // Get the map.
+ masm->mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ masm->movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ masm->cmp(ecx, JS_FUNCTION_TYPE);
+ masm->j(not_equal, &slow, not_taken);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ masm->InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ masm->bind(&slow);
+ masm->Set(eax, Immediate(argc_));
+ masm->Set(ebx, Immediate(0));
+ masm->GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ masm->jmp(adaptor, code_target);
+}
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void Ia32CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ for (int i = 0; i < args->length(); i++) Load(args->at(i));
+
+ // Record the position for debugging purposes.
+ __ RecordPosition(position);
+
+ // Use the shared code stub to call the function.
+ CallFunctionStub call_function(args->length());
+ __ CallStub(&call_function);
+
+ // Restore context and pop function from the stack.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(TOS, eax);
+}
+
+
+void Ia32CodeGenerator::Branch(bool if_true, Label* L) {
+ ASSERT(has_cc());
+ Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+ __ j(cc, L);
+ cc_reg_ = no_condition;
+}
+
+
// Deferred (slow-case) code for stack checks: calls StackCheckStub when
// the stack pointer has crossed the guard limit.
class StackCheckDeferred: public DeferredCode {
 public:
  explicit StackCheckDeferred(CodeGenerator* generator)
      : DeferredCode(generator) {
    set_comment("[ StackCheckDeferred");
  }
  virtual void Generate();
};
+
+
// Call the shared stack-check stub (which tail-calls the StackGuard
// runtime routine).
void StackCheckDeferred::Generate() {
  StackCheckStub stub;
  __ CallStub(&stub);
}
+
+
// Emit an inline stack check against the stack guard limit, with the
// stub call as deferred slow case.  Disabled when FLAG_check_stack is
// off.
void Ia32CodeGenerator::CheckStack() {
  if (FLAG_check_stack) {
    StackCheckDeferred* deferred = new StackCheckDeferred(this);
    ExternalReference stack_guard_limit =
        ExternalReference::address_of_stack_guard_limit();
    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
    __ j(below, deferred->enter(), not_taken);
    __ bind(deferred->exit());
  }
}
+
+
// Generate code for a block statement: compile the contained statements
// and bind the break target so 'break' can jump past the block.
void Ia32CodeGenerator::VisitBlock(Block* node) {
  Comment cmnt(masm_, "[ Block");
  if (FLAG_debug_info) RecordStatementPosition(node);
  // Remember the stack height so break can unwind to it.
  node->set_break_stack_height(break_stack_height_);
  VisitStatements(node->statements());
  __ bind(node->break_target());
}
+
+
// Emit a runtime call declaring the global variables and functions
// described by 'pairs' in the current context.
void Ia32CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  __ push(Immediate(pairs));
  __ push(Operand(esi));  // the current context
  __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
  __ CallRuntime(Runtime::kDeclareGlobals, 3);
  // Return value is ignored.
}
+
+
// Generate code for a variable/function declaration.  LOOKUP slots are
// declared at runtime; locally allocated variables only need code when
// there is an initial value (a function or the-hole for const).
void Ia32CodeGenerator::VisitDeclaration(Declaration* node) {
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->mode() == Variable::DYNAMIC);
    // For now, just do a runtime call.
    __ push(Operand(esi));  // the current context
    __ push(Immediate(var->name()));
    // Declaration nodes are always introduced in one of two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    __ push(Immediate(Smi::FromInt(attr)));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      __ push(Immediate(Factory::the_hole_value()));
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      __ push(Immediate(0));  // no initial value!
    }
    __ CallRuntime(Runtime::kDeclareContextSlot, 5);
    // DeclareContextSlot pops the assigned value by accepting an
    // extra argument and returning the TOS; no need to explicitly
    // pop here.
    __ push(eax);
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    // Set initial value.
    Reference target(this, node->proxy());
    Load(val);
    SetValue(&target);
    // Get rid of the assigned value (declarations are statements).
    __ pop(eax);  // Pop(no_reg);
  }
}
+
+
// Generate code for an expression statement: evaluate the expression
// and discard its value.
void Ia32CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
  Comment cmnt(masm_, "[ ExpressionStatement");
  if (FLAG_debug_info) RecordStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  Load(expression);
  __ pop(eax);  // remove the lingering expression result from the top of stack
}
+
+
// An empty statement generates no code.
void Ia32CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
  Comment cmnt(masm_, "// EmptyStatement");
  // nothing to do
}
+
+
// Generate code for an if statement, specialized on which of the then
// and else parts are actually present.
void Ia32CodeGenerator::VisitIfStatement(IfStatement* node) {
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which
  // parts of the if statement are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  if (FLAG_debug_info) RecordStatementPosition(node);
  Label exit;
  if (has_then_stm && has_else_stm) {
    Label then;
    Label else_;
    // if (cond)
    LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
    Branch(false, &else_);
    // then
    __ bind(&then);
    Visit(node->then_statement());
    __ jmp(&exit);
    // else
    __ bind(&else_);
    Visit(node->else_statement());

  } else if (has_then_stm) {
    ASSERT(!has_else_stm);
    Label then;
    // if (cond)
    LoadCondition(node->condition(), CodeGenState::LOAD, &then, &exit, true);
    Branch(false, &exit);
    // then
    __ bind(&then);
    Visit(node->then_statement());

  } else if (has_else_stm) {
    ASSERT(!has_then_stm);
    Label else_;
    // if (!cond)
    LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &else_, true);
    Branch(true, &exit);
    // else
    __ bind(&else_);
    Visit(node->else_statement());

  } else {
    ASSERT(!has_then_stm && !has_else_stm);
    // Neither part is present: evaluate the condition for its side
    // effects only and discard the result.
    // if (cond)
    LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &exit, false);
    if (has_cc()) {
      cc_reg_ = no_condition;
    } else {
      // No cc value set up, that means the boolean was pushed.
      // Pop it again, since it is not going to be used.
      __ pop(eax);
    }
  }

  // end
  __ bind(&exit);
}
+
+
+void Ia32CodeGenerator::CleanStack(int num_bytes) {
+ ASSERT(num_bytes >= 0);
+ if (num_bytes > 0) {
+ __ add(Operand(esp), Immediate(num_bytes));
+ }
+}
+
+
// Generate code for 'continue': unwind the stack to the target loop's
// height and jump to its continue label.
void Ia32CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  Comment cmnt(masm_, "[ ContinueStatement");
  if (FLAG_debug_info) RecordStatementPosition(node);
  CleanStack(break_stack_height_ - node->target()->break_stack_height());
  __ jmp(node->target()->continue_target());
}
+
+
// Generate code for 'break': unwind the stack to the target's height
// and jump to its break label.
void Ia32CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  Comment cmnt(masm_, "[ BreakStatement");
  if (FLAG_debug_info) RecordStatementPosition(node);
  CleanStack(break_stack_height_ - node->target()->break_stack_height());
  __ jmp(node->target()->break_target());
}
+
+
// Generate code for 'return <expr>': evaluate the expression into eax
// and either jump to the shared return sequence or emit it here.
void Ia32CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  Comment cmnt(masm_, "[ ReturnStatement");
  if (FLAG_debug_info) RecordStatementPosition(node);
  Load(node->expression());

  // Move the function result into eax
  __ pop(eax);

  // If we're inside a try statement or the return instruction
  // sequence has been generated, we just jump to that
  // point. Otherwise, we generate the return instruction sequence and
  // bind the function return label.
  if (is_inside_try_ || function_return_.is_bound()) {
    __ jmp(&function_return_);
  } else {
    __ bind(&function_return_);
    if (FLAG_trace) {
      __ push(eax);  // undo the pop(eax) from above
      __ CallRuntime(Runtime::kTraceExit, 1);
    }

    // Add a label for checking the size of the code used for returning.
    Label check_exit_codesize;
    __ bind(&check_exit_codesize);

    // Leave the frame and return popping the arguments and the
    // receiver.
    ExitJSFrame();
    __ ret((scope_->num_parameters() + 1) * kPointerSize);

    // Check that the size of the code used for returning matches what is
    // expected by the debugger.
    ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
              __ SizeOfCodeGeneratedSince(&check_exit_codesize));
  }
}
+
+
// Generate code entering a 'with' statement: push a new context built
// from the with-expression and make it the current context.
void Ia32CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
  Comment cmnt(masm_, "[ WithEnterStatement");
  if (FLAG_debug_info) RecordStatementPosition(node);
  Load(node->expression());
  __ CallRuntime(Runtime::kPushContext, 2);
  __ push(eax);
  // Update context local.
  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
}
+
+
// Generate code leaving a 'with' statement: restore the previous
// context as the current context.
void Ia32CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
  Comment cmnt(masm_, "[ WithExitStatement");
  // Pop context.
  __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
  // Update context local.
  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
}
+
+
// Generate code for a switch statement as a linear chain of
// compare-and-branch clauses, with fall-through between clause bodies
// and a final jump to the default clause (if any).
void Ia32CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
  Comment cmnt(masm_, "[ SwitchStatement");
  if (FLAG_debug_info) RecordStatementPosition(node);
  node->set_break_stack_height(break_stack_height_);

  // The switch value stays on the stack while clauses are compared.
  Load(node->tag());

  Label next, fall_through, default_case;
  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();

  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);

    Comment cmnt(masm_, "[ case clause");

    if (clause->is_default()) {
      // Bind the default case label, so we can branch to it when we
      // have compared against all other cases.
      ASSERT(default_case.is_unused());  // at most one default clause

      // If the default case is the first (but not only) case, we have
      // to jump past it for now. Once we're done with the remaining
      // clauses, we'll branch back here. If it isn't the first case,
      // we jump past it by avoiding to chain it into the next chain.
      if (length > 1) {
        if (i == 0) __ jmp(&next);
        __ bind(&default_case);
      }

    } else {
      __ bind(&next);
      next.Unuse();
      // Duplicate the switch value for the (consuming) comparison.
      __ mov(eax, TOS);
      __ push(eax);  // duplicate TOS
      Load(clause->label());
      Comparison(equal, true);
      Branch(false, &next);
      // Entering the case statement -> remove the switch value from the stack
      __ pop(eax);
    }

    // Generate code for the body.
    __ bind(&fall_through);
    fall_through.Unuse();
    VisitStatements(clause->statements());
    // Fall through into the next clause's body (skipping its compare).
    __ jmp(&fall_through);
  }

  __ bind(&next);
  // Reached the end of the case statements -> remove the switch value
  // from the stack
  __ pop(eax);  // Pop(no_reg)
  if (default_case.is_bound()) __ jmp(&default_case);

  __ bind(&fall_through);
  __ bind(node->break_target());
}
+
+
+// Compiles do/while, while, and for loops. A simple static analysis of the
+// condition (literal true/false or absent) lets us omit the condition test
+// for always-true loops and the back-edge entirely for always-false ones.
+void Ia32CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+ Comment cmnt(masm_, "[ LoopStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ node->set_break_stack_height(break_stack_height_);
+
+ // simple condition analysis
+ enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+ if (node->cond() == NULL) {
+ // Only for loops may omit the condition; treat it as 'true'.
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ info = ALWAYS_TRUE;
+ } else {
+ Literal* lit = node->cond()->AsLiteral();
+ if (lit != NULL) {
+ if (lit->IsTrue()) {
+ info = ALWAYS_TRUE;
+ } else if (lit->IsFalse()) {
+ info = ALWAYS_FALSE;
+ }
+ }
+ }
+
+ Label loop, entry;
+
+ // init
+ if (node->init() != NULL) {
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ Visit(node->init());
+ }
+ // Except for do-loops (which run the body once first) and always-true
+ // conditions, test the condition before the first iteration.
+ if (node->type() != LoopStatement::DO_LOOP && info != ALWAYS_TRUE) {
+ __ jmp(&entry);
+ }
+
+ // body
+ __ bind(&loop);
+ Visit(node->body());
+
+ // next
+ __ bind(node->continue_target());
+ if (node->next() != NULL) {
+ // Record source position of the statement as this code which is after the
+ // code for the body actually belongs to the loop statement and not the
+ // body.
+ if (FLAG_debug_info) __ RecordPosition(node->statement_pos());
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ Visit(node->next());
+ }
+
+ // cond
+ __ bind(&entry);
+ switch (info) {
+ case ALWAYS_TRUE:
+ // Unconditional back-edge; stack check guards against runaway growth.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ __ jmp(&loop);
+ break;
+ case ALWAYS_FALSE:
+ // No back-edge needed; fall straight through to the exit.
+ break;
+ case DONT_KNOW:
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ LoadCondition(node->cond(), CodeGenState::LOAD, &loop,
+ node->break_target(), true);
+ Branch(true, &loop);
+ break;
+ }
+
+ // exit
+ __ bind(node->break_target());
+}
+
+
+// Compiles a for-in loop. Maintains a five-slot record on the stack for the
+// duration of the loop (iteration count, length, property-name FixedArray,
+// expected Map or 0, and the object being enumerated). When the runtime
+// returns a Map we can use its cached enum descriptors and only re-validate
+// the map each iteration; otherwise every key is filtered through the
+// FILTER_KEY builtin to skip properties deleted during iteration.
+void Ia32CodeGenerator::VisitForInStatement(ForInStatement* node) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+
+ // We keep stuff on the stack while the body is executing.
+ // Record it, so that a break/continue crossing this statement
+ // can restore the stack.
+ const int kForInStackSize = 5 * kPointerSize;
+ break_stack_height_ += kForInStackSize;
+ node->set_break_stack_height(break_stack_height_);
+
+ Label loop, next, entry, cleanup, exit, primitive, jsobject;
+ Label end_del_check, fixed_array;
+
+ // Get the object to enumerate over (converted to JSObject).
+ Load(node->enumerable());
+
+ // Both SpiderMonkey and kjs ignore null and undefined in contrast
+ // to the specification. 12.6.4 mandates a call to ToObject.
+ __ pop(eax);
+
+ // eax: value to be iterated over
+ __ cmp(eax, Factory::undefined_value());
+ __ j(equal, &exit);
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, &exit);
+
+ // Stack layout in body:
+ // [iteration counter (Smi)] <- slot 0
+ // [length of array] <- slot 1
+ // [FixedArray] <- slot 2
+ // [Map or 0] <- slot 3
+ // [Object] <- slot 4
+
+ // Check if enumerable is already a JSObject
+ // eax: value to be iterated over
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &primitive);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, JS_OBJECT_TYPE);
+ __ j(above_equal, &jsobject);
+
+ // Smis and other primitives must be boxed via ToObject first.
+ __ bind(&primitive);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ // function call returns the value in eax, which is where we want it below
+
+
+ __ bind(&jsobject);
+
+ // Get the set of properties (as a FixedArray or Map).
+ // eax: value to be iterated over
+ __ push(eax); // push the object being iterated over (slot 4)
+
+ __ push(eax); // push the object again as the argument to the runtime call
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a Map, we can do a fast modification check.
+ // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // eax: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
+ __ mov(edx, Operand(eax));
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ecx, Factory::meta_map());
+ __ j(not_equal, &fixed_array);
+
+ // Get enum cache
+ // eax: map (result from call to Runtime::kGetPropertyNamesFast)
+ __ mov(ecx, Operand(eax));
+ __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
+ // Get the bridge array held in the enumeration index field.
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
+ // Get the cache from the bridge array.
+ __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ __ push(eax); // <- slot 3
+ __ push(Operand(edx)); // <- slot 2
+ // Length is stored untagged; shift to produce a Smi.
+ __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
+ __ shl(eax, kSmiTagSize);
+ __ push(eax); // <- slot 1
+ __ push(Immediate(Smi::FromInt(0))); // <- slot 0
+ __ jmp(&entry);
+
+
+ __ bind(&fixed_array);
+
+ // Slow case: no cached map, so slot 3 holds 0 and every key is filtered.
+ // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
+ __ push(Immediate(Smi::FromInt(0))); // <- slot 3
+ __ push(eax); // <- slot 2
+
+ // Push the length of the array and the initial index onto the stack.
+ __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ shl(eax, kSmiTagSize);
+ __ push(eax); // <- slot 1
+ __ push(Immediate(Smi::FromInt(0))); // <- slot 0
+ __ jmp(&entry);
+
+ // Body.
+ __ bind(&loop);
+ Visit(node->body());
+
+ // Next.
+ __ bind(node->continue_target());
+ __ bind(&next);
+ // Increment the iteration counter (slot 0); counter is a Smi so adding
+ // a Smi immediate is a plain integer add.
+ __ pop(eax);
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ push(eax);
+
+ // Condition.
+ __ bind(&entry);
+
+ __ mov(eax, Operand(esp, 0 * kPointerSize)); // load the current count
+ __ cmp(eax, Operand(esp, kPointerSize)); // compare to the array length
+ __ j(above_equal, &cleanup);
+ // TODO(1222589): remove redundant load here, which is only needed in
+ // PUSH_TOS/POP_TOS mode
+ __ mov(eax, Operand(esp, 0 * kPointerSize)); // load the current count
+
+ // Get the i'th entry of the array. The Smi count scaled by times_2
+ // doubles as the byte index (kSmiTagSize == 1 assumed here).
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, Operand(edx, eax, times_2,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case eax: current iteration count ebx: i'th entry
+ // of the enum cache
+ __ mov(edx, Operand(esp, 3 * kPointerSize));
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ // eax: current iteration count
+ // ebx: i'th entry of the enum cache
+ // edx: expected map value
+ __ mov(ecx, Operand(esp, 4 * kPointerSize));
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ cmp(ecx, Operand(edx));
+ __ j(equal, &end_del_check);
+
+ // Convert the entry to a string (or null if it isn't a property anymore).
+ __ push(Operand(esp, 4 * kPointerSize)); // push enumerable
+ __ push(Operand(ebx)); // push entry
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ mov(ebx, Operand(eax));
+
+ // If the property has been removed while iterating, we just skip it.
+ __ cmp(ebx, Factory::null_value());
+ __ j(equal, &next);
+
+
+ __ bind(&end_del_check);
+
+ // Store the entry in the 'each' expression and take another spin in the loop.
+ // ebx: i'th entry of the enum cache (or the name thereof)
+ __ push(Operand(ebx));
+ { Reference each(this, node->each());
+ if (!each.is_illegal()) {
+ if (each.size() > 0) {
+ // A non-trivial reference keeps extra state on the stack; re-push
+ // the value on top of it before storing.
+ __ push(Operand(esp, kPointerSize * each.size()));
+ }
+ SetValue(&each);
+ if (each.size() > 0) {
+ __ pop(eax);
+ }
+ }
+ }
+ __ pop(eax); // pop the i'th entry pushed above
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ __ jmp(&loop);
+
+ // Cleanup.
+ __ bind(&cleanup);
+ __ bind(node->break_target());
+ // Drop the five-slot for-in record.
+ __ add(Operand(esp), Immediate(5 * kPointerSize));
+
+ // Exit.
+ __ bind(&exit);
+
+ break_stack_height_ -= kForInStackSize;
+}
+
+
+// Compiles try/catch. The catch block is emitted first, reached only when an
+// exception is thrown out of the try block; the try block itself is entered
+// via a call so the handler chain records a return address. All jumps out of
+// the try block (break/continue/return) are shadowed so the try handler can
+// be unlinked from the handler chain before the jump is taken.
+void Ia32CodeGenerator::VisitTryCatch(TryCatch* node) {
+ Comment cmnt(masm_, "[ TryCatch");
+
+ Label try_block, exit;
+
+ __ call(&try_block);
+ // --- Catch block ---
+ // Falls through here when an exception unwinds to this handler; the
+ // thrown value arrives in eax.
+ __ push(eax);
+
+ // Store the caught exception in the catch variable.
+ { Reference ref(this, node->catch_var());
+ // Load the exception to the top of the stack.
+ __ push(Operand(esp, ref.size() * kPointerSize));
+ SetValue(&ref);
+ __ pop(eax); // pop the pushed exception
+ }
+
+ // Remove the exception from the stack.
+ __ pop(edx);
+
+ VisitStatements(node->catch_block()->statements());
+ __ jmp(&exit);
+
+
+ // --- Try block ---
+ __ bind(&try_block);
+
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+ // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
+ __ push(eax); // re-push the cached TOS (see TODO above)
+
+ // Introduce shadow labels for all escapes from the try block,
+ // including returns. We should probably try to unify the escaping
+ // labels and the return label.
+ int nof_escapes = node->escaping_labels()->length();
+ List<LabelShadow*> shadows(1 + nof_escapes);
+ shadows.Add(new LabelShadow(&function_return_));
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ bool was_inside_try = is_inside_try_;
+ is_inside_try_ = true;
+ VisitStatements(node->try_block()->statements());
+ is_inside_try_ = was_inside_try;
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ int nof_unlinks = 0;
+ for (int i = 0; i <= nof_escapes; i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+
+ // Unlink from try chain.
+ __ pop(eax);
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(Operand::StaticVariable(handler_address), eax); // TOS == next_sp
+ __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+ // next_sp popped.
+ if (nof_unlinks > 0) __ jmp(&exit);
+
+ // Generate unlink code for all used shadow labels.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_linked()) {
+ // Unlink from try chain; be careful not to destroy the TOS.
+ __ bind(shadows[i]);
+
+ // Reload sp from the top handler, because some statements that we
+ // break from (eg, for...in) may have left stuff on the stack.
+ __ mov(edx, Operand::StaticVariable(handler_address));
+ const int kNextOffset = StackHandlerConstants::kNextOffset +
+ StackHandlerConstants::kAddressDisplacement;
+ __ lea(esp, Operand(edx, kNextOffset));
+
+ __ pop(Operand::StaticVariable(handler_address));
+ __ add(Operand(esp),
+ Immediate(StackHandlerConstants::kSize - kPointerSize));
+ // next_sp popped.
+ // Resume the interrupted jump at its original target.
+ __ jmp(shadows[i]->shadowed());
+ }
+ }
+
+ __ bind(&exit);
+}
+
+
+// Compiles try/finally. The finally block must run no matter how the try
+// block is left, so every exit path (normal fall-through, thrown exception,
+// and each shadowed break/continue/return) loads a distinct state value into
+// ecx and funnels into the finally block; afterwards the state is dispatched
+// on to resume the original control transfer or rethrow the exception.
+void Ia32CodeGenerator::VisitTryFinally(TryFinally* node) {
+ Comment cmnt(masm_, "[ TryFinally");
+
+ // State: Used to keep track of reason for entering the finally
+ // block. Should probably be extended to hold information for
+ // break/continue from within the try block.
+ // Shadowed escape label i is encoded as state JUMPING + i.
+ enum { FALLING, THROWING, JUMPING };
+
+ Label exit, unlink, try_block, finally_block;
+
+ __ call(&try_block);
+
+ // Falls through here when an exception unwinds to this handler; the
+ // thrown value arrives in eax and is kept as TOS for the finally block.
+ __ push(eax);
+ // In case of thrown exceptions, this is where we continue.
+ __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
+ __ jmp(&finally_block);
+
+
+ // --- Try block ---
+ __ bind(&try_block);
+
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+ // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
+ __ push(eax); // re-push the cached TOS (see TODO above)
+
+ // Introduce shadow labels for all escapes from the try block,
+ // including returns. We should probably try to unify the escaping
+ // labels and the return label.
+ int nof_escapes = node->escaping_labels()->length();
+ List<LabelShadow*> shadows(1 + nof_escapes);
+ shadows.Add(new LabelShadow(&function_return_));
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ bool was_inside_try = is_inside_try_;
+ is_inside_try_ = true;
+ VisitStatements(node->try_block()->statements());
+ is_inside_try_ = was_inside_try;
+
+ // Stop the introduced shadowing and count the number of required
+ // unlinks.
+ int nof_unlinks = 0;
+ for (int i = 0; i <= nof_escapes; i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+
+ // Set the state on the stack to FALLING.
+ __ push(Immediate(Factory::undefined_value())); // fake TOS
+ __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
+ if (nof_unlinks > 0) __ jmp(&unlink);
+
+ // Generate code that sets the state for all used shadow labels.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_linked()) {
+ __ bind(shadows[i]);
+ if (shadows[i]->shadowed() == &function_return_) {
+ // Materialize the return value on the stack.
+ __ push(eax);
+ } else {
+ // Fake TOS for break and continue.
+ __ push(Immediate(Factory::undefined_value()));
+ }
+ __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
+ __ jmp(&unlink);
+ }
+ }
+
+ // Unlink from try chain; be careful not to destroy the TOS.
+ __ bind(&unlink);
+ // Reload sp from the top handler, because some statements that we
+ // break from (eg, for...in) may have left stuff on the stack.
+ __ pop(eax); // preserve the TOS in a register across stack manipulation
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(edx, Operand::StaticVariable(handler_address));
+ const int kNextOffset = StackHandlerConstants::kNextOffset +
+ StackHandlerConstants::kAddressDisplacement;
+ __ lea(esp, Operand(edx, kNextOffset));
+
+ __ pop(Operand::StaticVariable(handler_address));
+ __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+ // next_sp popped.
+ __ push(eax); // preserve the TOS in a register across stack manipulation
+
+ // --- Finally block ---
+ __ bind(&finally_block);
+
+ // Push the state on the stack. If necessary move the state to a
+ // local variable to avoid having extra values on the stack while
+ // evaluating the finally block.
+ __ push(ecx);
+ if (node->finally_var() != NULL) {
+ Reference target(this, node->finally_var());
+ SetValue(&target);
+ ASSERT(target.size() == 0); // no extra stuff on the stack
+ __ pop(edx); // remove the extra value that was pushed above
+ }
+
+ // Generate code for the statements in the finally block.
+ VisitStatements(node->finally_block()->statements());
+
+ // Get the state from the stack - or the local variable - and
+ // restore the TOS register.
+ if (node->finally_var() != NULL) {
+ Reference target(this, node->finally_var());
+ GetValue(&target);
+ }
+ __ pop(ecx);
+
+ // Restore return value or faked TOS.
+ __ pop(eax);
+
+ // Generate code that jumps to the right destination for all used
+ // shadow labels.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_bound()) {
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
+ __ j(equal, shadows[i]->shadowed());
+ }
+ }
+
+ // Check if we need to rethrow the exception.
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
+ __ j(not_equal, &exit);
+
+ // Rethrow exception.
+ __ push(eax); // undo pop from above
+ __ CallRuntime(Runtime::kReThrow, 1);
+
+ // Done.
+ __ bind(&exit);
+}
+
+
+// Compiles the 'debugger' statement as a call into the debug-break runtime
+// entry; the runtime result is pushed as the statement's value.
+void Ia32CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ // NOTE(review): argument count 1 with no explicit argument pushed here --
+ // presumably consumes the cached TOS like other runtime calls in this
+ // codegen; confirm against Runtime_DebugBreak.
+ __ CallRuntime(Runtime::kDebugBreak, 1);
+ __ push(eax);
+}
+
+
+// Creates a closure from a function boilerplate in the current context and
+// leaves the new JSFunction on top of the stack.
+void Ia32CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Push the boilerplate on the stack.
+ __ push(Immediate(boilerplate));
+
+ // Create a new closure, passing the current context (esi) as the
+ // second runtime argument.
+ __ push(esi);
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ __ push(eax);
+}
+
+
+// Compiles a function literal: builds the (shared, context-independent)
+// boilerplate for the function and emits code to instantiate a closure
+// from it at runtime.
+void Ia32CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ InstantiateBoilerplate(boilerplate);
+}
+
+
+// Compiles a literal that already carries a prebuilt boilerplate: just
+// emit the closure instantiation.
+void Ia32CodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+ InstantiateBoilerplate(node->boilerplate());
+}
+
+
+// Compiles the ternary operator 'cond ? a : b': evaluates the condition,
+// then loads exactly one of the two branch expressions under the current
+// access mode.
+void Ia32CodeGenerator::VisitConditional(Conditional* node) {
+ Comment cmnt(masm_, "[ Conditional");
+ Label then, else_, exit;
+ LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
+ Branch(false, &else_);
+ __ bind(&then);
+ Load(node->then_expression(), access());
+ __ jmp(&exit);
+ __ bind(&else_);
+ Load(node->else_expression(), access());
+ __ bind(&exit);
+}
+
+
+// Compiles access to a variable slot (local, parameter, or context slot),
+// dispatching on the current access mode (load, load-for-typeof, store, or
+// const initialization). LOOKUP slots (dynamic variables) always go through
+// the runtime; all other slot types are accessed directly via SlotOperand.
+void Ia32CodeGenerator::VisitSlot(Slot* node) {
+ Comment cmnt(masm_, "[ Slot");
+
+ if (node->type() == Slot::LOOKUP) {
+ ASSERT(node->var()->mode() == Variable::DYNAMIC);
+
+ // For now, just do a runtime call. Arguments: the current context
+ // and the variable name.
+ __ push(Operand(esi));
+ __ push(Immediate(node->var()->name()));
+
+ switch (access()) {
+ case CodeGenState::UNDEFINED:
+ UNREACHABLE();
+ break;
+
+ case CodeGenState::LOAD:
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ push(eax);
+ // result (TOS) is the value that was loaded
+ break;
+
+ case CodeGenState::LOAD_TYPEOF_EXPR:
+ // Like LOAD, but 'typeof' must not throw for undeclared variables.
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(eax);
+ // result (TOS) is the value that was loaded
+ break;
+
+ case CodeGenState::STORE:
+ // Storing a variable must keep the (new) value on the
+ // stack. This is necessary for compiling assignment
+ // expressions.
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ __ push(eax);
+ // result (TOS) is the value that was stored
+ break;
+
+ case CodeGenState::INIT_CONST:
+ // Same as STORE but ignores attribute (e.g. READ_ONLY) of
+ // context slot so that we can initialize const properties
+ // (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top
+ // context.
+ //
+ // Note that we must declare the foo upon entry of eval(),
+ // via a context slot declaration, but we cannot initialize
+ // it at the same time, because the const declaration may
+ // be at the end of the eval code (sigh...) and the const
+ // variable may have been used before (where its value is
+ // 'undefined'). Thus, we can only do the initialization
+ // when we actually encounter the expression and when the
+ // expression operands are defined and valid, and thus we
+ // need the split into 2 operations: declaration of the
+ // context slot followed by initialization.
+ //
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ push(eax);
+ break;
+ }
+
+ } else {
+ // Note: We would like to keep the assert below, but it fires because
+ // of some nasty code in LoadTypeofExpression() which should be removed...
+ // ASSERT(node->var()->mode() != Variable::DYNAMIC);
+
+ switch (access()) {
+ case CodeGenState::UNDEFINED:
+ UNREACHABLE();
+ break;
+
+ case CodeGenState::LOAD: // fall through
+ case CodeGenState::LOAD_TYPEOF_EXPR:
+ if (node->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't
+ // been initialized yet) which needs to be converted into the
+ // 'undefined' value.
+ Comment cmnt(masm_, "[ Load const");
+ Label L;
+ __ mov(eax, SlotOperand(node, ecx));
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &L);
+ __ mov(eax, Factory::undefined_value());
+ __ bind(&L);
+ __ push(eax);
+ } else {
+ __ push(SlotOperand(node, ecx));
+ }
+ break;
+
+ case CodeGenState::INIT_CONST:
+ ASSERT(node->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ { Comment cmnt(masm_, "[ Init const");
+ Label L;
+ __ mov(eax, SlotOperand(node, ecx));
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &L);
+ // We must execute the store.
+ __ mov(eax, TOS);
+ __ mov(SlotOperand(node, ecx), eax);
+ if (node->type() == Slot::CONTEXT) {
+ // ecx is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + node->index() * kPointerSize;
+ __ RecordWrite(ecx, offset, eax, ebx);
+ }
+ __ bind(&L);
+ }
+ break;
+
+ case CodeGenState::STORE:
+ // Storing a variable must keep the (new) value on the stack. This
+ // is necessary for compiling assignment expressions.
+ // ecx may be loaded with context; used below in RecordWrite.
+ //
+ // Note: We will reach here even with node->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end
+ // up calling this code.
+ __ mov(eax, TOS);
+ __ mov(SlotOperand(node, ecx), eax);
+ if (node->type() == Slot::CONTEXT) {
+ // ecx is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + node->index() * kPointerSize;
+ __ RecordWrite(ecx, offset, eax, ebx);
+ }
+ break;
+ }
+ }
+}
+
+
+// Compiles a variable reference. Rewritten variables (locals, parameters,
+// context slots) delegate to the rewrite expression; only global variables
+// remain here and are accessed as a named property of the global object.
+void Ia32CodeGenerator::VisitVariableProxy(VariableProxy* proxy_node) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ Variable* node = proxy_node->var();
+
+ // Non-global variables have been rewritten to slots/properties; compile
+ // the rewrite instead.
+ Expression* x = node->rewrite();
+ if (x != NULL) {
+ Visit(x);
+ return;
+ }
+
+ ASSERT(node->is_global());
+ if (is_referenced()) {
+ if (node->AsProperty() != NULL) {
+ __ RecordPosition(node->AsProperty()->position());
+ }
+ AccessReferenceProperty(new Literal(node->name()), access());
+
+ } else {
+ // All stores are through references.
+ ASSERT(access() != CodeGenState::STORE);
+ Reference property(this, proxy_node);
+ GetValue(&property);
+ }
+}
+
+
+// Compiles a literal by pushing its handle as an immediate.
+void Ia32CodeGenerator::VisitLiteral(Literal* node) {
+ Comment cmnt(masm_, "[ Literal");
+ __ push(Immediate(node->handle()));
+}
+
+
+// Deferred code stub that materializes a RegExp literal via the runtime
+// when its entry in the function's literals array is still undefined.
+class RegExpDeferred: public DeferredCode {
+ public:
+ RegExpDeferred(CodeGenerator* generator, RegExpLiteral* node)
+ : DeferredCode(generator), node_(node) {
+ set_comment("[ RegExpDeferred");
+ }
+ virtual void Generate();
+ private:
+ RegExpLiteral* node_; // the literal to materialize in Generate()
+};
+
+
+// Slow path for VisitRegExpLiteral: calls the runtime to materialize the
+// RegExp object. Expects the literals array in ecx (set up by the caller).
+void RegExpDeferred::Generate() {
+ // If the entry is undefined we call the runtime system to compute
+ // the literal.
+
+ // Literal array (0).
+ __ push(ecx);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ // RegExp pattern (2).
+ __ push(Immediate(node_->pattern()));
+ // RegExp flags (3).
+ __ push(Immediate(node_->flags()));
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(ebx, Operand(eax)); // "caller" expects result in ebx
+}
+
+
+// Compiles a RegExp literal: loads its cached object from the function's
+// literals array, jumping to deferred code to materialize it on first use.
+void Ia32CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ Comment cmnt(masm_, "[ RegExp Literal");
+ RegExpDeferred* deferred = new RegExpDeferred(this, node);
+
+ // Retrieve the literal array and check the allocated entry.
+
+ // Load the function of this activation.
+ __ mov(ecx, FunctionOperand());
+
+ // Load the literals array of the function.
+ __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
+
+ // Load the literal at the ast saved index.
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ mov(ebx, FieldOperand(ecx, literal_offset));
+
+ // Check whether we need to materialize the RegExp object.
+ // If so, jump to the deferred code.
+ __ cmp(ebx, Factory::undefined_value());
+ __ j(equal, deferred->enter(), not_taken);
+ __ bind(deferred->exit());
+
+ // Push the literal.
+ __ push(ebx);
+}
+
+
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateObjectLiteral.
+// Each created boilerplate is stored in the JSFunction and they are
+// therefore context dependent.
+class ObjectLiteralDeferred: public DeferredCode {
+ public:
+ ObjectLiteralDeferred(CodeGenerator* generator, ObjectLiteral* node)
+ : DeferredCode(generator), node_(node) {
+ set_comment("[ ObjectLiteralDeferred");
+ }
+ virtual void Generate();
+ private:
+ ObjectLiteral* node_; // the literal whose boilerplate is created in Generate()
+};
+
+
+// Slow path for VisitObjectLiteral: calls the runtime to create the literal
+// boilerplate. Expects the literals array in ecx (set up by the caller).
+void ObjectLiteralDeferred::Generate() {
+ // If the entry is undefined we call the runtime system to compute
+ // the literal.
+
+ // Literal array (0).
+ __ push(Operand(ecx));
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ // Constant properties (2).
+ __ push(Immediate(node_->constant_properties()));
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ mov(ebx, Operand(eax)); // "caller" expects result in ebx
+}
+
+
+// Compiles an object literal: fetches (or lazily creates, via deferred code)
+// the boilerplate object from the function's literals array, clones it, and
+// then sets each non-constant property on the clone.
+void Ia32CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
+
+ // Retrieve the literal array and check the allocated entry.
+
+ // Load the function of this activation.
+ __ mov(ecx, FunctionOperand());
+
+ // Load the literals array of the function.
+ __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
+
+ // Load the literal at the ast saved index.
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ mov(ebx, FieldOperand(ecx, literal_offset));
+
+ // Check whether we need to materialize the object literal boilerplate.
+ // If so, jump to the deferred code.
+ __ cmp(ebx, Factory::undefined_value());
+ __ j(equal, deferred->enter(), not_taken);
+ __ bind(deferred->exit());
+
+ // Push the literal.
+ __ push(ebx);
+ // Clone the boilerplate object.
+ __ CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
+ // Push the new cloned literal object as the result.
+ __ push(eax);
+
+ for (int i = 0; i < node->properties()->length(); i++) {
+ ObjectLiteral::Property* property = node->properties()->at(i);
+ switch (property->kind()) {
+ // Constant properties are already part of the cloned boilerplate.
+ case ObjectLiteral::Property::CONSTANT: break;
+ case ObjectLiteral::Property::COMPUTED: {
+ Handle<Object> key(property->key()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ if (key->IsSymbol()) {
+ // Named property: duplicate the object, load the value, and use
+ // the store IC with the key in ecx.
+ __ mov(eax, TOS);
+ __ push(eax);
+ Load(property->value());
+ __ pop(eax);
+ __ Set(ecx, Immediate(key));
+ __ call(ic, code_target);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ // Ignore result.
+ break;
+ }
+ // Fall through
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ // Generic path: duplicate the object and call SetProperty with the
+ // evaluated key and value.
+ __ mov(eax, TOS);
+ __ push(eax);
+ Load(property->key());
+ Load(property->value());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ // Ignore result.
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ // Duplicate the resulting object on the stack. The runtime
+ // function will pop the three arguments passed in.
+ // The Smi flag 1 selects 'setter' for Runtime::kDefineAccessor.
+ __ mov(eax, TOS);
+ __ push(eax);
+ Load(property->key());
+ __ push(Immediate(Smi::FromInt(1)));
+ Load(property->value());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore result.
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ // Duplicate the resulting object on the stack. The runtime
+ // function will pop the three arguments passed in.
+ // The Smi flag 0 selects 'getter' for Runtime::kDefineAccessor.
+ __ mov(eax, TOS);
+ __ push(eax);
+ Load(property->key());
+ __ push(Immediate(Smi::FromInt(0)));
+ Load(property->value());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore result.
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
+}
+
+
+// Compiles an array literal: loads the pre-built result object (which has
+// constant elements already filled in) and stores each non-literal element
+// into its elements array, with write barriers.
+void Ia32CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ // Load the resulting object.
+ Load(node->result());
+ for (int i = 0; i < node->values()->length(); i++) {
+ Expression* value = node->values()->at(i);
+
+ // If value is literal the property value is already
+ // set in the boilerplate object.
+ if (value->AsLiteral() == NULL) {
+ // The property must be set by generated code.
+ Load(value);
+
+ // Get the value off the stack.
+ __ pop(eax);
+ // Fetch the object literal while leaving on the stack.
+ __ mov(ecx, TOS);
+ // Get the elements array.
+ __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+
+ // Write to the indexed properties array.
+ int offset = i * kPointerSize + Array::kHeaderSize;
+ __ mov(FieldOperand(ecx, offset), eax);
+
+ // Update the write barrier for the array address.
+ __ RecordWrite(ecx, offset, eax, ebx);
+ }
+ }
+}
+
+
+// Compiles an assignment. Plain assignments just load the value; compound
+// assignments (+=, -=, ...) load the target first and combine it with the
+// value, with a fast path for Smi literal operands. The final store goes
+// through the target reference; assignments to const are ignored except for
+// const initialization, which uses InitConst.
+void Ia32CodeGenerator::VisitAssignment(Assignment* node) {
+ Comment cmnt(masm_, "[ Assignment");
+
+ if (FLAG_debug_info) RecordStatementPosition(node);
+ Reference target(this, node->target());
+ if (target.is_illegal()) return;
+
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ Load(node->value());
+
+ } else {
+ // Compound assignment: current value OP rhs.
+ GetValue(&target);
+ Literal* literal = node->value()->AsLiteral();
+ if (literal != NULL && literal->handle()->IsSmi()) {
+ // Fast path for a Smi literal right-hand side.
+ SmiOperation(node->binary_op(), literal->handle(), false, NO_OVERWRITE);
+ } else {
+ Load(node->value());
+ bool done = InlinedGenericOperation(node->binary_op(), NO_OVERWRITE,
+ false /*negate_result*/);
+ if (!done) {
+ GenericOperation(node->binary_op());
+ }
+ }
+ }
+
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ if (var != NULL &&
+ var->mode() == Variable::CONST &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
+ } else {
+ __ RecordPosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ InitConst(&target);
+ } else {
+ SetValue(&target);
+ }
+ }
+}
+
+
+// Compiles a throw expression: evaluates the exception value and calls the
+// runtime to throw it. The push of the (unreachable) result keeps the
+// expression stack balanced.
+void Ia32CodeGenerator::VisitThrow(Throw* node) {
+ Comment cmnt(masm_, "[ Throw");
+
+ Load(node->exception());
+ __ RecordPosition(node->position());
+ __ CallRuntime(Runtime::kThrow, 1);
+ __ push(eax);
+}
+
+
+// Compiles a property access (obj.name / obj[key]). If the node is itself
+// the target of an enclosing reference, access it directly; otherwise load
+// its value through a fresh reference.
+void Ia32CodeGenerator::VisitProperty(Property* node) {
+ Comment cmnt(masm_, "[ Property");
+
+ if (is_referenced()) {
+ __ RecordPosition(node->position());
+ AccessReferenceProperty(node->key(), access());
+ } else {
+ // All stores are through references.
+ ASSERT(access() != CodeGenState::STORE);
+ Reference property(this, node);
+ __ RecordPosition(node->position());
+ GetValue(&property);
+ }
+}
+
+
+// Generates code for a call expression.  Four callee shapes are handled:
+// a global variable (call IC), a dynamically looked-up variable (with/eval
+// scopes, via Runtime::kLoadContextSlot), a named or keyed property, and
+// the generic case of an arbitrary function-valued expression.
+void Ia32CodeGenerator::VisitCall(Call* node) {
+  Comment cmnt(masm_, "[ Call");
+
+  ZoneList<Expression*>* args = node->arguments();
+
+  if (FLAG_debug_info) RecordStatementPosition(node);
+
+  // Check if the function is a variable or a property.
+  Expression* function = node->expression();
+  Variable* var = function->AsVariableProxy()->AsVariable();
+  Property* property = function->AsProperty();
+
+  // ------------------------------------------------------------------------
+  // Fast-case: Use inline caching.
+  // ---
+  // According to ECMA-262, section 11.2.3, page 44, the function to call
+  // must be resolved after the arguments have been evaluated. The IC code
+  // automatically handles this by loading the arguments before the function
+  // is resolved in cache misses (this also holds for megamorphic calls).
+  // ------------------------------------------------------------------------
+
+  if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+
+    // Push the name of the function and the receiver onto the stack.
+    __ push(Immediate(var->name()));
+    LoadGlobal();
+
+    // Load the arguments.
+    for (int i = 0; i < args->length(); i++) {
+      Load(args->at(i));
+    }
+
+    // Setup the receiver register and call the IC initialization code.
+    Handle<Code> stub = ComputeCallInitialize(args->length());
+    __ RecordPosition(node->position());
+    __ call(stub, code_target_context);
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));  // restore context register
+
+    // Overwrite the function on the stack with the result.
+    __ mov(TOS, eax);
+
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    // ----------------------------------
+    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
+    // ----------------------------------
+
+    // Load the function
+    __ push(Operand(esi));
+    __ push(Immediate(var->name()));
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    // eax: slot value; edx: receiver
+
+    // Load the receiver.
+    __ push(eax);
+    __ push(edx);
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+
+  } else if (property != NULL) {
+    // Check if the key is a literal string.
+    Literal* literal = property->key()->AsLiteral();
+
+    if (literal != NULL && literal->handle()->IsSymbol()) {
+      // ------------------------------------------------------------------
+      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+      // ------------------------------------------------------------------
+
+      // Push the name of the function and the receiver onto the stack.
+      __ push(Immediate(literal->handle()));
+      Load(property->obj());
+
+      // Load the arguments.
+      for (int i = 0; i < args->length(); i++) Load(args->at(i));
+
+      // Call the IC initialization code.
+      Handle<Code> stub = ComputeCallInitialize(args->length());
+      __ RecordPosition(node->position());
+      __ call(stub, code_target);
+      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));  // restore context register
+
+      // Overwrite the function on the stack with the result.
+      __ mov(TOS, eax);
+
+    } else {
+      // -------------------------------------------
+      // JavaScript example: 'array[index](1, 2, 3)'
+      // -------------------------------------------
+
+      // Load the function to call from the property through a reference.
+      Reference ref(this, property);
+      GetValue(&ref);
+
+      // Pass receiver to called function.
+      __ push(Operand(esp, ref.size() * kPointerSize));
+
+      // Call the function.
+      CallWithArguments(args, node->position());
+    }
+
+  } else {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
+    // ----------------------------------
+
+    // Load the function.
+    Load(function);
+
+    // Pass the global object as the receiver.
+    LoadGlobal();
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+  }
+}
+
+
+// Generates code for a 'new' expression: evaluate the constructor first
+// (per ECMA-262 11.2.2), push the arguments, and invoke the construct-call
+// builtin with the argument count in eax and the function in edi.
+void Ia32CodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments. This is different from ordinary calls, where the
+  // actual function to call is resolved after the arguments have been
+  // evaluated.
+
+  // Compute function to call and use the global object as the
+  // receiver.
+  Load(node->expression());
+  LoadGlobal();
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  for (int i = 0; i < args->length(); i++) Load(args->at(i));
+
+  // Constructors are called with the number of arguments in register
+  // eax for now. Another option would be to have separate construct
+  // call trampolines per different arguments counts encountered.
+  __ Set(eax, Immediate(args->length()));
+
+  // Load the function into temporary function slot as per calling
+  // convention.
+  __ mov(edi, Operand(esp, (args->length() + 1) * kPointerSize));
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  __ RecordPosition(node->position());
+  __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+          js_construct_call);
+  __ mov(TOS, eax);  // discard the function and "push" the newly created object
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateSetThisFunction(ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateGetThisFunction(ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateSetThis(ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateSetArgumentsLength(
+    ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateGetArgumentsLength(
+    ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateTailCallWithArguments(
+    ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateSetArgument(ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateSquashFrame(ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateExpandFrame(ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime stub no longer implemented on IA-32; traps if reached.
+void Ia32CodeGenerator::GenerateShiftDownAndTailCall(
+    ZoneList<Expression*>* args) {
+  // Not used on IA-32 anymore. Should go away soon.
+  __ int3();  // breakpoint trap: catches any unexpected call immediately
+}
+
+
+// Inline runtime %_IsSmi(x): evaluates the single argument and leaves the
+// answer in the condition codes ('zero' set iff the value has a smi tag),
+// to be consumed through cc_reg_.
+void Ia32CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  __ pop(eax);
+  __ test(eax, Immediate(kSmiTagMask));
+  cc_reg_ = zero;
+}
+
+
+// Inline runtime %_IsArray(x): leaves 'equal' in the condition codes iff
+// the argument is a heap object whose instance type is JS_ARRAY_TYPE.
+void Ia32CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Label answer;
+  // We need the CC bits to come out as not_equal in the case where the
+  // object is a Smi. This can't be done with the usual test opcode so
+  // we copy the object to ecx and do some destructive ops on it that
+  // result in the right CC bits.
+  __ pop(eax);
+  __ mov(ecx, Operand(eax));
+  __ and_(ecx, kSmiTagMask);
+  __ xor_(ecx, kSmiTagMask);  // smi => ecx becomes nonzero => not_equal
+  __ j(not_equal, &answer, not_taken);
+  // It is a heap object - get map.
+  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+  // Check if the object is a JS array or not.
+  __ cmp(eax, JS_ARRAY_TYPE);
+  __ bind(&answer);
+  cc_reg_ = equal;
+}
+
+
+// Inline runtime %_ArgumentsLength(): pushes the number of arguments of
+// the current activation, computed by the shared ArgumentsAccessStub.
+void Ia32CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  // Seed the result with the formal parameters count, which will be
+  // used in case no arguments adaptor frame is found below the
+  // current frame.
+  __ Set(eax, Immediate(Smi::FromInt(scope_->num_parameters())));
+
+  // Call the shared stub to get to the arguments.length.
+  ArgumentsAccessStub stub(true);  // true: the "length" flavor of the stub
+  __ CallStub(&stub);
+  __ push(eax);
+}
+
+
+// Inline runtime %_ValueOf(x): if the argument is a JSValue wrapper,
+// replaces TOS with its wrapped value; otherwise leaves it unchanged.
+void Ia32CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Label leave;
+  Load(args->at(0));  // Load the object.
+  __ mov(eax, TOS);
+  // if (object->IsSmi()) return object.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &leave, taken);
+  // It is a heap object - get map.
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // if (!object->IsJSValue()) return object.
+  __ cmp(ecx, JS_VALUE_TYPE);
+  __ j(not_equal, &leave, not_taken);
+  __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
+  __ mov(TOS, eax);
+  __ bind(&leave);
+}
+
+
+// Inline runtime %_SetValueOf(obj, val): if obj is a JSValue wrapper,
+// stores val as its wrapped value (with write barrier).  Pops both
+// arguments and leaves val on top of the stack as the result.
+void Ia32CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+  Label leave;
+  Load(args->at(0));  // Load the object.
+  Load(args->at(1));  // Load the value.
+  __ mov(eax, (Operand(esp, kPointerSize)));  // eax: object
+  __ mov(ecx, TOS);                           // ecx: value
+  // if (object->IsSmi()) return object.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &leave, taken);
+  // It is a heap object - get map.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  // if (!object->IsJSValue()) return object.
+  __ cmp(ebx, JS_VALUE_TYPE);
+  __ j(not_equal, &leave, not_taken);
+  // Store the value.
+  __ mov(FieldOperand(eax, JSValue::kValueOffset), ecx);
+  // Update the write barrier.
+  __ RecordWrite(eax, JSValue::kValueOffset, ecx, ebx);
+  // Leave.
+  __ bind(&leave);
+  // Replace the two stack slots (object, value) with just the value.
+  __ mov(ecx, TOS);
+  __ pop(eax);
+  __ mov(TOS, ecx);
+}
+
+
+// Inline runtime %_Arguments(key): replaces the key on top of the stack
+// with arguments[key], computed by the shared ArgumentsAccessStub.
+void Ia32CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // Load the key onto the stack and set register eax to the formal
+  // parameters count for the currently executing function.
+  Load(args->at(0));
+  __ Set(eax, Immediate(Smi::FromInt(scope_->num_parameters())));
+
+  // Call the shared stub to get to arguments[key].
+  ArgumentsAccessStub stub(false);  // false: the element-access flavor
+  __ CallStub(&stub);
+  __ mov(TOS, eax);
+}
+
+
+
+// Generates code for a runtime call (%Foo(...)).  Intrinsics with an
+// inline implementation are handled by CheckForInlineRuntimeCall; calls
+// with a known C function go through CallRuntime; otherwise the named JS
+// builtin is looked up on the builtins object and invoked via a call IC.
+void Ia32CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  if (CheckForInlineRuntimeCall(node)) return;
+
+  ZoneList<Expression*>* args = node->arguments();
+  Comment cmnt(masm_, "[ CallRuntime");
+  Runtime::Function* function = node->function();
+
+  if (function == NULL) {
+    // Prepare stack for calling JS runtime function.
+    __ push(Immediate(node->name()));
+    // Push the builtins object found in the current global object.
+    __ mov(edx, GlobalObject());
+    __ push(FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+  }
+
+  // Push the arguments ("left-to-right").
+  for (int i = 0; i < args->length(); i++)
+    Load(args->at(i));
+
+  if (function != NULL) {
+    // Call the C runtime function.
+    __ CallRuntime(function, args->length());
+    __ push(eax);
+  } else {
+    // Call the JS runtime function.
+    Handle<Code> stub = ComputeCallInitialize(args->length());
+    __ Set(eax, Immediate(args->length()));
+    __ call(stub, code_target);
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));  // restore context register
+    __ mov(TOS, eax);
+  }
+}
+
+
+// Generates code for unary operations.  NOT is compiled as a negated
+// condition; DELETE and TYPEOF have dedicated sequences; SUB, BIT_NOT,
+// VOID and ADD operate on the loaded operand.
+void Ia32CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  Comment cmnt(masm_, "[ UnaryOperation");
+
+  Token::Value op = node->op();
+
+  if (op == Token::NOT) {
+    // Compile the operand with swapped true/false targets and negate the
+    // resulting condition code instead of materializing a boolean.
+    LoadCondition(node->expression(), CodeGenState::LOAD,
+                  false_target(), true_target(), true);
+    cc_reg_ = NegateCondition(cc_reg_);
+
+  } else if (op == Token::DELETE) {
+    Property* property = node->expression()->AsProperty();
+    if (property != NULL) {
+      Load(property->obj());
+      Load(property->key());
+      __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+      __ push(eax);
+      return;
+    }
+
+    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+    if (variable != NULL) {
+      Slot* slot = variable->slot();
+      if (variable->is_global()) {
+        LoadGlobal();
+        __ push(Immediate(variable->name()));
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        __ push(eax);
+        return;
+
+      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+        // lookup the context holding the named variable
+        __ push(Operand(esi));
+        __ push(Immediate(variable->name()));
+        __ CallRuntime(Runtime::kLookupContext, 2);
+        // eax: context
+        __ push(eax);
+        __ push(Immediate(variable->name()));
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        __ push(eax);
+        return;
+      }
+
+      // Default: Result of deleting non-global, not dynamically
+      // introduced variables is false.
+      __ push(Immediate(Factory::false_value()));
+
+    } else {
+      // Default: Result of deleting expressions is true.
+      Load(node->expression());  // may have side-effects
+      __ Set(TOS, Immediate(Factory::true_value()));
+    }
+
+  } else if (op == Token::TYPEOF) {
+    // Special case for loading the typeof expression; see comment on
+    // LoadTypeofExpression().
+    LoadTypeofExpression(node->expression());
+    __ CallRuntime(Runtime::kTypeof, 1);
+    __ push(eax);
+
+  } else {
+    Load(node->expression());
+    switch (op) {
+      case Token::NOT:
+      case Token::DELETE:
+      case Token::TYPEOF:
+        UNREACHABLE();  // handled above
+        break;
+
+      case Token::SUB: {
+        UnarySubStub stub;
+        // TODO(1222589): remove dependency of TOS being cached inside stub
+        __ pop(eax);
+        __ CallStub(&stub);
+        __ push(eax);
+        break;
+      }
+
+      case Token::BIT_NOT: {
+        // smi check
+        Label smi_label;
+        Label continue_label;
+        __ pop(eax);
+        __ test(eax, Immediate(kSmiTagMask));
+        __ j(zero, &smi_label, taken);
+
+        __ push(eax);  // undo popping of TOS
+        __ InvokeBuiltin(Builtins::BIT_NOT, CALL_FUNCTION);
+
+        __ jmp(&continue_label);
+        __ bind(&smi_label);
+        __ not_(eax);
+        __ and_(eax, ~kSmiTagMask);  // remove inverted smi-tag
+        __ bind(&continue_label);
+        __ push(eax);
+        break;
+      }
+
+      case Token::VOID:
+        __ mov(TOS, Factory::undefined_value());
+        break;
+
+      case Token::ADD:
+        __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+        __ push(eax);
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+
+// Deferred code for the slow case of a count operation (++/--): taken
+// when the optimistic in-place smi increment/decrement overflows or the
+// operand is not a smi.  See VisitCountOperation.
+class CountOperationDeferred: public DeferredCode {
+ public:
+  CountOperationDeferred(CodeGenerator* generator,
+                         bool is_postfix,
+                         bool is_increment,
+                         int result_offset)
+      : DeferredCode(generator),
+        is_postfix_(is_postfix),
+        is_increment_(is_increment),
+        result_offset_(result_offset) {
+    set_comment("[ CountOperationDeferred");
+  }
+
+  virtual void Generate();
+
+ private:
+  bool is_postfix_;    // true for x++/x--, false for ++x/--x
+  bool is_increment_;  // true for ++, false for --
+  int result_offset_;  // stack offset of the postfix result slot
+};
+
+
+#undef __
+#define __ masm->
+
+
+// Stub that reverts the optimistic smi increment/decrement performed by
+// the count-operation fast path and then converts the original operand
+// to a number by tail-calling the TO_NUMBER builtin.
+class RevertToNumberStub: public CodeStub {
+ public:
+  explicit RevertToNumberStub(bool is_increment)
+      : is_increment_(is_increment) { }
+
+ private:
+  bool is_increment_;  // which optimistic operation must be undone
+
+  Major MajorKey() { return RevertToNumber; }
+  int MinorKey() { return is_increment_ ? 1 : 0; }
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "RevertToNumberStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RevertToNumberStub (is_increment %s)\n",
+           is_increment_ ? "true" : "false");
+  }
+#endif
+};
+
+
+// Undo the optimistic +1/-1 applied to eax, then tail-call TO_NUMBER on
+// the restored value.  The return address is shuffled below the argument
+// so the builtin returns directly to this stub's caller.
+void RevertToNumberStub::Generate(MacroAssembler* masm) {
+  // Revert optimistic increment/decrement.
+  if (is_increment_) {
+    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+  } else {
+    __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+  }
+
+  __ pop(ecx);   // return address
+  __ push(eax);  // argument for TO_NUMBER
+  __ push(ecx);  // re-push return address above the argument
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+  // Code never returns due to JUMP_FUNCTION.
+}
+
+
+// Stub implementing the slow case of a count operation: stores the old
+// value as the postfix result, reverts the optimistic prefix change, and
+// tail-calls the INC/DEC builtin to compute the new value.
+class CounterOpStub: public CodeStub {
+ public:
+  CounterOpStub(int result_offset, bool is_postfix, bool is_increment)
+      : result_offset_(result_offset),
+        is_postfix_(is_postfix),
+        is_increment_(is_increment) { }
+
+ private:
+  int result_offset_;  // stack offset of the postfix result slot
+  bool is_postfix_;    // true for x++/x--, false for ++x/--x
+  bool is_increment_;  // true for ++, false for --
+
+  Major MajorKey() { return CounterOp; }
+  // Pack the offset and the two flags into the minor key.
+  int MinorKey() {
+    return ((result_offset_ << 2) |
+            (is_postfix_ ? 2 : 0) |
+            (is_increment_ ? 1 : 0));
+  }
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "CounterOpStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("CounterOpStub (result_offset %d), (is_postfix %s),"
+           " (is_increment %s)\n",
+           result_offset_,
+           is_postfix_ ? "true" : "false",
+           is_increment_ ? "true" : "false");
+  }
+#endif
+};
+
+
+// Emit the slow-case body: record the postfix result, revert the prefix
+// optimistic change, then tail-call INC or DEC on the operand in eax.
+void CounterOpStub::Generate(MacroAssembler* masm) {
+  // Store to the result on the stack (skip return address) before
+  // performing the count operation.
+  if (is_postfix_) {
+    __ mov(Operand(esp, result_offset_ + kPointerSize), eax);
+  }
+
+  // Revert optimistic increment/decrement but only for prefix
+  // counts. For postfix counts it has already been reverted before
+  // the conversion to numbers.
+  if (!is_postfix_) {
+    if (is_increment_) {
+      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+    } else {
+      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+    }
+  }
+
+  // Compute the new value by calling the right JavaScript native.
+  __ pop(ecx);   // return address
+  __ push(eax);  // argument for INC/DEC
+  __ push(ecx);  // re-push return address above the argument
+  Builtins::JavaScript builtin = is_increment_ ? Builtins::INC : Builtins::DEC;
+  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+  // Code never returns due to JUMP_FUNCTION.
+}
+
+
+#undef __
+#define __ masm_->
+
+
+// Slow case for count operations: for postfix forms, first revert the
+// optimistic change and convert the original value to a number (so it
+// can be returned as the result), then perform the actual operation via
+// CounterOpStub.
+void CountOperationDeferred::Generate() {
+  if (is_postfix_) {
+    RevertToNumberStub to_number_stub(is_increment_);
+    __ CallStub(&to_number_stub);
+  }
+  CounterOpStub stub(result_offset_, is_postfix_, is_increment_);
+  __ CallStub(&stub);
+}
+
+
+// Generates code for ++/-- (prefix and postfix).  The fast path
+// optimistically adjusts the value as a smi and falls back to deferred
+// code (CountOperationDeferred) on overflow or a non-smi operand.
+void Ia32CodeGenerator::VisitCountOperation(CountOperation* node) {
+  Comment cmnt(masm_, "[ CountOperation");
+
+  bool is_postfix = node->is_postfix();
+  bool is_increment = node->op() == Token::INC;
+
+  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+  bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+  // Postfix: Make room for the result.
+  if (is_postfix) __ push(Immediate(0));
+
+  { Reference target(this, node->expression());
+    if (target.is_illegal()) return;
+    GetValue(&target);
+
+    int result_offset = target.size() * kPointerSize;
+    CountOperationDeferred* deferred =
+        new CountOperationDeferred(this, is_postfix,
+                                   is_increment, result_offset);
+
+    __ pop(eax);  // Load TOS into eax for calculations below
+
+    // Postfix: Store the old value as the result.
+    if (is_postfix) __ mov(Operand(esp, result_offset), eax);
+
+    // Perform optimistic increment/decrement.
+    if (is_increment) {
+      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+    } else {
+      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+    }
+
+    // If the count operation didn't overflow and the result is a
+    // valid smi, we're done. Otherwise, we jump to the deferred
+    // slow-case code.
+    __ j(overflow, deferred->enter(), not_taken);
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(not_zero, deferred->enter(), not_taken);
+
+    // Store the new value in the target if not const.
+    __ bind(deferred->exit());
+    __ push(eax);  // Push the new value to TOS
+    if (!is_const) SetValue(&target);
+  }
+
+  // Postfix: Discard the new value and use the old.
+  if (is_postfix) __ pop(eax);
+}
+
+
+// Returns 'true' if able to defer negation to the consuming arithmetic
+// operation.  In that case only the operand of the unary minus is
+// loaded, and the consumer is expected to negate the final result.
+bool Ia32CodeGenerator::TryDeferNegate(Expression* x) {
+  UnaryOperation* unary = x->AsUnaryOperation();
+  bool deferrable =
+      FLAG_defer_negation && unary != NULL && unary->op() == Token::SUB;
+  // Either peel off the unary minus or load the expression as-is.
+  Load(deferrable ? unary->expression() : x);
+  return deferrable;
+}
+
+
+// Generates code for binary operations.  && and || are compiled with
+// short-circuit semantics (ECMA-262 11.11); other operators try smi
+// specializations and an inlined generic path before falling back to
+// GenericOperation.
+void Ia32CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = node->op();
+
+  // According to ECMA-262 section 11.11, page 58, the binary logical
+  // operators must yield the result of one of the two expressions
+  // before any ToBoolean() conversions. This means that the value
+  // produced by a && or || operator is not necessarily a boolean.
+
+  // NOTE: If the left hand side produces a materialized value (not in
+  // the CC register), we force the right hand side to do the
+  // same. This is necessary because we may have to branch to the exit
+  // after evaluating the left hand side (due to the shortcut
+  // semantics), but the compiler must (statically) know if the result
+  // of compiling the binary operation is materialized or not.
+
+  if (op == Token::AND) {
+    Label is_true;
+    LoadCondition(node->left(), CodeGenState::LOAD, &is_true,
+                  false_target(), false);
+    if (has_cc()) {
+      Branch(false, false_target());
+
+      // Evaluate right side expression.
+      __ bind(&is_true);
+      LoadCondition(node->right(), CodeGenState::LOAD, true_target(),
+                    false_target(), false);
+
+    } else {
+      Label pop_and_continue, exit;
+
+      // Avoid popping the result if it converts to 'false' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
+      __ mov(eax, TOS);
+      __ push(eax);
+      ToBoolean(&pop_and_continue, &exit);
+      Branch(false, &exit);
+
+      // Pop the result of evaluating the first part.
+      __ bind(&pop_and_continue);
+      __ pop(eax);
+
+      // Evaluate right side expression.
+      __ bind(&is_true);
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      __ bind(&exit);
+    }
+
+  } else if (op == Token::OR) {
+    Label is_false;
+    LoadCondition(node->left(), CodeGenState::LOAD, true_target(),
+                  &is_false, false);
+    if (has_cc()) {
+      Branch(true, true_target());
+
+      // Evaluate right side expression.
+      __ bind(&is_false);
+      LoadCondition(node->right(), CodeGenState::LOAD, true_target(),
+                    false_target(), false);
+
+    } else {
+      Label pop_and_continue, exit;
+
+      // Avoid popping the result if it converts to 'true' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
+      __ mov(eax, TOS);
+      __ push(eax);
+      ToBoolean(&exit, &pop_and_continue);
+      Branch(true, &exit);
+
+      // Pop the result of evaluating the first part.
+      __ bind(&pop_and_continue);
+      __ pop(eax);
+
+      // Evaluate right side expression.
+      __ bind(&is_false);
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      __ bind(&exit);
+    }
+
+  } else {
+    // Allow the generic operation to overwrite an operand whose value is
+    // known to be dead after the operation.
+    OverwriteMode overwrite_mode = NO_OVERWRITE;
+    if (node->left()->AsBinaryOperation() != NULL &&
+        node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_LEFT;
+    } else if (node->right()->AsBinaryOperation() != NULL &&
+               node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_RIGHT;
+    }
+
+    // Optimize for the case where (at least) one of the expressions
+    // is a literal small integer.
+    Literal* lliteral = node->left()->AsLiteral();
+    Literal* rliteral = node->right()->AsLiteral();
+
+    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
+      Load(node->left());
+      SmiOperation(node->op(), rliteral->handle(), false, overwrite_mode);
+
+    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
+      Load(node->right());
+      SmiOperation(node->op(), lliteral->handle(), true, overwrite_mode);
+
+    } else {
+      bool negate_result = false;
+      if (node->op() == Token::MUL) {  // Implement only MUL for starters
+        // -a * -b == a * b, so a single negation of the result suffices
+        // when exactly one operand was negated.
+        bool left_negated = TryDeferNegate(node->left());
+        bool right_negated = TryDeferNegate(node->right());
+        negate_result = left_negated != right_negated;
+      } else {
+        Load(node->left());
+        Load(node->right());
+      }
+      const bool done = InlinedGenericOperation(node->op(), overwrite_mode,
+                                                negate_result);
+      if (!done) {
+        // Defer negation implemented only for inlined generic ops.
+        ASSERT(!negate_result);
+        GenericOperation(node->op(), overwrite_mode);
+      }
+    }
+  }
+}
+
+
+// Pushes the JSFunction of the current activation (read from its frame
+// slot) onto the expression stack.
+void Ia32CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  __ push(FunctionOperand());
+}
+
+
+// Generates code for comparison operations.  Special-cases comparisons
+// against the 'null' literal and 'typeof x == "string literal"' tests,
+// then tries smi-literal comparisons before the generic Comparison path.
+// IN and INSTANCEOF are dispatched to builtins.
+void Ia32CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  Comment cmnt(masm_, "[ CompareOperation");
+
+  // Get the expressions from the node.
+  Expression* left = node->left();
+  Expression* right = node->right();
+  Token::Value op = node->op();
+
+  // NOTE: To make null checks efficient, we check if either left or
+  // right is the literal 'null'. If so, we optimize the code by
+  // inlining a null check instead of calling the (very) general
+  // runtime routine for checking equality.
+
+  bool left_is_null =
+      left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
+  bool right_is_null =
+      right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
+
+  if (op == Token::EQ || op == Token::EQ_STRICT) {
+    // The 'null' value is only equal to 'null' or 'undefined'.
+    if (left_is_null || right_is_null) {
+      Load(left_is_null ? right : left);
+      Label exit, undetectable;
+      __ pop(eax);
+      __ cmp(eax, Factory::null_value());
+
+      // The 'null' value is only equal to 'undefined' if using
+      // non-strict comparisons.
+      if (op != Token::EQ_STRICT) {
+        __ j(equal, &exit);
+        __ cmp(eax, Factory::undefined_value());
+
+        // NOTE: it can be an undetectable object.
+        __ j(equal, &exit);
+        __ test(eax, Immediate(kSmiTagMask));
+
+        __ j(not_equal, &undetectable);
+        __ jmp(false_target());  // a smi is never equal to null
+
+        __ bind(&undetectable);
+        // Undetectable objects compare equal to null/undefined.
+        __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+        __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+        __ and_(ecx, 1 << Map::kIsUndetectable);
+        __ cmp(ecx, 1 << Map::kIsUndetectable);
+      }
+
+      __ bind(&exit);
+
+      cc_reg_ = equal;
+      return;
+    }
+  }
+
+
+  // NOTE: To make typeof testing for natives implemented in
+  // JavaScript really efficient, we generate special code for
+  // expressions of the form: 'typeof <expression> == <string>'.
+
+  UnaryOperation* operation = left->AsUnaryOperation();
+  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+      (operation != NULL && operation->op() == Token::TYPEOF) &&
+      (right->AsLiteral() != NULL &&
+       right->AsLiteral()->handle()->IsString())) {
+    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
+
+    // Load the operand, move it to register edx, and restore TOS.
+    LoadTypeofExpression(operation->expression());
+    __ pop(edx);
+
+    if (check->Equals(Heap::number_symbol())) {
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, true_target());
+      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ cmp(edx, Factory::heap_number_map());
+      cc_reg_ = equal;
+
+    } else if (check->Equals(Heap::string_symbol())) {
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, false_target());
+
+      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
+
+      // NOTE: it might be an undetectable string object
+      __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+      __ and_(ecx, 1 << Map::kIsUndetectable);
+      __ cmp(ecx, 1 << Map::kIsUndetectable);
+      __ j(equal, false_target());
+
+      __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+      __ cmp(ecx, FIRST_NONSTRING_TYPE);
+      cc_reg_ = less;
+
+    } else if (check->Equals(Heap::boolean_symbol())) {
+      __ cmp(edx, Factory::true_value());
+      __ j(equal, true_target());
+      __ cmp(edx, Factory::false_value());
+      cc_reg_ = equal;
+
+    } else if (check->Equals(Heap::undefined_symbol())) {
+      __ cmp(edx, Factory::undefined_value());
+      __ j(equal, true_target());
+
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, false_target());
+
+      // NOTE: it can be an undetectable object.
+      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+      __ and_(ecx, 1 << Map::kIsUndetectable);
+      __ cmp(ecx, 1 << Map::kIsUndetectable);
+
+      cc_reg_ = equal;
+
+    } else if (check->Equals(Heap::function_symbol())) {
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, false_target());
+      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+      __ cmp(edx, JS_FUNCTION_TYPE);
+      cc_reg_ = equal;
+
+    } else if (check->Equals(Heap::object_symbol())) {
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, false_target());
+
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ cmp(edx, Factory::null_value());
+      __ j(equal, true_target());  // typeof null == "object"
+
+      // NOTE: it might be an undetectable object
+      __ movzx_b(edx, FieldOperand(ecx, Map::kBitFieldOffset));
+      __ and_(edx, 1 << Map::kIsUndetectable);
+      __ cmp(edx, 1 << Map::kIsUndetectable);
+      __ j(equal, false_target());
+
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+      __ j(less, false_target());
+      __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+      cc_reg_ = less_equal;
+
+    } else {
+      // Uncommon case: Typeof testing against a string literal that
+      // is never returned from the typeof operator.
+      __ jmp(false_target());
+    }
+    return;
+  }
+
+  Condition cc = no_condition;
+  bool strict = false;
+  switch (op) {
+    case Token::EQ_STRICT:
+      strict = true;
+      // Fall through
+    case Token::EQ:
+      cc = equal;
+      break;
+    case Token::LT:
+      cc = less;
+      break;
+    case Token::GT:
+      cc = greater;
+      break;
+    case Token::LTE:
+      cc = less_equal;
+      break;
+    case Token::GTE:
+      cc = greater_equal;
+      break;
+    case Token::IN: {
+      Load(left);
+      Load(right);
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      __ push(eax);  // push the result
+      return;
+    }
+    case Token::INSTANCEOF: {
+      Load(left);
+      Load(right);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+      __ push(eax);  // push the result
+      return;
+    }
+    default:
+      UNREACHABLE();
+  }
+
+  // Optimize for the case where (at least) one of the expressions
+  // is a literal small integer.
+  if (left->AsLiteral() != NULL && left->AsLiteral()->handle()->IsSmi()) {
+    Load(right);
+    SmiComparison(ReverseCondition(cc), left->AsLiteral()->handle(), strict);
+    return;
+  }
+  if (right->AsLiteral() != NULL && right->AsLiteral()->handle()->IsSmi()) {
+    Load(left);
+    SmiComparison(cc, right->AsLiteral()->handle(), strict);
+    return;
+  }
+
+  Load(left);
+  Load(right);
+  Comparison(cc, strict);
+}
+
+
+// Records the source position of a statement for the debugger, provided
+// debug info generation is enabled and the node carries a real position.
+void Ia32CodeGenerator::RecordStatementPosition(Node* node) {
+  if (!FLAG_debug_info) return;
+  int statement_pos = node->statement_pos();
+  if (statement_pos == kNoPosition) return;
+  __ RecordStatementPosition(statement_pos);
+}
+
+
+// Emits the standard JS function prologue: link the caller's frame
+// pointer, establish the new frame, and save the context (esi) and
+// function (edi) registers in their frame slots.
+void Ia32CodeGenerator::EnterJSFrame() {
+  __ push(ebp);
+  __ mov(ebp, Operand(esp));
+
+  // Store the context and the function in the frame.
+  __ push(esi);
+  __ push(edi);
+
+  // Clear the function slot when generating debug code.
+  if (FLAG_debug_code) {
+    __ Set(edi, Immediate(reinterpret_cast<int>(kZapValue)));
+  }
+}
+
+
+// Emits the JS function epilogue: tear down the frame set up by
+// EnterJSFrame.  Deliberately avoids the one-byte 'leave' instruction so
+// the return sequence is long enough for the debugger to patch.
+void Ia32CodeGenerator::ExitJSFrame() {
+  // Record the location of the JS exit code for patching when setting
+  // break point.
+  __ RecordJSReturn();
+
+  // Avoid using the leave instruction here, because it is too
+  // short. We need the return sequence to be a least the size of a
+  // call instruction to support patching the exit code in the
+  // debugger. See VisitReturnStatement for the full return sequence.
+  __ mov(esp, Operand(ebp));
+  __ pop(ebp);
+}
+
+
+#undef __
+#define __ masm->
+
+
+// Reserves stack space for passing 'num_parameters' arguments to a C
+// function and aligns esp to the activation frame alignment required by
+// the platform ABI.
+void CEntryStub::GenerateReserveCParameterSpace(MacroAssembler* masm,
+                                                int num_parameters) {
+  if (num_parameters > 0) {
+    __ sub(Operand(esp), Immediate(num_parameters * kPointerSize));
+  }
+  // OS X activation frames are 16 byte-aligned
+  // (see "Mac OS X ABI Function Call Guide").
+  const int kFrameAlignment = 16;
+  ASSERT(IsPowerOf2(kFrameAlignment));
+  __ and_(esp, -kFrameAlignment);  // round esp down to the alignment
+}
+
+
+// Unwinds to the most recent stack handler: unlinks it from the handler
+// chain, restores the registers it saved, and returns to its address.
+// The context is restored from the frame pointer unless this is a JS
+// entry frame (where ebp is 0 and the context is left as NULL).
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(edx, Operand::StaticVariable(handler_address));
+  __ mov(ecx, Operand(edx, -1 * kPointerSize));  // get next in chain
+  __ mov(Operand::StaticVariable(handler_address), ecx);
+  __ mov(esp, Operand(edx));
+  __ pop(edi);
+  __ pop(ebp);
+  __ pop(edx);  // remove code pointer
+  __ pop(edx);  // remove state
+
+  // Before returning we restore the context from the frame pointer if not NULL.
+  // The frame pointer is NULL in the exception handler of a JS entry frame.
+  __ xor_(esi, Operand(esi));  // tentatively set context pointer to NULL
+  Label skip;
+  __ cmp(ebp, 0);
+  __ j(equal, &skip, not_taken);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ bind(&skip);
+
+  __ ret(0);
+}
+
+
+// Performs the actual call to the C function whose address is in ebx,
+// then tears down the exit frame and returns to the JS caller.  On a
+// failure result it either falls through to &retry (RETRY_AFTER_GC, so
+// the caller can re-enter this code with do_gc set), promotes
+// Top::pending_exception(), or jumps to one of the throw labels.
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_out_of_memory_exception,
+                              bool do_gc,
+                              bool do_restore) {
+  // eax: result parameter for PerformGC, if any
+  // ebx: pointer to C function (C callee-saved)
+  // ebp: frame pointer (restored after C call)
+  // esp: stack pointer (restored after C call)
+  // edi: number of arguments (C callee-saved)
+
+  if (do_gc) {
+    // Pass the previous failure result to the collector.
+    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
+    __ call(FUNCTION_ADDR(Runtime::PerformGC), runtime_entry);
+  }
+
+  // Call C function with argc and argv as the two stack arguments.
+  // argv is computed from the frame pointer and the argument count.
+  __ lea(eax,
+         Operand(ebp, edi, times_4, StandardFrameConstants::kCallerSPOffset));
+  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
+  __ mov(Operand(esp, 1 * kPointerSize), eax);  // argv.
+  __ call(Operand(ebx));
+  // Result is in eax or edx:eax - do not destroy these registers!
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ lea(ecx, Operand(eax, 1));
+  // Lower 2 bits of ecx are 0 iff eax has failure tag.
+  __ test(ecx, Immediate(kFailureTagMask));
+  __ j(zero, &failure_returned, not_taken);
+
+  // Restore number of arguments to ecx and clear top frame.
+  __ mov(ecx, Operand(edi));
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  __ mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
+
+  // Restore the memory copy of the registers by digging them out from
+  // the stack.  NOTE(review): do_restore presumably pairs with the
+  // PushRegistersFromMemory done on the debug-break path in
+  // GenerateBody -- confirm when changing either side.
+  if (do_restore) {
+    // Ok to clobber ebx and edi - function pointer and number of arguments not
+    // needed anymore.
+    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    __ lea(ebx, Operand(ebp, kOffset));
+    __ CopyRegistersFromStackToMemory(ebx, edi, kJSCallerSaved);
+  }
+
+  // Exit C frame.
+  __ lea(esp, Operand(ebp, -1 * kPointerSize));
+  __ pop(ebx);
+  __ pop(ebp);
+
+  // Restore current context from top and clear it in debug mode.
+  ExternalReference context_address(Top::k_context_address);
+  __ mov(esi, Operand::StaticVariable(context_address));
+  if (kDebug) {
+    __ mov(Operand::StaticVariable(context_address), Immediate(0));
+  }
+
+  // Pop arguments from caller's stack and return.
+  __ pop(ebx);  // Ok to clobber ebx - function pointer not needed anymore.
+  __ lea(esp, Operand(esp, ecx, times_4, +1 * kPointerSize));  // +1 ~ receiver.
+  __ push(ebx);
+  __ ret(0);
+
+  // Handling of Failure.
+  __ bind(&failure_returned);
+
+  Label retry;
+  // If the returned exception is RETRY_AFTER_GC continue at retry label
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+  __ j(zero, &retry, taken);
+
+  Label continue_exception;
+  // If the returned failure is EXCEPTION then promote Top::pending_exception().
+  __ cmp(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+  __ j(not_equal, &continue_exception);
+
+  // Retrieve the pending exception and clear the variable.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ mov(eax, Operand::StaticVariable(pending_exception_address));
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+  __ bind(&continue_exception);
+  // Special handling of out of memory exception.
+  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  __ j(equal, throw_out_of_memory_exception);
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  // Retry.  Control falls through to the caller, which re-invokes the
+  // generated core with GC enabled.
+  __ bind(&retry);
+}
+
+
+// Unwinds the stack-handler chain up to (and including) the first
+// ENTRY handler, records an out-of-memory failure as the pending
+// exception, and returns to the JS entry frame with eax holding the
+// out-of-memory failure sentinel.
+void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
+  // Fetch top stack handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(edx, Operand::StaticVariable(handler_address));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  __ bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
+      StackHandlerConstants::kStateOffset;
+  __ cmp(Operand(edx, kStateOffset), Immediate(StackHandler::ENTRY));
+  __ j(equal, &done);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kAddressDisplacement +
+      StackHandlerConstants::kNextOffset;
+  __ mov(edx, Operand(edx, kNextOffset));
+  __ jmp(&loop);
+  __ bind(&done);
+
+  // Set the top handler address to next handler past the current ENTRY handler.
+  __ mov(eax, Operand(edx, kNextOffset));
+  __ mov(Operand::StaticVariable(handler_address), eax);
+
+  // Set external caught exception to false.
+  __ mov(eax, false);
+  ExternalReference external_caught(Top::k_external_caught_exception_address);
+  __ mov(Operand::StaticVariable(external_caught), eax);
+
+  // Set pending exception and eax to out of memory exception.
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ mov(Operand::StaticVariable(pending_exception), eax);
+
+  // Restore the stack to the address of the ENTRY handler.
+  __ mov(esp, Operand(edx));
+
+  // Clear the context pointer.
+  __ xor_(esi, Operand(esi));
+
+  // Restore registers from handler.
+  __ pop(edi);  // PP
+  __ pop(ebp);  // FP
+  __ pop(edx);  // Code
+  __ pop(edx);  // State
+
+  __ ret(0);
+}
+
+
+// Builds the complete C-entry stub: sets up an exit frame, calls the C
+// function via GenerateCore (first without GC, then -- if the first
+// attempt returned RETRY_AFTER_GC -- again with GC enabled), and emits
+// the shared throw paths.
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+  // eax: number of arguments
+  // ebx: pointer to C function (C callee-saved)
+  // ebp: frame pointer (restored after C call)
+  // esp: stack pointer (restored after C call)
+  // esi: current context (C callee-saved)
+  // edi: caller's parameter pointer pp (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  // Enter C frame.
+  // Here we make the following assumptions and use them when setting
+  // up the top-most Frame. Adjust the code if these assumptions
+  // change.
+  ASSERT(ExitFrameConstants::kPPDisplacement == +2 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  ASSERT(ExitFrameConstants::kSPOffset  == -2 * kPointerSize);
+  __ push(ebp);  // caller fp
+  __ mov(ebp, Operand(esp));  // C entry fp
+  __ push(ebx);  // C function
+  __ push(Immediate(0));  // saved entry sp, set before call
+  __ push(Immediate(is_debug_break ? 1 : 0));  // debug mark
+
+  // Remember top frame.
+  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+  ExternalReference context_address(Top::k_context_address);
+  __ mov(Operand::StaticVariable(c_entry_fp), ebp);
+  __ mov(Operand::StaticVariable(context_address), esi);
+
+  if (is_debug_break) {
+    // Save the state of all registers to the stack from the memory
+    // location.
+
+    // TODO(1243899): This should be symmetric to
+    // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
+    // correct here, but computed for the other call. Very error
+    // prone! FIX THIS. Actually there are deeper problems with
+    // register saving than this asymmetry (see the buganizer report
+    // associated with this issue).
+    __ PushRegistersFromMemory(kJSCallerSaved);
+  }
+
+  // Move number of arguments (argc) into callee-saved register. Note
+  // that edi is only available after remembering the top frame.
+  __ mov(edi, Operand(eax));
+
+  // Allocate stack space for 2 arguments (argc, argv).
+  GenerateReserveCParameterSpace(masm, 2);
+  __ mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);  // save entry sp
+
+  // eax: result parameter for PerformGC, if any (setup below)
+  // ebx: pointer to builtin function (C callee-saved)
+  // ebp: frame pointer (restored after C call)
+  // esp: stack pointer (restored after C call)
+  // edi: number of arguments (C callee-saved)
+
+  Label entry;
+  __ bind(&entry);
+
+  Label throw_out_of_memory_exception;
+  Label throw_normal_exception;
+
+  // First attempt: call without triggering a collection (except when
+  // gc-greedy testing is on in debug builds).
+#ifdef DEBUG
+  if (FLAG_gc_greedy) {
+    Failure* failure = Failure::RetryAfterGC(0, NEW_SPACE);
+    __ mov(Operand(eax), Immediate(reinterpret_cast<int32_t>(failure)));
+  }
+  GenerateCore(masm, &throw_normal_exception,
+               &throw_out_of_memory_exception,
+               FLAG_gc_greedy,
+               is_debug_break);
+#else
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_out_of_memory_exception,
+               false,
+               is_debug_break);
+#endif
+  // Second attempt, reached only on RETRY_AFTER_GC fall-through:
+  // perform a GC and call again.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_out_of_memory_exception,
+               true,
+               is_debug_break);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowOutOfMemory(masm);
+  // Control flow from the generated throw code does not return here.
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
+}
+
+
+// Builds the JS entry stub: sets up an ENTRY (or ENTRY_CONSTRUCT)
+// frame, links a JS-entry try handler into the handler chain, clears
+// any pending exception, and calls the JSEntryTrampoline (or
+// JSConstructEntryTrampoline) builtin.  An exception thrown during the
+// invocation lands just after the faked __ call(&invoke) below and is
+// turned into a failure sentinel return value.
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  Label invoke, exit;
+
+  // Setup frame.
+  __ push(ebp);
+  __ mov(ebp, Operand(esp));
+
+  // Save callee-saved registers (C calling conventions).
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  // Push something that is not an arguments adaptor.
+  __ push(Immediate(~ArgumentsAdaptorFrame::SENTINEL));
+  __ push(Immediate(Smi::FromInt(marker)));  // @ function offset
+  __ push(edi);
+  __ push(esi);
+  __ push(ebx);
+
+  // Save copies of the top frame descriptor on the stack.
+  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+  __ push(Operand::StaticVariable(c_entry_fp));
+
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ mov(Operand::StaticVariable(pending_exception), eax);
+  __ mov(eax, Handle<Failure>(Failure::Exception()));
+  __ jmp(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ push(eax);  // flush TOS
+
+  // Clear any pending exceptions.
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception), edx);
+
+  // Fake a receiver (NULL).
+  __ push(Immediate(0));  // receiver
+
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. Notice that we
+  // cannot store a reference to the trampoline code directly in this
+  // stub, because the builtin stubs may not have been generated yet.
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ mov(Operand(edx), Immediate(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ mov(Operand(edx), Immediate(entry));
+  }
+  __ mov(edx, Operand(edx, 0));  // deref address
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+  __ call(Operand(edx));
+
+  // Unlink this frame from the handler chain.
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Pop next_sp.
+  __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+  // Restore the top frame descriptor from the stack.
+  __ bind(&exit);
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+  // Restore callee-saved registers (C calling conventions).
+  __ pop(ebx);
+  __ pop(esi);
+  __ pop(edi);
+  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
+
+  // Restore frame pointer and return.
+  __ pop(ebp);
+  __ ret(0);
+}
+
+
+#undef __
+
+
+// -----------------------------------------------------------------------------
+// CodeGenerator interfaces
+
+// MakeCode() is just a wrapper for CodeGenerator::MakeCode()
+// so we don't have to expose the entire CodeGenerator class in
+// the .h file.
+Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
+                                     Handle<Script> script,
+                                     bool is_eval) {
+  // Delegate to the ia32 backend; on success account for the generated
+  // code in the total-compiled-code-size counter.
+  Handle<Code> result = Ia32CodeGenerator::MakeCode(fun, script, is_eval);
+  if (result.is_null()) return result;
+  Counters::total_compiled_code_size.Increment(result->instruction_size());
+  return result;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_CODEGEN_INL_H_
+#define V8_CODEGEN_INL_H_
+
+#include "codegen.h"
+
+
+namespace v8 { namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Support for "structured" code comments.
+//
+// By selecting matching brackets in disassembler output,
+// code segments can be identified more easily.
+
+#ifdef DEBUG
+
+class Comment BASE_EMBEDDED {
+ public:
+  // Records *msg* as an assembler comment.  If the message opens with
+  // '[', the destructor records a matching "]" so comment pairs form
+  // selectable brackets in disassembler output.
+  Comment(MacroAssembler* masm, const char* msg)
+      : masm_(masm), msg_(msg) {
+    masm_->RecordComment(msg);
+  }
+
+  ~Comment() {
+    if (msg_[0] == '[') {
+      masm_->RecordComment("]");
+    }
+  }
+
+ private:
+  MacroAssembler* masm_;
+  const char* msg_;
+};
+
+#else
+
+// Release-mode stand-in: structured code comments compile away to
+// nothing when DEBUG is not defined.
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler*, const char*)  {}
+};
+
+#endif // DEBUG
+
+
+} } // namespace v8::internal
+
+#endif // V8_CODEGEN_INL_H_
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+
+// Definitions for the compilation flags declared in codegen.h.
+DEFINE_bool(lazy, true, "use lazy compilation");
+DEFINE_bool(debug_info, true, "add debug information to compiled functions");
+
+
+// Registers this deferred-code object with its code generator and
+// captures the current source position so that ProcessDeferred can
+// re-establish it when the out-of-line code is finally emitted.
+DeferredCode::DeferredCode(CodeGenerator* generator)
+    : masm_(generator->masm()),
+      generator_(generator),
+      position_(masm_->last_position()),
+      position_is_statement_(masm_->last_position_is_statement()) {
+  generator->AddDeferred(this);
+#ifdef DEBUG
+  comment_ = "";
+#endif
+}
+
+
+// Emits all deferred code stubs collected during main code generation.
+void CodeGenerator::ProcessDeferred() {
+  while (!deferred_.is_empty()) {
+    DeferredCode* const code = deferred_.RemoveLast();
+    MacroAssembler* const masm = code->masm();
+    // Re-establish the source position recorded when the stub was created.
+    const int position = code->position();
+    if (code->position_is_statement()) {
+      masm->RecordStatementPosition(position);
+    } else {
+      masm->RecordPosition(position);
+    }
+    // Bind the entry label and emit the stub body.
+    masm->bind(code->enter());
+    Comment cmnt(masm, code->comment());
+    code->Generate();
+    // Jump back into the main code if there is a bound exit label.
+    if (code->exit()->is_bound()) {
+      masm->jmp(code->exit());  // platform independent?
+    }
+  }
+}
+
+
+// Sets the function info on a function.
+// The start_position points to the first '(' character after the function name
+// in the full script source. When counting characters in the script source
+// the first character is number 0 (not 1).
+void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun,
+                                    int length,
+                                    int function_token_position,
+                                    int start_position,
+                                    int end_position,
+                                    bool is_expression,
+                                    bool is_toplevel,
+                                    Handle<Script> script) {
+  // length doubles as the formal parameter count.
+  fun->shared()->set_length(length);
+  fun->shared()->set_formal_parameter_count(length);
+  fun->shared()->set_script(*script);
+  fun->shared()->set_function_token_position(function_token_position);
+  fun->shared()->set_start_position(start_position);
+  fun->shared()->set_end_position(end_position);
+  fun->shared()->set_is_expression(is_expression);
+  fun->shared()->set_is_toplevel(is_toplevel);
+}
+
+
+// Returns the stub that triggers compilation on first invocation; argc
+// selects the argument-count variant from the stub cache.
+static Handle<Code> ComputeLazyCompile(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
+}
+
+
+// Builds a boilerplate JSFunction for a (possibly nested) function
+// literal, compiling its code eagerly or installing the lazy-compile
+// stub depending on FLAG_lazy and the literal's own capabilities.
+Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
+  // Determine if the function can be lazily compiled. This is
+  // necessary to allow some of our builtin JS files to be lazily
+  // compiled. These builtins cannot be handled lazily by the parser,
+  // since we have to know if a function uses the special natives
+  // syntax, which is something the parser records.
+  bool allow_lazy = node->AllowsLazyCompilation();
+
+  // Generate code.
+  Handle<Code> code;
+  if (FLAG_lazy && allow_lazy) {
+    code = ComputeLazyCompile(node->num_parameters());
+  } else {
+    code = MakeCode(node, script_, false);
+
+    // Function compilation complete.
+    LOG(CodeCreateEvent("Function", *code, *node->name()));
+  }
+
+  // Create a boilerplate function.
+  Handle<JSFunction> function =
+      Factory::NewFunctionBoilerplate(node->name(),
+                                      node->materialized_literal_count(),
+                                      code);
+  CodeGenerator::SetFunctionInfo(function, node->num_parameters(),
+                                 node->function_token_position(),
+                                 node->start_position(), node->end_position(),
+                                 node->is_expression(), false, script_);
+
+  // Notify debugger that a new function has been added.
+  Debugger::OnNewFunction(function);
+
+  // Set the expected number of properties for instances and return
+  // the resulting function.
+  SetExpectedNofPropertiesFromEstimate(function,
+                                       node->expected_property_count());
+  return function;
+}
+
+
+// Returns the call-IC initialization stub for the given argument count
+// from the stub cache.
+Handle<Code> CodeGenerator::ComputeCallInitialize(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc), Code);
+}
+
+
+// Processes all declarations of a scope: non-global (and LOOKUP-slot)
+// declarations are visited directly; global ones are collected into a
+// flat name/value array and handed to the platform-dependent
+// DeclareGlobals in one batch.
+void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int globals = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    // If it was not possible to allocate the variable at compile
+    // time, we need to "declare" it at runtime to make sure it
+    // actually exists in the local context.
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      VisitDeclaration(node);
+    } else {
+      // Count global variables and functions for later processing.
+      globals++;
+    }
+  }
+
+  // Return in case of no declared global functions or variables.
+  if (globals == 0) return;
+
+  // Compute array of global variable and function declarations.
+  // Layout: pairs of (name, value), hence 2 * globals entries.
+  Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+  for (int j = 0, i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      // Skip - already processed.
+    } else {
+      array->set(j++, *(var->name()));
+      if (node->fun() == NULL) {
+        if (var->mode() == Variable::CONST) {
+          // In case this is const property use the hole.
+          array->set_the_hole(j++);
+        } else {
+          array->set_undefined(j++);
+        }
+      } else {
+        array->set(j++, *BuildBoilerplate(node->fun()));
+      }
+    }
+  }
+
+  // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+  DeclareGlobals(array);
+}
+
+
+// Lookup-table entry mapping an inline runtime-call name (leading '_')
+// to the CodeGenerator member function that emits it inline.
+struct InlineRuntimeLUT {
+  void (CodeGenerator::*method)(ZoneList<Expression*>*);
+  const char* name;
+};
+
+
+// Dispatches a runtime call whose name starts with '_' to the
+// corresponding inline code generator; returns whether it did so.
+bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
+  // Special cases: These 'runtime calls' manipulate the current
+  // frame and are only used 1 or two places, so we generate them
+  // inline instead of generating calls to them. They are used
+  // for implementing Function.prototype.call() and
+  // Function.prototype.apply().
+  static const InlineRuntimeLUT kInlineRuntimeLUT[] = {
+    {&CodeGenerator::GenerateShiftDownAndTailCall, "_ShiftDownAndTailCall"},
+    {&CodeGenerator::GenerateSetThisFunction, "_SetThisFunction"},
+    {&CodeGenerator::GenerateGetThisFunction, "_GetThisFunction"},
+    {&CodeGenerator::GenerateSetThis, "_SetThis"},
+    {&CodeGenerator::GenerateGetArgumentsLength, "_GetArgumentsLength"},
+    {&CodeGenerator::GenerateSetArgumentsLength, "_SetArgumentsLength"},
+    {&CodeGenerator::GenerateTailCallWithArguments, "_TailCallWithArguments"},
+    {&CodeGenerator::GenerateSetArgument, "_SetArgument"},
+    {&CodeGenerator::GenerateSquashFrame, "_SquashFrame"},
+    {&CodeGenerator::GenerateExpandFrame, "_ExpandFrame"},
+    {&CodeGenerator::GenerateIsSmi, "_IsSmi"},
+    {&CodeGenerator::GenerateIsArray, "_IsArray"},
+    {&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
+    {&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
+    {&CodeGenerator::GenerateValueOf, "_ValueOf"},
+    {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"}
+  };
+  // Inline runtime calls are marked by a leading underscore.
+  if (node->name()->length() == 0 || node->name()->Get(0) != '_') {
+    return false;
+  }
+  const unsigned kNumEntries =
+      sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
+  for (unsigned i = 0; i < kNumEntries; i++) {
+    const InlineRuntimeLUT& entry = kInlineRuntimeLUT[i];
+    if (node->name()->IsEqualTo(CStrVector(entry.name))) {
+      (this->*entry.method)(node->arguments());
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// The stub is named after the runtime function it tail-calls.
+const char* RuntimeStub::GetName() {
+  return Runtime::FunctionForId(id_)->stub_name;
+}
+
+
+// Emits a tail call to the runtime function this stub wraps.  id_ is
+// already declared as Runtime::FunctionId, so the C-style cast the
+// original code applied here was redundant and has been dropped.
+void RuntimeStub::Generate(MacroAssembler* masm) {
+  masm->TailCallRuntime(Runtime::FunctionForId(id_));
+}
+
+
+// Names the stub after the binary-operation token it implements;
+// unrecognized tokens fall back to the generic name.
+const char* GenericOpStub::GetName() {
+  switch (op_) {
+    case Token::ADD:
+      return "GenericOpStub_ADD";
+    case Token::SUB:
+      return "GenericOpStub_SUB";
+    case Token::MUL:
+      return "GenericOpStub_MUL";
+    case Token::DIV:
+      return "GenericOpStub_DIV";
+    default:
+      return "GenericOpStub";
+  }
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODEGEN_H_
+
+#include "ast.h"
+#include "code-stubs.h"
+#include "runtime.h"
+
+// NOTE(review): the include guard is defined only *after* the includes
+// above, so a header that (transitively) re-includes this file before
+// this point will not see the guard set.  This looks deliberate --
+// presumably to break an include cycle -- confirm before reordering.
+#define V8_CODEGEN_H_
+
+namespace v8 { namespace internal {
+
+
+// Use lazy compilation; defaults to true.
+// NOTE: Do not remove non-lazy compilation until we can properly
+// install extensions with lazy compilation enabled. At the
+// moment, this doesn't work for the extensions in Google3,
+// and we can only run the tests with --nolazy.
+DECLARE_bool(lazy);
+
+
+// Forward declaration.
+class CodeGenerator;
+
+
+// Deferred code objects are small pieces of code that are compiled
+// out of line. They are used to defer the compilation of uncommon
+// paths thereby avoiding expensive jumps around uncommon code parts.
+class DeferredCode: public ZoneObject {
+ public:
+  // Registers itself with the generator and snapshots the current
+  // source position (see the constructor in codegen.cc).
+  explicit DeferredCode(CodeGenerator* generator);
+  virtual ~DeferredCode()  { }
+
+  // Emits the out-of-line code; invoked by
+  // CodeGenerator::ProcessDeferred after main code generation.
+  virtual void Generate() = 0;
+
+  MacroAssembler* masm() const  { return masm_; }
+  CodeGenerator* generator() const  { return generator_; }
+
+  // enter() is bound at the start of the deferred code; exit(), if
+  // bound, is where the deferred code jumps back into the main code.
+  Label* enter() { return &enter_; }
+  Label* exit() { return &exit_; }
+
+  // Source position captured at construction time.
+  int position() const  { return position_; }
+  bool position_is_statement() const  { return position_is_statement_; }
+
+#ifdef DEBUG
+  void set_comment(const char* comment)  { comment_ = comment; }
+  const char* comment() const  { return comment_; }
+#else
+  inline void set_comment(const char* comment)  { }
+  const char* comment() const  { return ""; }
+#endif
+
+ protected:
+  // The masm_ field is manipulated when compiling stubs with the
+  // BEGIN_STUB and END_STUB macros. For that reason, it cannot be
+  // constant.
+  MacroAssembler* masm_;
+
+ private:
+  CodeGenerator* const generator_;
+  Label enter_;
+  Label exit_;
+  int position_;
+  bool position_is_statement_;
+#ifdef DEBUG
+  const char* comment_;
+#endif
+  DISALLOW_EVIL_CONSTRUCTORS(DeferredCode);
+};
+
+
+// A superclass for code generators.  The implementations of methods
+// declared in this class are partially in codegen.cc and partially in
+// codegen-<arch>.cc.
+class CodeGenerator: public Visitor {
+ public:
+  CodeGenerator(bool is_eval,
+                Handle<Script> script)
+      : is_eval_(is_eval),
+        script_(script),
+        deferred_(8) { }
+
+
+  // The code generator: Takes a function literal, generates code for it,
+  // and assembles it all into a Code* object. This function should only
+  // be called by compiler.cc.
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+  // Records the literal's source positions and parameter count on the
+  // function's shared info.
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              int length,
+                              int function_token_position,
+                              int start_position,
+                              int end_position,
+                              bool is_expression,
+                              bool is_toplevel,
+                              Handle<Script> script);
+
+  virtual MacroAssembler* masm() = 0;
+
+
+  // Deferred-code bookkeeping; stubs added here are emitted by
+  // ProcessDeferred after the main code.
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+  void ProcessDeferred();
+
+  // Accessors for is_eval.
+  bool is_eval() { return is_eval_; }
+
+  // Abstract node visitors.
+#define DEF_VISIT(type) \
+  virtual void Visit##type(type* node) = 0;
+  NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ protected:
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  virtual void DeclareGlobals(Handle<FixedArray> pairs) = 0;
+
+  // Inline runtime-call generators, dispatched to by
+  // CheckForInlineRuntimeCall; implemented per architecture.
+  virtual void GenerateShiftDownAndTailCall(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateSetThisFunction(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateGetThisFunction(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateSetThis(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateGetArgumentsLength(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateSetArgumentsLength(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateTailCallWithArguments(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateSetArgument(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateSquashFrame(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateExpandFrame(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateIsSmi(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateIsArray(ZoneList<Expression*>* args) = 0;
+
+  // Support for arguments.length and arguments[?].
+  virtual void GenerateArgumentsLength(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateArgumentsAccess(ZoneList<Expression*>* args) = 0;
+
+  // Support for accessing the value field of an object (used by Date).
+  virtual void GenerateValueOf(ZoneList<Expression*>* args) = 0;
+  virtual void GenerateSetValueOf(ZoneList<Expression*>* args) = 0;
+
+ private:
+  bool is_eval_;  // Tells whether code is generated for eval.
+  Handle<Script> script_;
+  List<DeferredCode*> deferred_;
+};
+
+
+// RuntimeStub models code stubs calling entrypoints in the Runtime class.
+class RuntimeStub : public CodeStub {
+ public:
+  explicit RuntimeStub(Runtime::FunctionId id) : id_(id) { }
+
+  // Emits a tail call to the wrapped runtime function.
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Runtime::FunctionId id_;  // runtime function this stub forwards to
+
+  Major MajorKey() { return Runtime; }
+  // The runtime-function id doubles as the cache minor key.
+  int MinorKey() { return id_; }
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RuntimeStub (id %s)\n", Runtime::FunctionForId(id_)->name);
+  }
+#endif
+};
+
+
+// Stub implementing a generic (non-specialized) binary operation for
+// the given token (ADD, SUB, MUL, DIV, ...).
+class GenericOpStub : public CodeStub {
+ public:
+  explicit GenericOpStub(Token::Value op) : op_(op) { }
+
+ private:
+  Token::Value op_;  // the operation this stub implements
+
+  Major MajorKey() { return GenericOp; }
+  // The token value doubles as the cache minor key.
+  int MinorKey() { return static_cast<int>(op_); }
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() { PrintF("GenericOpStub (token %s)\n", Token::String(op_)); }
+#endif
+};
+
+
+// Stub performing a stack-overflow / interrupt check.
+class StackCheckStub : public CodeStub {
+ public:
+  StackCheckStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+
+  const char* GetName() { return "StackCheckStub"; }
+
+  Major MajorKey() { return StackCheck; }
+  int MinorKey() { return 0; }
+};
+
+
+// Stub implementing unary minus.
+class UnarySubStub : public CodeStub {
+ public:
+  UnarySubStub() { }
+
+ private:
+  Major MajorKey() { return UnarySub; }
+  int MinorKey() { return 0; }
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "UnarySubStub"; }
+};
+
+
+// Stub that transfers control from JS code into C/C++ (runtime and
+// builtin calls).  CEntryDebugBreakStub derives from this and reuses
+// GenerateBody with is_debug_break set.
+class CEntryStub : public CodeStub {
+ public:
+  CEntryStub() { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+
+ protected:
+  void GenerateBody(MacroAssembler* masm, bool is_debug_break);
+  // Calls the C function; do_gc triggers a collection first and
+  // do_restore restores debugger-saved registers on exit.
+  void GenerateCore(MacroAssembler* masm,
+                    Label* throw_normal_exception,
+                    Label* throw_out_of_memory_exception,
+                    bool do_gc, bool do_restore);
+  void GenerateThrowTOS(MacroAssembler* masm);
+  void GenerateThrowOutOfMemory(MacroAssembler* masm);
+  void GenerateReserveCParameterSpace(MacroAssembler* masm, int num_parameters);
+
+ private:
+  Major MajorKey() { return CEntry; }
+  int MinorKey() { return 0; }
+
+  const char* GetName() { return "CEntryStub"; }
+};
+
+
+// C-entry variant used for debugger break points; distinguished from
+// the plain CEntryStub by its minor key.
+class CEntryDebugBreakStub : public CEntryStub {
+ public:
+  CEntryDebugBreakStub() { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+
+ private:
+  int MinorKey() { return 1; }
+
+  const char* GetName() { return "CEntryDebugBreakStub"; }
+};
+
+
+
+// Stub that transfers control from C++ into JS code (normal calls);
+// JSConstructEntryStub derives from this for construct calls.
+class JSEntryStub : public CodeStub {
+ public:
+  JSEntryStub() { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+
+ protected:
+  void GenerateBody(MacroAssembler* masm, bool is_construct);
+
+ private:
+  Major MajorKey() { return JSEntry; }
+  int MinorKey() { return 0; }
+
+  const char* GetName() { return "JSEntryStub"; }
+};
+
+
+// JS-entry variant for construct calls; distinguished from the plain
+// JSEntryStub by its minor key.
+class JSConstructEntryStub : public JSEntryStub {
+ public:
+  JSConstructEntryStub() { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+
+ private:
+  int MinorKey() { return 1; }
+
+  const char* GetName() { return "JSConstructEntryStub"; }
+};
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "scopes.h"
+#include "rewriter.h"
+#include "usage-analyzer.h"
+
+namespace v8 { namespace internal {
+
+DEFINE_bool(strict, false, "strict error checking");
+DEFINE_int(min_preparse_length, 1024,
+ "Minimum length for automatic enable preparsing");
+DECLARE_bool(debug_info);
+
+#ifdef DEBUG
+DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins");
+DEFINE_bool(print_scopes, false, "print scopes");
+#endif
+
+
+// Helper class to keep track of compilation nesting and to do proper
+// cleanups of generated ASTs.
+class CompilationTracker BASE_EMBEDDED {
+ public:
+ // Entering a (possibly nested) compilation.
+ CompilationTracker() {
+ ++nesting_;
+ }
+
+ ~CompilationTracker() {
+ // If we're leaving the top-level compilation, we must make sure
+ // to get rid of all generated ASTs.
+ if (--nesting_ == 0) Zone::DeleteAll();
+ }
+
+ private:
+ // Current compilation nesting depth, shared by all trackers.
+ static int nesting_;
+};
+
+
+int CompilationTracker::nesting_ = 0;
+
+
+// Generates code for the given function literal: rewrites the AST,
+// analyzes variable usage, allocates variables in the top scope, and
+// hands the literal to the code generator. Returns a null handle if
+// rewriting or usage analysis fails (stack overflow is reported to
+// Top in that case).
+static Handle<Code> MakeCode(FunctionLiteral* literal,
+ Handle<Script> script,
+ bool is_eval) {
+ ASSERT(literal != NULL);
+
+ // Rewrite the AST by introducing .result assignments where needed.
+ if (!Rewriter::Process(literal) || !AnalyzeVariableUsage(literal)) {
+ Top::StackOverflow();
+ return Handle<Code>::null();
+ }
+
+ // Compute top scope and allocate variables. For lazy compilation
+ // the top scope only contains the single lazily compiled function,
+ // so this doesn't re-allocate variables repeatedly.
+ Scope* top = literal->scope();
+ while (top->outer_scope() != NULL) top = top->outer_scope();
+ top->AllocateVariables();
+
+#ifdef DEBUG
+ if (Bootstrapper::IsActive() ?
+ FLAG_print_builtin_scopes :
+ FLAG_print_scopes) {
+ literal->scope()->Print();
+ }
+#endif
+
+ // Generate code and return it.
+ Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
+ return result;
+}
+
+
+// Common path for compiling script and eval sources: builds the AST,
+// generates code via MakeCode, and wraps the result in a new function
+// boilerplate. Returns a null handle on failure (a pending exception
+// is set on Top). Interrupts are disabled for the duration of the
+// compile and re-enabled on every exit path.
+static Handle<JSFunction> MakeFunction(bool is_global,
+ bool is_eval,
+ Handle<Script> script,
+ v8::Extension* extension,
+ ScriptDataImpl* pre_data) {
+ CompilationTracker tracker;
+
+ // Make sure we have an initial stack limit.
+ StackGuard guard;
+ StackGuard::DisableInterrupts();
+
+ // Notify debugger
+ Debugger::OnBeforeCompile(script);
+
+ // Only allow non-global compiles for eval.
+ ASSERT(is_eval || is_global);
+
+ // Build AST.
+ FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
+
+ // Measure how long it takes to do the compilation; only take the
+ // rest of the function into account to avoid overlap with the
+ // parsing statistics.
+ StatsRate* rate = is_eval
+ ? &Counters::compile_eval
+ : &Counters::compile;
+ StatsRateScope timer(rate);
+
+ // Compile the code.
+ Handle<Code> code = Handle<Code>::null();
+ if (lit != NULL) code = MakeCode(lit, script, is_eval);
+
+ // Check for stack overflow.
+ if (code.is_null()) {
+ ASSERT(Top::has_pending_exception());
+ StackGuard::EnableInterrupts();
+ return Handle<JSFunction>::null();
+ }
+
+ // Log the code creation event, using the script name when available.
+ if (script->name()->IsString()) {
+ SmartPointer<char> data =
+ String::cast(script->name())->ToCString(DISALLOW_NULLS);
+ LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, *data));
+ } else {
+ LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, ""));
+ }
+
+ // Allocate function.
+ Handle<JSFunction> fun =
+ Factory::NewFunctionBoilerplate(lit->name(),
+ lit->materialized_literal_count(),
+ code);
+
+ CodeGenerator::SetFunctionInfo(fun, lit->scope()->num_parameters(),
+ kNoPosition,
+ lit->start_position(), lit->end_position(),
+ lit->is_expression(), true, script);
+
+ // Hint to the runtime system used when allocating space for initial
+ // property space by setting the expected number of properties for
+ // the instances of the function.
+ SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count());
+
+ StackGuard::EnableInterrupts();
+
+ // Notify debugger
+ Debugger::OnAfterCompile(script, fun);
+
+ return fun;
+}
+
+
+static StaticResource<SafeStringInputBuffer> safe_string_input_buffer;
+
+
+// Compiles 'source' as a top-level script. If no pre-parse data is
+// supplied and the source is long enough, the source is pre-parsed
+// first; pre-parse data created here (as opposed to data passed in by
+// the caller) is deleted again before returning.
+Handle<JSFunction> Compiler::Compile(Handle<String> source,
+ Handle<String> script_name,
+ int line_offset, int column_offset,
+ v8::Extension* extension,
+ ScriptDataImpl* input_pre_data) {
+ Counters::total_load_size.Increment(source->length());
+ Counters::total_compile_size.Increment(source->length());
+
+ // The VM is in the COMPILER state until exiting this function.
+ VMState state(COMPILER);
+
+ ScriptDataImpl* pre_data = input_pre_data;
+ if (pre_data == NULL && source->length() >= FLAG_min_preparse_length) {
+ Access<SafeStringInputBuffer> buf(&safe_string_input_buffer);
+ buf->Reset(source.location());
+ pre_data = PreParse(buf.value(), extension);
+ }
+
+ // Create a script object describing the script to be compiled.
+ Handle<Script> script = Factory::NewScript(source);
+ if (!script_name.is_null()) {
+ script->set_name(*script_name);
+ script->set_line_offset(Smi::FromInt(line_offset));
+ script->set_column_offset(Smi::FromInt(column_offset));
+ }
+
+ Handle<JSFunction> result =
+ MakeFunction(true, false, script, extension, pre_data);
+
+ // Only delete pre-parse data that was created locally above.
+ if (input_pre_data == NULL && pre_data != NULL)
+ delete pre_data;
+
+ return result;
+}
+
+
+// Compiles 'source' for eval. A fresh script object is created; the
+// compile is global only when 'is_global' is set. No extension or
+// pre-parse data is used for eval sources.
+Handle<JSFunction> Compiler::CompileEval(bool is_global,
+ Handle<String> source) {
+ Counters::total_eval_size.Increment(source->length());
+ Counters::total_compile_size.Increment(source->length());
+
+ // The VM is in the COMPILER state until exiting this function.
+ VMState state(COMPILER);
+
+ // Create a script object describing the script to be compiled.
+ Handle<Script> script = Factory::NewScript(source);
+ return MakeFunction(is_global, true, script, NULL, NULL);
+}
+
+
+// Compiles the so-far uncompiled function described by 'shared' and
+// installs the generated code on it. Returns false if parsing or
+// compilation overflowed the stack (a pending exception is set on
+// Top), true otherwise.
+bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared) {
+ CompilationTracker tracker;
+
+ // The VM is in the COMPILER state until exiting this function.
+ VMState state(COMPILER);
+
+ // Make sure we have an initial stack limit.
+ StackGuard guard;
+ StackGuard::DisableInterrupts();
+
+ // Compute name, source code and script data.
+ Handle<String> name(String::cast(shared->name()));
+ Handle<Script> script(Script::cast(shared->script()));
+
+ int start_position = shared->start_position();
+ int end_position = shared->end_position();
+ bool is_expression = shared->is_expression();
+ Counters::total_compile_size.Increment(end_position - start_position);
+
+ // Generate the AST for the lazily compiled function. The AST may be
+ // NULL in case of parser stack overflow.
+ FunctionLiteral* lit = MakeLazyAST(script, name,
+ start_position,
+ end_position,
+ is_expression);
+
+ // Measure how long it takes to do the lazy compilation; only take
+ // the rest of the function into account to avoid overlap with the
+ // lazy parsing statistics.
+ StatsRateScope timer(&Counters::compile_lazy);
+
+ // Compile the code (if we have a syntax tree).
+ Handle<Code> code = Handle<Code>::null();
+ if (lit != NULL) code = MakeCode(lit, script, false);
+
+ // Check for stack-overflow during compilation.
+ if (code.is_null()) {
+ ASSERT(Top::has_pending_exception());
+ StackGuard::EnableInterrupts();
+ return false;
+ }
+
+ // Generate the code, update the function info, and return the code.
+ LOG(CodeCreateEvent("LazyCompile", *code, *lit->name()));
+
+ // Update the shared function info with the compiled code.
+ shared->set_code(*code);
+
+ // Set the expected number of properties for instances.
+ SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
+
+ // Check the function has compiled code.
+ ASSERT(shared->is_compiled());
+ StackGuard::EnableInterrupts();
+ return true;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COMPILER_H_
+#define V8_COMPILER_H_
+
+#include "parser.h"
+
+namespace v8 { namespace internal {
+
+// The V8 compiler
+//
+// General strategy: Source code is translated into an anonymous function w/o
+// parameters which then can be executed. If the source code contains other
+// functions, they will be compiled and allocated as part of the compilation
+// of the source code.
+
+// Please note this interface returns function boilerplates.
+// This means you need to call Factory::NewFunctionFromBoilerplate
+// before you have a real function with context.
+
+// The dedicated compiler interface. All methods are static (the
+// compiler keeps no per-instance state); the returned functions are
+// boilerplates — see the note above about
+// Factory::NewFunctionFromBoilerplate.
+class Compiler : public AllStatic {
+ public:
+ // All routines return a JSFunction.
+ // If an error occurs an exception is raised and
+ // the return handle contains NULL.
+
+ // Compile a String source within a context.
+ static Handle<JSFunction> Compile(Handle<String> source,
+ Handle<String> script_name,
+ int line_offset, int column_offset,
+ v8::Extension* extension,
+ ScriptDataImpl* script_data);
+
+ // Compile a String source within a context for Eval.
+ static Handle<JSFunction> CompileEval(bool is_global, Handle<String> source);
+
+ // Compile from function info (used for lazy compilation). Returns
+ // true on success and false if the compilation resulted in a stack
+ // overflow.
+ static bool CompileLazy(Handle<SharedFunctionInfo> shared);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_COMPILER_H_
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CONSTANTS_ARM_H_
+#define V8_CONSTANTS_ARM_H_
+
+namespace assembler { namespace arm {
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate ARM instructions.
+//
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+
+typedef unsigned char byte;
+
+// ARM condition codes, as encoded in bits 31-28 of every instruction
+// (see Instr::ConditionField). Values match the ARM architecture
+// encoding; no_condition is used internally for "unset".
+enum Condition {
+ no_condition = -1,
+ EQ = 0, // equal
+ NE = 1, // not equal
+ CS = 2, // carry set / unsigned higher or same
+ CC = 3, // carry clear / unsigned lower
+ MI = 4, // minus / negative
+ PL = 5, // plus / positive or zero
+ VS = 6, // overflow
+ VC = 7, // no overflow
+ HI = 8, // unsigned higher
+ LS = 9, // unsigned lower or same
+ GE = 10, // signed greater than or equal
+ LT = 11, // signed less than
+ GT = 12, // signed greater than
+ LE = 13, // signed less than or equal
+ AL = 14, // always (unconditional)
+ special_condition = 15 // reserved/unconditional encoding space
+};
+
+
+// ARM data-processing opcodes, as encoded in bits 24-21 (see
+// Instr::OpcodeField). Values match the ARM architecture encoding;
+// no_operand is used internally for "unset".
+enum Opcode {
+ no_operand = -1,
+ AND = 0, // bitwise and
+ EOR = 1, // bitwise exclusive or
+ SUB = 2, // subtract
+ RSB = 3, // reverse subtract
+ ADD = 4, // add
+ ADC = 5, // add with carry
+ SBC = 6, // subtract with carry
+ RSC = 7, // reverse subtract with carry
+ TST = 8, // test (and, flags only)
+ TEQ = 9, // test equivalence (eor, flags only)
+ CMP = 10, // compare (sub, flags only)
+ CMN = 11, // compare negated (add, flags only)
+ ORR = 12, // bitwise or
+ MOV = 13, // move
+ BIC = 14, // bit clear (and not)
+ MVN = 15 // move not
+};
+
+
+// Shift types for register shift operands (bits 6-5, see
+// Instr::ShiftField). Values match the ARM encoding.
+enum Shift {
+ no_shift = -1,
+ LSL = 0, // logical shift left
+ LSR = 1, // logical shift right
+ ASR = 2, // arithmetic shift right
+ ROR = 3 // rotate right
+};
+
+
+// SWI immediate values (see Instr::SwiField) used to communicate with
+// the run-time system.
+enum SoftwareInterruptCodes {
+ // transition to C code
+ call_rt_r5 = 0x10,
+ call_rt_r2 = 0x11,
+ // break point
+ break_point = 0x20
+};
+
+
+typedef int32_t instr_t;
+
+
+// The class Instr enables access to individual fields defined in the ARM
+// architecture.
+class Instr {
+ public:
+ enum {
+ kInstrSize = 4, // instruction width in bytes
+ kPCReadOffset = 8 // on ARM, reading PC yields instruction address + 8
+ };
+
+ // Get the raw instruction bits
+ inline instr_t InstructionBits() const {
+ return *reinterpret_cast<const instr_t*>(this);
+ }
+
+ // Overwrite the raw instruction bits in place.
+ inline void SetInstructionBits(instr_t value) {
+ *reinterpret_cast<instr_t*>(this) = value;
+ }
+
+ // Read a single bit (0 or 1) at position nr.
+ inline int Bit(int nr) const {
+ return (InstructionBits() >> nr) & 1;
+ }
+
+ // Read bits hi..lo (inclusive) as an unsigned value.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+
+ // Accessors for the different named fields used in the ARM encoding.
+ // Generally applicable fields
+ inline Condition ConditionField() const {
+ return static_cast<Condition>(Bits(31, 28));
+ }
+ inline int TypeField() const { return Bits(27, 25); }
+
+ inline int RnField() const { return Bits(19, 16); }
+ inline int RdField() const { return Bits(15, 12); }
+
+ // Fields used in Data processing instructions
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(Bits(24, 21));
+ }
+ inline int SField() const { return Bit(20); }
+ // with register
+ inline int RmField() const { return Bits(3, 0); }
+ inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); }
+ inline int RegShiftField() const { return Bit(4); }
+ inline int RsField() const { return Bits(11, 8); }
+ inline int ShiftAmountField() const { return Bits(11, 7); }
+ // with immediate
+ inline int RotateField() const { return Bits(11, 8); }
+ inline int Immed8Field() const { return Bits(7, 0); }
+
+ // Fields used in Load/Store instructions
+ inline int PUField() const { return Bits(24, 23); }
+ inline int BField() const { return Bit(22); }
+ inline int WField() const { return Bit(21); }
+ inline int LField() const { return Bit(20); }
+ // with register uses same fields as Data processing instructions above
+ // with immediate
+ inline int Offset12Field() const { return Bits(11, 0); }
+ // multiple
+ inline int RlistField() const { return Bits(15, 0); }
+ // extra loads and stores
+ inline int SignField() const { return Bit(6); }
+ inline int HField() const { return Bit(5); }
+ inline int ImmedHField() const { return Bits(11, 8); }
+ inline int ImmedLField() const { return Bits(3, 0); }
+
+ // Fields used in Branch instructions
+ inline int LinkField() const { return Bit(24); }
+ // Sign-extended 24-bit branch offset.
+ // NOTE(review): relies on arithmetic right shift of a signed int to
+ // sign-extend — implementation-defined in C++, but true on the
+ // supported compilers.
+ inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
+
+ // Fields used in Software interrupt instructions
+ inline SoftwareInterruptCodes SwiField() const {
+ return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
+ }
+
+ // Test for special encodings of type 0 instructions (extra loads and stores,
+ // as well as multiplications).
+ inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
+
+ // Special accessors that test for existence of a value.
+ inline bool HasS() const { return SField() == 1; }
+ inline bool HasB() const { return BField() == 1; }
+ inline bool HasW() const { return WField() == 1; }
+ inline bool HasL() const { return LField() == 1; }
+ inline bool HasSign() const { return SignField() == 1; }
+ inline bool HasH() const { return HField() == 1; }
+ inline bool HasLink() const { return LinkField() == 1; }
+
+ // Instructions are read of out a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instr.
+ // Use the At(pc) function to create references to Instr.
+ static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
+
+ private:
+ // We need to prevent the creation of instances of class Instr.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+
+} } // namespace assembler::arm
+
+#endif // V8_CONSTANTS_ARM_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "debug.h"
+#include "scopeinfo.h"
+
+namespace v8 { namespace internal {
+
+
+#ifdef DEBUG
+DEFINE_bool(trace_contexts, false, "trace contexts operations");
+#else
+#define FLAG_trace_contexts false
+#endif
+
+
+// Returns the builtins object for this context's global: for a
+// JSGlobalObject the builtins are held in a separate object; otherwise
+// the global object itself must already be the builtins object.
+JSBuiltinsObject* Context::builtins() {
+ GlobalObject* object = global();
+ if (object->IsJSGlobalObject()) {
+ return JSGlobalObject::cast(object)->builtins();
+ } else {
+ ASSERT(object->IsJSBuiltinsObject());
+ return JSBuiltinsObject::cast(object);
+ }
+}
+
+
+// Returns the global context for this context, either via the global
+// object's direct pointer (fast case) or by walking the closure's
+// context chain (bootstrapping case).
+Context* Context::global_context() {
+ // Fast case: the global object for this context has been set. In
+ // that case, the global object has a direct pointer to the global
+ // context.
+ if (global()->IsGlobalObject()) {
+ return global()->global_context();
+ }
+ // During bootstrapping, the global object might not be set and we
+ // have to search the context chain to find the global context.
+ Context* current = this;
+ while (!current->IsGlobalContext()) {
+ current = Context::cast(JSFunction::cast(current->closure())->context());
+ }
+ return current;
+}
+
+
+// Looks up 'name' starting in this context, and — when the
+// FOLLOW_CONTEXT_CHAIN flag is set — proceeding through enclosing
+// contexts until the global context is reached. On success returns the
+// holder (a context, an extension/'with' object, or an arguments
+// object) and fills in *index_ (slot or parameter index, -1 when the
+// result is a property holder) and *attributes. On failure returns a
+// null handle and *attributes == ABSENT.
+Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
+ int* index_, PropertyAttributes* attributes) {
+ Handle<Context> context(this);
+
+ // The context must be in frame slot 0 (if not debugging).
+ if (kDebug && !Debug::InDebugger()) {
+ StackFrameLocator locator;
+ ASSERT(context->fcontext() ==
+ Context::cast(
+ locator.FindJavaScriptFrame(0)->context())->fcontext());
+ }
+
+ bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
+ *index_ = -1;
+ *attributes = ABSENT;
+
+ if (FLAG_trace_contexts) {
+ PrintF("Context::Lookup(");
+ name->ShortPrint();
+ PrintF(")\n");
+ }
+
+ do {
+ if (FLAG_trace_contexts) {
+ PrintF(" - looking in context %p", *context);
+ if (context->IsGlobalContext()) PrintF(" (global context)");
+ PrintF("\n");
+ }
+
+ // check extension/with object
+ Handle<JSObject> context_ext(context->extension());
+ if (*context_ext != NULL) {
+ // Without FOLLOW_PROTOTYPE_CHAIN only local properties count.
+ if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0) {
+ *attributes = context_ext->GetLocalPropertyAttribute(*name);
+ } else {
+ *attributes = context_ext->GetPropertyAttribute(*name);
+ }
+ if (*attributes != ABSENT) {
+ // property found
+ if (FLAG_trace_contexts) {
+ PrintF("=> found property in context object %p\n", *context_ext);
+ }
+ return context_ext;
+ }
+ }
+
+ if (context->is_function_context()) {
+ // we have context-local slots
+
+ // check non-parameter locals in context
+ Handle<Code> code(context->closure()->code());
+ Variable::Mode mode;
+ int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode);
+ ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
+ if (index >= 0) {
+ // slot found
+ if (FLAG_trace_contexts) {
+ PrintF("=> found local in context slot %d (mode = %d)\n",
+ index, mode);
+ }
+ *index_ = index;
+ // Note: Fixed context slots are statically allocated by the compiler.
+ // Statically allocated variables always have a statically known mode,
+ // which is the mode with which they were declared when added to the
+ // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
+ // declared variables that were introduced through declaration nodes)
+ // must not appear here.
+ switch (mode) {
+ case Variable::INTERNAL : // fall through
+ case Variable::VAR : *attributes = NONE; break;
+ case Variable::CONST : *attributes = READ_ONLY; break;
+ case Variable::DYNAMIC : UNREACHABLE(); break;
+ case Variable::TEMPORARY: UNREACHABLE(); break;
+ }
+ return context;
+ }
+
+ // check parameter locals in context
+ int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
+ if (param_index >= 0) {
+ // slot found
+ int index =
+ ScopeInfo<>::ContextSlotIndex(*code,
+ Heap::arguments_shadow_symbol(),
+ NULL);
+ ASSERT(index >= 0); // arguments must exist and be in the heap context
+ Handle<JSObject> arguments(JSObject::cast(context->get(index)));
+ ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
+ if (FLAG_trace_contexts) {
+ PrintF("=> found parameter %d in arguments object\n", param_index);
+ }
+ *index_ = param_index;
+ *attributes = NONE;
+ return arguments;
+ }
+
+ // check intermediate context (holding only the function name variable)
+ if (follow_context_chain) {
+ int index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name);
+ if (index >= 0) {
+ // slot found
+ if (FLAG_trace_contexts) {
+ PrintF("=> found intermediate function in context slot %d\n",
+ index);
+ }
+ *index_ = index;
+ *attributes = READ_ONLY;
+ return context;
+ }
+ }
+ }
+
+ // proceed with enclosing context
+ if (context->IsGlobalContext()) {
+ follow_context_chain = false;
+ } else if (context->previous() != NULL) {
+ context = Handle<Context>(context->previous());
+ } else {
+ ASSERT(context->is_function_context());
+ context = Handle<Context>(Context::cast(context->closure()->context()));
+ }
+ } while (follow_context_chain);
+
+ // slot not found
+ if (FLAG_trace_contexts) {
+ PrintF("=> no property/slot found\n");
+ }
+ return Handle<Object>(reinterpret_cast<JSObject*>(NULL));
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CONTEXTS_H_
+#define V8_CONTEXTS_H_
+
+namespace v8 { namespace internal {
+
+
+enum ContextLookupFlags {
+ FOLLOW_CONTEXT_CHAIN = 1,
+ FOLLOW_PROTOTYPE_CHAIN = 2,
+
+ DONT_FOLLOW_CHAINS = 0,
+ FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN
+};
+
+
+// Heap-allocated activation contexts.
+//
+// Contexts are implemented as FixedArray objects; the Context
+// class is a convenience interface casted on a FixedArray object.
+//
+// Note: Context must have no virtual functions and Context objects
+// must always be allocated via Heap::AllocateContext() or
+// Factory::NewContext.
+
+// Comment for special_function_table:
+// Table for providing optimized/specialized functions.
+// The array contains triplets [object, general_function, optimized_function].
+// Primarily added to support built-in optimized variants of
+// Array.prototype.{push,pop}.
+
+#define GLOBAL_CONTEXT_FIELDS(V) \
+ V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
+ V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
+ V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
+ V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
+ V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
+ V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
+ V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
+ V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
+ V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
+ V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \
+ V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
+ V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
+ V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
+ V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
+ V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
+ V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
+ V(FUNCTION_MAP_INDEX, Map, function_map) \
+ V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
+ V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
+ V(SPECIAL_FUNCTION_TABLE_INDEX, FixedArray, special_function_table) \
+ V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+ V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
+ V(DEBUG_EVENT_LISTENERS_INDEX, JSObject, debug_event_listeners) \
+ V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
+ V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
+ V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
+ V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
+ V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
+ V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
+ V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
+ V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
+ V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
+ V(OUT_OF_MEMORY_INDEX, Object, out_of_memory)
+
+// JSFunctions are pairs (context, function code), sometimes also called
+// closures. A Context object is used to represent function contexts and
+// dynamically pushed 'with' contexts (or 'scopes' in ECMA-262 speak).
+//
+// At runtime, the contexts build a stack in parallel to the execution
+// stack, with the top-most context being the current context. All contexts
+// have the following slots:
+//
+// [ closure ] This is the current function. It is the same for all
+// contexts inside a function. It provides access to the
+// incoming context (i.e., the outer context, which may
+// or may not become the current function's context), and
+// it provides access to the functions code and thus it's
+// scope information, which in turn contains the names of
+// statically allocated context slots. The names are needed
+// for dynamic lookups in the presence of 'with' or 'eval'.
+//
+// [ fcontext ] A pointer to the innermost enclosing function context.
+// It is the same for all contexts *allocated* inside a
+// function, and the function context's fcontext points
+// to itself. It is only needed for fast access of the
+// function context (used for declarations, and static
+// context slot access).
+//
+// [ previous ] A pointer to the previous context. It is NULL for
+// function contexts, and non-NULL for 'with' contexts.
+// Used to implement the 'with' statement.
+//
+// [ extension ] A pointer to an extension JSObject, or NULL. Used to
+// implement 'with' statements and dynamic declarations
+// (through 'eval'). The object in a 'with' statement is
+// stored in the extension slot of a 'with' context.
+// Dynamically declared variables/functions are also added
+// to lazily allocated extension object. Context::Lookup
+// searches the extension object for properties.
+//
+// [ global ] A pointer to the global object. Provided for quick
+// access to the global object from inside the code (since
+// we always have a context pointer).
+//
+// In addition, function contexts may have statically allocated context slots
+// to store local variables/functions that are accessed from inner functions
+// (via static context addresses) or through 'eval' (dynamic context lookups).
+// Finally, the global context contains additional slots for fast access to
+// global properties.
+//
+// We may be able to simplify the implementation:
+//
+// - We may be able to get rid of 'fcontext': We can always use the fact that
+// previous == NULL for function contexts and so we can search for them. They
+// are only needed when doing dynamic declarations, and the context chains
+// tend to be very very short (depth of nesting of 'with' statements). At
+// the moment we also use it in generated code for context slot accesses -
+// and there we don't want a loop because of code bloat - but we may not
+// need it there after all (see comment in codegen_*.cc).
+//
+// - If we cannot get rid of fcontext, consider making 'previous' never NULL
+// except for the global context. This could simplify Context::Lookup.
+
class Context: public FixedArray {
 public:
  // Conversions.
  static Context* cast(Object* context) {
    ASSERT(context->IsContext());
    return reinterpret_cast<Context*>(context);
  }

  // The default context slot layout; indices are FixedArray slot indices.
  enum {
    // These slots are in all contexts.
    CLOSURE_INDEX,
    FCONTEXT_INDEX,
    PREVIOUS_INDEX,
    EXTENSION_INDEX,
    GLOBAL_INDEX,
    MIN_CONTEXT_SLOTS,

    // These slots are only in global contexts. The order must stay in
    // sync with the GLOBAL_CONTEXT_FIELDS macro above.
    ARGUMENTS_BOILERPLATE_INDEX = MIN_CONTEXT_SLOTS,
    JS_ARRAY_MAP_INDEX,
    FUNCTION_MAP_INDEX,
    FUNCTION_INSTANCE_MAP_INDEX,
    INITIAL_OBJECT_PROTOTYPE_INDEX,
    BOOLEAN_FUNCTION_INDEX,
    NUMBER_FUNCTION_INDEX,
    STRING_FUNCTION_INDEX,
    OBJECT_FUNCTION_INDEX,
    ARRAY_FUNCTION_INDEX,
    DATE_FUNCTION_INDEX,
    REGEXP_FUNCTION_INDEX,
    CREATE_DATE_FUN_INDEX,
    TO_NUMBER_FUN_INDEX,
    TO_STRING_FUN_INDEX,
    TO_DETAIL_STRING_FUN_INDEX,
    TO_OBJECT_FUN_INDEX,
    TO_INTEGER_FUN_INDEX,
    TO_UINT32_FUN_INDEX,
    TO_INT32_FUN_INDEX,
    TO_BOOLEAN_FUN_INDEX,
    INSTANTIATE_FUN_INDEX,
    CONFIGURE_INSTANCE_FUN_INDEX,
    SPECIAL_FUNCTION_TABLE_INDEX,
    MESSAGE_LISTENERS_INDEX,
    DEBUG_EVENT_LISTENERS_INDEX,
    MAKE_MESSAGE_FUN_INDEX,
    GET_STACK_TRACE_LINE_INDEX,
    CONFIGURE_GLOBAL_INDEX,
    FUNCTION_CACHE_INDEX,
    RUNTIME_CONTEXT_INDEX,
    CALL_AS_FUNCTION_DELEGATE_INDEX,
    EMPTY_SCRIPT_INDEX,
    SCRIPT_FUNCTION_INDEX,
    CONTEXT_EXTENSION_FUNCTION_INDEX,
    OUT_OF_MEMORY_INDEX,
    GLOBAL_CONTEXT_SLOTS
  };

  // Direct slot access.
  JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
  void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }

  Context* fcontext() {
    return reinterpret_cast<Context*>(get(FCONTEXT_INDEX));
  }
  void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }

  Context* previous() {
    return reinterpret_cast<Context*>(get(PREVIOUS_INDEX));
  }
  void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }

  JSObject* extension() {
    return reinterpret_cast<JSObject*>(get(EXTENSION_INDEX));
  }
  void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }

  GlobalObject* global() {
    return reinterpret_cast<GlobalObject*>(get(GLOBAL_INDEX));
  }
  void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }

  // The builtins object.
  JSBuiltinsObject* builtins();

  // Compute the global context by traversing the context chain.
  Context* global_context();

  // Tells if this is a function context (as opposed to a 'with' context).
  bool is_function_context() { return previous() == NULL; }

  // Tells whether the global context is marked with out of memory.
  bool has_out_of_memory() {
    return global_context()->out_of_memory() == Heap::true_value();
  }

  // Mark the global context with out of memory.
  void mark_out_of_memory() {
    global_context()->set_out_of_memory(Heap::true_value());
  }

  // Typed accessor pairs for the global-context-only slots, one pair per
  // entry of the GLOBAL_CONTEXT_FIELDS macro. Only valid on a global
  // context (checked in debug mode).
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
  void  set_##name(type* value)  { \
    ASSERT(IsGlobalContext()); \
    set(index, value); \
  } \
  type* name() { \
    ASSERT(IsGlobalContext()); \
    return type::cast(get(index)); \
  }
  GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS

  // Lookup the slot called name, starting with the current context.
  // There are 4 possible outcomes:
  //
  // 1) index_ >= 0 && result->IsContext():
  //    most common case, the result is a Context, and index is the
  //    context slot index, and the slot exists.
  //    attributes == READ_ONLY for the function name variable, NONE otherwise.
  //
  // 2) index_ >= 0 && result->IsJSObject():
  //    the result is the JSObject arguments object, the index is the parameter
  //    index, i.e., key into the arguments object, and the property exists.
  //    attributes != ABSENT.
  //
  // 3) index_ < 0 && result->IsJSObject():
  //    the result is the JSObject extension context or the global object,
  //    and the name is the property name, and the property exists.
  //    attributes != ABSENT.
  //
  // 4) index_ < 0 && result.deref() == NULL:
  //    there was no context found with the corresponding property.
  //    attributes == ABSENT.
  Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags,
                        int* index_, PropertyAttributes* attributes);

  // Code generation support.
  // Byte offset of slot 'index' relative to a *tagged* Context pointer
  // (the kHeapObjectTag subtraction compensates for the pointer tag).
  static int SlotOffset(int index) {
    return kHeaderSize + index * kPointerSize - kHeapObjectTag;
  }
};
+
+} } // namespace v8::internal
+
+#endif // V8_CONTEXTS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CONVERSIONS_INL_H_
+#define V8_CONVERSIONS_INL_H_
+
+#include <math.h>
+#include <float.h> // required for DBL_MAX and on Win32 for finite()
+
+// ----------------------------------------------------------------------------
+// Extra POSIX/ANSI functions for Win32/MSVC.
+
+#include "conversions.h"
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
// The fast double-to-int conversion routine does not guarantee rounding
// towards zero: the lrint() branch rounds according to the current FPU
// rounding direction, while the cast branch truncates.
static inline int FastD2I(double x) {
#ifdef __USE_ISOC99
  // The ISO C99 standard defines the lrint() function which rounds a
  // double to an integer according to the current rounding direction.
  return lrint(x);
#else
  // This is incredibly slow on Intel x86. The reason is that rounding
  // towards zero is implied by the C standard. This means that the
  // status register of the FPU has to be changed with the 'fldcw'
  // instruction. This completely stalls the pipeline and takes many
  // hundreds of clock cycles.
  return static_cast<int>(x);
#endif
}
+
+
// Truncate a double to its integral part (round toward zero).
// NaN maps to 0; infinities and zeros (including -0) are returned as-is.
static inline double DoubleToInteger(double x) {
  if (isnan(x)) return 0;
  if (!isfinite(x)) return x;
  if (x == 0) return x;  // preserves the sign of -0
  if (x > 0) return floor(x);
  return ceil(x);
}
+
+
+int32_t NumberToInt32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToInt32(number->Number());
+}
+
+
+uint32_t NumberToUint32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToUint32(number->Number());
+}
+
+
// Convert a double to a signed 32-bit integer with modulo-2^32 wrapping
// of the integral part (values in [2^31, 2^32) map to negative int32s).
int32_t DoubleToInt32(double x) {
  int32_t i = FastD2I(x);
  if (FastI2D(i) == x) return i;  // fast path: x is already an exact int32
  static const double two32 = 4294967296.0;
  static const double two31 = 2147483648.0;
  if (!isfinite(x) || x == 0) return 0;
  // Reduce into (-two32, two32), then truncate toward zero and shift
  // negative values up so the result lies in [0, two32).
  if (x < 0 || x >= two32) x = fmod(x, two32);
  x = (x >= 0) ? floor(x) : ceil(x) + two32;
  return (int32_t) ((x >= two31) ? x - two32 : x);
}
+
+
+} } // namespace v8::internal
+
+#endif // V8_CONVERSIONS_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "conversions-inl.h"
+#include "factory.h"
+#include "scanner.h"
+
+namespace v8 { namespace internal {
+
+
// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that check the
// buffer bounds on all operations in debug mode.
class StringBuilder {
 public:
  // Create a string builder with a buffer of the given size. The
  // buffer is allocated through NewArray<char> and must be
  // deallocated by the caller of Finalize().
  explicit StringBuilder(int size);

  // Create a string builder over an externally owned buffer.
  StringBuilder(char* buffer, int size)
      : buffer_(buffer), size_(size), position_(0) { }

  // Destruction finalizes (0-terminates) the buffer if the caller
  // has not already done so.
  ~StringBuilder() { if (!is_finalized()) Finalize(); }

  // Get the current position in the builder.
  inline int position() const;

  // Add a single character to the builder. It is not allowed to add
  // 0-characters; use the Finalize() method to terminate the string
  // instead.
  inline void AddCharacter(char c);

  // Add an entire string to the builder. Uses strlen() internally to
  // compute the length of the input string.
  void AddString(const char* s);

  // Add the first 'n' characters of the given string 's' to the
  // builder. The input string must have enough characters.
  void AddSubstring(const char* s, int n);

  // Add formatted contents to the builder just like printf().
  void AddFormatted(const char* format, ...);

  // Add character padding to the builder. If count is non-positive,
  // nothing is added to the builder.
  void AddPadding(char c, int count);

  // Finalize the string by 0-terminating it and returning the buffer.
  char* Finalize();

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);

  char* buffer_;    // character buffer being filled
  int size_;        // buffer capacity, including room for the 0-terminator
  int position_;    // next write index; negative once finalized

  bool is_finalized() const { return position_ < 0; }
};
+
+
+StringBuilder::StringBuilder(int size) {
+ buffer_ = NewArray<char>(size);
+ size_ = size;
+ position_ = 0;
+}
+
+
// Current write position; only valid while the builder is still open.
inline int StringBuilder::position() const {
  ASSERT(!is_finalized());
  return position_;
}
+
+
// Append one character; '\0' is reserved for Finalize().
inline void StringBuilder::AddCharacter(char c) {
  ASSERT(c != '\0');
  ASSERT(!is_finalized() && position_ < size_);
  buffer_[position_++] = c;
}
+
+
+void StringBuilder::AddString(const char* s) {
+ AddSubstring(s, strlen(s));
+}
+
+
// Append the first n characters of 's'. The strict '<' bound keeps one
// byte free for the terminator written by Finalize().
void StringBuilder::AddSubstring(const char* s, int n) {
  ASSERT(!is_finalized() && position_ + n < size_);
  ASSERT(static_cast<size_t>(n) <= strlen(s));
  memcpy(&buffer_[position_], s, n * kCharSize);
  position_ += n;
}
+
+
// Append printf-style formatted output, bounded by the remaining space.
void StringBuilder::AddFormatted(const char* format, ...) {
  ASSERT(!is_finalized() && position_ < size_);
  va_list args;
  va_start(args, format);
  int remaining = size_ - position_;
  int n = OS::VSNPrintF(&buffer_[position_], remaining, format, args);
  va_end(args);
  // On error or truncation, mark the builder as full; a subsequent
  // Finalize() will then trip its bounds ASSERT in debug mode.
  if (n < 0 || n >= remaining) {
    position_ = size_;
  } else {
    position_ += n;
  }
}
+
+
+void StringBuilder::AddPadding(char c, int count) {
+ for (int i = 0; i < count; i++) {
+ AddCharacter(c);
+ }
+}
+
+
// 0-terminate the contents, mark the builder closed (position_ = -1),
// and hand the buffer back to the caller.
char* StringBuilder::Finalize() {
  ASSERT(!is_finalized() && position_ < size_);
  buffer_[position_] = '\0';
  // Make sure nobody managed to add a 0-character to the
  // buffer while building the string.
  ASSERT(strlen(buffer_) == static_cast<size_t>(position_));
  position_ = -1;
  ASSERT(is_finalized());
  return buffer_;
}
+
+
+int HexValue(uc32 c) {
+ if ('0' <= c && c <= '9')
+ return c - '0';
+ if ('a' <= c && c <= 'f')
+ return c - 'a' + 10;
+ if ('A' <= c && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+
// Provide a common interface to getting a character at a certain
// index from a char* or a String object.
static inline int GetChar(const char* str, int index) {
  ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
  return str[index];
}
+
+
// String-object counterpart of GetChar(const char*, int).
static inline int GetChar(String* str, int index) {
  return str->Get(index);
}
+
+
// Length of a C string (implicitly narrowed from size_t to int).
static inline int GetLength(const char* str) {
  return strlen(str);
}
+
+
// Length of a String object.
static inline int GetLength(String* str) {
  return str->length();
}
+
+
// For char* input a "C string copy" is just a pointer into the
// original; pair with the no-op ReleaseCString(const char*, ...).
static inline const char* GetCString(const char* str, int index) {
  return str + index;
}
+
+
+static inline const char* GetCString(String* str, int index) {
+ char* result = NewArray<char>(str->length() + 1);
+ for (int i = index; i < str->length(); i++) {
+ if (str->Get(i) <= 127) {
+ result[i - index] = static_cast<char>(str->Get(i));
+ } else {
+ result[i - index] = 127; // Force number parsing to fail.
+ }
+ }
+ result[str->length() - index] = '\0';
+ return result;
+}
+
+
// Counterpart to GetCString(const char*, int): that overload returns a
// pointer into the original string, so there is nothing to free.
static inline void ReleaseCString(const char* original, const char* str) {
}
+
+
// Free the temporary copy created by GetCString(String*, int).
static inline void ReleaseCString(String* original, const char* str) {
  DeleteArray(const_cast<char *>(str));
}
+
+
// Whitespace test using the scanner's whitespace table (char* variant).
static inline bool IsSpace(const char* str, int index) {
  ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
  return Scanner::kIsWhiteSpace.get(str[index]);
}
+
+
// Whitespace test using the scanner's whitespace table (String variant).
static inline bool IsSpace(String* str, int index) {
  return Scanner::kIsWhiteSpace.get(str->Get(index));
}
+
+
// Check whether the characters of 'str' starting at 'index' match
// 'other' exactly (for strlen(other) characters). Used to recognize
// the literal "Infinity" during string-to-double conversion.
static inline bool SubStringEquals(const char* str,
                                   int index,
                                   const char* other) {
  // BUG FIX: strncmp() returns 0 on equality, so the result must be
  // compared with '== 0'. The previous '!= 0' inverted the predicate,
  // disagreeing with the String* overload (which returns true on a
  // match) and making the "Infinity" detection in
  // InternalStringToDouble reject genuine matches for char* input.
  return strncmp(str + index, other, strlen(other)) == 0;
}
+
+
// String-object variant: compare str[index..index+strlen(other)) against
// 'other' (clamped to the end of str) via a temporary string slice.
// Returns true on a match.
static inline bool SubStringEquals(String* str, int index, const char* other) {
  HandleScope scope;
  int len = strlen(other);
  int end = index + len < str->length() ? index + len : str->length();
  Handle<String> slice =
      Factory::NewStringSlice(Handle<String>(str), index, end);
  return slice->IsEqualTo(Vector<const char>(other, len));
}
+
+
// Check if a string should be parsed as an octal number. The string
// can be either a char* or a String*.
template<class S>
static bool ShouldParseOctal(S* s, int i) {
  int index = i;
  int len = GetLength(s);
  // An octal number must start with '0'.
  if (index < len && GetChar(s, index) != '0') return false;

  // If the first real character (following '0') is not an octal
  // digit, bail out early. This also takes care of numbers of the
  // forms 0.xxx and 0exxx by not allowing the first 0 to be
  // interpreted as an octal.
  index++;
  if (index < len) {
    int d = GetChar(s, index) - '0';
    if (d < 0 || d > 7) return false;
  } else {
    return false;
  }

  // Traverse all digits (including the first). If there is an octal
  // prefix which is not a part of a longer decimal prefix, we return
  // true. Otherwise, false is returned. A digit 8 or 9 anywhere in the
  // run means the whole prefix must be read as decimal.
  while (index < len) {
    int d = GetChar(s, index++) - '0';
    if (d == 8 || d == 9) return false;
    if (d < 0 || d > 7) return true;
  }
  return true;
}
+
+
+extern "C" double gay_strtod(const char* s00, const char** se);
+
+
// Parse an int from a string starting a given index and in a given
// radix. The string can be either a char* or a String*. Stores the
// parsed value in *value and returns the index of the first
// unconsumed character.
template <class S>
static int InternalStringToInt(S* s, int i, int radix, double* value) {
  int len = GetLength(s);

  // Setup limits for computing the value.
  ASSERT(2 <= radix && radix <= 36);
  int lim_0 = '0' + (radix < 10 ? radix : 10);
  int lim_a = 'a' + (radix - 10);
  int lim_A = 'A' + (radix - 10);

  // NOTE: The code for computing the value may seem a bit complex at
  // first glance. It is structured to use 32-bit multiply-and-add
  // loops as long as possible to avoid losing precision.

  double v = 0.0;
  int j;
  for (j = i; j < len;) {
    // Parse the longest part of the string starting at index j
    // possible while keeping the multiplier, and thus the part
    // itself, within 32 bits.
    uint32_t part = 0, multiplier = 1;
    int k;
    for (k = j; k < len; k++) {
      // Map the character to its digit value, or stop at the first
      // character that is not a valid digit in this radix.
      int c = GetChar(s, k);
      if (c >= '0' && c < lim_0) {
        c = c - '0';
      } else if (c >= 'a' && c < lim_a) {
        c = c - 'a' + 10;
      } else if (c >= 'A' && c < lim_A) {
        c = c - 'A' + 10;
      } else {
        break;
      }

      // Update the value of the part as long as the multiplier fits
      // in 32 bits. When we can't guarantee that the next iteration
      // will not overflow the multiplier, we stop parsing the part
      // by leaving the loop.
      static const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
      uint32_t m = multiplier * radix;
      if (m > kMaximumMultiplier) break;
      part = part * radix + c;
      multiplier = m;
      ASSERT(multiplier > part);
    }

    // Compute the number of part digits. If no digits were parsed;
    // we're done parsing the entire string.
    int digits = k - j;
    if (digits == 0) break;

    // Update the value and skip the part in the string.
    ASSERT(multiplier ==
           pow(static_cast<double>(radix), static_cast<double>(digits)));
    v = v * multiplier + part;
    j = k;
  }

  // If the resulting value is larger than 2^53 the value does not fit
  // in the mantissa of the double and there is a loss of precision.
  // When the value is larger than 2^53 the rounding depends on the
  // code generation. If the code generator spills the double value
  // it uses 64 bits and if it does not it uses 80 bits.
  //
  // If there is a potential for overflow we resort to strtod for
  // radix 10 numbers to get higher precision. For numbers in another
  // radix we live with the loss of precision.
  static const double kPreciseConversionLimit = 9007199254740992.0;  // 2^53
  if (radix == 10 && v > kPreciseConversionLimit) {
    const char* cstr = GetCString(s, i);
    const char* end;
    v = gay_strtod(cstr, &end);
    ReleaseCString(s, cstr);
  }

  *value = v;
  return j;
}
+
+
// Parse an int from a String object (see InternalStringToInt).
int StringToInt(String* str, int index, int radix, double* value) {
  return InternalStringToInt(str, index, radix, value);
}
+
+
+int StringToInt(const char* str, int index, int radix, double* value) {
+ return InternalStringToInt(const_cast<char*>(str), index, radix, value);
+}
+
+
+static const double JUNK_STRING_VALUE = OS::nan_value();
+
+
// Convert a string to a double value. The string can be either a
// char* or a String*. 'flags' selects optional hex/octal prefixes and
// trailing-junk tolerance; 'empty_string_val' is returned for an empty
// (or all-whitespace) input.
template<class S>
static double InternalStringToDouble(S* str,
                                     int flags,
                                     double empty_string_val) {
  double result = 0.0;
  int index = 0;

  int len = GetLength(str);

  // Skip leading spaces.
  while ((index < len) && IsSpace(str, index)) index++;

  // Compute sign of result.
  int sign = 1;
  if (index < len && GetChar(str, index) == '-') {
    sign = -1;
    index++;
    // A string containing only '-' is junk.
    if (index == len) return JUNK_STRING_VALUE;
  }

  // string is empty?
  if (index >= len) return empty_string_val;

  // do we have a hex number?
  // (since the string is 0-terminated, it's ok to look one char beyond the end)
  if ((flags & ALLOW_HEX) != 0 &&
      (index + 1) < len &&
      GetChar(str, index) == '0' &&
      (GetChar(str, index + 1) == 'x' || GetChar(str, index + 1) == 'X')) {
    index += 2;
    index = StringToInt(str, index, 16, &result);
  } else if ((flags & ALLOW_OCTALS) != 0 && ShouldParseOctal(str, index)) {
    // NOTE: We optimistically try to parse the number as an octal (if
    // we're allowed to), even though this is not as dictated by
    // ECMA-262. The reason for doing this is compatibility with IE and
    // Firefox.
    index = StringToInt(str, index, 8, &result);
  } else {
    const char* cstr = GetCString(str, index);
    const char* end;
    // Optimistically parse the number and then, if that fails,
    // check if it might have been {+,-,}Infinity.
    result = gay_strtod(cstr, &end);
    ReleaseCString(str, cstr);
    if (result != 0.0 || end != cstr) {
      // It appears that strtod worked
      index += end - cstr;
    } else {
      // Check for {+,-,}Infinity
      bool is_negative = (GetChar(str, index) == '-');
      if (GetChar(str, index) == '+' || GetChar(str, index) == '-')
        index++;
      if (!SubStringEquals(str, index, "Infinity"))
        return JUNK_STRING_VALUE;
      result = is_negative ? -INFINITY : INFINITY;
      index += 8;  // strlen("Infinity")
    }
  }

  if ((flags & ALLOW_TRAILING_JUNK) == 0) {
    // skip trailing spaces
    while ((index < len) && IsSpace(str, index)) index++;
    // string ending with junk?
    if (index < len) return JUNK_STRING_VALUE;
  }

  return sign * result;
}
+
+
// Convert a String object to a double (see InternalStringToDouble).
double StringToDouble(String* str, int flags, double empty_string_val) {
  return InternalStringToDouble(str, flags, empty_string_val);
}
+
+
// Convert a C string to a double (see InternalStringToDouble).
double StringToDouble(const char* str, int flags, double empty_string_val) {
  return InternalStringToDouble(str, flags, empty_string_val);
}
+
+
+extern "C" char* dtoa(double d, int mode, int ndigits,
+ int* decpt, int* sign, char** rve);
+
+extern "C" void freedtoa(char* s);
+
// Convert a double to its decimal string representation following
// ECMA-262 section 9.8.1, using Gay's dtoa in shortest mode (mode 0).
// The result is written into 'buffer' and a pointer into it returned.
const char* DoubleToCString(double v, Vector<char> buffer) {
  StringBuilder builder(buffer.start(), buffer.length());

  switch (fpclassify(v)) {
    case FP_NAN:
      builder.AddString("NaN");
      break;

    case FP_INFINITE:
      if (v < 0.0) {
        builder.AddString("-Infinity");
      } else {
        builder.AddString("Infinity");
      }
      break;

    case FP_ZERO:
      builder.AddCharacter('0');
      break;

    default: {
      // decimal_point is the position of the decimal point relative to
      // the start of the digit string produced by dtoa.
      int decimal_point;
      int sign;

      char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
      int length = strlen(decimal_rep);

      if (sign) builder.AddCharacter('-');

      if (length <= decimal_point && decimal_point <= 21) {
        // ECMA-262 section 9.8.1 step 6.
        builder.AddString(decimal_rep);
        builder.AddPadding('0', decimal_point - length);

      } else if (0 < decimal_point && decimal_point <= 21) {
        // ECMA-262 section 9.8.1 step 7.
        builder.AddSubstring(decimal_rep, decimal_point);
        builder.AddCharacter('.');
        builder.AddString(decimal_rep + decimal_point);

      } else if (decimal_point <= 0 && decimal_point > -6) {
        // ECMA-262 section 9.8.1 step 8.
        builder.AddString("0.");
        builder.AddPadding('0', -decimal_point);
        builder.AddString(decimal_rep);

      } else {
        // ECMA-262 section 9.8.1 step 9 and 10 combined.
        builder.AddCharacter(decimal_rep[0]);
        if (length != 1) {
          builder.AddCharacter('.');
          builder.AddString(decimal_rep + 1);
        }
        builder.AddCharacter('e');
        builder.AddCharacter((decimal_point >= 0) ? '+' : '-');
        int exponent = decimal_point - 1;
        if (exponent < 0) exponent = -exponent;
        builder.AddFormatted("%d", exponent);
      }

      freedtoa(decimal_rep);
    }
  }
  return builder.Finalize();
}
+
+
+const char* IntToCString(int n, Vector<char> buffer) {
+ bool negative = false;
+ if (n < 0) {
+ // We must not negate the most negative int.
+ if (n == kMinInt) return DoubleToCString(n, buffer);
+ negative = true;
+ n = -n;
+ }
+ // Build the string backwards from the least significant digit.
+ int i = buffer.length();
+ buffer[--i] = '\0';
+ do {
+ buffer[--i] = '0' + (n % 10);
+ n /= 10;
+ } while (n);
+ if (negative) buffer[--i] = '-';
+ return buffer.start() + i;
+}
+
+
// Convert a double to fixed notation with 'f' digits after the decimal
// point (Number.prototype.toFixed). Returns a heap-allocated C string;
// values >= 1e21 fall back to the general DoubleToCString format.
char* DoubleToFixedCString(double value, int f) {
  ASSERT(f >= 0);

  bool negative = false;
  double abs_value = value;
  if (value < 0) {
    abs_value = -value;
    negative = true;
  }

  if (abs_value >= 1e21) {
    char arr[100];
    Vector<char> buffer(arr, ARRAY_SIZE(arr));
    return StrDup(DoubleToCString(value, buffer));
  }

  // Find a sufficiently precise decimal representation of n.
  // dtoa mode 3: f digits after the decimal point.
  int decimal_point;
  int sign;
  char* decimal_rep = dtoa(abs_value, 3, f, &decimal_point, &sign, NULL);
  int decimal_rep_length = strlen(decimal_rep);

  // Create a representation that is padded with zeros if needed.
  int zero_prefix_length = 0;
  int zero_postfix_length = 0;

  if (decimal_point <= 0) {
    // Values < 1 need leading zeros, including the one before the point.
    zero_prefix_length = -decimal_point + 1;
    decimal_point = 1;
  }

  if (zero_prefix_length + decimal_rep_length < decimal_point + f) {
    zero_postfix_length = decimal_point + f - decimal_rep_length -
                          zero_prefix_length;
  }

  unsigned rep_length =
      zero_prefix_length + decimal_rep_length + zero_postfix_length;
  StringBuilder rep_builder(rep_length + 1);
  rep_builder.AddPadding('0', zero_prefix_length);
  rep_builder.AddString(decimal_rep);
  rep_builder.AddPadding('0', zero_postfix_length);
  char* rep = rep_builder.Finalize();
  freedtoa(decimal_rep);

  // Create the result string by appending a minus and putting in a
  // decimal point if needed.
  unsigned result_size = decimal_point + f + 2;
  StringBuilder builder(result_size + 1);
  if (negative) builder.AddCharacter('-');
  builder.AddSubstring(rep, decimal_point);
  if (f > 0) {
    builder.AddCharacter('.');
    builder.AddSubstring(rep + decimal_point, f);
  }
  DeleteArray(rep);
  return builder.Finalize();
}
+
+
// Build a heap-allocated "d.ddde+xx" exponential-notation string from a
// dtoa digit string, padding the mantissa with zeros up to
// 'significant_digits' digits.
static char* CreateExponentialRepresentation(char* decimal_rep,
                                             int exponent,
                                             bool negative,
                                             int significant_digits) {
  bool negative_exponent = false;
  if (exponent < 0) {
    negative_exponent = true;
    exponent = -exponent;
  }

  // Leave room in the result for appending a minus, for a period, the
  // letter 'e', a minus or a plus depending on the exponent, and a
  // three digit exponent.
  unsigned result_size = significant_digits + 7;
  StringBuilder builder(result_size + 1);

  if (negative) builder.AddCharacter('-');
  builder.AddCharacter(decimal_rep[0]);
  if (significant_digits != 1) {
    builder.AddCharacter('.');
    builder.AddString(decimal_rep + 1);
    // NOTE(review): significant_digits (int) - strlen (size_t) is
    // computed in size_t; callers guarantee strlen(decimal_rep) <=
    // significant_digits so the difference never goes negative — confirm
    // if new call sites are added.
    builder.AddPadding('0', significant_digits - strlen(decimal_rep));
  }

  builder.AddCharacter('e');
  builder.AddCharacter(negative_exponent ? '-' : '+');
  builder.AddFormatted("%d", exponent);
  return builder.Finalize();
}
+
+
+
// Convert a double to exponential notation with 'f' fractional digits
// (Number.prototype.toExponential). f == -1 means f was undefined in
// JavaScript, in which case the shortest representation is used.
char* DoubleToExponentialCString(double value, int f) {
  // f might be -1 to signal that f was undefined in JavaScript.
  ASSERT(f >= -1 && f <= 20);

  bool negative = false;
  if (value < 0) {
    value = -value;
    negative = true;
  }

  // Find a sufficiently precise decimal representation of n.
  int decimal_point;
  int sign;
  char* decimal_rep = NULL;
  if (f == -1) {
    // Shortest representation (dtoa mode 0); derive f from its length.
    decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
    f = strlen(decimal_rep) - 1;
  } else {
    // Exactly f + 1 significant digits (dtoa mode 2).
    decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
  }
  int decimal_rep_length = strlen(decimal_rep);
  ASSERT(decimal_rep_length > 0);
  ASSERT(decimal_rep_length <= f + 1);
  USE(decimal_rep_length);

  int exponent = decimal_point - 1;
  char* result =
      CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);

  freedtoa(decimal_rep);

  return result;
}
+
+
// Convert a double to a string with 'p' significant digits
// (Number.prototype.toPrecision); uses exponential notation when the
// exponent falls outside [-6, p).
char* DoubleToPrecisionCString(double value, int p) {
  ASSERT(p >= 1 && p <= 21);

  bool negative = false;
  if (value < 0) {
    value = -value;
    negative = true;
  }

  // Find a sufficiently precise decimal representation of n.
  // dtoa mode 2: p significant digits.
  int decimal_point;
  int sign;
  char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
  int decimal_rep_length = strlen(decimal_rep);
  ASSERT(decimal_rep_length <= p);

  int exponent = decimal_point - 1;

  char* result = NULL;

  if (exponent < -6 || exponent >= p) {
    result =
        CreateExponentialRepresentation(decimal_rep, exponent, negative, p);
  } else {
    // Use fixed notation.
    //
    // Leave room in the result for appending a minus, a period and in
    // the case where decimal_point is not positive for a zero in
    // front of the period.
    unsigned result_size = (decimal_point <= 0)
        ? -decimal_point + p + 3
        : p + 2;
    StringBuilder builder(result_size + 1);
    if (negative) builder.AddCharacter('-');
    if (decimal_point <= 0) {
      // 0.00ddd form: leading zeros between the point and the digits.
      builder.AddString("0.");
      builder.AddPadding('0', -decimal_point);
      builder.AddString(decimal_rep);
      builder.AddPadding('0', p - decimal_rep_length);
    } else {
      // ddd.ddd form: split the digit string at the decimal point.
      const int m = Min(decimal_rep_length, decimal_point);
      builder.AddSubstring(decimal_rep, m);
      builder.AddPadding('0', decimal_point - decimal_rep_length);
      if (decimal_point < p) {
        builder.AddCharacter('.');
        const int extra = negative ? 2 : 1;
        if (decimal_rep_length > decimal_point) {
          const int len = strlen(decimal_rep + decimal_point);
          const int n = Min(len, p - (builder.position() - extra));
          builder.AddSubstring(decimal_rep + decimal_point, n);
        }
        builder.AddPadding('0', extra + (p - builder.position()));
      }
    }
    result = builder.Finalize();
  }

  freedtoa(decimal_rep);
  return result;
}
+
+
// Convert a double to a string in the given radix (2..36), as used by
// Number.prototype.toString(radix). Returns a heap-allocated C string.
char* DoubleToRadixCString(double value, int radix) {
  ASSERT(radix >= 2 && radix <= 36);

  // Character array used for conversion.
  static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";

  // Buffer for the integer part of the result. 1024 chars is enough
  // for max integer value in radix 2. We need room for a sign too.
  static const int kBufferSize = 1100;
  char integer_buffer[kBufferSize];
  integer_buffer[kBufferSize - 1] = '\0';

  // Buffer for the decimal part of the result. We only generate up
  // to kBufferSize - 1 chars for the decimal part.
  char decimal_buffer[kBufferSize];
  decimal_buffer[kBufferSize - 1] = '\0';

  // Make sure the value is positive.
  bool is_negative = value < 0.0;
  if (is_negative) value = -value;

  // Get the integer part and the decimal part.
  double integer_part = floor(value);
  double decimal_part = value - integer_part;

  // Convert the integer part starting from the back. Always generate
  // at least one digit.
  int integer_pos = kBufferSize - 2;
  do {
    integer_buffer[integer_pos--] =
        chars[static_cast<int>(fmod(integer_part, radix))];
    integer_part /= radix;
  } while (integer_part >= 1.0);
  // Sanity check.
  ASSERT(integer_pos > 0);
  // Add sign if needed.
  if (is_negative) integer_buffer[integer_pos--] = '-';

  // Convert the decimal part. Repeatedly multiply by the radix to
  // generate the next char. Never generate more than kBufferSize - 1
  // chars.
  //
  // TODO(1093998): We will often generate a full decimal_buffer of
  // chars because hitting zero will often not happen. The right
  // solution would be to continue until the string representation can
  // be read back and yield the original value. To implement this
  // efficiently, we probably have to modify dtoa.
  int decimal_pos = 0;
  while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
    decimal_part *= radix;
    decimal_buffer[decimal_pos++] =
        chars[static_cast<int>(floor(decimal_part))];
    decimal_part -= floor(decimal_part);
  }
  decimal_buffer[decimal_pos] = '\0';

  // Compute the result size.
  int integer_part_size = kBufferSize - 2 - integer_pos;
  // Make room for zero termination.
  unsigned result_size = integer_part_size + decimal_pos;
  // If the number has a decimal part, leave room for the period.
  if (decimal_pos > 0) result_size++;
  // Allocate result and fill in the parts.
  StringBuilder builder(result_size + 1);
  builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
  if (decimal_pos > 0) builder.AddCharacter('.');
  builder.AddSubstring(decimal_buffer, decimal_pos);
  return builder.Finalize();
}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CONVERSIONS_H_
+#define V8_CONVERSIONS_H_
+
+namespace v8 { namespace internal {
+
+// The fast double-to-int conversion routine does not guarantee
+// rounding towards zero.
+// The result is unspecified if x is infinite or NaN, or if the rounded
+// integer value is outside the range of type int.
+static inline int FastD2I(double x);
+
+
// Fast conversion from int to double (exact: every int fits in a double).
static inline double FastI2D(int x) {
  // There is no rounding involved in converting an integer to a
  // double, so this code should compile to a few instructions without
  // any FPU pipeline stalls.
  return static_cast<double>(x);
}
+
+
// Fast conversion from unsigned int to double (exact, see FastI2D).
static inline double FastUI2D(unsigned x) {
  // There is no rounding involved in converting an unsigned integer to a
  // double, so this code should compile to a few instructions without
  // any FPU pipeline stalls.
  return static_cast<double>(x);
}
+
+
+// This function should match the exact semantics of ECMA-262 9.4.
+static inline double DoubleToInteger(double x);
+
+
+// This function should match the exact semantics of ECMA-262 9.5.
+static inline int32_t DoubleToInt32(double x);
+
+
+// This function should match the exact semantics of ECMA-262 9.6.
// This function should match the exact semantics of ECMA-262 9.6.
// ToUint32 is ToInt32 reinterpreted as unsigned: both reduce modulo 2^32,
// so a single cast of the ToInt32 result suffices.
static inline uint32_t DoubleToUint32(double x) {
  return static_cast<uint32_t>(DoubleToInt32(x));
}
+
+
+// Returns the value (0 .. 15) of a hexadecimal character c.
+// If c is not a legal hexadecimal character, returns a value < 0.
+int HexValue(uc32 c);
+
+
+// Enumeration for allowing octals and ignoring junk when converting
+// strings to numbers.
// Enumeration for allowing octals and ignoring junk when converting
// strings to numbers.  The values are distinct bits and may be
// combined with bitwise or.
enum ConversionFlags {
  NO_FLAGS = 0,
  ALLOW_HEX = 1,
  ALLOW_OCTALS = 2,
  ALLOW_TRAILING_JUNK = 4
};
+
+
+// Convert from Number object to C integer.
+static inline int32_t NumberToInt32(Object* number);
+static inline uint32_t NumberToUint32(Object* number);
+
+
+// Converts a string into a double value according to ECMA-262 9.3.1
+double StringToDouble(const char* str, int flags, double empty_string_val = 0);
+double StringToDouble(String* str, int flags, double empty_string_val = 0);
+
+// Converts a string into an integer.
+int StringToInt(String* str, int index, int radix, double* value);
+int StringToInt(const char* str, int index, int radix, double* value);
+
+// Converts a double to a string value according to ECMA-262 9.8.1.
+// The buffer should be large enough for any floating point number.
+// 100 characters is enough.
+const char* DoubleToCString(double value, Vector<char> buffer);
+
+// Convert an int to a null-terminated string. The returned string is
+// located inside the buffer, but not necessarily at the start.
+const char* IntToCString(int n, Vector<char> buffer);
+
+// Additional number to string conversions for the number type.
+// The caller is responsible for calling free on the returned pointer.
+char* DoubleToFixedCString(double value, int f);
+char* DoubleToExponentialCString(double value, int f);
+char* DoubleToPrecisionCString(double value, int f);
+char* DoubleToRadixCString(double value, int radix);
+
+} } // namespace v8::internal
+
+#endif // V8_CONVERSIONS_H_
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "counters.h"
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
// Embedder-supplied counter lookup callback; NULL until
// StatsTable::SetCounterFunction is called.
CounterLookupCallback StatsTable::lookup_function_ = NULL;
+
// Constructs a timer counter.  The name is copied and stored with a
// "t:" prefix, which marks the entry as a timer in the external stats
// table (plain counters use "c:" — see StatsCounter's constructor).
StatsCounterTimer::StatsCounterTimer(const wchar_t* name)
    : start_time_(0),  // initialize to avoid compiler complaints
      stop_time_(0) {  // initialize to avoid compiler complaints
  int len = wcslen(name);
  // We prepend the name with 't:' to indicate that it is a timer.
  // (A previous version of this comment said 'c.'; the code writes "t:".)
  name_ = NewArray<wchar_t>(len+3);  // two prefix chars + terminating NUL
  wcscpy(name_, L"t:");
  wcscpy(&name_[2], name);
}
+
+// Start the timer.
+void StatsCounterTimer::Start() {
+ if (!Enabled())
+ return;
+ stop_time_ = 0;
+ start_time_ = OS::Ticks();
+}
+
+// Stop the timer and record the results.
+void StatsCounterTimer::Stop() {
+ if (!Enabled())
+ return;
+ stop_time_ = OS::Ticks();
+ Record();
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COUNTERS_H_
+#define V8_COUNTERS_H_
+
+#include <wchar.h>
+
+namespace v8 { namespace internal {
+
+// StatsCounters is an interface for plugging into external
+// counters for monitoring. Counters can be looked up and
+// manipulated by name.
+
// StatsTable is the static interface to embedder-provided counter
// storage: V8 never allocates counter memory itself, it only resolves
// counter names to locations through the registered callback.
class StatsTable : public AllStatic {
 public:
  // Register an application-defined function where
  // counters can be looked up.
  static void SetCounterFunction(CounterLookupCallback f) {
    lookup_function_ = f;
  }

  // Returns true if an embedder has registered a lookup callback.
  static bool HasCounterFunction() {
    return lookup_function_ != NULL;
  }

  // Lookup the location of a counter by name. If the lookup
  // is successful, returns a non-NULL pointer for writing the
  // value of the counter. Each thread calling this function
  // may receive a different location to store its counter.
  // The return value must not be cached and re-used across
  // threads, although a single thread is free to cache it.
  static int *FindLocation(const wchar_t* name) {
    if (!lookup_function_) return NULL;
    return lookup_function_(name);
  }

 private:
  // The embedder-registered callback; NULL when no stats table is in use.
  static CounterLookupCallback lookup_function_;
};
+
+// StatsCounters are dynamically created values which can be tracked in
+// the StatsTable. They are designed to be lightweight to create and
+// easy to use.
+//
+// The implementation of the StatsTable is external to this module.
+//
+// Example usage:
+// {
+// StatsCounter request_count("RequestCount");
+// request_count.Increment();
+// }
+//
+// Internally, a counter represents a value in a row of a StatsTable.
+// The row has a 32bit value for each process/thread in the table and also
+// a name (stored in the table metadata). Since the storage location can be
+// thread-specific, this class cannot be shared across threads.
+//
+
+// StatsCounter represents a counter in the StatsTable class.
+class StatsCounter BASE_EMBEDDED {
+ public:
+ // Create a StatsCounter object.
+ explicit StatsCounter(const wchar_t* name, int id) :
+ lookup_done_(false),
+ ptr_(NULL),
+ id_(id) {
+ int len = wcslen(name);
+ // we prepend the name with 'c:' to indicate that it is a counter.
+ name_ = NewArray<wchar_t>(len+3);
+ wcscpy(name_, L"c:");
+ wcscpy(&name_[2], name);
+ };
+
+ ~StatsCounter() {
+ DeleteArray(name_);
+ }
+
+ // Sets the counter to a specific value.
+ void Set(int value) {
+ int* loc = GetPtr();
+ if (loc) *loc = value;
+ }
+
+ // Increments the counter.
+ void Increment() {
+ int* loc = GetPtr();
+ if (loc) (*loc)++;
+ }
+
+ void Increment(int value) {
+ int* loc = GetPtr();
+ if (loc)
+ (*loc) += value;
+ }
+
+ // Decrements the counter.
+ void Decrement() {
+ int* loc = GetPtr();
+ if (loc) (*loc)--;
+ }
+
+ void Decrement(int value) {
+ int* loc = GetPtr();
+ if (loc) (*loc) -= value;
+ }
+
+ // Is this counter enabled?
+ // Returns false if table is full.
+ bool Enabled() {
+ return GetPtr() != NULL;
+ }
+
+ // Get the internal pointer to the counter. This is used
+ // by the code generator to emit code that manipulates a
+ // given counter without calling the runtime system.
+ int* GetInternalPointer() {
+ int* loc = GetPtr();
+ ASSERT(loc != NULL);
+ return loc;
+ }
+
+ int Id() {
+ return id_;
+ }
+
+ protected:
+ StatsCounter() :
+ lookup_done_(false),
+ ptr_(NULL) {
+ }
+
+ // Returns the cached address of this counter location.
+ int* GetPtr() {
+ if (lookup_done_)
+ return ptr_;
+ lookup_done_ = true;
+ ptr_ = StatsTable::FindLocation(name_);
+ return ptr_;
+ }
+
+ wchar_t* name_;
+ bool lookup_done_;
+ int* ptr_;
+ int id_;
+};
+
+// A StatsCounterTimer is a StatsCounter which keeps a timer during
+// the scope of the StatsCounterTimer. On destruction, it will record
+// its time measurement.
+class StatsCounterTimer : StatsCounter {
+ public:
+ // Constructs and starts the timer.
+ explicit StatsCounterTimer(const wchar_t* name);
+
+ // Start the timer.
+ void Start();
+
+ // Stop the timer and record the results.
+ void Stop();
+
+ // Returns true if the timer is running.
+ bool Running() {
+ return Enabled() && start_time_ != 0 && stop_time_ == 0;
+ }
+
+ private:
+ // Compute the delta between start and stop, in milliseconds.
+ void Record() {
+ int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
+ Increment(milliseconds);
+ }
+
+ int64_t start_time_;
+ int64_t stop_time_;
+};
+
+
+// A StatsRate is a combination of both a timer and a counter so that
+// several statistics can be produced:
+// min, max, avg, count, total
+class StatsRate BASE_EMBEDDED {
+ public:
+ // Constructs and starts the timer.
+ explicit StatsRate(const wchar_t* name, int id) :
+ timer_(name),
+ counter_(name, id) {
+ }
+
+ // Starts the rate timer.
+ void Start() {
+ timer_.Start();
+ }
+
+ // Stops the rate and records the time.
+ void Stop() {
+ if (timer_.Running()) {
+ timer_.Stop();
+ counter_.Increment();
+ }
+ }
+
+ // Access to the timer.
+ StatsCounterTimer& timer() { return timer_; }
+
+ private:
+ StatsCounterTimer timer_;
+ StatsCounter counter_;
+};
+
+
+// Helper class for scoping a timer.
+class StatsTimerScope BASE_EMBEDDED {
+ public:
+ explicit StatsTimerScope(StatsCounterTimer* timer) :
+ timer_(timer) {
+ timer_->Start();
+ }
+ ~StatsTimerScope() {
+ timer_->Stop();
+ }
+ private:
+ StatsCounterTimer* timer_;
+};
+
+// Helper class for scoping a rate.
+class StatsRateScope BASE_EMBEDDED {
+ public:
+ explicit StatsRateScope(StatsRate* rate) :
+ rate_(rate) {
+ rate_->Start();
+ }
+ ~StatsRateScope() {
+ rate_->Stop();
+ }
+ private:
+ StatsRate* rate_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_COUNTERS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for arm independent of OS goes here.
+
+#include <sys/syscall.h> // for cache flushing.
+
+#include "v8.h"
+
+#include "cpu.h"
+
+namespace v8 { namespace internal {
+
// One-time CPU feature setup; on ARM there is nothing to initialize.
void CPU::Setup() {
  // Nothing to do.
}
+
+
// Flushes the instruction cache for [start, start + size) by invoking
// the Linux __ARM_NR_cacheflush system call directly via inline
// assembly.  A no-op when not compiling for real ARM hardware (i.e.
// when building the ARM-emulator target).
void CPU::FlushICache(void* start, size_t size) {
#if !defined (__arm__)
  // Not generating ARM instructions for C-code. This means that we are
  // building an ARM emulator based target. No I$ flushes are necessary.
#else
  // Ideally, we would call
  //   syscall(__ARM_NR_cacheflush, start,
  //           reinterpret_cast<intptr_t>(start) + size, 0);
  // however, syscall(int, ...) is not supported on all platforms, especially
  // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.

  // Syscall arguments: begin address, end address, flags (0).
  register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
  register uint32_t end asm("a2") =
      reinterpret_cast<uint32_t>(start) + size;
  register uint32_t flg asm("a3") = 0;
  #ifdef __ARM_EABI__
  // Under EABI the syscall number is passed in r7 and the swi operand is 0.
  register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
  #if defined (__arm__) && !defined(__thumb__)
  // __arm__ may be defined in thumb mode.
  asm volatile(
      "swi 0x0"
      : "=r" (beg)
      : "0" (beg), "r" (end), "r" (flg), "r" (scno));
  #else
  // Thumb mode: switch to ARM mode around the swi, then back to thumb.
  asm volatile(
      "@ Enter ARM Mode \n\t"
      "adr r3, 1f \n\t"
      "bx r3 \n\t"
      ".ALIGN 4 \n\t"
      ".ARM \n"
      "1: swi 0x0 \n\t"
      "@ Enter THUMB Mode\n\t"
      "adr r3, 2f+1 \n\t"
      "bx r3 \n\t"
      ".THUMB \n"
      "2: \n\t"
      : "=r" (beg)
      : "0" (beg), "r" (end), "r" (flg), "r" (scno)
      : "r3");
  #endif
  #else
  // OABI: the syscall number is encoded in the swi instruction itself.
  #if defined (__arm__) && !defined(__thumb__)
  // __arm__ may be defined in thumb mode.
  asm volatile(
      "swi %1"
      : "=r" (beg)
      : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
  #else
  // Do not use the value of __ARM_NR_cacheflush in the inline assembly
  // below, because the thumb mode value would be used, which would be
  // wrong, since we switch to ARM mode before executing the swi instruction
  asm volatile(
      "@ Enter ARM Mode \n\t"
      "adr r3, 1f \n\t"
      "bx r3 \n\t"
      ".ALIGN 4 \n\t"
      ".ARM \n"
      "1: swi 0x9f0002 \n"
      "@ Enter THUMB Mode\n\t"
      "adr r3, 2f+1 \n\t"
      "bx r3 \n\t"
      ".THUMB \n"
      "2: \n\t"
      : "=r" (beg)
      : "0" (beg), "r" (end), "r" (flg)
      : "r3");
  #endif
  #endif
#endif
}
+
+
// Triggers a breakpoint trap (bkpt) so an attached debugger stops here.
// Unimplemented when building the ARM emulator target.
void CPU::DebugBreak() {
#if !defined (__arm__)
  UNIMPLEMENTED();  // when building ARM emulator target
#else
  asm volatile("bkpt 0");
#endif
}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for ia32 independent of OS goes here.
+
+#include "v8.h"
+
+#include "cpu.h"
+
+namespace v8 { namespace internal {
+
// One-time CPU feature setup; on ia32 there is nothing to initialize.
void CPU::Setup() {
  // Nothing to do.
}
+
+
void CPU::FlushICache(void* start, size_t size) {
  // No need to flush the instruction cache on Intel. On Intel instruction
  // cache flushing is only necessary when multiple cores are running the
  // same code simultaneously. V8 (and JavaScript) is single threaded and
  // when code is patched on an intel CPU the core performing the patching
  // will have its own instruction cache updated automatically.

  // If flushing of the instruction cache becomes necessary Windows has the
  // API function FlushInstructionCache.
}
+
+
// Triggers a breakpoint trap (int 3) so an attached debugger stops here.
void CPU::DebugBreak() {
#ifdef WIN32
  // To avoid Visual Studio runtime support the following code can be used
  // instead
  // __asm { int 3 }
  __debugbreak();
#else
  asm("int $3");
#endif
}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This module contains the architecture-specific code. This make the rest of
+// the code less dependent on differences between different processor
+// architecture.
+// The classes have the same definition for all architectures. The
+// implementation for a particular architecture is put in cpu_<arch>.cc.
+// The build system then uses the implementation for the target architecture.
+//
+
+#ifndef V8_CPU_H_
+#define V8_CPU_H_
+
+namespace v8 { namespace internal {
+
+// ----------------------------------------------------------------------------
+// CPU
+//
+// This class has static methods for the architecture specific functions. Add
+// methods here to cope with differences between the supported architectures.
+//
+// For each architecture the file cpu_<arch>.cc contains the implementation of
+// these functions.
+
class CPU : public AllStatic {
 public:
  // Initializes the cpu architecture support. Called once at VM startup.
  static void Setup();

  // Flush the instruction cache for the range [start, start + size).
  static void FlushICache(void* start, size_t size);

  // Try to activate a system level debugger (breakpoint trap).
  static void DebugBreak();
};
+
+} } // namespace v8::internal
+
+#endif // V8_CPU_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// -------------------------------------------------------------------
+
+// This file contains date support implemented in JavaScript.
+
+
+// Keep reference to original values of some global properties. This
+// has the added benefit that the code in this file is isolated from
+// changes to these properties.
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
// changes to these properties.  ($Math_floor/$Math_abs are aliases set
// up elsewhere in the builtins.)
const $Date = global.Date;
const $floor = $Math_floor;
const $abs = $Math_abs;
+
+
+// ECMA 262 - 15.9.1.2
// ECMA 262 - 15.9.1.2
// Day number (whole days since the epoch) containing the time value.
function Day(time) {
  return $floor(time/msPerDay);
};
+
+
+// ECMA 262 - 5.2
// ECMA 262 - 5.2
// Mathematical modulo: for a positive remainder the result is always in
// [0, remainder), unlike the built-in % operator, which keeps the sign
// of the dividend.
function Modulo(value, remainder) {
  var mod = value % remainder;
  if (mod < 0) return mod + remainder;
  return mod;
};
+
+
// Milliseconds elapsed since the start of the day containing time.
function TimeWithinDay(time) {
  return Modulo(time, msPerDay);
};
+
+
+// ECMA 262 - 15.9.1.3
// ECMA 262 - 15.9.1.3
// Number of days in the given Gregorian year: 366 for leap years
// (divisible by 4, except centuries not divisible by 400), 365 otherwise.
function DaysInYear(year) {
  var is_leap = (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0));
  return is_leap ? 366 : 365;
};
+
+
// Day number of January 1 of the given year, relative to the epoch
// (January 1, 1970).  The correction terms implement the Gregorian
// leap year rules (every 4 years, except centuries, except every 400).
function DayFromYear(year) {
  return 365 * (year-1970)
      + $floor((year-1969)/4)
      - $floor((year-1901)/100)
      + $floor((year-1601)/400);
};
+
+
// Time value (ms since epoch) at the start of the given year.
function TimeFromYear(year) {
  return msPerDay * DayFromYear(year);
};
+
+
// Year containing the given time value, via the Julian day conversion.
function YearFromTime(time) {
  return FromJulianDay(Day(time) + kDayZeroInJulianDay).year;
};
+
+
// 1 if the given time value falls in a leap year, else 0 (ECMA 15.9.1.3).
function InLeapYear(time) {
  return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
};
+
+
+// ECMA 262 - 15.9.1.4
// ECMA 262 - 15.9.1.4
// Zero-based month containing the given time value.
function MonthFromTime(time) {
  return FromJulianDay(Day(time) + kDayZeroInJulianDay).month;
};
+
+
// Zero-based day-of-year for the given time value.
function DayWithinYear(time) {
  return Day(time) - DayFromYear(YearFromTime(time));
};
+
+
+// ECMA 262 - 15.9.1.5
// ECMA 262 - 15.9.1.5
// Day-of-month (1-based) for the given time value.
function DateFromTime(time) {
  return FromJulianDay(Day(time) + kDayZeroInJulianDay).date;
};
+
+
+// ECMA 262 - 15.9.1.9
// ECMA 262 - 15.9.1.9
function EquivalentYear(year) {
  // Returns an equivalent year in the range [1956-2000] matching
  // - leap year.
  // - week day of first day.
  // (The base is 1956 for leap years, 1967 otherwise; the weekday term
  // adds one of {0, 4, 8, 12, 16, 20, 24}, so the computed value
  // actually lies in [1956, 1991].)
  var time = TimeFromYear(year);
  return (InLeapYear(time) == 0 ? 1967 : 1956) + (WeekDay(time) * 12) % 28;
};
+
+
function EquivalentTime(t) {
  // The issue here is that some library calls don't work right for dates
  // that cannot be represented using a signed 32 bit integer (measured in
  // whole seconds based on the 1970 epoch).
  // We solve this by mapping the time to a year with same leap-year-ness
  // and same starting day for the year.
  // As an optimization we avoid finding an equivalent year in the common
  // case.  We are measuring in ms here, so the 32 bit signed seconds range
  // corresponds to about +-2.1e12 ms, matching the guard below.
  // (A previous version of this comment said 2.1e20.)
  if (t >= -2.1e12 && t <= 2.1e12) return t;
  var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
  return TimeClip(MakeDate(day, TimeWithinDay(t)));
};
+
+
// Cached local standard-time offset from UTC (in ms); computed lazily.
var local_time_offset;

// Returns the local time offset, querying the runtime only on first use.
function LocalTimeOffset() {
  if (IS_UNDEFINED(local_time_offset)) {
    local_time_offset = %DateLocalTimeOffset(0);
  }
  return local_time_offset;
};
+
+
// One-element cache for DaylightSavingsOffset: the time most recently
// asked about and the offset computed for it.
var daylight_cache_time = $NaN;
var daylight_cache_offset;

// Returns the daylight savings offset (ms) in effect at time t, caching
// the last answer since the runtime call is comparatively expensive.
function DaylightSavingsOffset(t) {
  if (t == daylight_cache_time) {
    return daylight_cache_offset;
  }
  // EquivalentTime maps t into a range the C library can handle.
  var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
  daylight_cache_time = t;
  daylight_cache_offset = offset;
  return offset;
};
+
+
+var timezone_cache_time = $NaN;
+var timezone_cache_timezone;
+
+function LocalTimezone(t) {
+ if(t == timezone_cache_time) {
+ return timezone_cache_timezone;
+ }
+ var timezone = %DateLocalTimezone(EquivalentTime(t));
+ timezone_cache_time = t;
+ timezone_cache_timezone = timezone;
+ return timezone;
+};
+
+
// Day of the week (0 = Sunday); day 0 of the epoch was a Thursday,
// hence the +4 correction.
function WeekDay(time) {
  return Modulo(Day(time) + 4, 7);
};
+
+
// Converts a UTC time value to local time (standard offset + DST).
function LocalTime(time) {
  if ($isNaN(time)) return time;
  return time + LocalTimeOffset() + DaylightSavingsOffset(time);
};
+
+
// Converts a local time value back to UTC (inverse of LocalTime); the
// DST offset is computed for the standard-offset-adjusted time.
function UTC(time) {
  if ($isNaN(time)) return time;
  var tmp = time - LocalTimeOffset();
  return tmp - DaylightSavingsOffset(tmp);
};
+
+
+// ECMA 262 - 15.9.1.10
// ECMA 262 - 15.9.1.10
// Hour of day (0..23) for the given time value.
function HourFromTime(time) {
  return Modulo($floor(time / msPerHour), HoursPerDay);
};
+
+
// Minute within the hour (0..59) for the given time value.
function MinFromTime(time) {
  return Modulo($floor(time / msPerMinute), MinutesPerHour);
};
+
+
// Second within the minute (0..59) for the given time value.
function SecFromTime(time) {
  return Modulo($floor(time / msPerSecond), SecondsPerMinute);
};
+
+
// Millisecond within the second (0..999) for the given time value.
function msFromTime(time) {
  return Modulo(time, msPerSecond);
};
+
+
+// ECMA 262 - 15.9.1.11
// ECMA 262 - 15.9.1.11
// Combines hour, min, sec and ms into a millisecond count within a day.
// Returns NaN if any component is not finite.
function MakeTime(hour, min, sec, ms) {
  if (!($isFinite(hour) && $isFinite(min) && $isFinite(sec) && $isFinite(ms))) {
    return $NaN;
  }
  return TO_INTEGER(hour) * msPerHour
      + TO_INTEGER(min) * msPerMinute
      + TO_INTEGER(sec) * msPerSecond
      + TO_INTEGER(ms);
};
+
+
+// ECMA 262 - 15.9.1.12
// ECMA 262 - 15.9.1.12
// Total milliseconds in the given year (accounts for leap years).
function TimeInYear(year) {
  return DaysInYear(year) * msPerDay;
};
+
+
+// Compute modified Julian day from year, month, date.
+// The missing days in 1582 are ignored for JavaScript compatibility.
// Compute modified Julian day from year, month (0-based), date.
// The missing days in 1582 are ignored for JavaScript compatibility.
// January and February are treated as months 13/14 of the previous
// year, which is the standard trick for the Julian day formula.
function ToJulianDay(year, month, date) {
  var jy = (month > 1) ? year : year - 1;
  var jm = (month > 1) ? month + 2 : month + 14;
  var ja = $floor(0.01*jy);  // century correction term
  return $floor($floor(365.25*jy) + $floor(30.6001*jm) + date + 1720995) + 2 - ja + $floor(0.25*ja);
};
+
+
// Table mapping a day index within a four-year cycle to a packed
// (year-in-cycle, month, day-of-month) value; built lazily by
// CalculateDateTable and used by FromJulianDay for years 1970-2080.
var four_year_cycle_table;


// Builds the four-year-cycle lookup table (1461 days):
//   [0, 365]      the leap year of the cycle (year bits 0)
//   [366, 730]    first year after the leap year
//   [731, 1095]   second year after the leap year
//   [1096, 1460]  third year after the leap year
// Each entry packs (year << kYearShift) + (month << kMonthShift) + day.
// Fix: removed the unused local 'cumulative'.
function CalculateDateTable() {
  var month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
  var four_year_cycle_table = new $Array(1461);

  var position = 0;       // day index within a non-leap year
  var leap_position = 0;  // day index within the leap year
  for (var month = 0; month < 12; month++) {
    var length = month_lengths[month];
    for (var day = 1; day <= length; day++) {
      // Leap-year entry (implicit year value 0).
      four_year_cycle_table[leap_position] =
          (month << kMonthShift) + day;
      // The same calendar day in each of the three following years.
      four_year_cycle_table[366 + position] =
          (1 << kYearShift) + (month << kMonthShift) + day;
      four_year_cycle_table[731 + position] =
          (2 << kYearShift) + (month << kMonthShift) + day;
      four_year_cycle_table[1096 + position] =
          (3 << kYearShift) + (month << kMonthShift) + day;
      leap_position++;
      position++;
    }
    // Insert February 29 into the leap-year portion only.
    if (month == 1) {
      four_year_cycle_table[leap_position++] =
          (month << kMonthShift) + 29;
    }
  }
  return four_year_cycle_table;
};
+
+
+
+// Constructor for creating objects holding year, month, and date.
+// Introduced to ensure the two return points in FromJulianDay match same map.
+function DayTriplet(year, month, date) {
+ this.year = year;
+ this.month = month; // 0-based (0 = January), matching the table layout.
+ this.date = date;
+}
+
+// Compute year, month, and day from modified Julian day.
+// The missing days in 1582 are ignored for JavaScript compatibility.
+function FromJulianDay(julian) {
+ // Avoid floating point and non-Smi maths in common case. This is also a period of
+ // time where leap years are very regular. The range is not too large to avoid overflow
+ // when doing the multiply-to-divide trick.
+ if (julian > kDayZeroInJulianDay &&
+ (julian - kDayZeroInJulianDay) < 40177) { // 1970 - 2080
+ if (!four_year_cycle_table)
+ four_year_cycle_table = CalculateDateTable();
+ var jsimple = (julian - kDayZeroInJulianDay) + 731; // Day 0 is 1st January 1968
+ var y = 1968;
+ // Divide by 1461 by multiplying with 22967 and shifting down by 25!
+ var after_1968 = (jsimple * 22967) >> 25;
+ y += after_1968 << 2;
+ jsimple -= 1461 * after_1968;
+ // The remaining day count indexes the packed (year, month, day) table.
+ var four_year_cycle = four_year_cycle_table[jsimple];
+ return new DayTriplet(y + (four_year_cycle >> kYearShift),
+ (four_year_cycle & kMonthMask) >> kMonthShift,
+ four_year_cycle & kDayMask);
+ }
+ // Slow path: invert the astronomical Julian day formula used by
+ // ToJulianDay, including the Gregorian century correction (jalpha).
+ var jalpha = $floor((julian - 1867216.25) / 36524.25);
+ var jb = julian + 1 + jalpha - $floor(0.25 * jalpha) + 1524;
+ var jc = $floor(6680.0 + ((jb-2439870) - 122.1)/365.25);
+ var jd = $floor(365 * jc + (0.25 * jc));
+ var je = $floor((jb - jd)/30.6001);
+ var m = je - 1;
+ if (m > 12) m -= 13;
+ var y = jc - 4715;
+ if (m > 2) { --y; --m; }
+ var d = jb - jd - $floor(30.6001 * je);
+ return new DayTriplet(y, m, d);
+};
+
+// Compute number of days given a year, month, date.
+// Note that month and date can lie outside the normal range.
+// For example:
+// MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20)
+// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
+// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
+// Returns NaN if any argument is non-finite (ECMA 262 - 15.9.1.13 MakeDay).
+function MakeDay(year, month, date) {
+ if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
+
+ // Conversion to integers.
+ year = TO_INTEGER(year);
+ month = TO_INTEGER(month);
+ date = TO_INTEGER(date);
+
+ // Overflow months into year. The second branch fixes up the sign of the
+ // remainder for negative months so 0 <= month < 12 afterwards.
+ year = year + $floor(month/12);
+ month = month % 12;
+ if (month < 0) {
+ month += 12;
+ }
+
+ // Return days relative to Jan 1 1970.
+ return ToJulianDay(year, month, date) - kDayZeroInJulianDay;
+};
+
+
+// ECMA 262 - 15.9.1.13
+// Combine a day number and a ms-within-day count into a time value.
+function MakeDate(day, time) {
+ if (!$isFinite(day)) return $NaN;
+ if (!$isFinite(time)) return $NaN;
+ return day * msPerDay + time;
+};
+
+
+// ECMA 262 - 15.9.1.14
+// Clip a time value to the representable range (+-8.64e15 ms, i.e.
+// 100,000,000 days either side of the epoch), returning NaN outside it.
+function TimeClip(time) {
+ if (!$isFinite(time)) return $NaN;
+ if ($abs(time) > 8.64E15) return $NaN;
+ return TO_INTEGER(time);
+};
+
+
+// Install the Date constructor body: ECMA 262 - 15.9.3 when invoked with
+// 'new', ECMA 262 - 15.9.2 (current time as a string) when called plainly.
+%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
+ if (%IsConstructCall(this)) {
+ // ECMA 262 - 15.9.3
+ var argc = %_ArgumentsLength();
+ if (argc == 0) {
+ // new Date(): current time.
+ %_SetValueOf(this, %DateCurrentTime(argc));
+ return;
+ }
+ if (argc == 1) {
+ // According to ECMA 262, no hint should be given for this
+ // conversion. However, ToPrimitive defaults to String Hint
+ // for Date objects which will lose precision when the Date
+ // constructor is called with another Date object as its
+ // argument. We therefore use Number Hint for the conversion
+ // (which is the default for everything else than Date
+ // objects). This makes us behave like KJS and SpiderMonkey.
+ var time = ToPrimitive(year, NUMBER_HINT);
+ if (IS_STRING(time)) {
+ %_SetValueOf(this, DateParse(time));
+ } else {
+ %_SetValueOf(this, TimeClip(ToNumber(time)));
+ }
+ return;
+ }
+ // Two or more arguments: year, month [, date, hours, minutes, seconds, ms].
+ year = ToNumber(year);
+ month = ToNumber(month);
+ date = argc > 2 ? ToNumber(date) : 1;
+ hours = argc > 3 ? ToNumber(hours) : 0;
+ minutes = argc > 4 ? ToNumber(minutes) : 0;
+ seconds = argc > 5 ? ToNumber(seconds) : 0;
+ ms = argc > 6 ? ToNumber(ms) : 0;
+ // Two-digit years 0-99 map to 1900-1999 (15.9.3.1 step 8).
+ year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+ ? 1900 + TO_INTEGER(year) : year;
+ var day = MakeDay(year, month, date);
+ var time = MakeTime(hours, minutes, seconds, ms);
+ // The components are interpreted as local time, hence the UTC conversion.
+ %_SetValueOf(this, TimeClip(UTC(MakeDate(day, time))));
+ } else {
+ // ECMA 262 - 15.9.2
+ return (new $Date()).toString();
+ }
+});
+
+
+// Helper functions.
+// Return the internal time value of aDate, throwing TypeError for
+// non-Date receivers.
+function GetTimeFrom(aDate) {
+ if (IS_DATE(aDate)) return %_ValueOf(aDate);
+ throw new $TypeError('this is not a Date object.');
+};
+
+
+function GetMillisecondsFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return msFromTime(LocalTime(t));
+};
+
+
+function GetUTCMillisecondsFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return msFromTime(t);
+};
+
+
+function GetSecondsFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return SecFromTime(LocalTime(t));
+};
+
+
+function GetUTCSecondsFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return SecFromTime(t);
+};
+
+
+function GetMinutesFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return MinFromTime(LocalTime(t));
+};
+
+
+function GetUTCMinutesFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return MinFromTime(t);
+};
+
+
+function GetHoursFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return HourFromTime(LocalTime(t));
+};
+
+
+function GetUTCHoursFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return HourFromTime(t);
+};
+
+
+function GetFullYearFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return YearFromTime(LocalTime(t));
+};
+
+
+function GetUTCFullYearFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return YearFromTime(t);
+};
+
+
+function GetMonthFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return MonthFromTime(LocalTime(t));
+};
+
+
+function GetUTCMonthFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return MonthFromTime(t);
+};
+
+
+function GetDateFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return DateFromTime(LocalTime(t));
+};
+
+
+function GetUTCDateFrom(aDate) {
+ var t = GetTimeFrom(aDate);
+ if ($isNaN(t)) return t;
+ return DateFromTime(t);
+};
+
+
+%FunctionSetPrototype($Date, new $Date($NaN));
+
+
+// Abbreviated names used by the string-formatting functions below.
+var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
+var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
+
+
+function TwoDigitString(value) {
+ return value < 10 ? "0" + value : "" + value;
+};
+
+
+// Format the date part of 'time' as e.g. "Sat Jan 31 1970".
+// 'time' is expected to already be in the desired (local) interpretation.
+function DateString(time) {
+ var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
+ return WeekDays[WeekDay(time)] + ' '
+ + Months[YMD.month] + ' '
+ + TwoDigitString(YMD.date) + ' '
+ + YMD.year;
+};
+
+
+function TimeString(time) {
+ return TwoDigitString(HourFromTime(time)) + ':'
+ + TwoDigitString(MinFromTime(time)) + ':'
+ + TwoDigitString(SecFromTime(time));
+};
+
+
+// Format the local timezone of 'time' as e.g. " GMT+0100 (CET)".
+// The offset combines the standard offset with the DST offset at 'time'.
+function LocalTimezoneString(time) {
+ var timezoneOffset = (LocalTimeOffset() + DaylightSavingsOffset(time)) / msPerMinute;
+ // Work with the absolute offset and reapply the sign in the output.
+ var sign = (timezoneOffset >= 0) ? 1 : -1;
+ var hours = $floor((sign * timezoneOffset)/60);
+ var min = $floor((sign * timezoneOffset)%60);
+ var gmt = ' GMT' + ((sign == 1) ? '+' : '-') + TwoDigitString(hours) + TwoDigitString(min);
+ return gmt + ' (' + LocalTimezone(time) + ')';
+};
+
+
+// Date followed by time, e.g. "Sat Jan 31 1970 23:00:00".
+function DatePrintString(time) {
+ return DateString(time) + ' ' + TimeString(time);
+};
+
+// -------------------------------------------------------------------
+
+
+// ECMA 262 - 15.9.4.2
+// Parse a date string via the runtime's DateParser; returns the time value
+// or NaN if the string is not recognized.
+function DateParse(string) {
+ // arr is [year, month, day, hour, minute, second, utc_offset-or-null].
+ var arr = %DateParseString(ToString(string));
+ if (IS_NULL(arr)) return $NaN;
+
+ var day = MakeDay(arr[0], arr[1], arr[2]);
+ var time = MakeTime(arr[3], arr[4], arr[5], 0);
+ var date = MakeDate(day, time);
+
+ if (IS_NULL(arr[6])) {
+ // No timezone in the string: interpret as local time.
+ return TimeClip(UTC(date));
+ } else {
+ // Explicit UTC offset in seconds.
+ return TimeClip(date - arr[6] * 1000);
+ }
+};
+
+
+// ECMA 262 - 15.9.4.3
+function DateUTC(year, month, date, hours, minutes, seconds, ms) {
+ year = ToNumber(year);
+ month = ToNumber(month);
+ var argc = %_ArgumentsLength();
+ date = argc > 2 ? ToNumber(date) : 1;
+ hours = argc > 3 ? ToNumber(hours) : 0;
+ minutes = argc > 4 ? ToNumber(minutes) : 0;
+ seconds = argc > 5 ? ToNumber(seconds) : 0;
+ ms = argc > 6 ? ToNumber(ms) : 0;
+ year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+ ? 1900 + TO_INTEGER(year) : year;
+ var day = MakeDay(year, month, date);
+ var time = MakeTime(hours, minutes, seconds, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
+};
+
+
+// Mozilla-specific extension. Returns the number of milliseconds
+// elapsed since 1 January 1970 00:00:00 UTC.
+function DateNow() {
+ return %DateCurrentTime(0);
+};
+
+
+// ECMA 262 - 15.9.5.2
+// Full date+time string with timezone, e.g. "Sat Jan 31 1970 23:00:00 GMT+0100 (CET)".
+function DateToString() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return kInvalidDate;
+ // Date/time are printed in local time; the timezone suffix is computed
+ // from the UTC value t.
+ return DatePrintString(LocalTime(t)) + LocalTimezoneString(t);
+};
+
+
+// ECMA 262 - 15.9.5.3
+// Date portion only, in local time.
+function DateToDateString() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return kInvalidDate;
+ return DateString(LocalTime(t));
+};
+
+
+// ECMA 262 - 15.9.5.4
+// Time portion with timezone, in local time.
+function DateToTimeString() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return kInvalidDate;
+ var lt = LocalTime(t);
+ // NOTE(review): the timezone suffix is computed from the local time lt
+ // here, but DateToString passes the UTC time t — confirm which argument
+ // LocalTimezoneString expects; the two disagree near DST transitions.
+ return TimeString(lt) + LocalTimezoneString(lt);
+};
+
+
+// ECMA 262 - 15.9.5.9
+// Internal time value in ms since the epoch (also used as valueOf).
+function DateGetTime() {
+ return GetTimeFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.10
+// Local-time full year, or NaN for an invalid date.
+function DateGetFullYear() {
+ return GetFullYearFrom(this)
+};
+
+
+// ECMA 262 - 15.9.5.11
+// UTC full year, or NaN for an invalid date.
+function DateGetUTCFullYear() {
+ return GetUTCFullYearFrom(this)
+};
+
+
+// ECMA 262 - 15.9.5.12
+// Local-time month (0 = January), or NaN for an invalid date.
+function DateGetMonth() {
+ return GetMonthFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.13
+// UTC month (0 = January), or NaN for an invalid date.
+function DateGetUTCMonth() {
+ return GetUTCMonthFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.14
+// Local-time day of the month, or NaN for an invalid date.
+function DateGetDate() {
+ return GetDateFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.15
+// UTC day of the month, or NaN for an invalid date.
+function DateGetUTCDate() {
+ return GetUTCDateFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.16
+// Local-time day of the week (0 = Sunday), or NaN for an invalid date.
+function DateGetDay() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return t;
+ return WeekDay(LocalTime(t));
+};
+
+
+// ECMA 262 - 15.9.5.17
+// UTC day of the week (0 = Sunday), or NaN for an invalid date.
+function DateGetUTCDay() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return t;
+ return WeekDay(t);
+};
+
+
+// ECMA 262 - 15.9.5.18
+// Local-time hours (0-23), or NaN for an invalid date.
+function DateGetHours() {
+ return GetHoursFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.19
+// UTC hours (0-23), or NaN for an invalid date.
+function DateGetUTCHours() {
+ return GetUTCHoursFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.20
+// Local-time minutes (0-59), or NaN for an invalid date.
+function DateGetMinutes() {
+ return GetMinutesFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.21
+// UTC minutes (0-59), or NaN for an invalid date.
+function DateGetUTCMinutes() {
+ return GetUTCMinutesFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.22
+// Local-time seconds (0-59), or NaN for an invalid date.
+function DateGetSeconds() {
+ return GetSecondsFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.23
+// UTC seconds (0-59), or NaN for an invalid date.
+function DateGetUTCSeconds() {
+ return GetUTCSecondsFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.24
+// Local-time milliseconds (0-999), or NaN for an invalid date.
+function DateGetMilliseconds() {
+ return GetMillisecondsFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.25
+// UTC milliseconds (0-999), or NaN for an invalid date.
+function DateGetUTCMilliseconds() {
+ return GetUTCMillisecondsFrom(this);
+};
+
+
+// ECMA 262 - 15.9.5.26
+// (UTC - local) in minutes; positive west of Greenwich, NaN when invalid.
+function DateGetTimezoneOffset() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return t;
+ return (t - LocalTime(t)) / msPerMinute;
+};
+
+
+// ECMA 262 - 15.9.5.27
+// Set the internal time value directly; returns the clipped value.
+function DateSetTime(ms) {
+ if (!IS_DATE(this)) throw new $TypeError('this is not a Date object.');
+ return %_SetValueOf(this, TimeClip(ToNumber(ms)));
+};
+
+
+// ECMA 262 - 15.9.5.28
+// Replace the milliseconds field, keeping the other local-time fields.
+function DateSetMilliseconds(ms) {
+ var t = LocalTime(GetTimeFrom(this));
+ ms = ToNumber(ms);
+ var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+};
+
+
+// ECMA 262 - 15.9.5.29
+// Replace the milliseconds field, keeping the other UTC fields.
+function DateSetUTCMilliseconds(ms) {
+ var t = GetTimeFrom(this);
+ ms = ToNumber(ms);
+ var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+};
+
+
+// ECMA 262 - 15.9.5.30
+// Set seconds (and optionally ms) in local time; omitted fields keep
+// their current values.
+function DateSetSeconds(sec, ms) {
+ var t = LocalTime(GetTimeFrom(this));
+ sec = ToNumber(sec);
+ ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
+ var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+};
+
+
+// ECMA 262 - 15.9.5.31
+// Set seconds (and optionally ms) in UTC; omitted fields keep their
+// current values.
+function DateSetUTCSeconds(sec, ms) {
+ var t = GetTimeFrom(this);
+ sec = ToNumber(sec);
+ ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+ var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+};
+
+
+// ECMA 262 - 15.9.5.32 (section number corrected; was mislabeled .33)
+// Set minutes (and optionally sec, ms) in local time.
+function DateSetMinutes(min, sec, ms) {
+ var t = LocalTime(GetTimeFrom(this));
+ min = ToNumber(min);
+ var argc = %_ArgumentsLength();
+ sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
+ ms = argc < 3 ? GetMillisecondsFrom(this) : ToNumber(ms);
+ var time = MakeTime(HourFromTime(t), min, sec, ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+};
+
+
+// ECMA 262 - 15.9.5.33 (section number corrected; was mislabeled .34)
+// Set minutes (and optionally sec, ms) in UTC.
+function DateSetUTCMinutes(min, sec, ms) {
+ var t = GetTimeFrom(this);
+ min = ToNumber(min);
+ var argc = %_ArgumentsLength();
+ sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
+ ms = argc < 3 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+ var time = MakeTime(HourFromTime(t), min, sec, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+};
+
+
+// ECMA 262 - 15.9.5.34 (section number corrected; was mislabeled .35)
+// Set hours (and optionally min, sec, ms) in local time.
+function DateSetHours(hour, min, sec, ms) {
+ var t = LocalTime(GetTimeFrom(this));
+ hour = ToNumber(hour);
+ var argc = %_ArgumentsLength();
+ min = argc < 2 ? GetMinutesFrom(this) : ToNumber(min);
+ sec = argc < 3 ? GetSecondsFrom(this) : ToNumber(sec);
+ ms = argc < 4 ? GetMillisecondsFrom(this) : ToNumber(ms);
+ var time = MakeTime(hour, min, sec, ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+};
+
+
+// ECMA 262 - 15.9.5.35 (section number corrected; was mislabeled .34)
+// Set hours (and optionally min, sec, ms) in UTC.
+function DateSetUTCHours(hour, min, sec, ms) {
+ var t = GetTimeFrom(this);
+ hour = ToNumber(hour);
+ var argc = %_ArgumentsLength();
+ min = argc < 2 ? GetUTCMinutesFrom(this) : ToNumber(min);
+ sec = argc < 3 ? GetUTCSecondsFrom(this) : ToNumber(sec);
+ ms = argc < 4 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+ var time = MakeTime(hour, min, sec, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+};
+
+
+// ECMA 262 - 15.9.5.36
+// Set the local-time day of the month, keeping year, month and
+// time-of-day.
+function DateSetDate(date) {
+ var t = LocalTime(GetTimeFrom(this));
+ date = ToNumber(date);
+ var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+};
+
+
+// ECMA 262 - 15.9.5.37
+// Set the UTC day of the month, keeping year, month and time-of-day.
+function DateSetUTCDate(date) {
+ var t = GetTimeFrom(this);
+ date = ToNumber(date);
+ var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+ return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+};
+
+
+// ECMA 262 - 15.9.5.38
+// Set the local-time month (and optionally date).
+function DateSetMonth(month, date) {
+ var t = LocalTime(GetTimeFrom(this));
+ month = ToNumber(month);
+ date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
+ var day = MakeDay(YearFromTime(t), month, date);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+};
+
+
+// ECMA 262 - 15.9.5.39
+// Set the UTC month (and optionally date).
+function DateSetUTCMonth(month, date) {
+ var t = GetTimeFrom(this);
+ month = ToNumber(month);
+ date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
+ var day = MakeDay(YearFromTime(t), month, date);
+ return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+};
+
+
+// ECMA 262 - 15.9.5.40
+// Set the local-time year (and optionally month, date). Per spec, an
+// invalid date is treated as time +0 so the result can become valid.
+function DateSetFullYear(year, month, date) {
+ var t = GetTimeFrom(this);
+ t = $isNaN(t) ? 0 : LocalTime(t);
+ year = ToNumber(year);
+ var argc = %_ArgumentsLength();
+ month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+ date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+ var day = MakeDay(year, month, date);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+};
+
+
+// ECMA 262 - 15.9.5.41
+// Set the UTC year (and optionally month, date). Per spec, an invalid
+// date is treated as time +0 so the result can become valid.
+function DateSetUTCFullYear(year, month, date) {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) t = 0;
+ var argc = %_ArgumentsLength();
+ year = ToNumber(year);
+ month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+ date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+ var day = MakeDay(year, month, date);
+ return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+};
+
+
+// ECMA 262 - 15.9.5.42
+function DateToUTCString() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return kInvalidDate;
+ // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
+ return WeekDays[WeekDay(t)] + ', '
+ + TwoDigitString(DateFromTime(t)) + ' '
+ + Months[MonthFromTime(t)] + ' '
+ + YearFromTime(t) + ' '
+ + TimeString(t) + ' GMT';
+};
+
+
+// ECMA 262 - B.2.4
+// Legacy getYear: local-time year minus 1900, or NaN for invalid dates.
+function DateGetYear() {
+ var t = GetTimeFrom(this);
+ if ($isNaN(t)) return $NaN;
+ return YearFromTime(LocalTime(t)) - 1900;
+};
+
+
+// ECMA 262 - B.2.5
+function DateSetYear(year) {
+ var t = LocalTime(GetTimeFrom(this));
+ if ($isNaN(t)) t = 0;
+ year = ToNumber(year);
+ if ($isNaN(year)) return %_SetValueOf(this, $NaN);
+ year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+ ? 1900 + TO_INTEGER(year) : year;
+ var day = MakeDay(year, GetMonthFrom(this), GetDateFrom(this));
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+};
+
+
+// -------------------------------------------------------------------
+
+// Install the non-enumerable Date class members. Called once below, at
+// bootstrap time.
+function SetupDate() {
+ // Setup non-enumerable properties of the Date object itself.
+ InstallProperties($Date, DONT_ENUM, {
+ UTC: DateUTC,
+ parse: DateParse,
+ now: DateNow
+ });
+
+ // Setup non-enumerable properties of the Date prototype object.
+ InstallProperties($Date.prototype, DONT_ENUM, {
+ constructor: $Date,
+ toString: DateToString,
+ toDateString: DateToDateString,
+ toTimeString: DateToTimeString,
+ // The toLocale* variants share the plain implementations here.
+ toLocaleString: DateToString,
+ toLocaleDateString: DateToDateString,
+ toLocaleTimeString: DateToTimeString,
+ valueOf: DateGetTime,
+ getTime: DateGetTime,
+ getFullYear: DateGetFullYear,
+ getUTCFullYear: DateGetUTCFullYear,
+ getMonth: DateGetMonth,
+ getUTCMonth: DateGetUTCMonth,
+ getDate: DateGetDate,
+ getUTCDate: DateGetUTCDate,
+ getDay: DateGetDay,
+ getUTCDay: DateGetUTCDay,
+ getHours: DateGetHours,
+ getUTCHours: DateGetUTCHours,
+ getMinutes: DateGetMinutes,
+ getUTCMinutes: DateGetUTCMinutes,
+ getSeconds: DateGetSeconds,
+ getUTCSeconds: DateGetUTCSeconds,
+ getMilliseconds: DateGetMilliseconds,
+ getUTCMilliseconds: DateGetUTCMilliseconds,
+ getTimezoneOffset: DateGetTimezoneOffset,
+ setTime: DateSetTime,
+ setMilliseconds: DateSetMilliseconds,
+ setUTCMilliseconds: DateSetUTCMilliseconds,
+ setSeconds: DateSetSeconds,
+ setUTCSeconds: DateSetUTCSeconds,
+ setMinutes: DateSetMinutes,
+ setUTCMinutes: DateSetUTCMinutes,
+ setHours: DateSetHours,
+ setUTCHours: DateSetUTCHours,
+ setDate: DateSetDate,
+ setUTCDate: DateSetUTCDate,
+ setMonth: DateSetMonth,
+ setUTCMonth: DateSetUTCMonth,
+ setFullYear: DateSetFullYear,
+ setUTCFullYear: DateSetUTCFullYear,
+ toUTCString: DateToUTCString,
+ // ECMA 262 - B.2.6: toGMTString is the same function as toUTCString.
+ toGMTString: DateToUTCString,
+ getYear: DateGetYear,
+ setYear: DateSetYear
+ });
+};
+
+SetupDate();
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "dateparser.h"
+
+namespace v8 { namespace internal {
+
+
+// Parse 'str' as a date, filling 'out' with the OUTPUT_SIZE components
+// documented in dateparser.h (year, month, day, hour, minute, second,
+// utc offset or null). Returns false on any malformed input. The loop
+// consumes one token per iteration: numbers (with optional ':' or '-'
+// suffixes), keywords (month names, am/pm, time zone names), signed UTC
+// offsets, and parenthesized comments; other characters are skipped.
+bool DateParser::Parse(String* str, FixedArray* out) {
+ ASSERT(out->length() == OUTPUT_SIZE);
+
+ InputReader in(str);
+ TimeZoneComposer tz;
+ TimeComposer time;
+ DayComposer day;
+
+ while (!in.IsEnd()) {
+ if (in.IsAsciiDigit()) {
+ // Parse a number (possibly with 1 or 2 trailing colons).
+ int n = in.ReadUnsignedNumber();
+ if (in.Skip(':')) {
+ if (in.Skip(':')) {
+ // n + "::" means hour n with zero minutes.
+ if (!time.IsEmpty()) return false;
+ time.Add(n);
+ time.Add(0);
+ } else {
+ // n + ":"
+ if (!time.Add(n)) return false;
+ }
+ } else if (tz.IsExpecting(n)) {
+ // Minute part of a "+hh:mm" style UTC offset.
+ tz.SetAbsoluteMinute(n);
+ } else if (time.IsExpecting(n)) {
+ time.AddFinal(n);
+ // Require end or white space immediately after finalizing time.
+ if (!in.IsEnd() && !in.SkipWhiteSpace()) return false;
+ } else {
+ // Otherwise the number is a date component (year, month or day).
+ if (!day.Add(n)) return false;
+ in.Skip('-'); // Ignore suffix '-' for year, month, or day.
+ }
+ } else if (in.IsAsciiAlphaOrAbove()) {
+ // Parse a "word" (sequence of chars. >= 'A').
+ uint32_t pre[KeywordTable::kPrefixLength];
+ int len = in.ReadWord(pre, KeywordTable::kPrefixLength);
+ int index = KeywordTable::Lookup(pre, len);
+ KeywordType type = KeywordTable::GetType(index);
+
+ if (type == AM_PM && !time.IsEmpty()) {
+ time.SetHourOffset(KeywordTable::GetValue(index));
+ } else if (type == MONTH_NAME) {
+ day.SetNamedMonth(KeywordTable::GetValue(index));
+ in.Skip('-'); // Ignore suffix '-' for month names
+ } else if (type == TIME_ZONE_NAME && in.HasReadNumber()) {
+ tz.Set(KeywordTable::GetValue(index));
+ } else {
+ // Garbage words are illegal if no number read yet.
+ if (in.HasReadNumber()) return false;
+ }
+ } else if (in.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
+ // Parse UTC offset (only after UTC or time).
+ tz.SetSign(in.GetAsciiSignValue());
+ in.Next();
+ int n = in.ReadUnsignedNumber();
+ if (in.Skip(':')) {
+ // "+hh:" form: the minutes arrive as the next number.
+ tz.SetAbsoluteHour(n);
+ tz.SetAbsoluteMinute(kNone);
+ } else {
+ // "+hhmm" form.
+ tz.SetAbsoluteHour(n / 100);
+ tz.SetAbsoluteMinute(n % 100);
+ }
+ } else if (in.Is('(')) {
+ // Ignore anything from '(' to a matching ')' or end of string.
+ in.SkipParentheses();
+ } else if ((in.IsAsciiSign() || in.Is(')')) && in.HasReadNumber()) {
+ // Extra sign or ')' is illegal if no number read yet.
+ return false;
+ } else {
+ // Ignore other characters.
+ in.Next();
+ }
+ }
+ // Each composer validates its components and writes its output slots.
+ return day.Write(out) && time.Write(out) && tz.Write(out);
+}
+
+
+// Disambiguate the collected date components into year/month/day and
+// write them to 'output'. Month may come from a named month keyword or
+// positionally; a number that cannot be a day (> 31) is taken as the
+// year. Returns false if the components cannot form a valid date.
+bool DateParser::DayComposer::Write(FixedArray* output) {
+ int year = 0; // Default year is 0 (=> 2000) for KJS compatibility.
+ int month = kNone;
+ int day = kNone;
+
+ if (named_month_ == kNone) {
+ // Purely numeric date: need at least month and day.
+ if (index_ < 2) return false;
+ if (index_ == 3 && !IsDay(comp_[0])) {
+ // YMD
+ year = comp_[0];
+ month = comp_[1];
+ day = comp_[2];
+ } else {
+ // MD(Y)
+ month = comp_[0];
+ day = comp_[1];
+ if (index_ == 3) year = comp_[2];
+ }
+ } else {
+ // A month name fixes the month; remaining numbers are day and year.
+ month = named_month_;
+ if (index_ < 1) return false;
+ if (index_ == 1) {
+ // MD or DM
+ day = comp_[0];
+ } else if (!IsDay(comp_[0])) {
+ // YMD, MYD, or YDM
+ year = comp_[0];
+ day = comp_[1];
+ } else {
+ // DMY, MDY, or DYM
+ day = comp_[0];
+ year = comp_[1];
+ }
+ }
+
+ // Two-digit years: 0-49 => 2000-2049, 50-99 => 1950-1999.
+ if (Between(year, 0, 49)) year += 2000;
+ else if (Between(year, 50, 99)) year += 1900;
+
+ if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
+
+ output->set(YEAR, Smi::FromInt(year));
+ output->set(MONTH, Smi::FromInt(month - 1)); // 0-based
+ output->set(DAY, Smi::FromInt(day));
+ return true;
+}
+
+
+// Validate the collected hour/minute/second and write them to 'output'.
+// Missing trailing components default to 0; an am/pm keyword supplies
+// hour_offset_ (0 or 12) and restricts the hour to 12-hour range.
+bool DateParser::TimeComposer::Write(FixedArray* output) {
+ // All time slots default to 0
+ while (index_ < kSize) {
+ comp_[index_++] = 0;
+ }
+
+ int& hour = comp_[0];
+ int& minute = comp_[1];
+ int& second = comp_[2];
+
+ if (hour_offset_ != kNone) {
+ if (!IsHour12(hour)) return false;
+ // Map 12 am -> 0 and 12 pm -> 12 before adding the am/pm offset.
+ hour %= 12;
+ hour += hour_offset_;
+ }
+
+ if (!IsHour(hour) || !IsMinute(minute) || !IsSecond(second)) return false;
+
+ output->set(HOUR, Smi::FromInt(hour));
+ output->set(MINUTE, Smi::FromInt(minute));
+ output->set(SECOND, Smi::FromInt(second));
+ return true;
+}
+
+
+// Write the UTC offset in seconds to 'output', or the null value when no
+// timezone was specified (sign_ == kNone), letting the caller interpret
+// the time as local.
+bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
+ if (sign_ != kNone) {
+ // A sign without explicit hour/minute means a zero offset.
+ if (hour_ == kNone) hour_ = 0;
+ if (minute_ == kNone) minute_ = 0;
+ int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
+ if (!Smi::IsValid(total_seconds)) return false;
+ output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
+ } else {
+ output->set(UTC_OFFSET, Heap::null_value());
+ }
+ return true;
+}
+
+
+// Keyword table: each entry is kPrefixLength lower-case prefix characters
+// (zero-padded), a KeywordType, and a value (month number 1-12, am/pm
+// hour offset, or time zone offset in hours). The list is terminated by
+// an all-zero INVALID entry, which Lookup also returns for misses.
+const int8_t
+DateParser::KeywordTable::array[][DateParser::KeywordTable::kEntrySize] = {
+ {'j', 'a', 'n', DateParser::MONTH_NAME, 1},
+ {'f', 'e', 'b', DateParser::MONTH_NAME, 2},
+ {'m', 'a', 'r', DateParser::MONTH_NAME, 3},
+ {'a', 'p', 'r', DateParser::MONTH_NAME, 4},
+ {'m', 'a', 'y', DateParser::MONTH_NAME, 5},
+ {'j', 'u', 'n', DateParser::MONTH_NAME, 6},
+ {'j', 'u', 'l', DateParser::MONTH_NAME, 7},
+ {'a', 'u', 'g', DateParser::MONTH_NAME, 8},
+ {'s', 'e', 'p', DateParser::MONTH_NAME, 9},
+ {'o', 'c', 't', DateParser::MONTH_NAME, 10},
+ {'n', 'o', 'v', DateParser::MONTH_NAME, 11},
+ {'d', 'e', 'c', DateParser::MONTH_NAME, 12},
+ {'a', 'm', '\0', DateParser::AM_PM, 0},
+ {'p', 'm', '\0', DateParser::AM_PM, 12},
+ {'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
+ {'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
+ {'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
+ {'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
+ {'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
+ {'e', 'd', 't', DateParser::TIME_ZONE_NAME, -4},
+ {'e', 's', 't', DateParser::TIME_ZONE_NAME, -5},
+ {'m', 'd', 't', DateParser::TIME_ZONE_NAME, -6},
+ {'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
+ {'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
+ {'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
+ {'\0', '\0', '\0', DateParser::INVALID, 0},
+};
+
+
+// We could use perfect hashing here, but this is not a bottleneck.
+// Linear scan of the keyword table; returns the index of the matching
+// entry, or of the terminating INVALID entry when nothing matches.
+int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
+ int i = 0;
+ while (array[i][kTypeOffset] != INVALID) {
+ bool prefix_match = true;
+ for (int j = 0; j < kPrefixLength; j++) {
+ if (pre[j] != static_cast<uint32_t>(array[i][j])) {
+ prefix_match = false;
+ break;
+ }
+ }
+ // A word longer than the keyword is only allowed for month names
+ // (e.g. "december" still matches "dec").
+ if (prefix_match &&
+ (len <= kPrefixLength || array[i][kTypeOffset] == MONTH_NAME)) {
+ return i;
+ }
+ i++;
+ }
+ return i; // Index of the INVALID sentinel entry.
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DATEPARSER_H_
+#define V8_DATEPARSER_H_
+
+#include "scanner.h"
+
+namespace v8 { namespace internal {
+
+
+class DateParser : public AllStatic {
+ public:
+
+ // Parse the string as a date. If parsing succeeds, return true after
+ // filling out the output array as follows (all integers are Smis):
+ // [0]: year
+ // [1]: month (0 = Jan, 1 = Feb, ...)
+ // [2]: day
+ // [3]: hour
+ // [4]: minute
+ // [5]: second
+ // [6]: UTC offset in seconds, or null value if no timezone specified
+ // If parsing fails, return false (content of output array is not defined).
+ static bool Parse(String* str, FixedArray* output);
+
+ enum {YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, UTC_OFFSET, OUTPUT_SIZE};
+
+ private:
+ // Range testing
+ static bool Between(int x, int lo, int hi) { return x >= lo && x <= hi; }
+ // Indicates a missing value.
+ static const int kNone = kMaxInt;
+
+ // InputReader provides basic string parsing and character classification.
+ class InputReader BASE_EMBEDDED {
+ public:
+ explicit InputReader(String* s) : buffer_(s), has_read_number_(false) {
+ // Prime ch_ with the first character so Is*/Skip work immediately.
+ Next();
+ }
+
+ // Advance to the next character of the string.
+ void Next() { ch_ = buffer_.has_more() ? buffer_.GetNext() : 0; }
+
+ // Read a string of digits as an unsigned number (cap just below kMaxInt).
+ int ReadUnsignedNumber() {
+ has_read_number_ = true;
+ int n;
+ for (n = 0; IsAsciiDigit() && n < kMaxInt / 10 - 1; Next()) {
+ n = n * 10 + ch_ - '0';
+ }
+ return n;
+ }
+
+ // Read a word (sequence of chars. >= 'A'), fill the given buffer with a
+ // lower-case prefix, and pad any remainder of the buffer with zeroes.
+ // Return word length.
+ int ReadWord(uint32_t* prefix, int prefix_size) {
+ int len;
+ for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) {
+ if (len < prefix_size) prefix[len] = GetAsciiAlphaLower();
+ }
+ for (int i = len; i < prefix_size; i++) prefix[i] = 0;
+ return len;
+ }
+
+ // The skip methods return whether they actually skipped something.
+ bool Skip(uint32_t c) { return ch_ == c ? (Next(), true) : false; }
+
+ bool SkipWhiteSpace() {
+ return Scanner::kIsWhiteSpace.get(ch_) ? (Next(), true) : false;
+ }
+
+ // Skip from '(' to the matching ')' (nesting allowed) or end of string.
+ bool SkipParentheses() {
+ if (ch_ != '(') return false;
+ int balance = 0;
+ do {
+ if (ch_ == ')') --balance;
+ else if (ch_ == '(') ++balance;
+ Next();
+ } while (balance > 0 && ch_);
+ return true;
+ }
+
+ // Character testing/classification. Non-ASCII digits are not supported.
+ bool Is(uint32_t c) const { return ch_ == c; }
+ bool IsEnd() const { return ch_ == 0; }
+ bool IsAsciiDigit() const { return IsDecimalDigit(ch_); }
+ bool IsAsciiAlphaOrAbove() const { return ch_ >= 'A'; }
+ bool IsAsciiSign() const { return ch_ == '+' || ch_ == '-'; }
+
+ // Return 1 for '+' and -1 for '-'.
+ int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
+
+ // Indicates whether any (possibly empty!) numbers have been read.
+ bool HasReadNumber() const { return has_read_number_; }
+
+ private:
+ // If current character is in 'A'-'Z' or 'a'-'z', return its lower-case.
+ // Else, return something outside of 'A'-'Z' and 'a'-'z'.
+ uint32_t GetAsciiAlphaLower() const { return ch_ | 32; }
+
+ StringInputBuffer buffer_;
+ bool has_read_number_;
+ // Current character; 0 once the end of the string is reached.
+ uint32_t ch_;
+ };
+
+ enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
+
+ // KeywordTable maps names of months, time zones, am/pm to numbers.
+ class KeywordTable : public AllStatic {
+ public:
+ // Look up a word in the keyword table and return an index.
+ // 'pre' contains a prefix of the word, zero-padded to size kPrefixLength
+ // and 'len' is the word length.
+ static int Lookup(const uint32_t* pre, int len);
+ // Get the type of the keyword at index i.
+ static KeywordType GetType(int i) {
+ return static_cast<KeywordType>(array[i][kTypeOffset]);
+ }
+ // Get the value of the keyword at index i.
+ static int GetValue(int i) { return array[i][kValueOffset]; }
+
+ // Each entry is kPrefixLength prefix characters followed by a type
+ // byte and a value byte.
+ static const int kPrefixLength = 3;
+ static const int kTypeOffset = kPrefixLength;
+ static const int kValueOffset = kTypeOffset + 1;
+ static const int kEntrySize = kValueOffset + 1;
+ // The keyword data itself; declared here, defined out of line.
+ static const int8_t array[][kEntrySize];
+ };
+
+ class TimeZoneComposer BASE_EMBEDDED {
+  public:
+   // Accumulates time-zone offset information (sign, hour, minute)
+   // while a date string is being parsed.
+   TimeZoneComposer() : sign_(kNone), hour_(kNone), minute_(kNone) {}
+   // Record a whole-hour UTC offset; sign and magnitude are kept apart.
+   void Set(int offset_in_hours) {
+     if (offset_in_hours < 0) {
+       sign_ = -1;
+       hour_ = -offset_in_hours;
+     } else {
+       sign_ = 1;
+       hour_ = offset_in_hours;
+     }
+     minute_ = 0;
+   }
+   void SetSign(int sign) { sign_ = (sign < 0) ? -1 : 1; }
+   void SetAbsoluteHour(int hour) { hour_ = hour; }
+   void SetAbsoluteMinute(int minute) { minute_ = minute; }
+   // True when an hour is known, no minute has been seen yet, and n is a
+   // plausible minute value.
+   bool IsExpecting(int n) const {
+     if (hour_ == kNone) return false;
+     return minute_ == kNone && TimeComposer::IsMinute(n);
+   }
+   bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
+   bool Write(FixedArray* output);
+  private:
+   int sign_;    // +1 or -1; kNone when unset.
+   int hour_;    // Absolute hour part of the offset; kNone when unset.
+   int minute_;  // Absolute minute part of the offset; kNone when unset.
+ };
+
+ class TimeComposer BASE_EMBEDDED {
+  public:
+   // Accumulates up to three time components (hour, minute, second).
+   TimeComposer() : index_(0), hour_offset_(kNone) {}
+   bool IsEmpty() const { return index_ == 0; }
+   // True when n would be a plausible next component given how many
+   // components have been collected so far.
+   bool IsExpecting(int n) const {
+     if (index_ == 1) return IsMinute(n);
+     return index_ == 2 && IsSecond(n);
+   }
+   // Append one component; fails once all three slots are filled.
+   bool Add(int n) {
+     if (index_ >= kSize) return false;
+     comp_[index_] = n;
+     index_++;
+     return true;
+   }
+   // Append one component and zero-fill any remaining slots.
+   bool AddFinal(int n) {
+     if (!Add(n)) return false;
+     while (index_ < kSize) {
+       comp_[index_] = 0;
+       index_++;
+     }
+     return true;
+   }
+   void SetHourOffset(int n) { hour_offset_ = n; }
+   bool Write(FixedArray* output);
+
+   static bool IsMinute(int x) { return Between(x, 0, 59); }
+  private:
+   static bool IsHour(int x) { return Between(x, 0, 23); }
+   static bool IsHour12(int x) { return Between(x, 0, 12); }
+   static bool IsSecond(int x) { return Between(x, 0, 59); }
+
+   static const int kSize = 3;
+   int comp_[kSize];  // Collected components, in parse order.
+   int index_;        // Number of components collected so far.
+   int hour_offset_;  // am/pm adjustment in hours; kNone when absent.
+ };
+
+ class DayComposer BASE_EMBEDDED {
+  public:
+   // Accumulates up to three date components plus an optional named month.
+   DayComposer() : index_(0), named_month_(kNone) {}
+   bool IsEmpty() const { return index_ == 0; }
+   // Append one component; fails once all three slots are filled.
+   bool Add(int n) {
+     if (index_ >= kSize) return false;
+     comp_[index_] = n;
+     index_++;
+     return true;
+   }
+   // Record a month given by name rather than by number.
+   void SetNamedMonth(int n) { named_month_ = n; }
+   bool Write(FixedArray* output);
+  private:
+   static bool IsMonth(int x) { return Between(x, 1, 12); }
+   static bool IsDay(int x) { return Between(x, 1, 31); }
+
+   static const int kSize = 3;
+   int comp_[kSize];  // Collected numeric components, in parse order.
+   int index_;        // Number of components collected so far.
+   int named_month_;  // Month derived from a month name; kNone when absent.
+ };
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_DATEPARSER_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Default number of frames to include in the response to backtrace request.
+const kDefaultBacktraceLength = 10;
+
+const Debug = {};
+
+// Regular expression to skip "crud" at the beginning of a source line which is
+// not really code. Currently the regular expression matches whitespace and
+// comments.
+const sourceLineBeginningSkip = /^(?:[ \v\h]*(?:\/\*.*?\*\/)*)*/;
+
+// Debug events which can occur in the V8 JavaScript engine. These originate
+// from the API include file debug.h.
+Debug.DebugEvent = { Break: 1,
+ Exception: 2,
+ NewFunction: 3,
+ BeforeCompile: 4,
+ AfterCompile: 5 };
+
+// Types of exceptions that can be broken upon.
+Debug.ExceptionBreak = { All : 0,
+ Uncaught: 1 };
+
+// The different types of steps.
+Debug.StepAction = { StepOut: 0,
+ StepNext: 1,
+ StepIn: 2,
+ StepMin: 3,
+ StepInMin: 4 };
+
+// The different types of scripts matching enum ScriptType in objects.h.
+Debug.ScriptType = { Native: 0,
+ Extension: 1,
+ Normal: 2 };
+
+// Convert a script type into a bit flag for use in script type masks.
+function ScriptTypeFlag(type) {
+ return (1 << type);
+}
+
+// Globals.
+var next_response_seq = 0; // Sequence number for the next protocol response/event.
+var next_break_point_number = 1; // Number to assign to the next break point.
+var break_points = []; // All BreakPoint objects (plain and script-created).
+var script_break_points = []; // All ScriptBreakPoint objects.
+
+
+// Create a new break point object and add it to the list of break points.
+function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+ var break_point = new BreakPoint(source_position, opt_line, opt_column, opt_script_break_point);
+ break_points.push(break_point);
+ return break_point;
+};
+
+
+// Object representing a break point.
+// NOTE: This object does not have a reference to the function having break
+// point as this would cause function not to be garbage collected when it is
+// not used any more. We do not want break points to keep functions alive.
+function BreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+ this.source_position_ = source_position;
+ this.source_line_ = opt_line;
+ this.source_column_ = opt_column;
+ if (opt_script_break_point) {
+ this.script_break_point_ = opt_script_break_point;
+ } else {
+ this.number_ = next_break_point_number++;
+ }
+ this.hit_count_ = 0;
+ this.active_ = true;
+ this.condition_ = null;
+ this.ignoreCount_ = 0;
+};
+
+
+// The number identifying this break point.
+BreakPoint.prototype.number = function() {
+ return this.number_;
+};
+
+
+// The function this break point is set in.
+// NOTE(review): func_ is not assigned by the constructor — presumably set
+// elsewhere; confirm before relying on it.
+BreakPoint.prototype.func = function() {
+ return this.func_;
+};
+
+
+// The source position of the break point.
+BreakPoint.prototype.source_position = function() {
+ return this.source_position_;
+};
+
+
+// Number of times this break point has been hit.
+BreakPoint.prototype.hit_count = function() {
+ return this.hit_count_;
+};
+
+
+// A break point created from a script break point delegates its active
+// state to that script break point.
+BreakPoint.prototype.active = function() {
+ if (this.script_break_point()) {
+ return this.script_break_point().active();
+ }
+ return this.active_;
+};
+
+
+// A condition set on the originating script break point, when present,
+// takes precedence over this break point's own condition.
+BreakPoint.prototype.condition = function() {
+ if (this.script_break_point() && this.script_break_point().condition()) {
+ return this.script_break_point().condition();
+ }
+ return this.condition_;
+};
+
+
+// Number of hits still to be ignored before triggering.
+BreakPoint.prototype.ignoreCount = function() {
+ return this.ignoreCount_;
+};
+
+
+// The script break point this break point was created from, if any.
+BreakPoint.prototype.script_break_point = function() {
+ return this.script_break_point_;
+};
+
+
+// Activate this break point.
+BreakPoint.prototype.enable = function() {
+ this.active_ = true;
+};
+
+
+// Deactivate this break point.
+BreakPoint.prototype.disable = function() {
+ this.active_ = false;
+};
+
+
+// Set the condition to evaluate when this break point is hit.
+BreakPoint.prototype.setCondition = function(condition) {
+ this.condition_ = condition;
+};
+
+
+// Set the number of hits to ignore before triggering.
+BreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
+ this.ignoreCount_ = ignoreCount;
+};
+
+
+BreakPoint.prototype.isTriggered = function(exec_state) {
+ // Break point not active - not triggered.
+ if (!this.active()) return false;
+
+ // Check for conditional break point.
+ if (this.condition()) {
+ // If break point has condition try to evaluate it in the top frame.
+ try {
+ var mirror = exec_state.GetFrame(0).evaluate(this.condition());
+ // If no sensible mirror or non true value break point not triggered.
+ if (!(mirror instanceof ValueMirror) || !%ToBoolean(mirror.value_)) {
+ return false;
+ }
+ } catch (e) {
+ // Exception evaluating condition counts as not triggered.
+ return false;
+ }
+ }
+
+ // Update the hit count.
+ this.hit_count_++;
+ if (this.script_break_point_) {
+ this.script_break_point_.hit_count_++;
+ }
+
+ // If the break point has an ignore count it is not triggered.
+ if (this.ignoreCount_ > 0) {
+ this.ignoreCount_--;
+ return false;
+ }
+
+ // Break point triggered.
+ return true;
+};
+
+
+// Function called from the runtime when a break point is hit. Returns true if
+// the break point is triggered and supposed to break execution.
+function IsBreakPointTriggered(break_id, break_point) {
+ // Wrap the break id in an ExecutionState so any condition can be evaluated
+ // in the context of the broken execution.
+ return break_point.isTriggered(MakeExecutionState(break_id));
+};
+
+
+// Object representing a script break point. The script is referenced by its
+// script name and the break point is represented as line and column.
+function ScriptBreakPoint(script_name, opt_line, opt_column) {
+ this.script_name_ = script_name;
+ this.line_ = opt_line || 0;
+ this.column_ = opt_column;
+ this.hit_count_ = 0;
+ this.active_ = true;
+ this.condition_ = null;
+ this.ignoreCount_ = 0;
+};
+
+
+// The number identifying this script break point.
+ScriptBreakPoint.prototype.number = function() {
+ return this.number_;
+};
+
+
+// The name of the script this break point applies to.
+ScriptBreakPoint.prototype.script_name = function() {
+ return this.script_name_;
+};
+
+
+// The (zero-based) line of the break point.
+ScriptBreakPoint.prototype.line = function() {
+ return this.line_;
+};
+
+
+// The column of the break point; may be undefined.
+ScriptBreakPoint.prototype.column = function() {
+ return this.column_;
+};
+
+
+// Number of times break points from this script break point have been hit.
+ScriptBreakPoint.prototype.hit_count = function() {
+ return this.hit_count_;
+};
+
+
+// Whether this script break point is active.
+ScriptBreakPoint.prototype.active = function() {
+ return this.active_;
+};
+
+
+// The condition, if any, shared by break points created from this one.
+ScriptBreakPoint.prototype.condition = function() {
+ return this.condition_;
+};
+
+
+// Number of hits still to be ignored.
+ScriptBreakPoint.prototype.ignoreCount = function() {
+ return this.ignoreCount_;
+};
+
+
+// Activate this script break point.
+ScriptBreakPoint.prototype.enable = function() {
+ this.active_ = true;
+};
+
+
+// Deactivate this script break point.
+ScriptBreakPoint.prototype.disable = function() {
+ this.active_ = false;
+};
+
+
+// Set the condition shared by break points created from this one.
+ScriptBreakPoint.prototype.setCondition = function(condition) {
+ this.condition_ = condition;
+};
+
+
+ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
+ this.ignoreCount_ = ignoreCount;
+
+ // Set ignore count on all break points created from this script break point.
+ for (var i = 0; i < break_points.length; i++) {
+ if (break_points[i].script_break_point() === this) {
+ break_points[i].setIgnoreCount(ignoreCount);
+ }
+ }
+};
+
+
+// Check whether a script matches this script break point. Currently this is
+// only based on script name.
+ScriptBreakPoint.prototype.matchesScript = function(script) {
+ return this.script_name_ == script.name &&
+ script.line_offset <= this.line_ &&
+ this.line_ < script.line_offset + script.lineCount();
+};
+
+
+// Set the script break point in a script.
+ScriptBreakPoint.prototype.set = function (script) {
+ var column = this.column();
+ var line = this.line();
+ // If the column is undefined the break is on the line. To help locate the
+ // first piece of breakable code on the line try to find the column on the
+ // line which contains some source.
+ if (IS_UNDEFINED(column)) {
+ var source_line = script.sourceLine(this.line());
+
+ // Allocate array for caching the columns where the actual source starts.
+ if (!script.sourceColumnStart_) {
+ script.sourceColumnStart_ = new Array(script.lineCount());
+ }
+
+ // Fill cache if needed and get column where the actual source starts.
+ if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
+ script.sourceColumnStart_[line] =
+ source_line.match(sourceLineBeginningSkip)[0].length;
+ }
+ column = script.sourceColumnStart_[line];
+ }
+
+ // Convert the line and column into an absolute position within the script.
+ var pos = Debug.findScriptSourcePosition(script, this.line(), column);
+
+ // Create a break point object and set the break point.
+ break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
+ break_point.setIgnoreCount(this.ignoreCount());
+ %SetScriptBreakPoint(script, pos, break_point);
+
+ return break_point;
+};
+
+
+// Clear all the break points created from this script break point
+ScriptBreakPoint.prototype.clear = function () {
+ var remaining_break_points = [];
+ for (var i = 0; i < break_points.length; i++) {
+ if (break_points[i].script_break_point() &&
+ break_points[i].script_break_point() === this) {
+ %ClearBreakPoint(break_points[i]);
+ } else {
+ remaining_break_points.push(break_points[i]);
+ }
+ }
+ break_points = remaining_break_points;
+};
+
+
+// Function called from runtime when a new script is compiled to set any script
+// break points set in this script.
+function UpdateScriptBreakPoints(script) {
+ for (var i = 0; i < script_break_points.length; i++) {
+ if (script_break_points[i].script_name() == script.name) {
+ script_break_points[i].set(script);
+ }
+ }
+};
+
+
+// Function called from the runtime to handle a debug request received from the
+// debugger. When this function is called the debugger is in the broken state
+// reflected by the exec_state parameter. When pending requests are handled the
+// parameter stopping indicates the expected running state.
+function ProcessDebugRequest(exec_state, request, stopping) {
+ return exec_state.debugCommandProcessor().processDebugJSONRequest(request, stopping);
+}
+
+
+// Helper function to check whether the JSON request is a plain break request.
+// This is used form the runtime handling of pending debug requests. If one of
+// the pending requests is a plain break execution should be broken after
+// processing the pending break requests.
+function IsPlainBreakRequest(json_request) {
+ try {
+ // Convert the JSON string to an object.
+ request = %CompileString('(' + json_request + ')', false)();
+
+ // Check for break command without arguments.
+ return request.command && request.command == "break" && !request.arguments;
+ } catch (e) {
+ // If there is a exception parsing the JSON request just return false.
+ return false;
+ }
+}
+
+
+// Add a debug event listener, optionally with extra data passed to it.
+Debug.addListener = function(listener, opt_data) {
+ if (!IS_FUNCTION(listener)) throw new Error('Parameters have wrong types.');
+ %AddDebugEventListener(listener, opt_data);
+};
+
+// Remove a previously added debug event listener.
+Debug.removeListener = function(listener) {
+ if (!IS_FUNCTION(listener)) throw new Error('Parameters have wrong types.');
+ %RemoveDebugEventListener(listener);
+};
+
+// Request a debug break.
+Debug.Break = function(f) {
+ %Break(0);
+};
+
+// Return the break locations (source positions) for function f.
+Debug.breakLocations = function(f) {
+ if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ return %GetBreakLocations(f);
+};
+
+// Returns a Script object. If the parameter is a function the return value
+// is the script in which the function is defined. If the parameter is a string
+// the return value is the script for which the script name has that string
+// value.
+Debug.findScript = function(func_or_script_name) {
+ if (IS_FUNCTION(func_or_script_name)) {
+ return %FunctionGetScript(func_or_script_name);
+ } else {
+ return %GetScript(func_or_script_name);
+ }
+};
+
+// Returns the script source. If the parameter is a function the return value
+// is the script source for the script in which the function is defined. If the
+// parameter is a string the return value is the script for which the script
+// name has that string value.
+Debug.scriptSource = function(func_or_script_name) {
+ return this.findScript(func_or_script_name).source;
+};
+
+// Return the source code of function f.
+Debug.source = function(f) {
+ if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ return %FunctionGetSourceCode(f);
+};
+
+// Return the assembler code generated for function f.
+Debug.assembler = function(f) {
+ if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ return %FunctionGetAssemblerCode(f);
+};
+
+// Return the script source position at which function f is defined.
+Debug.sourcePosition = function(f) {
+ if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ return %FunctionGetScriptSourcePosition(f);
+};
+
+// Return the absolute script position for the given line/column within the
+// function, offset by the function's own position in the script.
+Debug.findFunctionSourcePosition = function(func, opt_line, opt_column) {
+ var script = %FunctionGetScript(func);
+ var script_offset = %FunctionGetScriptSourcePosition(func);
+ return script.locationFromLine(opt_line, opt_column, script_offset).position;
+}
+
+
+// Returns the character position in a script based on a line number and an
+// optional position within that line.
+Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
+ return script.locationFromLine(opt_line, opt_column).position;
+}
+
+
+Debug.findBreakPoint = function(break_point_number, remove) {
+ var break_point;
+ for (var i = 0; i < break_points.length; i++) {
+ if (break_points[i].number() == break_point_number) {
+ break_point = break_points[i];
+ // Remove the break point from the list if requested.
+ if (remove) {
+ break_points.splice(i, 1);
+ }
+ break;
+ }
+ }
+ if (break_point) {
+ return break_point;
+ } else {
+ return this.findScriptBreakPoint(break_point_number, remove);
+ }
+};
+
+
+// Set a break point in function func at an optional line/column, with an
+// optional condition. Returns the number of the break point created.
+Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
+ if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
+ // Break position relative to the start of the function's source.
+ var source_position = this.findFunctionSourcePosition(func, opt_line, opt_column) -
+ this.sourcePosition(func);
+ // Find the script for the function.
+ var script = %FunctionGetScript(func);
+ // If the script for the function has a name convert this to a script break
+ // point.
+ if (script && script.name) {
+ // Adjust the source position to be script relative.
+ source_position += %FunctionGetScriptSourcePosition(func);
+ // Find line and column for the position in the script and set a script
+ // break point from that.
+ var location = script.locationFromPosition(source_position);
+ return this.setScriptBreakPoint(script.name,
+ location.line, location.column,
+ opt_condition);
+ } else {
+ // Set a break point directly on the function.
+ var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
+ %SetFunctionBreakPoint(func, source_position, break_point);
+ break_point.setCondition(opt_condition);
+ return break_point.number();
+ }
+};
+
+
+// Activate the break point with the given number.
+Debug.enableBreakPoint = function(break_point_number) {
+ var break_point = this.findBreakPoint(break_point_number, false);
+ break_point.enable();
+};
+
+
+// Deactivate the break point with the given number.
+Debug.disableBreakPoint = function(break_point_number) {
+ var break_point = this.findBreakPoint(break_point_number, false);
+ break_point.disable();
+};
+
+
+// Change the condition of the break point with the given number.
+Debug.changeBreakPointCondition = function(break_point_number, condition) {
+ var break_point = this.findBreakPoint(break_point_number, false);
+ break_point.setCondition(condition);
+};
+
+
+// Change the ignore count of the break point with the given number.
+// The ignore count must be non-negative.
+Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+ if (ignoreCount < 0) {
+ throw new Error('Invalid argument');
+ }
+ var break_point = this.findBreakPoint(break_point_number, false);
+ break_point.setIgnoreCount(ignoreCount);
+};
+
+
+// Clear (remove) the break point with the given number.
+Debug.clearBreakPoint = function(break_point_number) {
+ var break_point = this.findBreakPoint(break_point_number, true);
+ if (break_point) {
+ // NOTE(review): findBreakPoint itself falls back to script break points,
+ // so break_point here may be a ScriptBreakPoint which is then passed to
+ // %ClearBreakPoint — confirm that is intended.
+ return %ClearBreakPoint(break_point);
+ } else {
+ break_point = this.findScriptBreakPoint(break_point_number, true);
+ if (!break_point) {
+ throw new Error('Invalid breakpoint');
+ }
+ }
+};
+
+
+Debug.clearAllBreakPoints = function() {
+ for (var i = 0; i < break_points.length; i++) {
+ break_point = break_points[i];
+ %ClearBreakPoint(break_point);
+ }
+ break_points = [];
+};
+
+
+Debug.findScriptBreakPoint = function(break_point_number, remove) {
+ var script_break_point;
+ for (var i = 0; i < script_break_points.length; i++) {
+ if (script_break_points[i].number() == break_point_number) {
+ script_break_point = script_break_points[i];
+ // Remove the break point from the list if requested.
+ if (remove) {
+ script_break_point.clear();
+ script_break_points.splice(i,1);
+ }
+ break;
+ }
+ }
+ return script_break_point;
+}
+
+
+// Sets a breakpoint in a script identified through script name at the
+// specified source line and column within that line.
+// Returns the number assigned to the new script break point.
+Debug.setScriptBreakPoint = function(script_name, opt_line, opt_column, opt_condition) {
+ // Create script break point object.
+ var script_break_point = new ScriptBreakPoint(script_name, opt_line, opt_column);
+
+ // Assign number to the new script break point and add it.
+ script_break_point.number_ = next_break_point_number++;
+ script_break_point.setCondition(opt_condition);
+ script_break_points.push(script_break_point);
+
+ // Run through all scripts to see if this script break point matches any
+ // loaded scripts.
+ var scripts = this.scripts();
+ for (var i = 0; i < scripts.length; i++) {
+ if (script_break_point.matchesScript(scripts[i])) {
+ script_break_point.set(scripts[i]);
+ }
+ }
+
+ return script_break_point.number();
+}
+
+
+// Activate the script break point with the given number.
+Debug.enableScriptBreakPoint = function(break_point_number) {
+ var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+ script_break_point.enable();
+};
+
+
+// Deactivate the script break point with the given number.
+Debug.disableScriptBreakPoint = function(break_point_number) {
+ var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+ script_break_point.disable();
+};
+
+
+// Change the condition of the script break point with the given number.
+Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
+ var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+ script_break_point.setCondition(condition);
+};
+
+
+// Change the ignore count of the script break point with the given number.
+// The ignore count must be non-negative.
+Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+ if (ignoreCount < 0) {
+ throw new Error('Invalid argument');
+ }
+ var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+ script_break_point.setIgnoreCount(ignoreCount);
+};
+
+
+// Return the list of all script break points.
+Debug.scriptBreakPoints = function() {
+ return script_break_points;
+}
+
+
+// Clear any stepping state in the runtime.
+Debug.clearStepping = function() {
+ %ClearStepping(0);
+}
+
+// Enable breaking on all exceptions.
+Debug.setBreakOnException = function() {
+ return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
+};
+
+// Disable breaking on all exceptions.
+Debug.clearBreakOnException = function() {
+ return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
+};
+
+// Enable breaking on uncaught exceptions only.
+Debug.setBreakOnUncaughtException = function() {
+ return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, true);
+};
+
+// Disable breaking on uncaught exceptions.
+Debug.clearBreakOnUncaughtException = function() {
+ return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
+};
+
+Debug.showBreakPoints = function(f, full) {
+ if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ var source = full ? this.scriptSource(f) : this.source(f);
+ var offset = full ? this.sourcePosition(f) : 0;
+ var locations = this.breakLocations(f);
+ if (!locations) return source;
+ locations.sort(function(x, y) { return x - y; });
+ var result = "";
+ var prev_pos = 0;
+ var pos;
+ for (var i = 0; i < locations.length; i++) {
+ pos = locations[i] - offset;
+ result += source.slice(prev_pos, pos);
+ result += "[B" + i + "]";
+ prev_pos = pos;
+ }
+ pos = source.length;
+ result += source.substring(prev_pos, pos);
+ return result;
+};
+
+
+// Get all the scripts currently loaded. Locating all the scripts is based on
+// scanning the heap.
+Debug.scripts = function() {
+ // Collect all scripts in the heap.
+ return %DebugGetLoadedScripts(0);
+}
+
+// Create an ExecutionState object for the given break id.
+function MakeExecutionState(break_id) {
+ return new ExecutionState(break_id);
+};
+
+// Object representing the state of a broken execution; the selected frame
+// defaults to the top frame.
+function ExecutionState(break_id) {
+ this.break_id = break_id;
+ this.selected_frame = 0;
+};
+
+// Prepare the next step action. Defaults to StepIn with a count of 1.
+ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
+ var action = Debug.StepAction.StepIn;
+ if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
+ var count = opt_count ? %ToNumber(opt_count) : 1;
+
+ return %PrepareStep(this.break_id, action, count);
+}
+
+// Evaluate source in the global context of the broken execution.
+ExecutionState.prototype.evaluateGlobal = function(source) {
+ var result = %DebugEvaluateGlobal(this.break_id, source);
+ return result;
+};
+
+// Number of frames on the broken execution's stack.
+ExecutionState.prototype.GetFrameCount = function() {
+ return %GetFrameCount(this.break_id);
+};
+
+// Return a mirror for the frame at opt_index (default: selected frame).
+ExecutionState.prototype.GetFrame = function(opt_index) {
+ // If no index supplied return the selected frame.
+ if (opt_index == null) opt_index = this.selected_frame;
+ return new FrameMirror(this.break_id, opt_index);
+};
+
+// Return the raw C frames value.
+// NOTE(review): opt_from_index/opt_to_index are currently ignored — confirm
+// whether range support was intended.
+ExecutionState.prototype.cframesValue = function(opt_from_index, opt_to_index) {
+ return %GetCFrames(this.break_id);
+};
+
+// Select the frame with the given index; throws on an out-of-range index.
+ExecutionState.prototype.setSelectedFrame = function(index) {
+ var i = %ToNumber(index);
+ if (i < 0 || i >= this.GetFrameCount()) throw new Error('Illegal frame index.');
+ this.selected_frame = i;
+};
+
+// Index of the currently selected frame.
+ExecutionState.prototype.getSelectedFrame = function() {
+ return this.selected_frame;
+};
+
+// Create a command processor bound to this execution state.
+ExecutionState.prototype.debugCommandProcessor = function(protocol) {
+ return new DebugCommandProcessor(this, protocol);
+};
+
+
+// Create a break event for the given execution state and hit break points.
+function MakeBreakEvent(exec_state, break_points_hit) {
+ return new BreakEvent(exec_state, break_points_hit);
+};
+
+
+// Event describing a debug break, including which break points were hit.
+function BreakEvent(exec_state, break_points_hit) {
+ this.exec_state_ = exec_state;
+ this.break_points_hit_ = break_points_hit;
+};
+
+
+// The function in the top frame of the broken execution.
+BreakEvent.prototype.func = function() {
+ return this.exec_state_.GetFrame(0).func();
+};
+
+
+// Source line of the top frame.
+BreakEvent.prototype.sourceLine = function() {
+ return this.exec_state_.GetFrame(0).sourceLine();
+};
+
+
+// Source column of the top frame.
+BreakEvent.prototype.sourceColumn = function() {
+ return this.exec_state_.GetFrame(0).sourceColumn();
+};
+
+
+// Text of the source line of the top frame.
+BreakEvent.prototype.sourceLineText = function() {
+ return this.exec_state_.GetFrame(0).sourceLineText();
+};
+
+
+// The break points hit, if any (may be falsy for a plain break).
+BreakEvent.prototype.breakPointsHit = function() {
+ return this.break_points_hit_;
+};
+
+
+BreakEvent.prototype.details = function() {
+ // Build the break details.
+ var details = '';
+ if (this.breakPointsHit()) {
+ details += 'breakpoint';
+ if (this.breakPointsHit().length > 1) {
+ details += 's';
+ }
+ details += ' ';
+ for (var i = 0; i < this.breakPointsHit().length; i++) {
+ if (i > 0) {
+ details += ',';
+ }
+ details += this.breakPointsHit()[i].number();
+ }
+ } else {
+ details += 'break';
+ }
+ details += ' in ';
+ details += this.exec_state_.GetFrame(0).invocationText();
+ details += ' at ';
+ details += this.exec_state_.GetFrame(0).sourceAndPositionText();
+ details += '\n'
+ if (this.func().script()) {
+ details += FrameSourceUnderline(this.exec_state_.GetFrame(0));
+ }
+ return details;
+};
+
+
+BreakEvent.prototype.debugPrompt = function() {
+ // Build the debug break prompt.
+ if (this.breakPointsHit()) {
+ return 'breakpoint';
+ } else {
+ return 'break';
+ }
+};
+
+
+BreakEvent.prototype.toJSONProtocol = function() {
+ var o = { seq: next_response_seq++,
+ type: "event",
+ event: "break",
+ body: { invocationText: this.exec_state_.GetFrame(0).invocationText(),
+ }
+ }
+
+ // Add script related information to the event if available.
+ var script = this.func().script();
+ if (script) {
+ o.body.sourceLine = this.sourceLine(),
+ o.body.sourceColumn = this.sourceColumn(),
+ o.body.sourceLineText = this.sourceLineText(),
+ o.body.script = { name: script.name(),
+ lineOffset: script.lineOffset(),
+ columnOffset: script.columnOffset(),
+ lineCount: script.lineCount()
+ };
+ }
+
+ // Add an Array of break points hit if any.
+ if (this.breakPointsHit()) {
+ o.body.breakpoints = [];
+ for (var i = 0; i < this.breakPointsHit().length; i++) {
+ // Find the break point number. For break points originating from a
+ // script break point supply the script break point number.
+ var breakpoint = this.breakPointsHit()[i];
+ var script_break_point = breakpoint.script_break_point();
+ var number;
+ if (script_break_point) {
+ number = script_break_point.number();
+ } else {
+ number = breakpoint.number();
+ }
+ o.body.breakpoints.push(number);
+ }
+ }
+
+ return SimpleObjectToJSON_(o);
+};
+
+
+// Create an exception event for the given execution state.
+function MakeExceptionEvent(exec_state, exception, uncaught) {
+ return new ExceptionEvent(exec_state, exception, uncaught);
+};
+
+// Event describing a thrown exception; 'uncaught' tells whether any handler
+// will catch it.
+function ExceptionEvent(exec_state, exception, uncaught) {
+ this.exec_state_ = exec_state;
+ this.exception_ = exception;
+ this.uncaught_ = uncaught;
+};
+
+// Whether the exception is uncaught.
+ExceptionEvent.prototype.uncaught = function() {
+ return this.uncaught_;
+}
+
+// The function in the top frame of the broken execution.
+ExceptionEvent.prototype.func = function() {
+ return this.exec_state_.GetFrame(0).func();
+};
+
+
+// Source line of the top frame.
+ExceptionEvent.prototype.sourceLine = function() {
+ return this.exec_state_.GetFrame(0).sourceLine();
+};
+
+
+// Source column of the top frame.
+ExceptionEvent.prototype.sourceColumn = function() {
+ return this.exec_state_.GetFrame(0).sourceColumn();
+};
+
+
+// Text of the source line of the top frame.
+ExceptionEvent.prototype.sourceLineText = function() {
+ return this.exec_state_.GetFrame(0).sourceLineText();
+};
+
+
+ExceptionEvent.prototype.details = function() {
+ var details = "";
+ if (this.uncaught_) {
+ details += "Uncaught: ";
+ } else {
+ details += "Exception: ";
+ }
+
+ details += '"';
+ details += MakeMirror(this.exception_).toText();
+ details += '" at ';
+ details += this.exec_state_.GetFrame(0).sourceAndPositionText();
+ details += '\n';
+ details += FrameSourceUnderline(this.exec_state_.GetFrame(0));
+
+ return details;
+};
+
+ExceptionEvent.prototype.debugPrompt = function() {
+ if (this.uncaught_) {
+ return "uncaught exception";
+ } else {
+ return "exception";
+ }
+};
+
+// Create the JSON protocol representation of this exception event.
+ExceptionEvent.prototype.toJSONProtocol = function() {
+ var o = { seq: next_response_seq++,
+ type: "event",
+ event: "exception",
+ body: { uncaught: this.uncaught_,
+ exception: MakeMirror(this.exception_),
+ sourceLine: this.sourceLine(),
+ sourceColumn: this.sourceColumn(),
+ sourceLineText: this.sourceLineText(),
+ }
+ }
+
+ // Add script information to the event if available.
+ var script = this.func().script();
+ if (script) {
+ o.body.script = { name: script.name(),
+ lineOffset: script.lineOffset(),
+ columnOffset: script.columnOffset(),
+ lineCount: script.lineCount()
+ };
+ }
+
+ return SimpleObjectToJSON_(o);
+};
+
+// Create a compile event for a newly compiled script.
+function MakeCompileEvent(script_source, script_name, script_function) {
+ return new CompileEvent(script_source, script_name, script_function);
+};
+
+// Event describing a script compilation.
+// NOTE(review): details()/debugPrompt() below read this.scriptData and
+// this.func, which this constructor never assigns (it sets scriptSource,
+// scriptName and scriptFunction) — confirm which fields were intended.
+function CompileEvent(script_source, script_name, script_function) {
+ this.scriptSource = script_source;
+ this.scriptName = script_name;
+ this.scriptFunction = script_function;
+};
+
+// Build a human readable description of the event.
+CompileEvent.prototype.details = function() {
+ var result = "";
+ result = "Script added"
+ if (this.scriptData) {
+ result += ": '";
+ result += this.scriptData;
+ result += "'";
+ }
+ return result;
+};
+
+// Text to use for the debugger prompt after this event.
+CompileEvent.prototype.debugPrompt = function() {
+ var result = "source"
+ if (this.scriptData) {
+ result += " '";
+ result += this.scriptData;
+ result += "'";
+ }
+ if (this.func) {
+ result += " added";
+ } else {
+ result += " compiled";
+ }
+ return result;
+};
+
+function MakeNewFunctionEvent(func) {
+ return new NewFunctionEvent(func);
+};
+
+function NewFunctionEvent(func) {
+ this.func = func;
+};
+
+NewFunctionEvent.prototype.details = function() {
+ var result = "";
+ result = "Function added: ";
+ result += this.func.name;
+ return result;
+};
+
+NewFunctionEvent.prototype.debugPrompt = function() {
+ var result = "function";
+ if (this.func.name) {
+ result += " '";
+ result += this.func.name;
+ result += "'";
+ }
+ result += " added";
+ return result;
+};
+
+NewFunctionEvent.prototype.name = function() {
+ return this.func.name;
+};
+
+NewFunctionEvent.prototype.setBreakPoint = function(p) {
+ Debug.setBreakPoint(this.func, p || 0);
+};
+
+// Object handling debug requests for a given execution state.
+// NOTE(review): ExecutionState.prototype.debugCommandProcessor passes a
+// second 'protocol' argument which this constructor ignores — confirm
+// whether it should be stored.
+function DebugCommandProcessor(exec_state) {
+ this.exec_state_ = exec_state;
+};
+
+
+// Convenience function for C debugger code to process a text command. This
+// function converts the text command to a JSON request, performs the request
+// and converts the request to a text result for display. The result is an
+// object containing the text result and the intermediate results.
+DebugCommandProcessor.prototype.processDebugCommand = function (command) {
+ var request;
+ var response;
+ var text_result;
+ var running;
+
+ request = this.commandToJSONRequest(command);
+ response = this.processDebugJSONRequest(request);
+ text_result = this.responseToText(response);
+ running = this.isRunning(response);
+
+ return { "request" : request,
+ "response" : response,
+ "text_result" : text_result,
+ "running" : running };
+}
+
+
+// Converts a text command to a JSON request.
+DebugCommandProcessor.prototype.commandToJSONRequest = function(cmd_line) {
+ // If the wery first character is a { assume that a JSON request have been
+ // entered as a command. Converting that to a JSON request is trivial.
+ if (cmd_line && cmd_line.length > 0 && cmd_line.charAt(0) == '{') {
+ return cmd_line;
+ }
+
+ // Trim string for leading and trailing whitespace.
+ cmd_line = cmd_line.replace(/^\s+|\s+$/g, "");
+
+ // Find the command.
+ var pos = cmd_line.indexOf(" ");
+ var cmd;
+ var args;
+ if (pos == -1) {
+ cmd = cmd_line;
+ args = "";
+ } else {
+ cmd = cmd_line.slice(0, pos);
+ args = cmd_line.slice(pos).replace(/^\s+|\s+$/g, "");
+ }
+
+ // Switch on command.
+ if (cmd == 'continue' || cmd == 'c') {
+ return this.continueCommandToJSONRequest_(args);
+ } else if (cmd == 'step' || cmd == 's') {
+ return this.stepCommandToJSONRequest_(args);
+ } else if (cmd == 'backtrace' || cmd == 'bt') {
+ return this.backtraceCommandToJSONRequest_(args);
+ } else if (cmd == 'frame' || cmd == 'f') {
+ return this.frameCommandToJSONRequest_(args);
+ } else if (cmd == 'print' || cmd == 'p') {
+ return this.printCommandToJSONRequest_(args);
+ } else if (cmd == 'source') {
+ return this.sourceCommandToJSONRequest_(args);
+ } else if (cmd == 'scripts') {
+ return this.scriptsCommandToJSONRequest_(args);
+ } else if (cmd[0] == '{') {
+ return cmd_line;
+ } else {
+ throw new Error('Unknown command "' + cmd + '"');
+ }
+};
+
+
+// Create a JSON request for the continue command.
+DebugCommandProcessor.prototype.continueCommandToJSONRequest_ = function(args) {
+ var request = this.createRequest('continue');
+ return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the step command.
+DebugCommandProcessor.prototype.stepCommandToJSONRequest_ = function(args) {
+ // Requesting a step is through the continue command with additional
+ // arguments.
+ var request = this.createRequest('continue');
+ request.arguments = {};
+
+ // Process arguments if any.
+ if (args && args.length > 0) {
+ args = args.split(/\s*[ ]+\s*/g);
+
+ if (args.length > 2) {
+ throw new Error('Invalid step arguments.');
+ }
+
+ if (args.length > 0) {
+ // Get step count argument if any.
+ if (args.length == 2) {
+ request.arguments.stepcount = %ToNumber(args[1]);
+ }
+
+ // Get the step action.
+ if (args[0] == 'in' || args[0] == 'i') {
+ request.arguments.stepaction = 'in';
+ } else if (args[0] == 'min' || args[0] == 'm') {
+ request.arguments.stepaction = 'min';
+ } else if (args[0] == 'next' || args[0] == 'n') {
+ request.arguments.stepaction = 'next';
+ } else if (args[0] == 'out' || args[0] == 'o') {
+ request.arguments.stepaction = 'out';
+ } else {
+ throw new Error('Invalid step argument "' + args[0] + '".');
+ }
+ }
+ } else {
+ // Default is step next.
+ request.arguments.stepaction = 'next';
+ }
+
+ return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the backtrace command.
+DebugCommandProcessor.prototype.backtraceCommandToJSONRequest_ = function(args) {
+ // Build a backtrace request from the text command.
+ var request = this.createRequest('backtrace');
+ args = args.split(/\s*[ ]+\s*/g);
+ if (args.length == 2) {
+ request.arguments = {};
+ request.arguments.fromFrame = %ToNumber(args[0]);
+ request.arguments.toFrame = %ToNumber(args[1]) + 1;
+ }
+ return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the frame command.
+DebugCommandProcessor.prototype.frameCommandToJSONRequest_ = function(args) {
+ // Build a frame request from the text command.
+ var request = this.createRequest('frame');
+ args = args.split(/\s*[ ]+\s*/g);
+ if (args.length > 0 && args[0].length > 0) {
+ request.arguments = {};
+ request.arguments.number = args[0];
+ }
+ return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the print command.
+DebugCommandProcessor.prototype.printCommandToJSONRequest_ = function(args) {
+ // Build a evaluate request from the text command.
+ var request = this.createRequest('evaluate');
+ if (args.length == 0) {
+ throw new Error('Missing expression.');
+ }
+
+ request.arguments = {};
+ request.arguments.expression = args;
+
+ return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the source command.
+DebugCommandProcessor.prototype.sourceCommandToJSONRequest_ = function(args) {
+ // Build a evaluate request from the text command.
+ var request = this.createRequest('source');
+
+ // Default is one line before and two lines after current location.
+ var before = 1;
+ var after = 2;
+
+ // Parse the arguments.
+ args = args.split(/\s*[ ]+\s*/g);
+ if (args.length > 1 && args[0].length > 0 && args[1].length > 0) {
+ before = %ToNumber(args[0]);
+ after = %ToNumber(args[1]);
+ } else if (args.length > 0 && args[0].length > 0) {
+ after = %ToNumber(args[0]);
+ }
+
+ // Request source arround current source location.
+ request.arguments = {};
+ request.arguments.fromLine = this.exec_state_.GetFrame().sourceLine() - before;
+ if (request.arguments.fromLine < 0) {
+ request.arguments.fromLine = 0
+ }
+ request.arguments.toLine = this.exec_state_.GetFrame().sourceLine() + after + 1;
+
+ return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the scripts command.
+DebugCommandProcessor.prototype.scriptsCommandToJSONRequest_ = function(args) {
+ // Build a evaluate request from the text command.
+ var request = this.createRequest('scripts');
+
+ // Process arguments if any.
+ if (args && args.length > 0) {
+ args = args.split(/\s*[ ]+\s*/g);
+
+ if (args.length > 1) {
+ throw new Error('Invalid scripts arguments.');
+ }
+
+ request.arguments = {};
+ if (args[0] == 'natives') {
+ request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native);
+ } else if (args[0] == 'extensions') {
+ request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension);
+ } else if (args[0] == 'all') {
+ request.arguments.types =
+ ScriptTypeFlag(Debug.ScriptType.Normal) |
+ ScriptTypeFlag(Debug.ScriptType.Native) |
+ ScriptTypeFlag(Debug.ScriptType.Extension);
+ } else {
+ throw new Error('Invalid argument "' + args[0] + '".');
+ }
+ }
+
+ return request.toJSONProtocol();
+};
+
+
+// Convert a JSON response to text for display in a text based debugger.
+DebugCommandProcessor.prototype.responseToText = function(json_response) {
+ try {
+ // Convert the JSON string to an object.
+ response = %CompileString('(' + json_response + ')', false)();
+
+ if (!response.success) {
+ return response.message;
+ }
+
+ if (response.command == 'backtrace') {
+ var body = response.body;
+ var result = 'Frames #' + body.fromFrame + ' to #' +
+ (body.toFrame - 1) + ' of ' + body.totalFrames + '\n';
+ for (i = 0; i < body.frames.length; i++) {
+ if (i != 0) result += '\n';
+ result += body.frames[i].text;
+ }
+ return result;
+ } else if (response.command == 'frame') {
+ return SourceUnderline(response.body.sourceLineText,
+ response.body.column);
+ } else if (response.command == 'evaluate') {
+ return response.body.text;
+ } else if (response.command == 'source') {
+ // Get the source from the response.
+ var source = response.body.source;
+
+ // Get rid of last line terminator.
+ var remove_count = 0;
+ if (source[source.length - 1] == '\n') remove_count++;
+ if (source[source.length - 2] == '\r') remove_count++;
+ if (remove_count > 0) source = source.substring(0, source.length - remove_count);
+
+ return source;
+ } else if (response.command == 'scripts') {
+ var result = '';
+ for (i = 0; i < response.body.length; i++) {
+ if (i != 0) result += '\n';
+ if (response.body[i].name) {
+ result += response.body[i].name;
+ } else {
+ result += '[unnamed] ';
+ var sourceStart = response.body[i].sourceStart;
+ if (sourceStart.length > 40) {
+ sourceStart = sourceStart.substring(0, 37) + '...';
+ }
+ result += sourceStart;
+ }
+ result += ' (lines: ';
+ result += response.body[i].sourceLines;
+ result += ', length: ';
+ result += response.body[i].sourceLength;
+ if (response.body[i].type == Debug.ScriptType.Native) {
+ result += ', native';
+ } else if (response.body[i].type == Debug.ScriptType.Extension) {
+ result += ', extension';
+ }
+ result += ')';
+ }
+ return result;
+ }
+ } catch (e) {
+ return 'Error: "' + %ToString(e) + '" formatting response';
+ }
+};
+
+
+function SourceUnderline(source_text, position) {
+ if (IS_UNDEFINED(source_text)) {
+ return;
+ }
+
+ // Create an underline with a caret pointing to the source position. If the
+ // source contains a tab character the underline will have a tab character in
+ // the same place otherwise the underline will have a space character.
+ var underline = '';
+ for (var i = 0; i < position; i++) {
+ if (source_text[i] == '\t') {
+ underline += '\t';
+ } else {
+ underline += ' ';
+ }
+ }
+ underline += '^';
+
+ // Return the source line text with the underline beneath.
+ return source_text + '\n' + underline;
+};
+
+
+function FrameSourceUnderline(frame) {
+ var location = frame.sourceLocation();
+ if (location) {
+ return SourceUnderline(location.sourceText(), location.position - location.start);
+ }
+};
+
+
+// Packet representing a debugger protocol request. The sequence number is
+// left at 0 here; arguments may be attached before serializing with
+// toJSONProtocol.
+function RequestPacket(command) {
+  this.seq = 0;
+  this.type = 'request';
+  this.command = command;
+};
+
+
+RequestPacket.prototype.toJSONProtocol = function() {
+ // Encode the protocol header.
+ var json = '{';
+ json += '"seq":' + this.seq;
+ json += ',"type":"' + this.type + '"';
+ if (this.command) {
+ json += ',"command":' + StringToJSON_(this.command);
+ }
+ if (this.arguments) {
+ json += ',"arguments":';
+ // Encode the arguments part.
+ if (this.arguments.toJSONProtocol) {
+ json += this.arguments.toJSONProtocol()
+ } else {
+ json += SimpleObjectToJSON_(this.arguments);
+ }
+ }
+ json += '}';
+ return json;
+}
+
+
+// Create a new request packet for the specified command.
+DebugCommandProcessor.prototype.createRequest = function(command) {
+  return new RequestPacket(command);
+};
+
+
+function ResponsePacket(request) {
+ // Build the initial response from the request.
+ this.seq = next_response_seq++;
+ this.type = 'response';
+ if (request) this.request_seq = request.seq;
+ if (request) this.command = request.command;
+ this.success = true;
+ this.running = false;
+};
+
+
+// Mark this response as failed with the provided error message.
+ResponsePacket.prototype.failed = function(message) {
+  this.success = false;
+  this.message = message;
+}
+
+
+ResponsePacket.prototype.toJSONProtocol = function() {
+ // Encode the protocol header.
+ var json = '{';
+ json += '"seq":' + this.seq;
+ if (this.request_seq) {
+ json += ',"request_seq":' + this.request_seq;
+ }
+ json += ',"type":"' + this.type + '"';
+ if (this.command) {
+ json += ',"command":' + StringToJSON_(this.command);
+ }
+ if (this.success) {
+ json += ',"success":' + this.success;
+ } else {
+ json += ',"success":false';
+ }
+ if (this.body) {
+ json += ',"body":';
+ // Encode the body part.
+ if (this.body.toJSONProtocol) {
+ json += this.body.toJSONProtocol(true);
+ } else if (this.body instanceof Array) {
+ json += '[';
+ for (var i = 0; i < this.body.length; i++) {
+ if (i != 0) json += ',';
+ if (this.body[i].toJSONProtocol) {
+ json += this.body[i].toJSONProtocol(true)
+ } else {
+ json += SimpleObjectToJSON_(this.body[i]);
+ }
+ }
+ json += ']';
+ } else {
+ json += SimpleObjectToJSON_(this.body);
+ }
+ }
+ if (this.message) {
+ json += ',"message":' + StringToJSON_(this.message) ;
+ }
+ if (this.running) {
+ json += ',"running":true';
+ } else {
+ json += ',"running":false';
+ }
+ json += '}';
+ return json;
+}
+
+
+// Create a new response packet answering the specified request.
+DebugCommandProcessor.prototype.createResponse = function(request) {
+  return new ResponsePacket(request);
+};
+
+
+// Process a JSON protocol request and return the JSON encoded response.
+// If 'stopping' is defined the response 'running' flag is set to !stopping.
+// All failures are reported through a JSON error response; this function
+// itself never throws.
+DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request, stopping) {
+  var request;  // Current request.
+  var response;  // Generated response.
+  try {
+    try {
+      // Convert the JSON string to an object.
+      request = %CompileString('(' + json_request + ')', false)();
+
+      // Create an initial response.
+      response = this.createResponse(request);
+
+      if (!request.type) {
+        throw new Error('Type not specified');
+      }
+
+      if (request.type != 'request') {
+        throw new Error("Illegal type '" + request.type + "' in request");
+      }
+
+      if (!request.command) {
+        throw new Error('Command not specified');
+      }
+
+      // Dispatch to the handler for the requested command. Each handler
+      // fills in response.body and may adjust response.running.
+      if (request.command == 'continue') {
+        this.continueRequest_(request, response);
+      } else if (request.command == 'break') {
+        this.breakRequest_(request, response);
+      } else if (request.command == 'setbreakpoint') {
+        this.setBreakPointRequest_(request, response);
+      } else if (request.command == 'changebreakpoint') {
+        this.changeBreakPointRequest_(request, response);
+      } else if (request.command == 'clearbreakpoint') {
+        this.clearBreakPointRequest_(request, response);
+      } else if (request.command == 'backtrace') {
+        this.backtraceRequest_(request, response);
+      } else if (request.command == 'frame') {
+        this.frameRequest_(request, response);
+      } else if (request.command == 'evaluate') {
+        this.evaluateRequest_(request, response);
+      } else if (request.command == 'source') {
+        this.sourceRequest_(request, response);
+      } else if (request.command == 'scripts') {
+        this.scriptsRequest_(request, response);
+      } else {
+        throw new Error('Unknown command "' + request.command + '" in request');
+      }
+    } catch (e) {
+      // If there is no response object created one (without command).
+      if (!response) {
+        response = this.createResponse();
+      }
+      response.success = false;
+      response.message = %ToString(e);
+    }
+
+    // Return the response as a JSON encoded string.
+    try {
+      // Set the running state to what indicated.
+      if (!IS_UNDEFINED(stopping)) {
+        response.running = !stopping;
+      }
+      return response.toJSONProtocol();
+    } catch (e) {
+      // Failed to generate response - return generic error.
+      return '{"seq":' + response.seq + ',' +
+             '"request_seq":' + request.seq + ',' +
+             '"type":"response",' +
+             '"success":false,' +
+             '"message":"Internal error: ' + %ToString(e) + '"}';
+    }
+  } catch (e) {
+    // Failed in one of the catch blocks above - most generic error.
+    return '{"seq":0,"type":"response","success":false,"message":"Internal error"}';
+  }
+};
+
+
+DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
+ // Check for arguments for continue.
+ if (request.arguments) {
+ var count = 1;
+ var action = Debug.StepAction.StepIn;
+
+ // Pull out arguments.
+ var stepaction = request.arguments.stepaction;
+ var stepcount = request.arguments.stepcount;
+
+ // Get the stepcount argument if any.
+ if (stepcount) {
+ count = %ToNumber(stepcount);
+ if (count < 0) {
+ throw new Error('Invalid stepcount argument "' + stepcount + '".');
+ }
+ }
+
+ // Get the stepaction argument.
+ if (stepaction) {
+ if (stepaction == 'in') {
+ action = Debug.StepAction.StepIn;
+ } else if (stepaction == 'min') {
+ action = Debug.StepAction.StepMin;
+ } else if (stepaction == 'next') {
+ action = Debug.StepAction.StepNext;
+ } else if (stepaction == 'out') {
+ action = Debug.StepAction.StepOut;
+ } else {
+ throw new Error('Invalid stepaction argument "' + stepaction + '".');
+ }
+ }
+
+ // Setup the VM for stepping.
+ this.exec_state_.prepareStep(action, count);
+ }
+
+ // VM should be running after executing this request.
+ response.running = true;
+};
+
+
+// Handle a 'break' request. Intentionally a no-op: the VM is already broken
+// when this processor runs, so there is nothing to do.
+DebugCommandProcessor.prototype.breakRequest_ = function(request, response) {
+  // Ignore as break command does not do anything when broken.
+};
+
+
+// Handle a 'setbreakpoint' request. Sets either a function break point
+// (type 'function', target evaluated globally to a function) or a script
+// break point (type 'script'). Optional arguments: line, column, enabled
+// (default true), condition and ignoreCount. The response body carries the
+// break point number plus location details for script break points.
+DebugCommandProcessor.prototype.setBreakPointRequest_ =
+    function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var type = request.arguments.type;
+  var target = request.arguments.target;
+  var line = request.arguments.line;
+  var column = request.arguments.column;
+  var enabled = IS_UNDEFINED(request.arguments.enabled) ?
+      true : request.arguments.enabled;
+  var condition = request.arguments.condition;
+  var ignoreCount = request.arguments.ignoreCount;
+
+  // Check for legal arguments.
+  if (!type || !target) {
+    response.failed('Missing argument "type" or "target"');
+    return;
+  }
+  if (type != 'function' && type != 'script') {
+    response.failed('Illegal type "' + type + '"');
+    return;
+  }
+
+  // Either function or script break point.
+  var break_point_number;
+  if (type == 'function') {
+    // Handle function break point.
+    if (!IS_STRING(target)) {
+      response.failed('Argument "target" is not a string value');
+      return;
+    }
+    var f;
+    try {
+      // Find the function through a global evaluate.
+      f = this.exec_state_.evaluateGlobal(target);
+    } catch (e) {
+      response.failed('Error: "' + %ToString(e) +
+                      '" evaluating "' + target + '"');
+      return;
+    }
+    if (!IS_FUNCTION(f)) {
+      response.failed('"' + target + '" does not evaluate to a function');
+      return;
+    }
+
+    // Set function break point.
+    break_point_number = Debug.setBreakPoint(f, line, column, condition);
+  } else {
+    // set script break point.
+    break_point_number = Debug.setScriptBreakPoint(target,
+                                                   line, column,
+                                                   condition);
+  }
+
+  // Set additional break point properties.
+  var break_point = Debug.findBreakPoint(break_point_number);
+  if (ignoreCount) {
+    Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount);
+  }
+  if (!enabled) {
+    Debug.disableBreakPoint(break_point_number);
+  }
+
+  // Add the break point number to the response.
+  response.body = { type: type,
+                    breakpoint: break_point_number }
+
+  // Add break point information to the response.
+  if (break_point instanceof ScriptBreakPoint) {
+    response.body.type = 'script';
+    response.body.script_name = break_point.script_name();
+    response.body.line = break_point.line();
+    response.body.column = break_point.column();
+  } else {
+    response.body.type = 'function';
+  }
+};
+
+
+DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
+ // Check for legal request.
+ if (!request.arguments) {
+ response.failed('Missing arguments');
+ return;
+ }
+
+ // Pull out arguments.
+ var break_point = %ToNumber(request.arguments.breakpoint);
+ var enabled = request.arguments.enabled;
+ var condition = request.arguments.condition;
+ var ignoreCount = request.arguments.ignoreCount;
+
+ // Check for legal arguments.
+ if (!break_point) {
+ response.failed('Missing argument "breakpoint"');
+ return;
+ }
+
+ // Change enabled state if supplied.
+ if (!IS_UNDEFINED(enabled)) {
+ if (enabled) {
+ Debug.enableBreakPoint(break_point);
+ } else {
+ Debug.disableBreakPoint(break_point);
+ }
+ }
+
+ // Change condition if supplied
+ if (!IS_UNDEFINED(condition)) {
+ Debug.changeBreakPointCondition(break_point, condition);
+ }
+
+ // Change ignore count if supplied
+ if (!IS_UNDEFINED(ignoreCount)) {
+ Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
+ }
+}
+
+
+DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
+ // Check for legal request.
+ if (!request.arguments) {
+ response.failed('Missing arguments');
+ return;
+ }
+
+ // Pull out arguments.
+ var break_point = %ToNumber(request.arguments.breakpoint);
+
+ // Check for legal arguments.
+ if (!break_point) {
+ response.failed('Missing argument "breakpoint"');
+ return;
+ }
+
+ // Clear break point.
+ Debug.clearBreakPoint(break_point);
+}
+
+
+DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
+ // Get the number of frames.
+ var total_frames = this.exec_state_.GetFrameCount();
+
+ // Default frame range to include in backtrace.
+ var from_index = 0
+ var to_index = kDefaultBacktraceLength;
+
+ // Get the range from the arguments.
+ if (request.arguments) {
+ from_index = request.arguments.fromFrame;
+ if (from_index < 0) {
+ return response.failed('Invalid frame number');
+ }
+ to_index = request.arguments.toFrame;
+ if (to_index < 0) {
+ return response.failed('Invalid frame number');
+ }
+ }
+
+ // Adjust the index.
+ to_index = Math.min(total_frames, to_index);
+
+ if (to_index <= from_index) {
+ var error = 'Invalid frame range';
+ return response.failed(error);
+ }
+
+ // Create the response body.
+ var frames = [];
+ for (var i = from_index; i < to_index; i++) {
+ frames.push(this.exec_state_.GetFrame(i));
+ }
+ response.body = {
+ fromFrame: from_index,
+ toFrame: to_index,
+ totalFrames: total_frames,
+ frames: frames
+ }
+};
+
+
+// Return the C frames value of the execution state (used by the C debugger
+// integration).
+DebugCommandProcessor.prototype.backtracec = function(cmd, args) {
+  return this.exec_state_.cframesValue();
+};
+
+
+// Handle a 'frame' request. A non-negative 'number' argument changes the
+// selected frame; the response body is the (possibly newly) selected frame.
+DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
+  // With no arguments just keep the selected frame.
+  if (request.arguments && request.arguments.number >= 0) {
+    this.exec_state_.setSelectedFrame(request.arguments.number);
+  }
+  response.body = this.exec_state_.GetFrame();
+};
+
+
+DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
+ if (!request.arguments) {
+ return response.failed('Missing arguments');
+ }
+
+ // Pull out arguments.
+ var expression = request.arguments.expression;
+ var frame = request.arguments.frame;
+ var global = request.arguments.global;
+
+ // Check for legal arguments.
+ if (!IS_UNDEFINED(frame) && global) {
+ return response.failed('Arguments "frame" and "global" are exclusive');
+ }
+
+ // Global evaluate.
+ if (global) {
+ // Evaluate in the global context.
+ response.body = MakeMirror(this.exec_state_.evaluateGlobal(expression));
+ return;
+ }
+
+ // Check whether a frame was specified.
+ if (!IS_UNDEFINED(frame)) {
+ var frame_number = %ToNumber(frame);
+ if (frame_number < 0 || frame_number >= this.exec_state_.GetFrameCount()) {
+ return response.failed('Invalid frame "' + frame + '"');
+ }
+ // Evaluate in the specified frame.
+ response.body = this.exec_state_.GetFrame(frame_number).evaluate(expression);
+ return;
+ } else {
+ // Evaluate in the selected frame.
+ response.body = this.exec_state_.GetFrame().evaluate(expression);
+ return;
+ }
+};
+
+
+DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
+ var from_line;
+ var to_line;
+ var frame = this.exec_state_.GetFrame();
+ if (request.arguments) {
+ // Pull out arguments.
+ from_line = request.arguments.fromLine;
+ to_line = request.arguments.toLine;
+
+ if (!IS_UNDEFINED(request.arguments.frame)) {
+ var frame_number = %ToNumber(request.arguments.frame);
+ if (frame_number < 0 || frame_number >= this.exec_state_.GetFrameCount()) {
+ return response.failed('Invalid frame "' + frame + '"');
+ }
+ frame = this.exec_state_.GetFrame(frame_number);
+ }
+ }
+
+ // Get the script selected.
+ var script = frame.func().script();
+ if (!script) {
+ return response.failed('No source');
+ }
+
+ // Get the source slice and fill it into the response.
+ var slice = script.sourceSlice(from_line, to_line);
+ if (!slice) {
+ return response.failed('Invalid line interval');
+ }
+ response.body = {};
+ response.body.source = slice.sourceText();
+ response.body.fromLine = slice.from_line;
+ response.body.toLine = slice.to_line;
+ response.body.fromPosition = slice.from_position;
+ response.body.toPosition = slice.to_position;
+ response.body.totalLines = script.lineCount();
+};
+
+
+DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
+ var types = ScriptTypeFlag(Debug.ScriptType.Normal);
+ if (request.arguments) {
+ // Pull out arguments.
+ if (!IS_UNDEFINED(request.arguments.types)) {
+ types = %ToNumber(request.arguments.types);
+ if (isNaN(types) || types < 0) {
+ return response.failed('Invalid types "' + request.arguments.types + '"');
+ }
+ }
+ }
+
+ // Collect all scripts in the heap.
+ var scripts = %DebugGetLoadedScripts(0);
+
+ response.body = [];
+
+ for (var i = 0; i < scripts.length; i++) {
+ if (types & ScriptTypeFlag(scripts[i].type)) {
+ var script = {};
+ if (scripts[i].name) {
+ script.name = scripts[i].name;
+ }
+ script.lineOffset = scripts[i].line_offset;
+ script.columnOffset = scripts[i].column_offset;
+ script.lineCount = scripts[i].lineCount();
+ script.sourceStart = scripts[i].source.substring(0, 80);
+ script.sourceLength = scripts[i].source.length;
+ script.type = scripts[i].type;
+ response.body.push(script);
+ }
+ }
+};
+
+
+// Check whether the JSON response indicate that the VM should be running.
+DebugCommandProcessor.prototype.isRunning = function(json_response) {
+ try {
+ // Convert the JSON string to an object.
+ response = %CompileString('(' + json_response + ')', false)();
+
+ // Return whether VM should be running after this request.
+ return response.running;
+
+ } catch (e) {
+ return false;
+ }
+}
+
+
+// Request a break in the VM through the %SystemBreak runtime function.
+DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
+  return %SystemBreak(0);
+};
+
+
+function NumberToHex8Str(n) {
+ var r = "";
+ for (var i = 0; i < 8; ++i) {
+ var c = hexCharArray[n & 0x0F]; // hexCharArray is defined in uri.js
+ r = c + r;
+ n = n >>> 4;
+ }
+ return r;
+};
+
+DebugCommandProcessor.prototype.formatCFrames = function(cframes_value) {
+ var result = "";
+ if (cframes_value == null || cframes_value.length == 0) {
+ result += "(stack empty)";
+ } else {
+ for (var i = 0; i < cframes_value.length; ++i) {
+ if (i != 0) result += "\n";
+ result += this.formatCFrame(cframes_value[i]);
+ }
+ }
+ return result;
+};
+
+
+DebugCommandProcessor.prototype.formatCFrame = function(cframe_value) {
+ var result = "";
+ result += "0x" + NumberToHex8Str(cframe_value.address);
+ if (!IS_UNDEFINED(cframe_value.text)) {
+ result += " " + cframe_value.text;
+ }
+ return result;
+}
+
+
+/**
+ * Convert an Object to its JSON representation (see http://www.json.org/).
+ * This implementation simply runs through all string property names and adds
+ * each property to the JSON representation for some predefined types. For type
+ * "object" the function calls itself recursively unless the object has the
+ * function property "toJSONProtocol" in which case that is used. This is not
+ * a general implementation but sufficient for the debugger. Note that circular
+ * structures will cause infinite recursion.
+ * @param {Object} object The object to format as JSON
+ * @return {string} JSON formatted object value
+ */
+function SimpleObjectToJSON_(object) {
+ var content = [];
+ for (var key in object) {
+ // Only consider string keys.
+ if (typeof key == 'string') {
+ var property_value = object[key];
+
+ // Format the value based on its type.
+ var property_value_json;
+ switch (typeof property_value) {
+ case 'object':
+ if (typeof property_value.toJSONProtocol == 'function') {
+ property_value_json = property_value.toJSONProtocol(true)
+ } else if (IS_ARRAY(property_value)){
+ property_value_json = SimpleArrayToJSON_(property_value);
+ } else {
+ property_value_json = SimpleObjectToJSON_(property_value);
+ }
+ break;
+
+ case 'boolean':
+ property_value_json = BooleanToJSON_(property_value);
+ break;
+
+ case 'number':
+ property_value_json = NumberToJSON_(property_value);
+ break;
+
+ case 'string':
+ property_value_json = StringToJSON_(property_value);
+ break;
+
+ default:
+ property_value_json = null;
+ }
+
+ // Add the property if relevant.
+ if (property_value_json) {
+ content.push(StringToJSON_(key) + ':' + property_value_json);
+ }
+ }
+ }
+
+ // Make JSON object representation.
+ return '{' + content.join(',') + '}';
+};
+
+/**
+ * Convert an array to its JSON representation. This is a VERY simple
+ * implementation just to support what is needed for the debugger.
+ * @param {Array} arrya The array to format as JSON
+ * @return {string} JSON formatted array value
+ */
+function SimpleArrayToJSON_(array) {
+ // Make JSON array representation.
+ var json = '[';
+ for (var i = 0; i < array.length; i++) {
+ if (i != 0) {
+ json += ',';
+ }
+ var elem = array[i];
+ if (elem.toJSONProtocol) {
+ json += elem.toJSONProtocol(true)
+ } else if (IS_OBJECT(elem)) {
+ json += SimpleObjectToJSON_(elem);
+ } else if (IS_BOOLEAN(elem)) {
+ json += BooleanToJSON_(elem);
+ } else if (IS_NUMBER(elem)) {
+ json += NumberToJSON_(elem);
+ } else if (IS_STRING(elem)) {
+ json += StringToJSON_(elem);
+ } else {
+ json += elem;
+ }
+ }
+ json += ']';
+ return json;
+};
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "arguments.h"
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "natives.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+DEFINE_bool(remote_debugging, false, "enable remote debugging");
+DEFINE_int(debug_port, 5858, "port for remote debugging");
+DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response");
+DECLARE_bool(allow_natives_syntax);
+
+
+// Print a value to stdout followed by a newline, converting it to an ASCII
+// string first. Used for debug tracing.
+static void PrintLn(v8::Local<v8::Value> value) {
+  v8::Local<v8::String> s = value->ToString();
+  // +1 for the terminating '\0' written by WriteAscii.
+  char* data = NewArray<char>(s->Length() + 1);
+  if (data == NULL) {
+    V8::FatalProcessOutOfMemory("PrintLn");
+    return;
+  }
+  s->WriteAscii(data);
+  PrintF("%s\n", data);
+  DeleteArray(data);
+}
+
+
+// Store a pending JSON request by taking a private copy of the two byte
+// string, as the caller's buffer is not guaranteed to outlive this object.
+PendingRequest::PendingRequest(const uint16_t* json_request, int length)
+    : json_request_(Vector<uint16_t>::empty()),
+      next_(NULL) {
+  // Copy the request.
+  json_request_ =
+      Vector<uint16_t>(const_cast<uint16_t *>(json_request), length).Clone();
+}
+
+
+PendingRequest::~PendingRequest() {
+  // Deallocate the copy of the request taken in the constructor.
+  if (!json_request_.is_empty()) {
+    json_request_.Dispose();
+  }
+}
+
+// Return the pending request as a heap allocated string, or an empty handle
+// when there is no request data.
+Handle<String> PendingRequest::request() {
+  // Create a string in the heap from the pending request.
+  if (!json_request_.is_empty()) {
+    return Factory::NewStringFromTwoByte(
+        Vector<const uint16_t>(
+            reinterpret_cast<const uint16_t*>(json_request_.start()),
+            json_request_.length()));
+  } else {
+    return Handle<String>();
+  }
+}
+
+
+// Compute the debug break code stub for a call with argc arguments,
+// allocating through the GC retry wrapper CALL_HEAP_FUNCTION.
+static Handle<Code> ComputeCallDebugBreak(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc), Code);
+}
+
+
+// Compute (possibly allocating) the prepare-step-in stub for a call site
+// with the given number of arguments.
+static Handle<Code> ComputeCallDebugPrepareStepIn(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugPrepareStepIn(argc), Code);
+}
+
+
+// Create an iterator over the break locations of the given function's
+// debug info, visiting either all break locations or only those usable as
+// source break locations, depending on the locator type.
+BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
+                                             BreakLocatorType type) {
+  debug_info_ = debug_info;
+  type_ = type;
+  reloc_iterator_ = NULL;
+  reloc_iterator_original_ = NULL;
+  Reset();  // Initialize the rest of the member variables.
+}
+
+
+// Delete the relocation iterators created by Reset().
+BreakLocationIterator::~BreakLocationIterator() {
+  ASSERT(reloc_iterator_ != NULL);
+  ASSERT(reloc_iterator_original_ != NULL);
+  delete reloc_iterator_;
+  delete reloc_iterator_original_;
+}
+
+
+// Move to the next break location, updating the current source position
+// and statement position as relocation info entries are traversed.
+void BreakLocationIterator::Next() {
+  AssertNoAllocation nogc;
+  ASSERT(!RinfoDone());
+
+  // Iterate through reloc info for code and original code stopping at each
+  // breakable code target.
+  bool first = break_point_ == -1;
+  while (!RinfoDone()) {
+    if (!first) RinfoNext();
+    first = false;
+    if (RinfoDone()) return;
+
+    // Update the current source position each time a source position is
+    // passed.
+    if (is_position(rmode())) {
+      position_ = rinfo()->data() - debug_info_->shared()->start_position();
+      if (is_statement_position(rmode())) {
+        statement_position_ =
+            rinfo()->data() - debug_info_->shared()->start_position();
+      }
+      ASSERT(position_ >= 0);
+      ASSERT(statement_position_ >= 0);
+    }
+
+    // Check for breakable code target. Look in the original code as setting
+    // break points can cause the code targets in the running (debugged) code
+    // to be of a different kind than in the original code.
+    if (is_code_target(rmode())) {
+      Address target = original_rinfo()->target_address();
+      Code* code = Debug::GetCodeTarget(target);
+      if (code->is_inline_cache_stub() || is_js_construct_call(rmode())) {
+        break_point_++;
+        return;
+      }
+      if (code->kind() == Code::STUB) {
+        if (type_ == ALL_BREAK_LOCATIONS) {
+          if (Debug::IsBreakStub(code)) {
+            break_point_++;
+            return;
+          }
+        } else {
+          ASSERT(type_ == SOURCE_BREAK_LOCATIONS);
+          if (Debug::IsSourceBreakStub(code)) {
+            break_point_++;
+            return;
+          }
+        }
+      }
+    }
+
+    // Check for break at return.
+    // Currently is_exit_js_frame is used on ARM.
+    if (is_js_return(rmode()) || is_exit_js_frame(rmode())) {
+      // Set the positions to the end of the function.
+      if (debug_info_->shared()->HasSourceCode()) {
+        position_ = debug_info_->shared()->end_position() -
+            debug_info_->shared()->start_position();
+      } else {
+        position_ = 0;
+      }
+      statement_position_ = position_;
+      break_point_++;
+      return;
+    }
+  }
+}
+
+
+// Advance the iterator by the given number of break locations.
+void BreakLocationIterator::Next(int count) {
+  for (int i = 0; i < count; i++) {
+    Next();
+  }
+}
+
+
+// Find the break point closest to (strictly before) the supplied address
+// and position the iterator there.
+void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
+  int best_break_point = 0;
+  int best_distance = kMaxInt;
+  // Scan all break locations, remembering the one preceding pc most
+  // closely.
+  while (!Done()) {
+    if (this->pc() < pc) {
+      int candidate_distance = pc - this->pc();
+      if (candidate_distance < best_distance) {
+        best_break_point = break_point();
+        best_distance = candidate_distance;
+        // Cannot get any closer than an exact match.
+        if (best_distance == 0) break;
+      }
+    }
+    Next();
+  }
+
+  // Reposition the iterator at the break point found.
+  Reset();
+  Next(best_break_point);
+}
+
+
+// Find the break point whose statement position is closest at or after the
+// supplied source position and position the iterator there.
+void BreakLocationIterator::FindBreakLocationFromPosition(int position) {
+  int best_break_point = 0;
+  int best_distance = kMaxInt;
+  // Scan all break locations for the nearest statement position.
+  while (!Done()) {
+    if (position <= statement_position()) {
+      int candidate_distance = statement_position() - position;
+      if (candidate_distance < best_distance) {
+        best_break_point = break_point();
+        best_distance = candidate_distance;
+        // Cannot get any closer than an exact match.
+        if (best_distance == 0) break;
+      }
+    }
+    Next();
+  }
+
+  // Reposition the iterator at the break point found.
+  Reset();
+  Next(best_break_point);
+}
+
+
+// Restart the iteration from the first break location.
+void BreakLocationIterator::Reset() {
+  // Create relocation iterators for the two code objects.
+  if (reloc_iterator_ != NULL) delete reloc_iterator_;
+  if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
+  reloc_iterator_ = new RelocIterator(debug_info_->code());
+  reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
+
+  // Position at the first break point. The position fields are updated by
+  // Next() as position relocation info is passed.
+  break_point_ = -1;
+  position_ = 1;
+  statement_position_ = 1;
+  Next();
+}
+
+
+// Whether iteration has passed the last break location.
+bool BreakLocationIterator::Done() const {
+  return RinfoDone();
+}
+
+
+// Attach a break point object at the current break location, patching the
+// code with a debug break first if not already patched.
+void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) {
+  // If there is not already a real break point here patch code with debug
+  // break.
+  if (!HasBreakPoint()) {
+    SetDebugBreak();
+  }
+  ASSERT(IsDebugBreak());
+  // Set the break point information.
+  DebugInfo::SetBreakPoint(debug_info_, code_position(),
+                           position(), statement_position(),
+                           break_point_object);
+}
+
+
+// Detach a break point object from the current break location, removing
+// the patched debug break when it was the last one.
+void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) {
+  // Clear the break point information.
+  DebugInfo::ClearBreakPoint(debug_info_, code_position(), break_point_object);
+  // If there are no more break points here remove the debug break.
+  if (!HasBreakPoint()) {
+    ClearDebugBreak();
+    ASSERT(!IsDebugBreak());
+  }
+}
+
+
+// Ensure a one-shot (stepping) break is active at the current location.
+void BreakLocationIterator::SetOneShot() {
+  if (!HasBreakPoint()) {
+    // No real break point here - patch the code with a debug break.
+    SetDebugBreak();
+  } else {
+    // A real break point already has this location patched.
+    ASSERT(IsDebugBreak());
+  }
+}
+
+
+// Remove a one-shot (stepping) break from the current location, leaving
+// real break points untouched.
+void BreakLocationIterator::ClearOneShot() {
+  if (!HasBreakPoint()) {
+    // Only a one-shot break can be present - remove the patched code.
+    ClearDebugBreak();
+    ASSERT(!IsDebugBreak());
+  } else {
+    // A real break point owns this location; leave the debug break alone.
+    ASSERT(IsDebugBreak());
+  }
+}
+
+
+// Patch the code at the current break location with a debug break call.
+void BreakLocationIterator::SetDebugBreak() {
+  // If there is already a break point here just return. This might happen if
+  // the same code is flooded with break points twice. Flooding the same
+  // function twice might happen when stepping in a function with an exception
+  // handler as the handler and the function is the same.
+  if (IsDebugBreak()) {
+    return;
+  }
+
+  if (is_js_return(rmode())) {
+    // This path is currently only used on IA32 as JSExitFrame on ARM uses a
+    // stub.
+    // Patch the JS frame exit code with a debug break call. See
+    // VisitReturnStatement and ExitJSFrame in codegen-ia32.cc for the
+    // precise return instructions sequence.
+    ASSERT(Debug::kIa32JSReturnSequenceLength >=
+           Debug::kIa32CallInstructionLength);
+    rinfo()->patch_code_with_call(Debug::debug_break_return_entry()->entry(),
+        Debug::kIa32JSReturnSequenceLength - Debug::kIa32CallInstructionLength);
+  } else {
+    // Patch the original code with the current address as the current address
+    // might have changed by the inline caching since the code was copied.
+    original_rinfo()->set_target_address(rinfo()->target_address());
+
+    // Patch the code to invoke the builtin debug break function matching the
+    // calling convention used by the call site.
+    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(rinfo()));
+    rinfo()->set_target_address(dbgbrk_code->entry());
+  }
+  ASSERT(IsDebugBreak());
+}
+
+
+// Remove the debug break patched at the current break location, restoring
+// the original code.
+void BreakLocationIterator::ClearDebugBreak() {
+  if (is_js_return(rmode())) {
+    // Restore the JS frame exit code.
+    rinfo()->patch_code(original_rinfo()->pc(),
+                        Debug::kIa32JSReturnSequenceLength);
+  } else {
+    // Patch the code to the original invoke.
+    rinfo()->set_target_address(original_rinfo()->target_address());
+  }
+  ASSERT(!IsDebugBreak());
+}
+
+
+// Prepare the code at the current break location (an IC call or construct
+// call) so that stepping will enter the called function.
+void BreakLocationIterator::PrepareStepIn() {
+  // Step in can only be prepared if currently positioned on an IC call or
+  // construct call.
+  Address target = rinfo()->target_address();
+  Code* code = Debug::GetCodeTarget(target);
+  if (code->is_call_stub()) {
+    // Step in through IC call is handled by the runtime system. Therefore make
+    // sure that any current IC is cleared and the runtime system is
+    // called. If the executing code has a debug break at the location change
+    // the call in the original code as it is the code there that will be
+    // executed in place of the debug break call.
+    Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count());
+    if (IsDebugBreak()) {
+      original_rinfo()->set_target_address(stub->entry());
+    } else {
+      rinfo()->set_target_address(stub->entry());
+    }
+  } else {
+    // Step in through construct call requires no changes to the running code.
+    ASSERT(is_js_construct_call(rmode()));
+  }
+}
+
+
+// Check whether the break point is at a position which will exit the
+// function when executed.
+bool BreakLocationIterator::IsExit() const {
+  // JS return is used on IA32; exit JS frame is currently used on ARM.
+  RelocMode mode = rmode();
+  return is_js_return(mode) || is_exit_js_frame(mode);
+}
+
+
+// Whether a real (user-set) break point exists at the current location.
+bool BreakLocationIterator::HasBreakPoint() {
+  return debug_info_->HasBreakPoint(code_position());
+}
+
+
+// Check whether there is a debug break at the current position.
+bool BreakLocationIterator::IsDebugBreak() {
+  if (is_js_return(rmode())) {
+    // This is IA32 specific but works as long as the ARM version
+    // still uses a stub for JSExitFrame.
+    //
+    // TODO(1240753): Make the test architecture independent or split
+    // parts of the debugger into architecture dependent files.
+    // 0xE8 is the IA32 near call opcode, i.e. the patched-in debug break
+    // call at the return sequence.
+    return (*(rinfo()->pc()) == 0xE8);
+  } else {
+    return Debug::IsDebugBreak(rinfo()->target_address());
+  }
+}
+
+
+// Return the break point objects registered at the current code position.
+Object* BreakLocationIterator::BreakPointObjects() {
+  return debug_info_->GetBreakPointObjects(code_position());
+}
+
+
+// Whether both relocation iterators are exhausted.
+bool BreakLocationIterator::RinfoDone() const {
+  ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
+  return reloc_iterator_->done();
+}
+
+
+// Advance both relocation iterators in lockstep.
+void BreakLocationIterator::RinfoNext() {
+  reloc_iterator_->next();
+  reloc_iterator_original_->next();
+#ifdef DEBUG
+  ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
+  if (!reloc_iterator_->done()) {
+    ASSERT(rmode() == original_rmode());
+  }
+#endif
+}
+
+
+// Static debugger state: break point bookkeeping.
+bool Debug::has_break_points_ = false;
+DebugInfoListNode* Debug::debug_info_list_ = NULL;
+
+
+// Threading support.
+// Initialize the per-thread debugger state to "no stepping in progress".
+void Debug::ThreadInit() {
+  thread_local_.last_step_action_ = StepNone;
+  thread_local_.last_statement_position_ = kNoPosition;
+  thread_local_.step_count_ = 0;
+  thread_local_.last_fp_ = 0;
+  thread_local_.step_into_fp_ = 0;
+  thread_local_.after_break_target_ = 0;
+}
+
+
+// Storage archived/restored by ArchiveDebug/RestoreDebug on thread switch.
+JSCallerSavedBuffer Debug::registers_;
+Debug::ThreadLocal Debug::thread_local_;
+
+
+// Save this thread's debug state (per-thread data followed by the saved
+// registers) into the supplied buffer and reset the live state. Returns
+// the address just past the space reserved for one thread.
+char* Debug::ArchiveDebug(char* storage) {
+  char* to = storage;
+  memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+  to += sizeof(ThreadLocal);
+  // Fixed: "&registers_" had been corrupted to "®isters_" by HTML entity
+  // mangling of "&reg", which does not compile.
+  memcpy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
+  ThreadInit();
+  ASSERT(to <= storage + ArchiveSpacePerThread());
+  return storage + ArchiveSpacePerThread();
+}
+
+
+// Restore this thread's debug state (per-thread data followed by the saved
+// registers) from the supplied buffer. Returns the address just past the
+// space reserved for one thread.
+char* Debug::RestoreDebug(char* storage) {
+  char* from = storage;
+  memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+  from += sizeof(ThreadLocal);
+  // Fixed: "&registers_" had been corrupted to "®isters_" by HTML entity
+  // mangling of "&reg", which does not compile.
+  memcpy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
+  ASSERT(from <= storage + ArchiveSpacePerThread());
+  return storage + ArchiveSpacePerThread();
+}
+
+
+// Number of bytes needed to archive the debug state for one thread.
+int Debug::ArchiveSpacePerThread() {
+  return sizeof(ThreadLocal) + sizeof(registers_);
+}
+
+
+// Default call debugger on uncaught exception.
+bool Debug::break_on_exception_ = false;
+bool Debug::break_on_uncaught_exception_ = true;
+
+// Global handle to the debugger context and cached debug break return
+// code objects (set up in Debug::Setup).
+Handle<Context> Debug::debug_context_ = Handle<Context>();
+Code* Debug::debug_break_return_entry_ = NULL;
+Code* Debug::debug_break_return_ = NULL;
+
+
+// Weak handle callback: a function's debug info is no longer referenced,
+// so remove its node from the debug info list.
+void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Object> obj, void* data) {
+  DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+  RemoveDebugInfo(node->debug_info());
+#ifdef DEBUG
+  // Verify that the node really was removed from the list.
+  node = Debug::debug_info_list_;
+  while (node != NULL) {
+    ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
+    node = node->next();
+  }
+#endif
+}
+
+
+// Create a list node holding a weak global handle to the debug info.
+DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
+  // Globalize the request debug info object and make it weak.
+  debug_info_ = Handle<DebugInfo>::cast((GlobalHandles::Create(debug_info)));
+  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
+                          this, Debug::HandleWeakDebugInfo);
+}
+
+
+// Release the global handle to the debug info.
+DebugInfoListNode::~DebugInfoListNode() {
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
+}
+
+
+// Initialize the debugger, caching the builtin code objects used for debug
+// break on return when heap objects are available.
+void Debug::Setup(bool create_heap_objects) {
+  ThreadInit();
+  if (create_heap_objects) {
+    // Get code to handle entry to debug break on return.
+    debug_break_return_entry_ =
+        Builtins::builtin(Builtins::Return_DebugBreakEntry);
+    ASSERT(debug_break_return_entry_->IsCode());
+
+    // Get code to handle debug break on return.
+    debug_break_return_ =
+        Builtins::builtin(Builtins::Return_DebugBreak);
+    ASSERT(debug_break_return_->IsCode());
+  }
+}
+
+
+// Compile and run one of the debugger natives scripts in the current
+// (debugger) context. Returns whether compilation and execution succeeded.
+// Stack overflows during compilation are silently ignored and reported as
+// failure.
+bool Debug::CompileDebuggerScript(int index) {
+  HandleScope scope;
+
+  // Find source and name for the requested script.
+  if (index == -1) {
+    return false;
+  }
+  Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+  Vector<const char> name = Natives::GetScriptName(index);
+  Handle<String> script_name = Factory::NewStringFromAscii(name);
+
+  // Compile the script with natives syntax temporarily allowed.
+  bool allow_natives_syntax = FLAG_allow_natives_syntax;
+  FLAG_allow_natives_syntax = true;
+  Handle<JSFunction> boilerplate;
+  boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+  FLAG_allow_natives_syntax = allow_natives_syntax;
+
+  // Silently ignore stack overflows during compilation.
+  if (boilerplate.is_null()) {
+    ASSERT(Top::has_pending_exception());
+    Top::clear_pending_exception();
+    return false;
+  }
+
+  // Execute the boilerplate function in the global object for the supplied
+  // context.
+  bool caught_exception = false;
+  Handle<Context> context = Top::global_context();
+  Handle<JSFunction> debug_fun(Factory::NewFunctionFromBoilerplate(boilerplate,
+                                                                   context));
+  Handle<GlobalObject> debug_global(context->global());
+  Handle<Object> result = Execution::TryCall(debug_fun, debug_global,
+                                             0, NULL, &caught_exception);
+  if (caught_exception) {
+    MessageHandler::ReportMessage("error_loading_debugger",
+                                  NULL,
+                                  HandleVector<Object>(&result, 1));
+    return false;
+  }
+
+  // Mark this script as native.
+  Handle<Script> script(Script::cast(debug_fun->shared()->script()));
+  script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+
+  return true;
+}
+
+
+// Load the debugger: create the debugger context and compile the debugger
+// JavaScript (the "mirror" and "debug" natives) into it. Returns whether
+// the debugger ended up loaded.
+bool Debug::Load() {
+  // Return if debugger is already loaded.
+  if (IsLoaded()) {
+    return true;
+  }
+
+  // Create the debugger context.
+  HandleScope scope;
+  Handle<Object> empty_global_object;
+  Handle<Context> debug_context;
+  v8::Handle<v8::ObjectTemplate> global_template =
+      v8::Handle<v8::ObjectTemplate>();
+  debug_context = Bootstrapper::CreateEnvironment(empty_global_object,
+                                                  global_template,
+                                                  NULL);
+
+  // Enter the debugger context.
+  SaveContext save;
+  Top::set_context(*debug_context);
+  Top::set_security_context(*debug_context);
+
+  // Compile the JavaScript for the debugger in the debugger context.
+  // NOTE(review): on failure compiling_natives is left set to true -
+  // confirm whether it should be reset before returning.
+  Debugger::set_compiling_natives(true);
+  if (!CompileDebuggerScript(Natives::GetIndex("mirror"))) {
+    return false;
+  }
+  if (!CompileDebuggerScript(Natives::GetIndex("debug"))) {
+    return false;
+  }
+  Debugger::set_compiling_natives(false);
+
+  // Expose the builtins object in the debugger context.
+  Handle<String> builtins_string = Factory::LookupAsciiSymbol("builtins");
+  Handle<JSGlobalObject> global(JSGlobalObject::cast(debug_context->global()));
+  SetProperty(global, builtins_string,
+              Handle<JSObject>(global->builtins()), NONE);
+
+  // Debugger loaded. Keep the debugger context alive in a global handle.
+  debug_context_ = Handle<Context>::cast(GlobalHandles::Create(*debug_context));
+  return true;
+}
+
+
+// Unload the debugger, releasing the global handle to the debugger context.
+void Debug::Unload() {
+  // Return if the debugger is not loaded.
+  if (!IsLoaded()) {
+    return;
+  }
+
+  // Clear debugger context global handle.
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location()));
+  debug_context_ = Handle<Context>();
+}
+
+
+// Visit the GC roots held by the debugger (the cached debug break return
+// code objects).
+void Debug::Iterate(ObjectVisitor* v) {
+#define VISIT(field) v->VisitPointer(reinterpret_cast<Object**>(&(field)));
+  VISIT(debug_break_return_entry_);
+  VISIT(debug_break_return_);
+#undef VISIT
+}
+
+
+// Runtime entry invoked when a debug break is hit. Finds the break location
+// where execution stopped, evaluates the attached break points and the
+// stepping state, and either notifies the debugger or prepares the next
+// step. The return value is ignored by the caller.
+Object* Debug::Break(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  // Make sure the debugger is loaded before doing anything.
+  if (!Load()) {
+    return Heap::undefined_value();
+  }
+
+  SaveBreakFrame save;
+  EnterDebuggerContext enter;
+
+  // Get the top-most JavaScript frame.
+  JavaScriptFrameIterator it;
+  JavaScriptFrame* frame = it.frame();
+
+  // Deactivate interrupt during breakpoint processing.
+  StackGuard::DisableInterrupts();
+
+  // Get the debug info (create it if it does not exist).
+  Handle<SharedFunctionInfo> shared =
+      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+
+  // Find the break point where execution has stopped.
+  BreakLocationIterator break_location_iterator(debug_info,
+                                                ALL_BREAK_LOCATIONS);
+  break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+
+  // Check whether step next reached a new statement.
+  if (!StepNextContinue(&break_location_iterator, frame)) {
+    // Decrease steps left if performing multiple steps.
+    if (thread_local_.step_count_ > 0) {
+      thread_local_.step_count_--;
+    }
+  }
+
+  // If there is one or more real break points check whether any of these are
+  // triggered.
+  Handle<Object> break_points_hit(Heap::undefined_value());
+  if (break_location_iterator.HasBreakPoint()) {
+    Handle<Object> break_point_objects =
+        Handle<Object>(break_location_iterator.BreakPointObjects());
+    break_points_hit = CheckBreakPoints(break_point_objects);
+  }
+
+  // Notify debugger if a real break point is triggered or if performing single
+  // stepping with no more steps to perform. Otherwise do another step.
+  if (!break_points_hit->IsUndefined() ||
+      (thread_local_.last_step_action_ != StepNone &&
+       thread_local_.step_count_ == 0)) {
+    // Clear all current stepping setup.
+    ClearStepping();
+
+    // Notify the debug event listeners.
+    Debugger::OnDebugBreak(break_points_hit);
+  } else if (thread_local_.last_step_action_ != StepNone) {
+    // Hold on to last step action as it is cleared by the call to
+    // ClearStepping.
+    StepAction step_action = thread_local_.last_step_action_;
+    int step_count = thread_local_.step_count_;
+
+    // Clear all current stepping setup.
+    ClearStepping();
+
+    // Set up for the remaining steps.
+    PrepareStep(step_action, step_count);
+  }
+
+  // Reactivate interrupt.
+  StackGuard::EnableInterrupts();
+
+  // Install jump to the call address which was overwritten.
+  SetAfterBreakTarget(frame);
+
+  // Return whatever - return value is ignored.
+  return Heap::undefined_value();
+}
+
+
+// Check the break point objects for whether one or more are actually
+// triggered. This function returns a JSArray with the break point objects
+// which are triggered, or undefined if none are.
+Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
+  int break_points_hit_count = 0;
+  Handle<JSArray> break_points_hit = Factory::NewJSArray(1);
+
+  // If there are multiple break points they are in a FixedArray.
+  ASSERT(!break_point_objects->IsUndefined());
+  if (break_point_objects->IsFixedArray()) {
+    Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+    for (int i = 0; i < array->length(); i++) {
+      Handle<Object> o(array->get(i));
+      if (CheckBreakPoint(o)) {
+        break_points_hit->SetElement(break_points_hit_count++, *o);
+      }
+    }
+  } else {
+    if (CheckBreakPoint(break_point_objects)) {
+      break_points_hit->SetElement(break_points_hit_count++,
+                                   *break_point_objects);
+    }
+  }
+
+  // Return undefined if no break points were triggered.
+  if (break_points_hit_count == 0) {
+    return Factory::undefined_value();
+  }
+  return break_points_hit;
+}
+
+
+// Check whether a single break point object is triggered.
+bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
+  // Ignore check if break point object is not a JSObject.
+  if (!break_point_object->IsJSObject()) return true;
+
+  // Get the function IsBreakPointTriggered (defined in debug.js).
+  Handle<JSFunction> check_break_point =
+      Handle<JSFunction>(JSFunction::cast(
+          debug_context()->global()->GetProperty(
+              *Factory::LookupAsciiSymbol("IsBreakPointTriggered"))));
+
+  // Get the break id as an object.
+  Handle<Object> break_id = Factory::NewNumberFromInt(Top::break_id());
+
+  // Call IsBreakPointTriggered with the break id and the break point object.
+  bool caught_exception = false;
+  const int argc = 2;
+  Object** argv[argc] = {
+    break_id.location(),
+    reinterpret_cast<Object**>(break_point_object.location())
+  };
+  Handle<Object> result = Execution::TryCall(check_break_point,
+                                             Top::builtins(), argc, argv,
+                                             &caught_exception);
+
+  // If exception or non boolean result handle as not triggered.
+  if (caught_exception || !result->IsBoolean()) {
+    return false;
+  }
+
+  // Return whether the break point is triggered.
+  return *result == Heap::true_value();
+}
+
+
+// Check whether the function has debug information attached.
+bool Debug::HasDebugInfo(Handle<SharedFunctionInfo> shared) {
+  return !shared->debug_info()->IsUndefined();
+}
+
+
+// Return the debug info for this function. If the function currently has no
+// debug info it will be created. The reason for having this function is that
+// the debug info member is of type Object and not DebugInfo, as it can contain
+// undefined to indicate that currently no debug info exists for the function.
+Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
+  // If the debug info does not exist create it.
+  if (!HasDebugInfo(shared)) {
+    AddDebugInfo(shared);
+  }
+
+  // Return the debug info.
+  return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
+}
+
+
+// Set a break point in the given function at the break location closest to
+// the supplied source position.
+void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
+                          int source_position,
+                          Handle<Object> break_point_object) {
+  // Make sure the function is compiled before accessing code object.
+  EnsureCompiled(shared);
+
+  // Get the debug info (create it if it does not exist).
+  Handle<DebugInfo> debug_info;
+  if (shared->debug_info()->IsUndefined()) {
+    debug_info = AddDebugInfo(shared);
+  } else {
+    debug_info = Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
+  }
+
+  // Source positions start with zero.
+  ASSERT(source_position >= 0);
+
+  // Find the break point and change it.
+  BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
+  it.FindBreakLocationFromPosition(source_position);
+  it.SetBreakPoint(break_point_object);
+
+  // At least one active break point now.
+  ASSERT(debug_info->GetBreakPointCount() > 0);
+}
+
+
+// Clear the supplied break point object from whichever function it was set
+// in, removing the function's debug info when its last break point goes.
+void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
+  DebugInfoListNode* node = debug_info_list_;
+  while (node != NULL) {
+    Object* result = DebugInfo::FindBreakPointInfo(node->debug_info(),
+                                                   break_point_object);
+    if (!result->IsUndefined()) {
+      // Get information in the break point.
+      BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
+      Handle<DebugInfo> debug_info = node->debug_info();
+      Handle<SharedFunctionInfo> shared(debug_info->shared());
+      int source_position = break_point_info->statement_position()->value();
+
+      // Source positions start with zero.
+      ASSERT(source_position >= 0);
+
+      // Find the break point and clear it.
+      BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
+      it.FindBreakLocationFromPosition(source_position);
+      it.ClearBreakPoint(break_point_object);
+
+      // If there are no more break points left remove the debug info for this
+      // function.
+      if (debug_info->GetBreakPointCount() == 0) {
+        RemoveDebugInfo(debug_info);
+      }
+
+      return;
+    }
+    node = node->next();
+  }
+}
+
+
+// Set one-shot break points at every break location in the function.
+void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+  // The function must be compiled before its code can be patched.
+  EnsureCompiled(shared);
+
+  // Patch every break location with a one-shot break.
+  for (BreakLocationIterator it(GetDebugInfo(shared), ALL_BREAK_LOCATIONS);
+       !it.Done();
+       it.Next()) {
+    it.SetOneShot();
+  }
+}
+
+
+// Flood the function containing the topmost JavaScript exception handler
+// (if any) with one-shot break points.
+void Debug::FloodHandlerWithOneShot() {
+  StackFrame::Id id = Top::break_frame_id();
+  for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+    if (frame->HasHandler()) {
+      Handle<SharedFunctionInfo> shared =
+          Handle<SharedFunctionInfo>(
+              JSFunction::cast(frame->function())->shared());
+      // Flood the function with the catch block with break points.
+      FloodWithOneShot(shared);
+      return;
+    }
+  }
+}
+
+
+// Enable or disable breaking on (uncaught) exceptions.
+void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
+  bool* flag = (type == BreakUncaughtException)
+      ? &break_on_uncaught_exception_
+      : &break_on_exception_;
+  *flag = enable;
+}
+
+
+// Prepare the debugged code for the given step action by flooding the
+// relevant function(s) with one-shot break points and recording the
+// stepping state in the thread local data.
+void Debug::PrepareStep(StepAction step_action, int step_count) {
+  HandleScope scope;
+  ASSERT(Debug::InDebugger());
+
+  // Remember this step action and count.
+  thread_local_.last_step_action_ = step_action;
+  thread_local_.step_count_ = step_count;
+
+  // Get the frame where the execution has stopped and skip the debug frame if
+  // any. The debug frame will only be present if execution was stopped due to
+  // hitting a break point. In other situations (e.g. unhandled exception) the
+  // debug frame is not present.
+  StackFrame::Id id = Top::break_frame_id();
+  JavaScriptFrameIterator frames_it(id);
+  JavaScriptFrame* frame = frames_it.frame();
+
+  // First of all ensure there are one-shot break points in the top handler
+  // if any.
+  FloodHandlerWithOneShot();
+
+  // If the function on the top frame is unresolved perform step out. This will
+  // be the case when calling unknown functions and having the debugger stopped
+  // in an unhandled exception.
+  if (!frame->function()->IsJSFunction()) {
+    // Step out: Find the calling JavaScript frame and flood it with
+    // breakpoints.
+    frames_it.Advance();
+    // Fill the function to return to with one-shot break points.
+    JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+    FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+    return;
+  }
+
+  // Get the debug info (create it if it does not exist).
+  Handle<SharedFunctionInfo> shared =
+      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+
+  // Find the break location where execution has stopped.
+  BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
+  it.FindBreakLocationFromAddress(frame->pc());
+
+  // Compute whether or not the target is a call target.
+  bool is_call_target = false;
+  if (is_code_target(it.rinfo()->rmode())) {
+    Address target = it.rinfo()->target_address();
+    Code* code = Debug::GetCodeTarget(target);
+    if (code->is_call_stub()) is_call_target = true;
+  }
+
+  // If this is the last break code target step out is the only possibility.
+  if (it.IsExit() || step_action == StepOut) {
+    // Step out: If there is a JavaScript caller frame, we need to
+    // flood it with breakpoints.
+    frames_it.Advance();
+    if (!frames_it.done()) {
+      // Fill the function to return to with one-shot break points.
+      JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+      FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+    }
+  } else if (!(is_call_target || is_js_construct_call(it.rmode())) ||
+             step_action == StepNext || step_action == StepMin) {
+    // Step next or step min.
+
+    // Fill the current function with one-shot break points.
+    FloodWithOneShot(shared);
+
+    // Remember source position and frame to handle step next.
+    thread_local_.last_statement_position_ =
+        debug_info->code()->SourceStatementPosition(frame->pc());
+    thread_local_.last_fp_ = frame->fp();
+  } else {
+    // Fill the current function with one-shot break points even for step in on
+    // a call target as the function called might be a native function for
+    // which step in will not stop.
+    FloodWithOneShot(shared);
+
+    // Step in or Step in min.
+    it.PrepareStepIn();
+    ActivateStepIn(frame);
+  }
+}
+
+
+// Check whether the current debug break should be reported to the debugger. It
+// is used to have step next and step in only report break back to the debugger
+// if on a different frame or in a different statement. In some situations
+// there will be several break points in the same statement when the code is
+// flooded with one-shot break points. This function helps to perform several
+// steps before reporting break back to the debugger.
+bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
+                             JavaScriptFrame* frame) {
+  // If the step last action was step next or step in make sure that a new
+  // statement is hit.
+  if (thread_local_.last_step_action_ == StepNext ||
+      thread_local_.last_step_action_ == StepIn) {
+    // Never continue if returning from function.
+    if (break_location_iterator->IsExit()) return false;
+
+    // Continue if we are still on the same frame and in the same statement.
+    int current_statement_position =
+        break_location_iterator->code()->SourceStatementPosition(frame->pc());
+    return thread_local_.last_fp_ == frame->fp() &&
+           thread_local_.last_statement_position_ == current_statement_position;
+  }
+
+  // No step next action - don't continue.
+  return false;
+}
+
+
+// Check whether the code object at the specified address is a debug break code
+// object.
+bool Debug::IsDebugBreak(Address addr) {
+  Code* code = GetCodeTarget(addr);
+  return code->state() == DEBUG_BREAK;
+}
+
+
+// Check whether a code stub is a possible break point location when looking
+// for source break locations: only the CallFunction stub qualifies.
+bool Debug::IsSourceBreakStub(Code* code) {
+  return code->major_key() == CodeStub::CallFunction;
+}
+
+
+// Check whether a code stub is a possible break location: the CallFunction
+// and StackCheck stubs qualify.
+bool Debug::IsBreakStub(Code* code) {
+  switch (code->major_key()) {
+    case CodeStub::CallFunction:
+    case CodeStub::StackCheck:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
+// Find the builtin to use for invoking the debug break at the call site
+// described by the relocation info, matching its calling convention.
+Handle<Code> Debug::FindDebugBreak(RelocInfo* rinfo) {
+  // Find the builtin debug break function matching the calling convention
+  // used by the call site.
+  RelocMode mode = rinfo->rmode();
+
+  if (is_code_target(mode)) {
+    Address target = rinfo->target_address();
+    Code* code = Debug::GetCodeTarget(target);
+    if (code->is_inline_cache_stub()) {
+      if (code->is_call_stub()) {
+        return ComputeCallDebugBreak(code->arguments_count());
+      }
+      if (code->is_load_stub()) {
+        return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+      }
+      if (code->is_store_stub()) {
+        return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+      }
+      if (code->is_keyed_load_stub()) {
+        Handle<Code> result =
+            Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+        return result;
+      }
+      if (code->is_keyed_store_stub()) {
+        Handle<Code> result =
+            Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+        return result;
+      }
+    }
+    if (is_js_construct_call(mode)) {
+      Handle<Code> result =
+          Handle<Code>(Builtins::builtin(Builtins::ConstructCall_DebugBreak));
+      return result;
+    }
+    // Currently is_exit_js_frame is used on ARM.
+    if (is_exit_js_frame(mode)) {
+      return Handle<Code>(Builtins::builtin(Builtins::Return_DebugBreak));
+    }
+    if (code->kind() == Code::STUB) {
+      ASSERT(code->major_key() == CodeStub::CallFunction ||
+             code->major_key() == CodeStub::StackCheck);
+      Handle<Code> result =
+          Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak));
+      return result;
+    }
+  }
+
+  // No debug break matches this call site.
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+
+// Simple function for returning the source positions for active break points.
+// Returns undefined when the function has no active break points.
+Handle<Object> Debug::GetSourceBreakLocations(
+    Handle<SharedFunctionInfo> shared) {
+  if (!HasDebugInfo(shared)) return Handle<Object>(Heap::undefined_value());
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+  if (debug_info->GetBreakPointCount() == 0) {
+    return Handle<Object>(Heap::undefined_value());
+  }
+  // Collect the statement position of every break point info entry with at
+  // least one active break point.
+  Handle<FixedArray> locations =
+      Factory::NewFixedArray(debug_info->GetBreakPointCount());
+  int count = 0;
+  for (int i = 0; i < debug_info->break_points()->length(); i++) {
+    if (!debug_info->break_points()->get(i)->IsUndefined()) {
+      BreakPointInfo* break_point_info =
+          BreakPointInfo::cast(debug_info->break_points()->get(i));
+      if (break_point_info->GetBreakPointCount() > 0) {
+        locations->set(count++, break_point_info->statement_position());
+      }
+    }
+  }
+  return locations;
+}
+
+
+// Reset all state associated with stepping.
+void Debug::ClearStepping() {
+  // Clear the various stepping setup.
+  ClearOneShot();
+  ClearStepIn();
+  ClearStepNext();
+
+  // Clear multiple step counter.
+  thread_local_.step_count_ = 0;
+}
+
+// Clears all the one-shot break points that are currently set. Normally this
+// function is called each time a break point is hit as one shot break points
+// are used to support stepping.
+void Debug::ClearOneShot() {
+  // The current implementation just runs through all the breakpoints. When
+  // the last break point for a function is removed that function is
+  // automatically removed from the list.
+  for (DebugInfoListNode* node = debug_info_list_;
+       node != NULL;
+       node = node->next()) {
+    BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+    for (; !it.Done(); it.Next()) {
+      it.ClearOneShot();
+    }
+  }
+}
+
+
+// Remember the frame pointer of the frame being stepped into, so a later
+// break can tell whether execution actually stepped into a new function.
+void Debug::ActivateStepIn(StackFrame* frame) {
+  thread_local_.step_into_fp_ = frame->fp();
+}
+
+
+// Clear the step-into state (0 means no step-into frame is active).
+void Debug::ClearStepIn() {
+  thread_local_.step_into_fp_ = 0;
+}
+
+
+// Reset the step-next state back to "no stepping prepared".
+void Debug::ClearStepNext() {
+  thread_local_.last_step_action_ = StepNone;
+  thread_local_.last_statement_position_ = kNoPosition;
+  thread_local_.last_fp_ = 0;
+}
+
+
+// Make sure the shared function info is compiled before debug info is
+// attached to it.
+void Debug::EnsureCompiled(Handle<SharedFunctionInfo> shared) {
+  if (!shared->is_compiled()) {
+    // TODO(1240742): We need to handle stack-overflow exceptions
+    // here. It might make sense to add a boolean return value to
+    // EnsureCompiled to indicate whether or not the compilation
+    // succeeded.
+    CompileLazyShared(shared, KEEP_EXCEPTION);
+  }
+  ASSERT(shared->is_compiled());
+}
+
+
+// Create a DebugInfo object for the given function and register it in the
+// global debug info list.
+Handle<DebugInfo> Debug::AddDebugInfo(Handle<SharedFunctionInfo> shared) {
+  ASSERT(!HasDebugInfo(shared));
+
+  // Make sure that the function is compiled.
+  EnsureCompiled(shared);
+
+  // Create the debug info object.
+  Handle<DebugInfo> debug_info =
+      Handle<DebugInfo>::cast(Factory::NewStruct(DEBUG_INFO_TYPE));
+
+  // Get the function original code.
+  Handle<Code> code(shared->code());
+
+  // Debug info contains function, a copy of the original code and the
+  // executing code.
+  debug_info->set_shared(*shared);
+  debug_info->set_original_code(*Factory::CopyCode(code));
+  debug_info->set_code(*code);
+
+  // Link debug info to function.
+  shared->set_debug_info(*debug_info);
+
+  // Initially no active break points.
+  debug_info->set_break_points(
+      *Factory::NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
+
+  // Add debug info to the list.
+  DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
+  node->set_next(debug_info_list_);
+  debug_info_list_ = node;
+
+  // Now there is at least one break point.
+  has_break_points_ = true;
+
+  return debug_info;
+}
+
+
+// Remove a DebugInfo object from the global list and detach it from its
+// function. The object must be present in the list.
+void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
+  ASSERT(debug_info_list_ != NULL);
+  // Run through the debug info objects to find this one and remove it.
+  DebugInfoListNode* prev = NULL;
+  DebugInfoListNode* current = debug_info_list_;
+  while (current != NULL) {
+    if (*current->debug_info() == *debug_info) {
+      // Unlink from list. If prev is NULL we are looking at the first element.
+      if (prev == NULL) {
+        debug_info_list_ = current->next();
+      } else {
+        prev->set_next(current->next());
+      }
+      current->debug_info()->shared()->set_debug_info(Heap::undefined_value());
+      delete current;
+
+      // If there are no more debug info objects there are no more break
+      // points.
+      has_break_points_ = debug_info_list_ != NULL;
+
+      return;
+    }
+    // Move to next in list.
+    prev = current;
+    current = current->next();
+  }
+  // The debug info to remove must be in the list.
+  UNREACHABLE();
+}
+
+
+// Compute where execution should continue after a debug break, storing the
+// target in thread_local_.after_break_target_.
+void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
+  // Get the executing function in which the debug break occurred.
+  Handle<SharedFunctionInfo> shared =
+      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+  Handle<Code> code(debug_info->code());
+  Handle<Code> original_code(debug_info->original_code());
+#ifdef DEBUG
+  // Get the code which is actually executing.
+  Handle<Code> frame_code(frame->FindCode());
+  ASSERT(frame_code.is_identical_to(code));
+#endif
+
+  // Find the call address in the running code. This address holds the call to
+  // either a DebugBreakXXX or to the debug break return entry code if the
+  // break point is still active after processing the break point.
+  Address addr = frame->pc() - Assembler::kTargetAddrToReturnAddrDist;
+
+  // Check if the location is at JS exit.
+  bool at_js_exit = false;
+  RelocIterator it(debug_info->code());
+  while (!it.done()) {
+    if (is_js_return(it.rinfo()->rmode())) {
+      at_js_exit = it.rinfo()->pc() == addr - 1;
+    }
+    it.next();
+  }
+
+  // Handle the jump to continue execution after break point depending on the
+  // break location.
+  if (at_js_exit) {
+    // First check if the call in the code is still the debug break return
+    // entry code. If it is the break point is still active. If not the break
+    // point was removed during break point processing.
+    if (Assembler::target_address_at(addr) ==
+        debug_break_return_entry()->entry()) {
+      // Break point still active. Jump to the corresponding place in the
+      // original code.
+      addr += original_code->instruction_start() - code->instruction_start();
+    }
+
+    // Move one byte back to where the call instruction was placed.
+    thread_local_.after_break_target_ = addr - 1;
+  } else {
+    // Check if there still is a debug break call at the target address. If the
+    // break point has been removed it will have disappeared. If it has
+    // disappeared don't try to look in the original code as the running code
+    // will have the right address. This takes care of the case where the last
+    // break point is removed from the function and therefore no "original
+    // code" is available. If the debug break call is still there find the
+    // address in the original code.
+    if (IsDebugBreak(Assembler::target_address_at(addr))) {
+      // If the break point is still there find the call address which was
+      // overwritten in the original code by the call to DebugBreakXXX.
+
+      // Find the corresponding address in the original code.
+      addr += original_code->instruction_start() - code->instruction_start();
+    }
+
+    // Install jump to the call address in the original code. This will be the
+    // call which was overwritten by the call to DebugBreakXXX.
+    thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+  }
+}
+
+
+// Recover the Code object from a call target address by stepping back over
+// the Code object header.
+Code* Debug::GetCodeTarget(Address target) {
+  // Maybe this can be refactored with the stuff in ic-inl.h?
+  return Code::cast(HeapObject::FromAddress(target - Code::kHeaderSize));
+}
+
+
+// Check whether a global object is the global object of the debugger context.
+bool Debug::IsDebugGlobal(GlobalObject* global) {
+  return IsLoaded() && global == Debug::debug_context()->global();
+}
+
+
+// Static Debugger state.
+bool Debugger::debugger_active_ = false;  // Is a debugger attached/listening?
+bool Debugger::compiling_natives_ = false;  // Suppress events while true.
+DebugMessageThread* Debugger::message_thread_ = NULL;
+v8::DebugMessageHandler Debugger::debug_message_handler_ = NULL;
+void* Debugger::debug_message_handler_data_ = NULL;
+
+// Singly linked list of pending debug requests, guarded by the mutex.
+Mutex* Debugger::pending_requests_access_ = OS::CreateMutex();
+PendingRequest* Debugger::pending_requests_head_ = NULL;
+PendingRequest* Debugger::pending_requests_tail_ = NULL;
+
+
+// Queue a JSON debug request for the VM and request a debug break through
+// the stack guard so the VM picks it up.
+void Debugger::DebugRequest(const uint16_t* json_request, int length) {
+  // Create a pending request.
+  PendingRequest* pending_request = new PendingRequest(json_request, length);
+
+  // Append the pending request to the list while holding the access mutex.
+  Guard with(pending_requests_access_);
+  if (pending_requests_head_ == NULL) {
+    ASSERT(pending_requests_tail_ == NULL);
+    pending_requests_head_ = pending_request;
+  } else {
+    ASSERT(pending_requests_tail_ != NULL);
+    pending_requests_tail_->set_next(pending_request);
+  }
+  pending_requests_tail_ = pending_request;
+
+  // Use the stack guard to signal the debug request.
+  StackGuard::DebugBreak();
+}
+
+
+// Process all queued debug requests. Returns true when a plain break should
+// be performed -- either one of the requests asked for it, or the queue was
+// empty (break was issued some other way).
+bool Debugger::ProcessPendingRequests() {
+  HandleScope scope;
+
+  // Lock access to pending requests list while processing them. Typically
+  // there will be either zero or one pending request.
+  Guard with(pending_requests_access_);
+
+  EnterDebuggerContext enter;
+
+  // Get the current execution state.
+  bool caught_exception;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  if (caught_exception) {
+    return false;
+  }
+
+  // Process the list of pending requests.
+  bool plain_break = false;
+  PendingRequest* pending_request = pending_requests_head_;
+  if (pending_request == NULL) {
+    // If no pending commands plain break issued some other way (e.g. debugger
+    // statement).
+    plain_break = true;
+  }
+  while (pending_request != NULL) {
+    Handle<String> response = ProcessRequest(exec_state,
+                                             pending_request->request(),
+                                             false);
+    OnPendingRequestProcessed(response);
+
+    // Check whether one of the commands is a plain break request.
+    if (!plain_break) {
+      plain_break = IsPlainBreakRequest(pending_request->request());
+    }
+
+    // Move to the next item in the list.
+    PendingRequest* next = pending_request->next();
+    delete pending_request;
+    pending_request = next;
+  }
+
+  // List processed.
+  pending_requests_head_ = NULL;
+  pending_requests_tail_ = NULL;
+
+  return plain_break;
+}
+
+
+// Invoke a constructor function from debug.js (looked up by name in the
+// debugger global object) to build a JS object for a debug event.
+Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
+                                      int argc, Object*** argv,
+                                      bool* caught_exception) {
+  // Must already be running inside the debugger context.
+  ASSERT(Top::context() == *Debug::debug_context());
+
+  // Create the execution state object.
+  Handle<String> constructor_str = Factory::LookupSymbol(constructor_name);
+  Handle<Object> constructor(Top::global()->GetProperty(*constructor_str));
+  ASSERT(constructor->IsJSFunction());
+  if (!constructor->IsJSFunction()) {
+    // Defensive: report as a caught exception rather than crashing.
+    *caught_exception = true;
+    return Factory::undefined_value();
+  }
+  Handle<Object> js_object = Execution::TryCall(
+      Handle<JSFunction>::cast(constructor),
+      Handle<JSObject>(Debug::debug_context()->global()), argc, argv,
+      caught_exception);
+  return js_object;
+}
+
+
+// Create the JS execution state object for the current break id.
+Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
+  // Create the execution state object.
+  Handle<Object> break_id = Factory::NewNumberFromInt(Top::break_id());
+  const int argc = 1;
+  Object** argv[argc] = { break_id.location() };
+  return MakeJSObject(CStrVector("MakeExecutionState"),
+                      argc, argv, caught_exception);
+}
+
+
+// Create the JS event object for a break event.
+Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
+                                        Handle<Object> break_points_hit,
+                                        bool* caught_exception) {
+  // Create the new break event object.
+  const int argc = 2;
+  Object** argv[argc] = { exec_state.location(),
+                          break_points_hit.location() };
+  return MakeJSObject(CStrVector("MakeBreakEvent"),
+                      argc,
+                      argv,
+                      caught_exception);
+}
+
+
+// Create the JS event object for an exception event. The uncaught flag is
+// passed to JavaScript as a boolean value.
+Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
+                                            Handle<Object> exception,
+                                            bool uncaught,
+                                            bool* caught_exception) {
+  // Create the new exception event object.
+  const int argc = 3;
+  Object** argv[argc] = { exec_state.location(),
+                          exception.location(),
+                          uncaught ? Factory::true_value().location() :
+                                     Factory::false_value().location()};
+  return MakeJSObject(CStrVector("MakeExceptionEvent"),
+                      argc, argv, caught_exception);
+}
+
+
+// Create the JS event object for a new function event.
+Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
+                                              bool* caught_exception) {
+  // Create the new function event object.
+  const int argc = 1;
+  Object** argv[argc] = { function.location() };
+  return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
+                      argc, argv, caught_exception);
+}
+
+
+// Create the JS event object for a compile event.
+Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
+                                          Handle<Object> script_function,
+                                          bool* caught_exception) {
+  // Create the compile event object.
+  // NOTE(review): exec_state is not passed to MakeCompileEvent below; it is
+  // created only for its effect on *caught_exception -- confirm this is
+  // intentional.
+  Handle<Object> exec_state = MakeExecutionState(caught_exception);
+  Handle<Object> script_source(script->source());
+  Handle<Object> script_name(script->name());
+  const int argc = 3;
+  Object** argv[argc] = { script_source.location(),
+                          script_name.location(),
+                          script_function.location() };
+  return MakeJSObject(CStrVector("MakeCompileEvent"),
+                      argc,
+                      argv,
+                      caught_exception);
+}
+
+
+// Process a single JSON debug request by calling ProcessDebugRequest
+// (declared in debug.js) and return its String response. The stopped flag
+// tells the JavaScript side whether the VM is currently stopped.
+Handle<String> Debugger::ProcessRequest(Handle<Object> exec_state,
+                                        Handle<Object> request,
+                                        bool stopped) {
+  // Get the function ProcessDebugRequest (declared in debug.js).
+  Handle<JSFunction> process_debug_request =
+      Handle<JSFunction>(JSFunction::cast(
+          Debug::debug_context()->global()->GetProperty(
+              *Factory::LookupAsciiSymbol("ProcessDebugRequest"))));
+
+  // Call ProcessDebugRequest and expect a String result. ProcessDebugRequest
+  // will never throw an exception (see debug.js).
+  bool has_pending_exception = false;
+  const int argc = 3;
+  Object** argv[argc] = { exec_state.location(),
+                          request.location(),
+                          stopped ? Factory::true_value().location() :
+                                    Factory::false_value().location()};
+  Handle<Object> result = Execution::Call(process_debug_request,
+                                          Factory::undefined_value(),
+                                          argc, argv,
+                                          &has_pending_exception);
+  ASSERT(!has_pending_exception);
+  return Handle<String>::cast(result);
+}
+
+
+// Check whether a JSON debug request is a plain break request by calling
+// IsPlainBreakRequest (defined in debug.js) and testing its boolean result.
+bool Debugger::IsPlainBreakRequest(Handle<Object> request) {
+  // Get the function IsPlainBreakRequest (defined in debug.js).
+  Handle<JSFunction> is_plain_break_request =
+      Handle<JSFunction>(JSFunction::cast(
+          Debug::debug_context()->global()->GetProperty(
+              *Factory::LookupAsciiSymbol("IsPlainBreakRequest"))));
+
+  // Call IsPlainBreakRequest and expect a boolean result.
+  bool has_pending_exception = false;
+  const int argc = 1;
+  Object** argv[argc] = { request.location() };
+  Handle<Object> result = Execution::Call(is_plain_break_request,
+                                          Factory::undefined_value(),
+                                          argc, argv,
+                                          &has_pending_exception);
+  ASSERT(!has_pending_exception);
+  return *result == Heap::true_value();
+}
+
+
+// Report a thrown exception to the debugger if exception breaks are active.
+void Debugger::OnException(Handle<Object> exception, bool uncaught) {
+  HandleScope scope;
+
+  // Bail out based on state or if there is no listener for this event
+  if (Debug::InDebugger()) return;
+  if (!Debugger::EventActive(v8::Exception)) return;
+
+  // Bail out if exception breaks are not active
+  if (uncaught) {
+    // Uncaught exceptions are reported by either flags.
+    if (!(Debug::break_on_uncaught_exception() ||
+          Debug::break_on_exception())) return;
+  } else {
+    // Caught exceptions are only reported when break on exception is
+    // activated.
+    if (!Debug::break_on_exception()) return;
+  }
+
+  // Enter the debugger.
+  SaveBreakFrame save;
+  EnterDebuggerContext enter;
+
+  // Clear all current stepping setup.
+  Debug::ClearStepping();
+  // Create the event data object.
+  bool caught_exception = false;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  Handle<Object> event_data;
+  if (!caught_exception) {
+    event_data = MakeExceptionEvent(exec_state, exception, uncaught,
+                                    &caught_exception);
+  }
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event
+  ProcessDebugEvent(v8::Exception, event_data);
+  // Return to continue execution from where the exception was thrown.
+}
+
+
+// Report a debug break (break point hit) to the debugger. Expects to be
+// called with the debugger context already entered.
+void Debugger::OnDebugBreak(Handle<Object> break_points_hit) {
+  HandleScope scope;
+
+  // Bail out if there is no listener for this event
+  if (!Debugger::EventActive(v8::Break)) return;
+
+  // Debugger must be entered in advance.
+  ASSERT(Top::context() == *Debug::debug_context());
+
+  // Create the event data object.
+  bool caught_exception = false;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  Handle<Object> event_data;
+  if (!caught_exception) {
+    event_data = MakeBreakEvent(exec_state, break_points_hit,
+                                &caught_exception);
+  }
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event
+  ProcessDebugEvent(v8::Break, event_data);
+}
+
+
+// Report that compilation of a script is about to start.
+void Debugger::OnBeforeCompile(Handle<Script> script) {
+  HandleScope scope;
+
+  // Bail out based on state or if there is no listener for this event
+  if (Debug::InDebugger()) return;
+  if (compiling_natives()) return;
+  if (!EventActive(v8::BeforeCompile)) return;
+
+  // Enter the debugger.
+  Debug::Load();
+  SaveBreakFrame save;
+  EnterDebuggerContext enter;
+
+  // Create the event data object.
+  bool caught_exception = false;
+  Handle<Object> event_data = MakeCompileEvent(script,
+                                               Factory::undefined_value(),
+                                               &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event
+  ProcessDebugEvent(v8::BeforeCompile, event_data);
+}
+
+
+// Handle debugger actions when a new script is compiled: first update any
+// registered script break points, then report the AfterCompile event.
+void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
+  // No compile events while compiling natives.
+  if (compiling_natives()) return;
+
+  // No more to do if not debugging.
+  if (!debugger_active()) return;
+
+  HandleScope scope;
+  EnterDebuggerContext enter;
+  bool caught_exception = false;
+
+  // If debugging there might be script break points registered for this
+  // script. Make sure that these break points are set.
+
+  // Get the function UpdateScriptBreakPoints (defined in debug-delay.js).
+  Handle<Object> update_script_break_points =
+      Handle<Object>(Debug::debug_context()->global()->GetProperty(
+          *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));
+  if (!update_script_break_points->IsJSFunction()) {
+    return;
+  }
+
+  // Wrap the script object in a proper JS object before passing it
+  // to JavaScript.
+  Handle<JSValue> wrapper = GetScriptWrapper(script);
+
+  // Call UpdateScriptBreakPoints expect no exceptions.
+  const int argc = 1;
+  Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
+  Handle<Object> result = Execution::TryCall(
+      Handle<JSFunction>::cast(update_script_break_points),
+      Top::builtins(), argc, argv,
+      &caught_exception);
+  if (caught_exception) {
+    return;
+  }
+
+  // Bail out based on state or if there is no listener for this event
+  if (Debug::InDebugger()) return;
+  if (!Debugger::EventActive(v8::AfterCompile)) return;
+
+  // Create the compile state object.
+  Handle<Object> event_data = MakeCompileEvent(script,
+                                               Factory::undefined_value(),
+                                               &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event
+  ProcessDebugEvent(v8::AfterCompile, event_data);
+}
+
+
+void Debugger::OnNewFunction(Handle<JSFunction> function) {
+  // NOTE(review): this early return deliberately disables new-function
+  // events; everything below is currently dead code -- confirm before
+  // removing or re-enabling.
+  return;
+  HandleScope scope;
+
+  // Bail out based on state or if there is no listener for this event
+  if (Debug::InDebugger()) return;
+  if (compiling_natives()) return;
+  if (!Debugger::EventActive(v8::NewFunction)) return;
+
+  // Enter the debugger.
+  SaveBreakFrame save;
+  EnterDebuggerContext enter;
+
+  // Create the event object.
+  bool caught_exception = false;
+  Handle<Object> event_data = MakeNewFunctionEvent(function, &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event.
+  ProcessDebugEvent(v8::NewFunction, event_data);
+}
+
+
+// Report the response of a processed pending request as a debug event.
+void Debugger::OnPendingRequestProcessed(Handle<Object> event_data) {
+  // Process debug event.
+  ProcessDebugEvent(v8::PendingRequestProcessed, event_data);
+}
+
+
+// Dispatch a debug event first to the message thread and then to all
+// registered debug event listeners (both C callbacks and JS functions).
+void Debugger::ProcessDebugEvent(v8::DebugEvent event,
+                                 Handle<Object> event_data) {
+  // Create the execution state.
+  bool caught_exception = false;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  if (caught_exception) {
+    return;
+  }
+
+  // First notify the builtin debugger.
+  if (message_thread_ != NULL) {
+    message_thread_->DebugEvent(event, exec_state, event_data);
+  }
+
+  // Notify registered debug event listeners. The list can contain both C and
+  // JavaScript functions.
+  v8::NeanderArray listeners(Factory::debug_event_listeners());
+  int length = listeners.length();
+  for (int i = 0; i < length; i++) {
+    if (listeners.get(i)->IsUndefined()) continue;  // Skip deleted ones.
+    v8::NeanderObject listener(JSObject::cast(listeners.get(i)));
+    Handle<Object> callback_data(listener.get(1));
+    if (listener.get(0)->IsProxy()) {
+      // C debug event listener.
+      Handle<Proxy> callback_obj(Proxy::cast(listener.get(0)));
+      v8::DebugEventCallback callback =
+          FUNCTION_CAST<v8::DebugEventCallback>(callback_obj->proxy());
+      callback(event,
+               v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
+               v8::Utils::ToLocal(Handle<JSObject>::cast(event_data)),
+               v8::Utils::ToLocal(callback_data));
+    } else {
+      // JavaScript debug event listener.
+      ASSERT(listener.get(0)->IsJSFunction());
+      Handle<JSFunction> fun(JSFunction::cast(listener.get(0)));
+
+      // Invoke the JavaScript debug event listener. The result is not used.
+      const int argc = 4;
+      Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
+                              exec_state.location(),
+                              event_data.location(),
+                              callback_data.location() };
+      Handle<Object> result = Execution::TryCall(fun, Top::global(),
+                                                 argc, argv, &caught_exception);
+      if (caught_exception) {
+        // Silently ignore exceptions from debug event listeners.
+      }
+    }
+  }
+}
+
+
+// Register the C message handler and lazily start the message thread the
+// first time a handler is installed.
+void Debugger::SetMessageHandler(v8::DebugMessageHandler handler, void* data) {
+  debug_message_handler_ = handler;
+  debug_message_handler_data_ = data;
+  if (!message_thread_) {
+    message_thread_ = new DebugMessageThread();
+    message_thread_->Start();
+  }
+  UpdateActiveDebugger();
+}
+
+
+// Forward a message to the registered debug message handler, if any.
+void Debugger::SendMessage(Vector<uint16_t> message) {
+  if (debug_message_handler_ != NULL) {
+    debug_message_handler_(message.start(), message.length(),
+                           debug_message_handler_data_);
+  }
+}
+
+
+// Forward a command from the debugger client to the message thread.
+void Debugger::ProcessCommand(Vector<const uint16_t> command) {
+  if (message_thread_ != NULL) {
+    message_thread_->ProcessCommand(
+        Vector<uint16_t>(const_cast<uint16_t *>(command.start()),
+                         command.length()));
+  }
+}
+
+
+// Recompute whether the debugger is active: active when the message thread
+// has a message handler installed, or when at least one debug event listener
+// is registered.
+void Debugger::UpdateActiveDebugger() {
+  v8::NeanderArray listeners(Factory::debug_event_listeners());
+  int length = listeners.length();
+  bool active_listener = false;
+  for (int i = 0; i < length && !active_listener; i++) {
+    active_listener = !listeners.get(i)->IsUndefined();
+  }
+
+  set_debugger_active((Debugger::message_thread_ != NULL &&
+                       Debugger::debug_message_handler_ != NULL) ||
+                      active_listener);
+  // Release the message thread if it is blocked waiting for a now inactive
+  // debugger.
+  if (!debugger_active() && message_thread_) {
+    message_thread_->OnDebuggerInactive();
+  }
+}
+
+
+// Create the semaphores used for the handshake between the V8 thread and
+// the message thread. All start unsignalled.
+DebugMessageThread::DebugMessageThread()
+    : host_running_(true),
+      event_json_(Vector<uint16_t>::empty()),
+      command_(Vector<uint16_t>::empty()),
+      result_(Vector<uint16_t>::empty()) {
+  command_received_ = OS::CreateSemaphore(0);
+  debug_event_ = OS::CreateSemaphore(0);
+  debug_command_ = OS::CreateSemaphore(0);
+  debug_result_ = OS::CreateSemaphore(0);
+}
+
+
+// NOTE(review): the semaphores and vectors allocated by this object are not
+// released here -- confirm whether the thread is expected to live for the
+// whole process lifetime.
+DebugMessageThread::~DebugMessageThread() {
+}
+
+
+// Store a copy of the JSON text for the current debug event.
+void DebugMessageThread::SetEventJSON(Vector<uint16_t> event_json) {
+  SetVector(&event_json_, event_json);
+}
+
+
+// Convert a debug event object to its JSON protocol text by calling
+// toJSONProtocol on it, and store the result (empty on failure).
+void DebugMessageThread::SetEventJSONFromEvent(Handle<Object> event_data) {
+  v8::HandleScope scope;
+  // Call toJSONProtocol on the debug event object.
+  v8::Local<v8::Object> api_event_data =
+      v8::Utils::ToLocal(Handle<JSObject>::cast(event_data));
+  v8::Local<v8::String> fun_name = v8::String::New("toJSONProtocol");
+  v8::Local<v8::Function> fun =
+      v8::Function::Cast(*api_event_data->Get(fun_name));
+  v8::TryCatch try_catch;
+  v8::Local<v8::Value> json_result = *fun->Call(api_event_data, 0, NULL);
+  v8::Local<v8::String> json_result_string;
+  if (!try_catch.HasCaught()) {
+    if (!json_result->IsUndefined()) {
+      json_result_string = json_result->ToString();
+      if (FLAG_trace_debug_json) {
+        PrintLn(json_result_string);
+      }
+      v8::String::Value val(json_result_string);
+      Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
+                           json_result_string->Length());
+      SetEventJSON(str);
+    } else {
+      // No JSON representation available -- store the empty string.
+      SetEventJSON(Vector<uint16_t>::empty());
+    }
+  } else {
+    // Conversion threw -- report the exception and store the empty string.
+    PrintLn(try_catch.Exception());
+    SetEventJSON(Vector<uint16_t>::empty());
+  }
+}
+
+
+// Store a copy of the current command text.
+void DebugMessageThread::SetCommand(Vector<uint16_t> command) {
+  SetVector(&command_, command);
+}
+
+
+// Store an ASCII result string by widening it to a two byte string first.
+void DebugMessageThread::SetResult(const char* result) {
+  const int len = strlen(result);
+  uint16_t* widened = NewArray<uint16_t>(len);
+  for (int i = 0; i < len; i++) {
+    widened[i] = result[i];
+  }
+  SetResult(Vector<uint16_t>(widened, len));
+  DeleteArray(widened);
+}
+
+
+// Store a copy of the current result text.
+void DebugMessageThread::SetResult(Vector<uint16_t> result) {
+  SetVector(&result_, result);
+}
+
+
+// Replace *vector with a copy of value, disposing of the old backing store.
+// NOTE(review): assumes value does not alias *vector, since the old store is
+// disposed before value is cloned -- confirm callers never pass the stored
+// vector itself.
+void DebugMessageThread::SetVector(Vector<uint16_t>* vector,
+                                   Vector<uint16_t> value) {
+  // Deallocate current result.
+  if (!vector->is_empty()) {
+    vector->Dispose();
+    *vector = Vector<uint16_t>::empty();
+  }
+
+  // Allocate a copy of the new result.
+  if (!value.is_empty()) {
+    *vector = value.Clone();
+  }
+}
+
+
+// Compare a two byte string to a null terminated ASCII string.
+bool DebugMessageThread::TwoByteEqualsAscii(Vector<uint16_t> two_byte,
+                                            const char* ascii) {
+  int i = 0;
+  for (; i < two_byte.length(); i++) {
+    const char expected = ascii[i];
+    // Hitting '\0' here means the ASCII string is shorter than the two byte
+    // string, so they cannot be equal.
+    if (expected == '\0' ||
+        two_byte[i] != static_cast<uint16_t>(expected)) {
+      return false;
+    }
+  }
+  // Equal only if the ASCII string ends exactly where the two byte one does.
+  return ascii[i] == '\0';
+}
+
+
+// Deliver a command result and wake up the V8 thread waiting for it in
+// HandleCommand.
+void DebugMessageThread::CommandResult(Vector<uint16_t> result) {
+  SetResult(result);
+  debug_result_->Signal();
+}
+
+
+// Main loop of the message thread: wait for either a command from the
+// debugger client or a debug event from the VM and forward it.
+void DebugMessageThread::Run() {
+  // Process commands and debug events.
+  while (true) {
+    // Wait for either a client command (index 0) or a debug event (index 1).
+    Semaphore* sems[2];
+    sems[0] = command_received_;
+    sems[1] = debug_event_;
+    int signal = Select(2, sems).WaitSingle();
+    if (signal == 0) {
+      if (command_.length() > 0) {
+        HandleCommand();
+        if (result_.length() > 0) {
+          Debugger::SendMessage(result_);
+          SetResult(Vector<uint16_t>::empty());
+        }
+      }
+    } else {
+      // Send the current event as JSON to the debugger.
+      Debugger::SendMessage(event_json_);
+    }
+  }
+}
+
+
+// This method is called by the V8 thread whenever a debug event occurs in
+// the vm. For interactive events it blocks in a command loop until the
+// debug session lets the VM continue running.
+void DebugMessageThread::DebugEvent(v8::DebugEvent event,
+                                    Handle<Object> exec_state,
+                                    Handle<Object> event_data) {
+  if (!Debug::Load()) {
+    return;
+  }
+
+  // Process the individual events.
+  bool interactive = false;
+  switch (event) {
+    case v8::Break:
+      interactive = true;  // Break event is always interactive
+      break;
+    case v8::Exception:
+      interactive = true;  // Exception event is always interactive
+      break;
+    case v8::BeforeCompile:
+      break;
+    case v8::AfterCompile:
+      break;
+    case v8::NewFunction:
+      break;
+    case v8::PendingRequestProcessed: {
+      // For a processed pending request the event_data is the JSON response
+      // string.
+      v8::Handle<v8::String> str =
+          v8::Handle<v8::String>(
+              Utils::ToLocal(Handle<String>::cast(event_data)));
+      v8::String::Value val(str);
+      SetEventJSON(Vector<uint16_t>(reinterpret_cast<uint16_t*>(*val),
+                                    str->Length()));
+      debug_event_->Signal();
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+
+  // Done if not interactive.
+  if (!interactive) return;
+
+  // Get the DebugCommandProcessor.
+  v8::Local<v8::Object> api_exec_state =
+      v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
+  v8::Local<v8::String> fun_name =
+      v8::String::New("debugCommandProcessor");
+  v8::Local<v8::Function> fun =
+      v8::Function::Cast(*api_exec_state->Get(fun_name));
+  v8::TryCatch try_catch;
+  v8::Local<v8::Object> cmd_processor =
+      v8::Object::Cast(*fun->Call(api_exec_state, 0, NULL));
+  if (try_catch.HasCaught()) {
+    PrintLn(try_catch.Exception());
+    return;
+  }
+
+  // Notify the debug session thread that a debug event has occurred.
+  host_running_ = false;
+  event_ = event;
+  SetEventJSONFromEvent(event_data);
+  debug_event_->Signal();
+
+  // Wait for commands from the debug session.
+  while (true) {
+    debug_command_->Wait();
+    ASSERT(!host_running_);
+    if (!Debugger::debugger_active()) {
+      // Debugger went away while we were stopped: resume the VM.
+      host_running_ = true;
+      return;
+    }
+
+    // Invoke the JavaScript to convert the debug command line to a JSON
+    // request, invoke the JSON request and convert the JSON response to a
+    // text representation.
+    v8::Local<v8::String> fun_name;
+    v8::Local<v8::Function> fun;
+    v8::Local<v8::Value> args[1];
+    v8::TryCatch try_catch;
+    fun_name = v8::String::New("processDebugCommand");
+    fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
+    args[0] = v8::String::New(reinterpret_cast<uint16_t*>(command_.start()),
+                              command_.length());
+    v8::Local<v8::Value> result_val = fun->Call(cmd_processor, 1, args);
+
+    // Get the result of the command.
+    v8::Local<v8::String> result_string;
+    bool running = false;
+    if (!try_catch.HasCaught()) {
+      // Get the result as an object.
+      v8::Local<v8::Object> result = v8::Object::Cast(*result_val);
+
+      // Log the JSON request/response.
+      if (FLAG_trace_debug_json) {
+        PrintLn(result->Get(v8::String::New("request")));
+        PrintLn(result->Get(v8::String::New("response")));
+      }
+
+      // Get the running state.
+      running = result->Get(v8::String::New("running"))->ToBoolean()->Value();
+
+      // Get result text.
+      v8::Local<v8::Value> text_result =
+          result->Get(v8::String::New("response"));
+      if (!text_result->IsUndefined()) {
+        result_string = text_result->ToString();
+      } else {
+        result_string = v8::String::New("");
+      }
+    } else {
+      // In case of failure the result text is the exception text.
+      result_string = try_catch.Exception()->ToString();
+    }
+
+    // Convert text result to C string.
+    v8::String::Value val(result_string);
+    Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
+                         result_string->Length());
+
+    // Change the prompt if VM is running after this command.
+    if (running) {
+      host_running_ = true;
+    }
+
+    // Return the result.
+    CommandResult(str);
+
+    // Return from debug event processing if VM should be running.
+    if (running) {
+      return;
+    }
+  }
+}
+
+
+// Handle a command from the debugger client on the message thread.
+void DebugMessageThread::HandleCommand() {
+  // Handle the command.
+  if (TwoByteEqualsAscii(command_, "b") ||
+      TwoByteEqualsAscii(command_, "break")) {
+    v8::Debug::DebugBreak();
+    SetResult("request queued");
+  } else if (host_running_) {
+    // Send the JSON command to the running VM.
+    Debugger::DebugRequest(command_.start(), command_.length());
+    SetResult("request queued");
+  } else {
+    // VM is stopped in DebugEvent: hand the command over to it and wait for
+    // the result to be delivered through CommandResult.
+    debug_command_->Signal();
+    debug_result_->Wait();
+  }
+}
+
+
+// Called by the debugger client thread to submit a command for processing.
+void DebugMessageThread::ProcessCommand(Vector<uint16_t> command) {
+  SetCommand(command);
+  command_received_->Signal();
+}
+
+
+// Called when the debugger becomes inactive. If the V8 thread is blocked in
+// DebugEvent waiting for a command, release it with an empty result.
+void DebugMessageThread::OnDebuggerInactive() {
+  if (!host_running_) {
+    debug_command_->Signal();
+    SetResult("");
+  }
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DEBUG_H_
+#define V8_DEBUG_H_
+
+#include "../public/debug.h"
+#include "assembler.h"
+#include "code-stubs.h"
+#include "factory.h"
+#include "platform.h"
+#include "string-stream.h"
+
+
+namespace v8 { namespace internal {
+
+// Step actions. NOTE: These values are in macros.py as well.
+enum StepAction {
+  StepNone = -1,  // Stepping not prepared.
+  StepOut = 0,    // Step out of the current function.
+  StepNext = 1,   // Step to the next statement in the current function.
+  StepIn = 2,     // Step into new functions invoked or the next statement
+                  // in the current function.
+  StepMin = 3,    // Perform a minimum step in the current function.
+  StepInMin = 4   // Step into new functions invoked or perform a minimum step
+                  // in the current function.
+};
+
+
+// Type of exception break. NOTE: These values are in macros.py as well.
+// Used by Debug::ChangeBreakOnException to select which break-on-exception
+// mode to enable or disable.
+enum ExceptionBreakType {
+  BreakException = 0,
+  BreakUncaughtException = 1
+};
+
+
+// Type of break location, used to select which break locations a
+// BreakLocationIterator visits. NOTE: These values are in macros.py as well.
+// (The original comment said "Type of exception break", copy-pasted from the
+// enum above.)
+enum BreakLocatorType {
+  ALL_BREAK_LOCATIONS = 0,
+  SOURCE_BREAK_LOCATIONS = 1
+};
+
+
+// Class for iterating through the break points in a function and changing
+// them.
+class BreakLocationIterator {
+ public:
+  explicit BreakLocationIterator(Handle<DebugInfo> debug_info,
+                                 BreakLocatorType type);
+  virtual ~BreakLocationIterator();
+
+  void Next();
+  void Next(int count);
+  void FindBreakLocationFromAddress(Address pc);
+  void FindBreakLocationFromPosition(int position);
+  void Reset();
+  bool Done() const;
+  void SetBreakPoint(Handle<Object> break_point_object);
+  void ClearBreakPoint(Handle<Object> break_point_object);
+  void SetOneShot();
+  void ClearOneShot();
+  void PrepareStepIn();
+  bool IsExit() const;
+  bool HasBreakPoint();
+  bool IsDebugBreak();
+  Object* BreakPointObjects();
+
+  // Offset of the current location from the start of the generated code.
+  inline int code_position() { return pc() - debug_info_->code()->entry(); }
+  inline int break_point() { return break_point_; }
+  inline int position() { return position_; }
+  inline int statement_position() { return statement_position_; }
+  inline Address pc() { return reloc_iterator_->rinfo()->pc(); }
+  inline Code* code() { return debug_info_->code(); }
+  inline RelocInfo* rinfo() { return reloc_iterator_->rinfo(); }
+  inline RelocMode rmode() const { return reloc_iterator_->rinfo()->rmode(); }
+  // Accessors for the relocation info of the original (unpatched) code.
+  inline RelocInfo* original_rinfo() {
+    return reloc_iterator_original_->rinfo();
+  }
+  inline RelocMode original_rmode() const {
+    return reloc_iterator_original_->rinfo()->rmode();
+  }
+
+ protected:
+  bool RinfoDone() const;
+  void RinfoNext();
+
+  BreakLocatorType type_;
+  int break_point_;
+  int position_;
+  int statement_position_;
+  Handle<DebugInfo> debug_info_;
+  // Two parallel iterators: one over the current (possibly patched) code and
+  // one over the original code.
+  RelocIterator* reloc_iterator_;
+  RelocIterator* reloc_iterator_original_;
+
+ private:
+  void SetDebugBreak();
+  void ClearDebugBreak();
+
+  DISALLOW_EVIL_CONSTRUCTORS(BreakLocationIterator);
+};
+
+
+// Linked list holding debug info objects. The debug info objects are kept as
+// weak handles to avoid a debug info object to keep a function alive.
+class DebugInfoListNode {
+ public:
+  explicit DebugInfoListNode(DebugInfo* debug_info);
+  virtual ~DebugInfoListNode();
+
+  DebugInfoListNode* next() { return next_; }
+  void set_next(DebugInfoListNode* next) { next_ = next; }
+  Handle<DebugInfo> debug_info() { return debug_info_; }
+
+ private:
+  // Global (weak) handle to the debug info object.
+  Handle<DebugInfo> debug_info_;
+
+  // Next pointer for linked list.
+  DebugInfoListNode* next_;
+};
+
+
+// This class contains the debugger support. The main purpose is to handle
+// setting break points in the code.
+//
+// This class controls the debug info for all functions which currently have
+// active breakpoints in them. This debug info is held in the heap root object
+// debug_info which is a FixedArray. Each entry in this list is of class
+// DebugInfo.
+class Debug {
+ public:
+  static void Setup(bool create_heap_objects);
+  static bool Load();
+  static void Unload();
+  static bool IsLoaded() { return !debug_context_.is_null(); }
+  static bool InDebugger() { return Top::is_break(); }
+  static void Iterate(ObjectVisitor* v);
+
+  // Runtime entry invoked when a break point is hit.
+  static Object* Break(Arguments args);
+  static void SetBreakPoint(Handle<SharedFunctionInfo> shared,
+                            int source_position,
+                            Handle<Object> break_point_object);
+  static void ClearBreakPoint(Handle<Object> break_point_object);
+  static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+  static void FloodHandlerWithOneShot();
+  static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+  static void PrepareStep(StepAction step_action, int step_count);
+  static void ClearStepping();
+  static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
+                               JavaScriptFrame* frame);
+  static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
+  static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
+  static bool IsDebugBreak(Address addr);
+
+  // Check whether a code stub with the specified major key is a possible break
+  // point location.
+  static bool IsSourceBreakStub(Code* code);
+  static bool IsBreakStub(Code* code);
+
+  // Find the builtin to use for invoking the debug break.
+  static Handle<Code> FindDebugBreak(RelocInfo* rinfo);
+
+  static Handle<Object> GetSourceBreakLocations(
+      Handle<SharedFunctionInfo> shared);
+  static Code* GetCodeTarget(Address target);
+
+  // Getter for the debug_context.
+  inline static Handle<Context> debug_context() { return debug_context_; }
+
+  // Check whether a global object is the debug global object.
+  static bool IsDebugGlobal(GlobalObject* global);
+
+  // Fast check to see if any break points are active.
+  inline static bool has_break_points() { return has_break_points_; }
+
+  static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
+  static Address step_in_fp() { return thread_local_.step_into_fp_; }
+  static Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
+
+  // Getters for the current exception break state.
+  static bool break_on_exception() { return break_on_exception_; }
+  static bool break_on_uncaught_exception() {
+    return break_on_uncaught_exception_;
+  }
+
+  enum AddressId {
+    k_after_break_target_address,
+    k_debug_break_return_address,
+    k_register_address
+  };
+
+  // Support for setting the address to jump to when returning from break point.
+  static Address* after_break_target_address() {
+    return reinterpret_cast<Address*>(&thread_local_.after_break_target_);
+  }
+
+  // Support for saving/restoring registers when handling debug break calls.
+  static Address* register_address(int r) {
+    return reinterpret_cast<Address*>(&registers_[r]);
+  }
+
+  // Address of the debug break return entry code.
+  static Code* debug_break_return_entry() { return debug_break_return_entry_; }
+
+  // Support for getting the address of the debug break on return code.
+  static Address* debug_break_return_address() {
+    return reinterpret_cast<Address*>(&debug_break_return_);
+  }
+
+  static const int kEstimatedNofDebugInfoEntries = 16;
+  static const int kEstimatedNofBreakPointsInFunction = 16;
+
+  static void HandleWeakDebugInfo(v8::Persistent<v8::Object> obj, void* data);
+
+  friend class Debugger;
+  friend Handle<FixedArray> GetDebuggedFunctions();  // Found in test-debug.cc
+
+  // Threading support.
+  static char* ArchiveDebug(char* to);
+  static char* RestoreDebug(char* from);
+  static int ArchiveSpacePerThread();
+
+  // Code generation assumptions.
+  static const int kIa32CallInstructionLength = 5;
+  static const int kIa32JSReturnSequenceLength = 6;
+
+ private:
+  static bool CompileDebuggerScript(int index);
+  static void ClearOneShot();
+  static void ActivateStepIn(StackFrame* frame);
+  static void ClearStepIn();
+  static void ClearStepNext();
+  static void EnsureCompiled(Handle<SharedFunctionInfo> shared);
+  static Handle<DebugInfo> AddDebugInfo(Handle<SharedFunctionInfo> shared);
+  static void RemoveDebugInfo(Handle<DebugInfo> debug_info);
+  static void SetAfterBreakTarget(JavaScriptFrame* frame);
+  static Handle<Object> CheckBreakPoints(Handle<Object> break_point);
+  static bool CheckBreakPoint(Handle<Object> break_point_object);
+
+  // Global handle to debug context where all the debugger JavaScript code is
+  // loaded.
+  static Handle<Context> debug_context_;
+
+  // Boolean state indicating whether any break points are set.
+  static bool has_break_points_;
+  static DebugInfoListNode* debug_info_list_;
+
+  static bool break_on_exception_;
+  static bool break_on_uncaught_exception_;
+
+  // Per-thread:
+  class ThreadLocal {
+   public:
+    // Step action for last step performed.
+    StepAction last_step_action_;
+
+    // Source statement position from last step next action.
+    int last_statement_position_;
+
+    // Number of steps left to perform before debug event.
+    int step_count_;
+
+    // Frame pointer from last step next action.
+    Address last_fp_;
+
+    // Frame pointer for frame from which step in was performed.
+    Address step_into_fp_;
+
+    // Storage location for jump when exiting debug break calls.
+    Address after_break_target_;
+  };
+
+  // Storage location for registers when handling debug break calls.
+  static JSCallerSavedBuffer registers_;
+  static ThreadLocal thread_local_;
+  static void ThreadInit();
+
+  // Code object for debug break return entry code.
+  static Code* debug_break_return_entry_;
+
+  // Code to call for handling debug break on return.
+  static Code* debug_break_return_;
+
+  DISALLOW_EVIL_CONSTRUCTORS(Debug);
+};
+
+
+class PendingRequest;
+class DebugMessageThread;
+
+
+// Central debugger interface: creation of debug event objects, dispatch of
+// debug events (break, exception, compile, new function) and handling of
+// JSON debugger requests queued while V8 is running.
+class Debugger {
+ public:
+  static void DebugRequest(const uint16_t* json_request, int length);
+  static bool ProcessPendingRequests();
+
+  // Constructors for the JavaScript objects describing debug events.
+  static Handle<Object> MakeJSObject(Vector<const char> constructor_name,
+                                     int argc, Object*** argv,
+                                     bool* caught_exception);
+  static Handle<Object> MakeExecutionState(bool* caught_exception);
+  static Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
+                                       Handle<Object> break_points_hit,
+                                       bool* caught_exception);
+  static Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
+                                           Handle<Object> exception,
+                                           bool uncaught,
+                                           bool* caught_exception);
+  static Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
+                                             bool* caught_exception);
+  static Handle<Object> MakeCompileEvent(Handle<Script> script,
+                                         Handle<Object> script_function,
+                                         bool* caught_exception);
+  static Handle<String> ProcessRequest(Handle<Object> exec_state,
+                                       Handle<Object> request,
+                                       bool stopped);
+  static bool IsPlainBreakRequest(Handle<Object> request);
+
+  // Debug event notifications from the VM.
+  static void OnDebugBreak(Handle<Object> break_points_hit);
+  static void OnException(Handle<Object> exception, bool uncaught);
+  static void OnBeforeCompile(Handle<Script> script);
+  static void OnAfterCompile(Handle<Script> script,
+                             Handle<JSFunction> fun);
+  static void OnNewFunction(Handle<JSFunction> fun);
+  static void OnPendingRequestProcessed(Handle<Object> event_data);
+  static void ProcessDebugEvent(v8::DebugEvent event,
+                                Handle<Object> event_data);
+  static void SetMessageHandler(v8::DebugMessageHandler handler, void* data);
+  static void SendMessage(Vector<uint16_t> message);
+  static void ProcessCommand(Vector<const uint16_t> command);
+  static void UpdateActiveDebugger();
+  inline static bool EventActive(v8::DebugEvent event) {
+    // Currently argument event is not used.
+    return !Debugger::compiling_natives_ && Debugger::debugger_active_;
+  }
+
+  static void set_debugger_active(bool debugger_active) {
+    Debugger::debugger_active_ = debugger_active;
+  }
+  static bool debugger_active() { return Debugger::debugger_active_; }
+  static void set_compiling_natives(bool compiling_natives) {
+    Debugger::compiling_natives_ = compiling_natives;
+  }
+  static bool compiling_natives() { return Debugger::compiling_natives_; }
+
+ private:
+  static bool debugger_active_;  // Is there an active debugger?
+  static bool compiling_natives_;  // Are we compiling natives?
+  static DebugMessageThread* message_thread_;
+  static v8::DebugMessageHandler debug_message_handler_;
+  static void* debug_message_handler_data_;
+
+  // Head and tail of linked list of pending requests. The list is protected
+  // by a mutex as it can be updated/read from different threads.
+  static Mutex* pending_requests_access_;
+  static PendingRequest* pending_requests_head_;
+  static PendingRequest* pending_requests_tail_;
+};
+
+
+// Linked list of pending requests issued by debugger while V8 was running.
+class PendingRequest {
+ public:
+  PendingRequest(const uint16_t* json_request, int length);
+  ~PendingRequest();
+
+  PendingRequest* next() { return next_; }
+  void set_next(PendingRequest* next) { next_ = next; }
+  Handle<String> request();
+
+ private:
+  Vector<uint16_t> json_request_;  // Request string.
+  PendingRequest* next_;  // Next pointer for linked list.
+};
+
+
+// Thread handling communication between the debugging host and the V8
+// thread. Coordination is done through the four semaphores below.
+class DebugMessageThread: public Thread {
+ public:
+  DebugMessageThread();
+  virtual ~DebugMessageThread();
+
+  void DebugEvent(v8::DebugEvent,
+                  Handle<Object> exec_state,
+                  Handle<Object> event_data);
+  void SetEventJSON(Vector<uint16_t> event_json);
+  void SetEventJSONFromEvent(Handle<Object> event_data);
+  void SetCommand(Vector<uint16_t> command);
+  void SetResult(const char* result);
+  void SetResult(Vector<uint16_t> result);
+  void CommandResult(Vector<uint16_t> result);
+
+  // Queue a command and signal the message thread (see debug.cc).
+  void ProcessCommand(Vector<uint16_t> command);
+
+  void OnDebuggerInactive();
+
+ protected:
+  void Run();
+  void HandleCommand();
+
+  bool host_running_;  // Is the debugging host running or stopped?
+  v8::DebugEvent event_;  // Active event.
+  Semaphore* command_received_;  // Signal from the telnet connection.
+  Semaphore* debug_event_;  // Signal from the V8 thread.
+  Semaphore* debug_command_;  // Signal to the V8 thread.
+  Semaphore* debug_result_;  // Signal from the V8 thread.
+
+ private:
+  void SetVector(Vector<uint16_t>* vector, Vector<uint16_t> value);
+  bool TwoByteEqualsAscii(Vector<uint16_t> two_byte, const char* ascii);
+
+  Vector<uint16_t> event_json_;  // Active event JSON.
+  Vector<uint16_t> command_;  // Current command.
+  Vector<uint16_t> result_;  // Result of processing command.
+  DISALLOW_EVIL_CONSTRUCTORS(DebugMessageThread);
+};
+
+
+// Helper class to support saving/restoring the top break frame id.
+class SaveBreakFrame {
+ public:
+  SaveBreakFrame() : set_(!it_.done()) {
+    if (set_) {
+      // Store the previous break id and frame id. (Original comment had a
+      // typo: "break is".)
+      break_id_ = Top::break_id();
+      break_frame_id_ = Top::break_frame_id();
+
+      // Create the new break info.
+      Top::new_break(it_.frame()->id());
+    }
+  }
+
+  ~SaveBreakFrame() {
+    if (set_) {
+      // Restore the previous break state.
+      Top::set_break(break_frame_id_, break_id_);
+    }
+  }
+
+ private:
+  JavaScriptFrameIterator it_;
+  const bool set_;  // Was the break actually set?
+  StackFrame::Id break_frame_id_;  // Previous break frame id.
+  int break_id_;  // Previous break id.
+};
+
+
+// Scope object that switches the current (and security) context to the
+// debugger context for its lifetime.
+class EnterDebuggerContext BASE_EMBEDDED {
+ public:
+  // Enter the debugger by storing the previous top context and setting the
+  // current top context to the debugger context.
+  EnterDebuggerContext() {
+    // NOTE the member variable save which saves the previous context before
+    // this change.
+    Top::set_context(*Debug::debug_context());
+    Top::set_security_context(*Debug::debug_context());
+  }
+
+ private:
+  // Captures the previous context; presumably SaveContext's destructor
+  // restores it when this scope ends -- confirm in top.h.
+  SaveContext save;
+};
+
+
+// Debug_Address encapsulates the Address pointers used in generating debug
+// code.
+class Debug_Address {
+ public:
+  Debug_Address(Debug::AddressId id, int reg = 0)
+      : id_(id), reg_(reg) {
+    // A register number is only meaningful for the register-address id.
+    ASSERT(reg == 0 || id == Debug::k_register_address);
+  }
+
+  // Named constructors for the three supported address kinds.
+  static Debug_Address AfterBreakTarget() {
+    return Debug_Address(Debug::k_after_break_target_address);
+  }
+
+  static Debug_Address DebugBreakReturn() {
+    return Debug_Address(Debug::k_debug_break_return_address);
+  }
+
+  static Debug_Address Register(int reg) {
+    return Debug_Address(Debug::k_register_address, reg);
+  }
+
+  // Resolve this descriptor to the actual address inside the Debug class.
+  Address address() const {
+    if (id_ == Debug::k_after_break_target_address) {
+      return reinterpret_cast<Address>(Debug::after_break_target_address());
+    }
+    if (id_ == Debug::k_debug_break_return_address) {
+      return reinterpret_cast<Address>(Debug::debug_break_return_address());
+    }
+    if (id_ == Debug::k_register_address) {
+      return reinterpret_cast<Address>(Debug::register_address(reg_));
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+ private:
+  Debug::AddressId id_;  // Which address is described.
+  int reg_;              // Register number (only for k_register_address).
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_DEBUG_H_
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#ifndef WIN32
+#include <stdint.h>
+#endif
+
+#include "v8.h"
+
+#include "disasm.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace assembler { namespace arm {
+
+namespace v8i = v8::internal;
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter,
+          char* out_buffer, const int out_buffer_size)
+      : converter_(converter),
+        out_buffer_(out_buffer),
+        out_buffer_size_(out_buffer_size),
+        out_buffer_pos_(0) {
+    ASSERT(out_buffer_size_ > 0);
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(byte* instruction);
+
+ private:
+  const disasm::NameConverter& converter_;
+  char* out_buffer_;           // Destination for the disassembled text.
+  const int out_buffer_size_;  // Capacity of out_buffer_, in chars.
+  int out_buffer_pos_;         // Next write position in out_buffer_.
+
+  // Low-level append helpers; always keep out_buffer_ zero-terminated.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common instruction fields.
+  void PrintRegister(int reg);
+  void PrintCondition(Instr* instr);
+  void PrintShiftRm(Instr* instr);
+  void PrintShiftImm(Instr* instr);
+
+  // Format-string driven printing; see Format/FormatOption below.
+  int FormatOption(Instr* instr, const char* option);
+  void Format(Instr* instr, const char* format);
+  void Unknown(Instr* instr);
+
+  // One decode routine per instruction type.
+  void DecodeType0(Instr* instr);
+  void DecodeType1(Instr* instr);
+  void DecodeType2(Instr* instr);
+  void DecodeType3(Instr* instr);
+  void DecodeType4(Instr* instr);
+  void DecodeType5(Instr* instr);
+  void DecodeType6(Instr* instr);
+  void DecodeType7(Instr* instr);
+};
+
+
+// Append a single character to the output buffer; the caller is responsible
+// for re-establishing zero termination afterwards.
+void Decoder::PrintChar(const char ch) {
+  ASSERT(out_buffer_pos_ < out_buffer_size_);
+  out_buffer_[out_buffer_pos_] = ch;
+  out_buffer_pos_++;
+}
+
+
+// Append a zero-terminated string to the output buffer, truncating if needed
+// so there is always room for the terminating '\0'.
+void Decoder::Print(const char* str) {
+  for (char ch = *str; ch != 0; ch = *++str) {
+    if (out_buffer_pos_ >= out_buffer_size_ - 1) break;
+    PrintChar(ch);
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Mnemonic suffixes for the ARM condition codes, indexed by the value of the
+// instruction's condition field. The empty string at index 14 is the
+// "always" condition (nothing printed); index 15 is the reserved encoding.
+static const char* cond_names[16] = {
+  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+  "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
+};
+
+
+// Print the condition guarding the instruction.
+void Decoder::PrintCondition(Instr* instr) {
+  Print(cond_names[instr->ConditionField()]);
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  Print(converter_.NameOfCPURegister(reg));
+}
+
+
+// Mnemonics for the four shift kinds, indexed by the instruction's shift
+// field (LSL, LSR, ASR, ROR).
+static const char* shift_names[4] = {
+  "lsl", "lsr", "asr", "ror"
+};
+
+
+// Print the register shift operands for the instruction. Generally used for
+// data processing instructions.
+void Decoder::PrintShiftRm(Instr* instr) {
+  Shift shift = instr->ShiftField();
+  int shift_amount = instr->ShiftAmountField();
+  int rm = instr->RmField();
+
+  PrintRegister(rm);
+
+  // "lsl #0" means no shift at all -- nothing more to print.
+  if ((shift == LSL) && (shift_amount == 0)) return;
+
+  if (instr->RegShiftField() != 0) {
+    // Shift amount taken from a register.
+    int rs = instr->RsField();
+    out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         out_buffer_size_ - out_buffer_pos_,
+                                         ", %s ", shift_names[shift]);
+    PrintRegister(rs);
+    return;
+  }
+
+  // Shift amount is an immediate.
+  if ((shift == ROR) && (shift_amount == 0)) {
+    Print(", RRX");
+    return;
+  }
+  if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+    // Encoded amount 0 means a shift by 32 for lsr/asr.
+    shift_amount = 32;
+  }
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       out_buffer_size_ - out_buffer_pos_,
+                                       ", %s #%d",
+                                       shift_names[shift], shift_amount);
+}
+
+
+// Print the immediate operand for the instruction: an 8-bit value rotated
+// right by twice the rotate field. Generally used for data processing
+// instructions.
+void Decoder::PrintShiftImm(Instr* instr) {
+  int rotate = instr->RotateField() * 2;
+  int immed8 = instr->Immed8Field();
+  // Guard the rotate == 0 case explicitly: the general expression would
+  // shift by 32 - 0 = 32 bits, which is undefined behavior in C++ for a
+  // 32-bit int. Rotating by zero just yields the immediate itself.
+  int imm = (rotate == 0)
+      ? immed8
+      : (immed8 >> rotate) | (immed8 << (32 - rotate));
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       out_buffer_size_ - out_buffer_pos_,
+                                       "#%d", imm);
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instr* instr, const char* format) {
+  switch (format[0]) {
+    case 'a': {  // 'a: accumulate multiplies
+      if (instr->Bit(21) == 0) {
+        Print("ul");
+      } else {
+        Print("la");
+      }
+      return 1;
+      break;
+    }
+    case 'b': {  // 'b: byte loads or stores
+      if (instr->HasB()) {
+        Print("b");
+      }
+      return 1;
+      break;
+    }
+    case 'c': {  // 'cond: conditional execution
+      ASSERT((format[1] == 'o') && (format[2] == 'n') && (format[3] == 'd'));
+      PrintCondition(instr);
+      return 4;
+      break;
+    }
+    case 'h': {  // 'h: halfword operation for extra loads and stores
+      if (instr->HasH()) {
+        Print("h");
+      } else {
+        Print("b");
+      }
+      return 1;
+      break;
+    }
+    case 'i': {  // 'imm: immediate value for data processing instructions
+      ASSERT((format[1] == 'm') && (format[2] == 'm'));
+      PrintShiftImm(instr);
+      return 3;
+      break;
+    }
+    case 'l': {  // 'l: branch and link
+      if (instr->HasLink()) {
+        Print("l");
+      }
+      return 1;
+      break;
+    }
+    case 'm': {  // 'memop: load/store mnemonic, or 'msg: simulator break text
+      if (format[1] == 'e') {
+        ASSERT((format[2] == 'm') && (format[3] == 'o') && (format[4] == 'p'));
+        if (instr->HasL()) {
+          Print("ldr");
+        } else {
+          Print("str");
+        }
+        return 5;
+      } else {
+        ASSERT(format[1] == 's' && format[2] == 'g');
+        byte* str =
+            reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             out_buffer_size_ - out_buffer_pos_,
+                                             "%s", converter_.NameInCode(str));
+        return 3;
+      }
+      break;
+    }
+    case 'o': {  // 'off12 / 'off8: load and store offsets
+      ASSERT(format[1] == 'f' && format[2] == 'f');
+      if (format[3] == '1') {
+        // 'off12: 12-bit offset for load and store instructions
+        ASSERT(format[4] == '2');
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             out_buffer_size_ - out_buffer_pos_,
+                                             "%d", instr->Offset12Field());
+        return 5;
+      } else {
+        // 'off8: 8-bit offset for extra load and store instructions
+        ASSERT(format[3] == '8');
+        int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             out_buffer_size_ - out_buffer_pos_,
+                                             "%d", offs8);
+        return 4;
+      }
+      break;
+    }
+    case 'p': {  // 'pu: P and U bits for load and store instructions
+      ASSERT(format[1] == 'u');
+      switch (instr->PUField()) {
+        case 0: {
+          Print("da");
+          break;
+        }
+        case 1: {
+          Print("ia");
+          break;
+        }
+        case 2: {
+          Print("db");
+          break;
+        }
+        case 3: {
+          Print("ib");
+          break;
+        }
+        default: {
+          UNREACHABLE();
+          break;
+        }
+      }
+      return 2;
+      break;
+    }
+    case 'r': {  // 'rn, 'rd, 'rs, 'rm or 'rlist: register operands
+      if (format[1] == 'n') {  // 'rn: Rn register
+        int reg = instr->RnField();
+        PrintRegister(reg);
+        return 2;
+      } else if (format[1] == 'd') {  // 'rd: Rd register
+        int reg = instr->RdField();
+        PrintRegister(reg);
+        return 2;
+      } else if (format[1] == 's') {  // 'rs: Rs register
+        int reg = instr->RsField();
+        PrintRegister(reg);
+        return 2;
+      } else if (format[1] == 'm') {  // 'rm: Rm register
+        int reg = instr->RmField();
+        PrintRegister(reg);
+        return 2;
+      } else if (format[1] == 'l') {
+        // 'rlist: register list for load and store multiple instructions
+        ASSERT(format[2] == 'i' && format[3] == 's' && format[4] == 't');
+        int rlist = instr->RlistField();
+        int reg = 0;
+        Print("{");
+        while (rlist != 0) {
+          if ((rlist & 1) != 0) {
+            PrintRegister(reg);
+            if ((rlist >> 1) != 0) {
+              Print(", ");
+            }
+          }
+          reg++;
+          rlist >>= 1;
+        }
+        Print("}");
+        return 5;
+      } else {
+        UNREACHABLE();
+      }
+      UNREACHABLE();
+      return -1;
+      break;
+    }
+    case 's': {  // 'shift_rm, 'swi, 'sign or plain 's
+      if (format[1] == 'h') {  // 'shift_rm: register shift operands
+        ASSERT(format[2] == 'i' && format[3] == 'f' && format[4] == 't'
+               && format[5] == '_' && format[6] == 'r' && format[7] == 'm');
+        PrintShiftRm(instr);
+        return 8;
+      } else if (format[1] == 'w') {  // 'swi: software interrupt code
+        ASSERT(format[2] == 'i');
+        SoftwareInterruptCodes swi = instr->SwiField();
+        switch (swi) {
+          case call_rt_r5:
+            Print("call_rt_r5");
+            break;
+          case call_rt_r2:
+            Print("call_rt_r2");
+            break;
+          case break_point:
+            Print("break_point");
+            break;
+          default:
+            out_buffer_pos_ += v8i::OS::SNPrintF(
+                out_buffer_ + out_buffer_pos_,
+                out_buffer_size_ - out_buffer_pos_,
+                "%d",
+                swi);
+            break;
+        }
+        return 3;
+      } else if (format[1] == 'i') {  // 'sign: signed extra loads and stores
+        ASSERT(format[2] == 'g' && format[3] == 'n');
+        if (instr->HasSign()) {
+          Print("s");
+        }
+        return 4;
+        break;
+      } else {  // 's: S field of data processing instructions
+        if (instr->HasS()) {
+          Print("s");
+        }
+        return 1;
+      }
+      break;
+    }
+    case 't': {  // 'target: target of branch instructions
+      ASSERT(format[1] == 'a' && format[2] == 'r' && format[3] == 'g'
+             && format[4] == 'e' && format[5] == 't');
+      // +8 accounts for the distance between the branch and the visible pc --
+      // presumably the ARM pipeline offset; confirm against the assembler.
+      int off = (instr->SImmed24Field() << 2) + 8;
+      out_buffer_pos_ += v8i::OS::SNPrintF(
+          out_buffer_ + out_buffer_pos_,
+          out_buffer_size_ - out_buffer_pos_,
+          "%+d -> %s",
+          off,
+          converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+      return 6;
+      break;
+    }
+    case 'u': {  // 'u: signed or unsigned multiplies
+      if (instr->Bit(22) == 0) {
+        Print("u");
+      } else {
+        Print("s");
+      }
+      return 1;
+      break;
+    }
+    case 'w': {  // 'w: W field of load and store instructions
+      if (instr->HasW()) {
+        Print("!");
+      }
+      return 1;
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instr* instr, const char* format) {
+  for (char ch = *format++;
+       (ch != 0) && (out_buffer_pos_ < (out_buffer_size_ - 1));
+       ch = *format++) {
+    if (ch == '\'') {
+      // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = ch;
+    }
+  }
+  ASSERT(out_buffer_pos_ < out_buffer_size_);
+  out_buffer_[out_buffer_pos_] = '\0';
+}
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" instead of the instruction bits.
+void Decoder::Unknown(Instr* instr) {
+  Format(instr, "unknown");
+}
+
+
+// Decode instructions of type 0: multiply and extra load/store encodings, or
+// data processing operations with a (possibly shifted) register operand.
+void Decoder::DecodeType0(Instr* instr) {
+  if (instr->IsSpecialType0()) {
+    // multiply instruction or extra loads and stores
+    if (instr->Bits(7, 4) == 9) {
+      if (instr->Bit(24) == 0) {
+        // multiply instructions
+        if (instr->Bit(23) == 0) {
+          if (instr->Bit(21) == 0) {
+            Format(instr, "mul'cond's 'rd, 'rm, 'rs");
+          } else {
+            Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
+          }
+        } else {
+          // long multiply; 'u and 'a expand to the u/s and ul/la variants
+          Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
+        }
+      } else {
+        Unknown(instr);  // not used by V8
+      }
+    } else {
+      // extra load/store instructions
+      switch (instr->PUField()) {
+        case 0: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+          }
+          break;
+        }
+        case 1: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+          }
+          break;
+        }
+        case 2: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+          }
+          break;
+        }
+        case 3: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+          }
+          break;
+        }
+        default: {
+          // The PU field is a 2-bit field.
+          UNREACHABLE();
+          break;
+        }
+      }
+      return;
+    }
+  } else {
+    // data processing, register operand ('shift_rm)
+    switch (instr->OpcodeField()) {
+      case AND: {
+        Format(instr, "and'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case EOR: {
+        Format(instr, "eor'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case SUB: {
+        Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case RSB: {
+        Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case ADD: {
+        Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case ADC: {
+        Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case SBC: {
+        Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case RSC: {
+        Format(instr, "rsc'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case TST: {
+        if (instr->HasS()) {
+          Format(instr, "tst'cond 'rn, 'shift_rm");
+        } else {
+          Unknown(instr);  // not used by V8
+          return;
+        }
+        break;
+      }
+      case TEQ: {
+        if (instr->HasS()) {
+          Format(instr, "teq'cond 'rn, 'shift_rm");
+        } else {
+          Unknown(instr);  // not used by V8
+          return;
+        }
+        break;
+      }
+      case CMP: {
+        if (instr->HasS()) {
+          Format(instr, "cmp'cond 'rn, 'shift_rm");
+        } else {
+          Unknown(instr);  // not used by V8
+          return;
+        }
+        break;
+      }
+      case CMN: {
+        if (instr->HasS()) {
+          Format(instr, "cmn'cond 'rn, 'shift_rm");
+        } else {
+          Unknown(instr);  // not used by V8
+          return;
+        }
+        break;
+      }
+      case ORR: {
+        Format(instr, "orr'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case MOV: {
+        Format(instr, "mov'cond's 'rd, 'shift_rm");
+        break;
+      }
+      case BIC: {
+        Format(instr, "bic'cond's 'rd, 'rn, 'shift_rm");
+        break;
+      }
+      case MVN: {
+        Format(instr, "mvn'cond's 'rd, 'shift_rm");
+        break;
+      }
+      default: {
+        // The Opcode field is a 4-bit field.
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+}
+
+
+// Decode instructions of type 1: data processing operations with an
+// immediate operand ('imm).
+void Decoder::DecodeType1(Instr* instr) {
+  switch (instr->OpcodeField()) {
+    case AND: {
+      Format(instr, "and'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case EOR: {
+      Format(instr, "eor'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case SUB: {
+      Format(instr, "sub'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case RSB: {
+      Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case ADD: {
+      Format(instr, "add'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case ADC: {
+      Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case SBC: {
+      Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case RSC: {
+      Format(instr, "rsc'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case TST: {
+      if (instr->HasS()) {
+        Format(instr, "tst'cond 'rn, 'imm");
+      } else {
+        Unknown(instr);  // not used by V8
+        return;
+      }
+      break;
+    }
+    case TEQ: {
+      if (instr->HasS()) {
+        Format(instr, "teq'cond 'rn, 'imm");
+      } else {
+        Unknown(instr);  // not used by V8
+        return;
+      }
+      break;
+    }
+    case CMP: {
+      if (instr->HasS()) {
+        Format(instr, "cmp'cond 'rn, 'imm");
+      } else {
+        Unknown(instr);  // not used by V8
+        return;
+      }
+      break;
+    }
+    case CMN: {
+      if (instr->HasS()) {
+        Format(instr, "cmn'cond 'rn, 'imm");
+      } else {
+        Unknown(instr);  // not used by V8
+        return;
+      }
+      break;
+    }
+    case ORR: {
+      Format(instr, "orr'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case MOV: {
+      Format(instr, "mov'cond's 'rd, 'imm");
+      break;
+    }
+    case BIC: {
+      Format(instr, "bic'cond's 'rd, 'rn, 'imm");
+      break;
+    }
+    case MVN: {
+      Format(instr, "mvn'cond's 'rd, 'imm");
+      break;
+    }
+    default: {
+      // The Opcode field is a 4-bit field.
+      UNREACHABLE();
+      break;
+    }
+  }
+}
+
+
// Decode a single load/store with a 12-bit immediate offset
// (instruction type 2). The 2-bit PU field selects the addressing
// mode: bit 1 (P) pre/post indexing, bit 0 (U) offset sign.
void Decoder::DecodeType2(Instr* instr) {
  switch (instr->PUField()) {
    case 0: {
      // Post-indexed, subtract offset. Writeback (W) is unexpected here.
      if (instr->HasW()) {
        Unknown(instr);  // not used in V8
        return;
      }
      Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
      break;
    }
    case 1: {
      // Post-indexed, add offset.
      if (instr->HasW()) {
        Unknown(instr);  // not used in V8
        return;
      }
      Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
      break;
    }
    case 2: {
      // Pre-indexed, subtract offset; 'w prints "!" when W is set.
      Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
      break;
    }
    case 3: {
      // Pre-indexed, add offset.
      Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
      break;
    }
    default: {
      // The PU field is a 2-bit field.
      UNREACHABLE();
      break;
    }
  }
}
+
+
// Decode a single load/store with a shifted-register offset
// (instruction type 3). Same PU addressing-mode split as DecodeType2,
// but with 'shift_rm instead of an immediate offset.
void Decoder::DecodeType3(Instr* instr) {
  switch (instr->PUField()) {
    case 0: {
      // Post-indexed addressing must not request writeback.
      ASSERT(!instr->HasW());
      Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
      break;
    }
    case 1: {
      ASSERT(!instr->HasW());
      Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
      break;
    }
    case 2: {
      Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
      break;
    }
    case 3: {
      Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
      break;
    }
    default: {
      // The PU field is a 2-bit field.
      UNREACHABLE();
      break;
    }
  }
}
+
+
+void Decoder::DecodeType4(Instr* instr) {
+ ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported.
+ if (instr->HasL()) {
+ Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+ } else {
+ Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+ }
+}
+
+
// Decode a branch / branch-with-link (instruction type 5); 'l prints
// the "l" suffix when the link bit is set, 'target the branch target.
void Decoder::DecodeType5(Instr* instr) {
  Format(instr, "b'l'cond 'target");
}
+
+
// Instruction type 6 is the coprocessor load/store space; none of it
// is supported, so everything lands in Unknown().
void Decoder::DecodeType6(Instr* instr) {
  // Coprocessor instructions currently not supported.
  Unknown(instr);
}
+
+
+void Decoder::DecodeType7(Instr* instr) {
+ if (instr->Bit(24) == 1) {
+ Format(instr, "swi'cond 'swi");
+ } else {
+ // Coprocessor instructions currently not supported.
+ Unknown(instr);
+ }
+}
+
+
// Disassemble the instruction at *instr_ptr into the output buffer.
// Returns the instruction length in bytes; ARM instructions are fixed
// width, so this is always Instr::kInstrSize.
int Decoder::InstructionDecode(byte* instr_ptr) {
  Instr* instr = Instr::At(instr_ptr);
  // A special (otherwise unused) condition value marks embedded
  // break pseudo-instructions carrying a message payload.
  if (instr->ConditionField() == special_condition) {
    Format(instr, "break 'msg");
    return Instr::kInstrSize;
  }
  // Dispatch on the 3-bit primary instruction-class field.
  switch (instr->TypeField()) {
    case 0: {
      DecodeType0(instr);
      break;
    }
    case 1: {
      DecodeType1(instr);
      break;
    }
    case 2: {
      DecodeType2(instr);
      break;
    }
    case 3: {
      DecodeType3(instr);
      break;
    }
    case 4: {
      DecodeType4(instr);
      break;
    }
    case 5: {
      DecodeType5(instr);
      break;
    }
    case 6: {
      DecodeType6(instr);
      break;
    }
    case 7: {
      DecodeType7(instr);
      break;
    }
    default: {
      // The type field is 3-bits in the ARM encoding.
      UNREACHABLE();
      break;
    }
  }
  return Instr::kInstrSize;
}
+
+
+} } // namespace assembler::arm
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
// ARM register names indexed by register number (0..15); r10..r12 use
// their procedure-call-standard aliases sl/fp/ip.
static const char* reg_names[16] = {
  "r0", "r1", "r2" , "r3" , "r4" , "r5" , "r6" , "r7" ,
  "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc",
};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ static char tmp_buffer[32];
+#ifdef WIN32
+ _snprintf(tmp_buffer, sizeof tmp_buffer, "%p", addr);
+#else
+ snprintf(tmp_buffer, sizeof tmp_buffer, "%p", addr);
+#endif
+ return tmp_buffer;
+}
+
+
// Constants are rendered exactly like code addresses.
const char* NameConverter::NameOfConstant(byte* addr) const {
  return NameOfAddress(addr);
}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ const char* result;
+ if ((0 <= reg) && (reg < 16)) {
+ result = reg_names[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+
// ARM has no XMM registers; any request is a programming error.
const char* NameConverter::NameOfXMMRegister(int reg) const {
  UNREACHABLE();  // ARM does not have any XMM registers
  return "noxmmreg";
}
+
+
// Returns the name embedded at 'addr' inside generated code.
const char* NameConverter::NameInCode(byte* addr) const {
  // The default name converter is called for unknown code. So we will not try
  // to access any memory.
  return "";
}
+
+
+//------------------------------------------------------------------------------
+
// Shared fallback converter used by the default constructor below.
static NameConverter defaultConverter;

// Disassembler that symbolizes through the default converter.
Disassembler::Disassembler() : converter_(defaultConverter) {}


// Disassembler that symbolizes addresses and constants through the
// caller-supplied converter; 'converter' must outlive this object.
Disassembler::Disassembler(const NameConverter& converter)
    : converter_(converter) {}


Disassembler::~Disassembler() {}
+
+
// Disassemble one instruction into 'buffer' (NUL-terminated) and
// return its length in bytes. Delegates to a fresh ARM Decoder so each
// call is independent.
int Disassembler::InstructionDecode(char* buffer, const int buffer_size,
                                    byte* instruction) {
  assembler::arm::Decoder d(converter_, buffer, buffer_size);
  return d.InstructionDecode(instruction);
}
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ Disassembler d;
+ for (byte* pc = begin; pc < end;) {
+ char buffer[128];
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+ fprintf(f, "%p", prev_pc);
+ fprintf(f, " ");
+
+ for (byte* bp = prev_pc; bp < pc; bp++) {
+ fprintf(f, "%02x", *bp);
+ }
+ for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+ fprintf(f, " ");
+ }
+ fprintf(f, " %s\n", buffer);
+ }
+}
+
+
+} // namespace disasm
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#ifndef WIN32
+#include <stdint.h>
+#endif
+#include "disasm.h"
+
+namespace disasm {
+
// Windows is missing the stdint.h header file
#ifdef WIN32
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef int int32_t;
#endif


// Both macros abort via assert: UNIMPLEMENTED marks instructions this
// disassembler does not handle yet, UNREACHABLE marks paths that are
// logically impossible given the encoding.
#define UNIMPLEMENTED() \
  assert(false)

#define UNREACHABLE() \
  assert(false)

// For two-operand instructions: which of the register / modrm operands
// is printed first (i.e. which is the destination in the text).
enum OperandOrder {
  UNSET_OP_ORDER = 0,
  REG_OPER_OP_ORDER,
  OPER_REG_OP_ORDER
};
+
+
+//------------------------------------------------------------------
+// Tables
+//------------------------------------------------------------------
// One entry of an opcode table: opcode byte, mnemonic, operand order.
struct ByteMnemonic {
  int b;  // -1 terminates, otherwise must be in range (0..255)
  const char* mnem;
  OperandOrder op_order_;
};
+
+
// Opcodes taking a register and a modrm operand.
static ByteMnemonic two_operands_instr[] = {
  {0x03, "add", REG_OPER_OP_ORDER},
  {0x21, "and", OPER_REG_OP_ORDER},
  {0x23, "and", REG_OPER_OP_ORDER},
  {0x3B, "cmp", REG_OPER_OP_ORDER},
  {0x8D, "lea", REG_OPER_OP_ORDER},
  {0x09, "or", OPER_REG_OP_ORDER},
  {0x0B, "or", REG_OPER_OP_ORDER},
  {0x1B, "sbb", REG_OPER_OP_ORDER},
  {0x29, "sub", OPER_REG_OP_ORDER},
  {0x2B, "sub", REG_OPER_OP_ORDER},
  {0x85, "test", REG_OPER_OP_ORDER},
  {0x31, "xor", OPER_REG_OP_ORDER},
  {0x33, "xor", REG_OPER_OP_ORDER},
  {0x8A, "mov_b", REG_OPER_OP_ORDER},
  {0x8B, "mov", REG_OPER_OP_ORDER},
  {-1, "", UNSET_OP_ORDER}
};


// Opcodes taking no operands at all.
static ByteMnemonic zero_operands_instr[] = {
  {0xC3, "ret", UNSET_OP_ORDER},
  {0xC9, "leave", UNSET_OP_ORDER},
  {0x90, "nop", UNSET_OP_ORDER},
  {0xF4, "hlt", UNSET_OP_ORDER},
  {0xCC, "int3", UNSET_OP_ORDER},
  {0x60, "pushad", UNSET_OP_ORDER},
  {0x61, "popad", UNSET_OP_ORDER},
  {0x9C, "pushfd", UNSET_OP_ORDER},
  {0x9D, "popfd", UNSET_OP_ORDER},
  {0x9E, "sahf", UNSET_OP_ORDER},
  {0x99, "cdq", UNSET_OP_ORDER},
  {0x9B, "fwait", UNSET_OP_ORDER},
  {-1, "", UNSET_OP_ORDER}
};


// Near call/jump with a 32-bit relative displacement.
static ByteMnemonic call_jump_instr[] = {
  {0xE8, "call", UNSET_OP_ORDER},
  {0xE9, "jmp", UNSET_OP_ORDER},
  {-1, "", UNSET_OP_ORDER}
};


// eax-form arithmetic with a 32-bit immediate.
static ByteMnemonic short_immediate_instr[] = {
  {0x05, "add", UNSET_OP_ORDER},
  {0x0D, "or", UNSET_OP_ORDER},
  {0x15, "adc", UNSET_OP_ORDER},
  {0x25, "and", UNSET_OP_ORDER},
  {0x2D, "sub", UNSET_OP_ORDER},
  {0x35, "xor", UNSET_OP_ORDER},
  {0x3D, "cmp", UNSET_OP_ORDER},
  {-1, "", UNSET_OP_ORDER}
};


// Conditional-jump mnemonics, indexed by the low nibble of the opcode
// (the x86 condition code).
static const char* jump_conditional_mnem[] = {
  /*0*/ "jo", "jno", "jc", "jnc",
  /*4*/ "jz", "jnz", "jna", "ja",
  /*8*/ "js", "jns", "jpe", "jpo",
  /*12*/ "jl", "jnl", "jng", "jg"
};
+
+
// Coarse classification used to pick a decoding strategy per opcode.
enum InstructionType {
  NO_INSTR,
  ZERO_OPERANDS_INSTR,
  TWO_OPERANDS_INSTR,
  JUMP_CONDITIONAL_SHORT_INSTR,
  REGISTER_INSTR,
  MOVE_REG_INSTR,
  CALL_JUMP_INSTR,
  SHORT_IMMEDIATE_INSTR
};


// Per-opcode decoding information stored in the 256-entry table.
struct InstructionDesc {
  const char* mnem;
  InstructionType type;
  OperandOrder op_order_;
};
+
+
// A 256-entry lookup table mapping the first opcode byte to its
// decoding information. Built once by the constructor.
class InstructionTable {
 public:
  InstructionTable();
  const InstructionDesc& Get(byte x) const { return instructions_[x]; }

 private:
  InstructionDesc instructions_[256];
  void Clear();
  void Init();
  // Enter a -1-terminated mnemonic array under instruction type 'type'.
  void CopyTable(ByteMnemonic bm[], InstructionType type);
  // Assign one mnemonic/type to the inclusive opcode range [start, end].
  void SetTableRange(InstructionType type,
                     byte start,
                     byte end,
                     const char* mnem);
  // Register the short conditional jumps 0x70..0x7F.
  void AddJumpConditionalShort();
};
+
+
// Build the opcode table: wipe it, then fill in all known opcodes.
InstructionTable::InstructionTable() {
  Clear();
  Init();
}
+
+
+void InstructionTable::Clear() {
+ for (int i = 0; i < 256; i++) {
+ instructions_[i].mnem = "";
+ instructions_[i].type = NO_INSTR;
+ instructions_[i].op_order_ = UNSET_OP_ORDER;
+ }
+}
+
+
// Populate the table from the static opcode arrays plus the opcode
// ranges that encode a register in their low three bits.
void InstructionTable::Init() {
  CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
  CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
  CopyTable(call_jump_instr, CALL_JUMP_INSTR);
  CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
  AddJumpConditionalShort();
  SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
  SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
  SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
  SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
}
+
+
+void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
+ for (int i = 0; bm[i].b >= 0; i++) {
+ InstructionDesc* id = &instructions_[bm[i].b];
+ id->mnem = bm[i].mnem;
+ id->op_order_ = bm[i].op_order_;
+ assert(id->type == NO_INSTR); // Information already entered
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::SetTableRange(InstructionType type,
+ byte start,
+ byte end,
+ const char* mnem) {
+ for (byte b = start; b <= end; b++) {
+ InstructionDesc* id = &instructions_[b];
+ assert(id->type == NO_INSTR); // Information already entered
+ id->mnem = mnem;
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::AddJumpConditionalShort() {
+ for (byte b = 0x70; b <= 0x7F; b++) {
+ InstructionDesc* id = &instructions_[b];
+ assert(id->type == NO_INSTR); // Information already entered
+ id->mnem = jump_conditional_mnem[b & 0x0F];
+ id->type = JUMP_CONDITIONAL_SHORT_INSTR;
+ }
+}
+
+
// Process-wide opcode lookup table, built once during static init.
static InstructionTable instruction_table;
+
+
+// The IA32 disassembler implementation.
+class DisassemblerIA32 {
+ public:
+ DisassemblerIA32(const NameConverter& converter,
+ bool abort_on_unimplemented = true)
+ : converter_(converter),
+ tmp_buffer_pos_(0),
+ abort_on_unimplemented_(abort_on_unimplemented) {
+ tmp_buffer_[0] = '\0';
+ }
+
+ virtual ~DisassemblerIA32() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(char* buffer, const int buffer_size, byte* instruction);
+
+ private:
+ const NameConverter& converter_;
+ char tmp_buffer_[128];
+ unsigned int tmp_buffer_pos_;
+ bool abort_on_unimplemented_;
+
+
+ enum {
+ eax = 0,
+ ecx = 1,
+ edx = 2,
+ ebx = 3,
+ esp = 4,
+ ebp = 5,
+ esi = 6,
+ edi = 7
+ };
+
+
+ const char* NameOfCPURegister(int reg) const {
+ return converter_.NameOfCPURegister(reg);
+ }
+
+
+ const char* NameOfXMMRegister(int reg) const {
+ return converter_.NameOfXMMRegister(reg);
+ }
+
+
+ const char* NameOfAddress(byte* addr) const {
+ return converter_.NameOfAddress(addr);
+ }
+
+
+ // Disassembler helper functions.
+ static void get_modrm(byte data, int* mod, int* regop, int* rm) {
+ *mod = (data >> 6) & 3;
+ *regop = (data & 0x38) >> 3;
+ *rm = data & 7;
+ }
+
+
+ static void get_sib(byte data, int* scale, int* index, int* base) {
+ *scale = (data >> 6) & 3;
+ *index = (data >> 3) & 7;
+ *base = data & 7;
+ }
+
+
+ int PrintRightOperand(byte* modrmp);
+ int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
+ int PrintImmediateOp(byte* data);
+ int F7Instruction(byte* data);
+ int D1D3C1Instruction(byte* data);
+ int JumpShort(byte* data);
+ int JumpConditional(byte* data, const char* comment);
+ int JumpConditionalShort(byte* data, const char* comment);
+ int FPUInstruction(byte* data);
+ void AppendToBuffer(const char* format, ...);
+
+
+ void UnimplementedInstruction() {
+ if (abort_on_unimplemented_) {
+ UNIMPLEMENTED();
+ } else {
+ AppendToBuffer("'Unimplemented Instruction'");
+ }
+ }
+};
+
+
+void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
+ char* str = tmp_buffer_ + tmp_buffer_pos_;
+ int size = (sizeof tmp_buffer_) - tmp_buffer_pos_;
+ va_list args;
+ va_start(args, format);
+#ifdef WIN32
+ int result = _vsnprintf(str, size, format, args);
+#else
+ int result = vsnprintf(str, size, format, args);
+#endif
+ va_end(args);
+ tmp_buffer_pos_ += result;
+}
+
+
+// Returns number of bytes used including the current *modrmp.
+// Writes instruction's right operand to 'tmp_buffer_'.
+int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
+ int mod, regop, rm;
+ get_modrm(*modrmp, &mod, ®op, &rm);
+ switch (mod) {
+ case 0:
+ if (rm == ebp) {
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
+ AppendToBuffer("[0x%x]", disp);
+ return 5;
+ } else if (rm == esp) {
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ if (index == esp && base == esp && scale == 0 /*times_1*/) {
+ AppendToBuffer("[%s]", NameOfCPURegister(rm));
+ return 2;
+ } else if (base == ebp) {
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ AppendToBuffer("[%s*%d+0x%x]",
+ NameOfCPURegister(index),
+ 1 << scale,
+ disp);
+ return 6;
+ } else if (index != esp && base != ebp) {
+ // [base+index*scale]
+ AppendToBuffer("[%s+%s*%d]",
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
+ 1 << scale);
+ return 2;
+ } else {
+ UnimplementedInstruction();
+ return 1;
+ }
+ } else {
+ AppendToBuffer("[%s]", NameOfCPURegister(rm));
+ return 1;
+ }
+ break;
+ case 1: // fall through
+ case 2:
+ if (rm == esp) {
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ int disp =
+ mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
+ if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
+ AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
+ } else {
+ AppendToBuffer("[%s+%s*%d+0x%x]",
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
+ 1 << scale,
+ disp);
+ }
+ return mod == 2 ? 6 : 3;
+ } else {
+ // No sib.
+ int disp =
+ mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
+ AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
+ return mod == 2 ? 5 : 2;
+ }
+ break;
+ case 3:
+ AppendToBuffer("%s", NameOfCPURegister(rm));
+ return 1;
+ default:
+ UnimplementedInstruction();
+ return 1;
+ }
+ UNREACHABLE();
+}
+
+
+// Returns number of bytes used including the current *data.
+// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
+int DisassemblerIA32::PrintOperands(const char* mnem,
+ OperandOrder op_order,
+ byte* data) {
+ byte modrm = *data;
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, ®op, &rm);
+ int advance = 0;
+ switch (op_order) {
+ case REG_OPER_OP_ORDER: {
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ advance = PrintRightOperand(data);
+ break;
+ }
+ case OPER_REG_OP_ORDER: {
+ AppendToBuffer("%s ", mnem);
+ advance = PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return advance;
+}
+
+
+// Returns number of bytes used by machine instruction, including *data byte.
+// Writes immediate instructions to 'tmp_buffer_'.
+int DisassemblerIA32::PrintImmediateOp(byte* data) {
+ bool sign_extension_bit = (*data & 0x02) != 0;
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, ®op, &rm);
+ const char* mnem = "Imm???";
+ switch (regop) {
+ case 0: mnem = "add"; break;
+ case 1: mnem = "or"; break;
+ case 2: mnem = "adc"; break;
+ case 4: mnem = "and"; break;
+ case 5: mnem = "sub"; break;
+ case 6: mnem = "xor"; break;
+ case 7: mnem = "cmp"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data+1);
+ if (sign_extension_bit) {
+ AppendToBuffer(",0x%x", *(data + 1 + count));
+ return 1 + count + 1 /*int8*/;
+ } else {
+ AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+ return 1 + count + 4 /*int32_t*/;
+ }
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::F7Instruction(byte* data) {
+ assert(*data == 0xF7);
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, ®op, &rm);
+ if (mod == 3 && regop != 0) {
+ const char* mnem = NULL;
+ switch (regop) {
+ case 2: mnem = "not"; break;
+ case 3: mnem = "neg"; break;
+ case 4: mnem = "mul"; break;
+ case 7: mnem = "idiv"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
+ return 2;
+ } else if (mod == 3 && regop == eax) {
+ int32_t imm = *reinterpret_cast<int32_t*>(data+2);
+ AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
+ return 6;
+ } else if (regop == eax) {
+ AppendToBuffer("test ");
+ int count = PrintRightOperand(data+1);
+ int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
+ AppendToBuffer(",0x%x", imm);
+ return 1+count+4 /*int32_t*/;
+ } else {
+ UnimplementedInstruction();
+ return 2;
+ }
+}
+
// Decodes the shift/rotate group: 0xD1 (by 1), 0xD3 (by cl),
// 0xC1 (by imm8). Returns the number of bytes used, including *data.
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
  byte op = *data;
  assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
  byte modrm = *(data+1);
  int mod, regop, rm;
  get_modrm(modrm, &mod, &regop, &rm);
  // imm8 stays -1 for the by-cl form (0xD3) and selects the "cl" text
  // below. NOTE(review): a 0xC1 encoding with an immediate of 0 would
  // also print "cl" — apparently never emitted by V8, but worth noting.
  int imm8 = -1;
  int num_bytes = 2;
  if (mod == 3) {
    const char* mnem = NULL;
    if (op == 0xD1) {
      imm8 = 1;
      switch (regop) {
        case edx: mnem = "rcl"; break;
        case edi: mnem = "sar"; break;
        case esp: mnem = "shl"; break;
        default: UnimplementedInstruction();
      }
    } else if (op == 0xC1) {
      imm8 = *(data+2);
      num_bytes = 3;
      switch (regop) {
        case edx: mnem = "rcl"; break;
        case esp: mnem = "shl"; break;
        case ebp: mnem = "shr"; break;
        case edi: mnem = "sar"; break;
        default: UnimplementedInstruction();
      }
    } else if (op == 0xD3) {
      switch (regop) {
        case esp: mnem = "shl"; break;
        case ebp: mnem = "shr"; break;
        case edi: mnem = "sar"; break;
        default: UnimplementedInstruction();
      }
    }
    assert(mnem != NULL);
    AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
    if (imm8 > 0) {
      AppendToBuffer("%d", imm8);
    } else {
      AppendToBuffer("cl");
    }
  } else {
    // Memory-operand shifts are not generated by V8.
    UnimplementedInstruction();
  }
  return num_bytes;
}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpShort(byte* data) {
+ assert(*data == 0xEB);
+ byte b = *(data+1);
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ AppendToBuffer("jmp %s", NameOfAddress(dest));
+ return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
+ assert(*data == 0x0F);
+ byte cond = *(data+1) & 0x0F;
+ byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
+ const char* mnem = jump_conditional_mnem[cond];
+ AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+ if (comment != NULL) {
+ AppendToBuffer(", %s", comment);
+ }
+ return 6; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
+ byte cond = *data & 0x0F;
+ byte b = *(data+1);
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ const char* mnem = jump_conditional_mnem[cond];
+ AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+ if (comment != NULL) {
+ AppendToBuffer(", %s", comment);
+ }
+ return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::FPUInstruction(byte* data) {
+ byte b1 = *data;
+ byte b2 = *(data + 1);
+ if (b1 == 0xD9) {
+ const char* mnem = NULL;
+ switch (b2) {
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE0: mnem = "fchs"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xE4: mnem = "ftst"; break;
+ }
+ if (mnem != NULL) {
+ AppendToBuffer("%s", mnem);
+ return 2;
+ } else if ((b2 & 0xF8) == 0xC8) {
+ AppendToBuffer("fxch st%d", b2 & 0x7);
+ return 2;
+ } else {
+ int mod, regop, rm;
+ get_modrm(*(data+1), &mod, ®op, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case eax: mnem = "fld_s"; break;
+ case ebx: mnem = "fstp_s"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ }
+ } else if (b1 == 0xDD) {
+ if ((b2 & 0xF8) == 0xC0) {
+ AppendToBuffer("ffree st%d", b2 & 0x7);
+ return 2;
+ } else {
+ int mod, regop, rm;
+ get_modrm(*(data+1), &mod, ®op, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case eax: mnem = "fld_d"; break;
+ case ebx: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ }
+ } else if (b1 == 0xDB) {
+ int mod, regop, rm;
+ get_modrm(*(data+1), &mod, ®op, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case eax: mnem = "fild_s"; break;
+ case ebx: mnem = "fistp_s"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ } else if (b1 == 0xDF) {
+ if (b2 == 0xE0) {
+ AppendToBuffer("fnstsw_ax");
+ return 2;
+ }
+ int mod, regop, rm;
+ get_modrm(*(data+1), &mod, ®op, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case ebp: mnem = "fild_d"; break;
+ case edi: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ } else if (b1 == 0xDC || b1 == 0xDE) {
+ bool is_pop = (b1 == 0xDE);
+ if (is_pop && b2 == 0xD9) {
+ AppendToBuffer("fcompp");
+ return 2;
+ }
+ const char* mnem = "FP0xDC";
+ switch (b2 & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
+ return 2;
+ }
+ AppendToBuffer("Unknown FP instruction");
+ return 2;
+}
+
+
+// Mnemonics for instructions 0xF0 byte.
+// Returns NULL if the instruction is not handled here.
+static const char* F0Mnem(byte f0byte) {
+ switch (f0byte) {
+ case 0xA2: return "cpuid";
+ case 0x31: return "rdtsc";
+ case 0xBE: return "movsx_b";
+ case 0xBF: return "movsx_w";
+ case 0xB6: return "movzx_b";
+ case 0xB7: return "movzx_w";
+ case 0xAF: return "imul";
+ case 0xA5: return "shld";
+ case 0xAD: return "shrd";
+ case 0xAB: return "bts";
+ default: return NULL;
+ }
+}
+
+
+// Disassembled instruction '*instr' and writes it intro 'out_buffer'.
+int DisassemblerIA32::InstructionDecode(char* out_buffer,
+ const int out_buffer_size,
+ byte* instr) {
+ tmp_buffer_pos_ = 0; // starting to write as position 0
+ byte* data = instr;
+ // Check for hints.
+ const char* branch_hint = NULL;
+ // We use this two prefixes only with branch prediction
+ if (*data == 0x3E /*ds*/) {
+ branch_hint = "predicted taken";
+ data++;
+ } else if (*data == 0x2E /*cs*/) {
+ branch_hint = "predicted not taken";
+ data++;
+ }
+ bool processed = true; // Will be set to false if the current instruction
+ // is not in 'instructions' table.
+ const InstructionDesc& idesc = instruction_table.Get(*data);
+ switch (idesc.type) {
+ case ZERO_OPERANDS_INSTR:
+ AppendToBuffer(idesc.mnem);
+ data++;
+ break;
+
+ case TWO_OPERANDS_INSTR:
+ data++;
+ data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+ break;
+
+ case JUMP_CONDITIONAL_SHORT_INSTR:
+ data += JumpConditionalShort(data, branch_hint);
+ break;
+
+ case REGISTER_INSTR:
+ AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
+ data++;
+ break;
+
+ case MOVE_REG_INSTR: {
+ byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("mov %s,%s",
+ NameOfCPURegister(*data & 0x07),
+ NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case CALL_JUMP_INSTR: {
+ byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
+ AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case SHORT_IMMEDIATE_INSTR: {
+ byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case NO_INSTR:
+ processed = false;
+ break;
+
+ default:
+ UNIMPLEMENTED(); // This type is not implemented.
+ }
+ //----------------------------
+ if (!processed) {
+ switch (*data) {
+ case 0xC2:
+ AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
+ data += 3;
+ break;
+
+ case 0x69: // fall through
+ case 0x6B:
+ { int mod, regop, rm;
+ get_modrm(*(data+1), &mod, ®op, &rm);
+ int32_t imm =
+ *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
+ AppendToBuffer("imul %s,%s,0x%x",
+ NameOfCPURegister(regop),
+ NameOfCPURegister(rm),
+ imm);
+ data += 2 + (*data == 0x6B ? 1 : 4);
+ }
+ break;
+
+ case 0xF6:
+ { int mod, regop, rm;
+ get_modrm(*(data+1), &mod, ®op, &rm);
+ if (mod == 3 && regop == eax) {
+ AppendToBuffer("test_b %s,%d", NameOfCPURegister(rm), *(data+2));
+ } else {
+ UnimplementedInstruction();
+ }
+ data += 3;
+ }
+ break;
+
+ case 0x81: // fall through
+ case 0x83: // 0x81 with sign extension bit set
+ data += PrintImmediateOp(data);
+ break;
+
+ case 0x0F:
+ { byte f0byte = *(data+1);
+ const char* f0mnem = F0Mnem(f0byte);
+ if (f0byte == 0xA2 || f0byte == 0x31) {
+ AppendToBuffer("%s", f0mnem);
+ data += 2;
+ } else if ((f0byte & 0xF0) == 0x80) {
+ data += JumpConditional(data, branch_hint);
+ } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
+ f0byte == 0xB7 || f0byte == 0xAF) {
+ data += 2;
+ data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+ } else {
+ data += 2;
+ if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // shrd, shld, bts
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else {
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ }
+ break;
+
+ case 0x8F:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ if (regop == eax) {
+ AppendToBuffer("pop ");
+ data += PrintRightOperand(data);
+ }
+ }
+ break;
+
+ case 0xFF:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case esi: mnem = "push"; break;
+ case eax: mnem = "inc"; break;
+ case edx: mnem = "call"; break;
+ case esp: mnem = "jmp"; break;
+ default: mnem = "???";
+ }
+ AppendToBuffer("%s ", mnem);
+ data += PrintRightOperand(data);
+ }
+ break;
+
+ case 0xC7: // imm32, fall through
+ case 0xC6: // imm8
+ { bool is_byte = *data == 0xC6;
+ data++;
+ AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
+ data += PrintRightOperand(data);
+ int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += is_byte ? 1 : 4;
+ }
+ break;
+
+ case 0x88: // 8bit, fall through
+ case 0x89: // 32bit
+ { bool is_byte = *data == 0x88;
+ int mod, regop, rm;
+ data++;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
+ break;
+
+ case 0x66: // prefix
+ data++;
+ if (*data == 0x8B) {
+ data++;
+ data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+ } else if (*data == 0x89) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("mov_w ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xFE:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ if (mod == 3 && regop == ecx) {
+ AppendToBuffer("dec_b %s", NameOfCPURegister(rm));
+ } else {
+ UnimplementedInstruction();
+ }
+ data++;
+ }
+ break;
+
+ case 0x68:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
+ data += 5;
+ break;
+
+ case 0x6A:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ data += 2;
+ break;
+
+ case 0xA8:
+ AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+ data += 2;
+ break;
+
+ case 0xA9:
+ AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
+ data += 5;
+ break;
+
+ case 0xD1: // fall through
+ case 0xD3: // fall through
+ case 0xC1:
+ data += D1D3C1Instruction(data);
+ break;
+
+ case 0xD9: // fall through
+ case 0xDB: // fall through
+ case 0xDC: // fall through
+ case 0xDD: // fall through
+ case 0xDE: // fall through
+ case 0xDF:
+ data += FPUInstruction(data);
+ break;
+
+ case 0xEB:
+ data += JumpShort(data);
+ break;
+
+ case 0xF2:
+ if (*(data+1) == 0x0F) {
+ byte b2 = *(data+2);
+ if (b2 == 0x11) {
+ AppendToBuffer("movsd ");
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (b2 == 0x10) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ const char* mnem = "?";
+ switch (b2) {
+ case 0x2A: mnem = "cvtsi2sd"; break;
+ case 0x58: mnem = "addsd"; break;
+ case 0x59: mnem = "mulsd"; break;
+ case 0x5C: mnem = "subsd"; break;
+ case 0x5E: mnem = "divsd"; break;
+ }
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ if (b2 == 0x2A) {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ AppendToBuffer("%s %s,%s",
+ mnem,
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ }
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF3:
+ if (*(data+1) == 0x0F && *(data+2) == 0x2C) {
+ data += 3;
+ data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF7:
+ data += F7Instruction(data);
+ break;
+
+ default:
+ UnimplementedInstruction();
+ }
+ }
+
+ if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
+ tmp_buffer_[tmp_buffer_pos_] = '\0';
+ }
+
+ int instr_len = data - instr;
+ if (instr_len == 0) instr_len = 1; // parse at least a byte
+#ifdef WIN32
+ _snprintf(out_buffer, out_buffer_size, "%s", tmp_buffer_);
+#else
+ snprintf(out_buffer, out_buffer_size, "%s", tmp_buffer_);
+#endif
+ return instr_len;
+}
+
+
+//------------------------------------------------------------------------------
+
+
// Printable names of the eight ia32 general-purpose registers,
// indexed by their hardware encoding (0 == eax .. 7 == edi).
static const char* cpu_regs[8] = {
  "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
};
+
+
// Printable names of the eight SSE registers, indexed by encoding.
static const char* xmm_regs[8] = {
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
};
+
+
// Format the raw address as "%p" into a static buffer.
// NOTE: the buffer is shared across calls, so each call overwrites the
// string returned by the previous one (not reentrant).
const char* NameConverter::NameOfAddress(byte* addr) const {
  static char tmp_buffer[32];
#ifdef WIN32
  // MSVC spells snprintf with a leading underscore.
  _snprintf(tmp_buffer, sizeof tmp_buffer, "%p", addr);
#else
  snprintf(tmp_buffer, sizeof tmp_buffer, "%p", addr);
#endif
  return tmp_buffer;
}
+
+
// Constants have no symbolic names in the default converter; fall back
// to the plain address representation.
const char* NameConverter::NameOfConstant(byte* addr) const {
  return NameOfAddress(addr);
}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ if (0 <= reg && reg < 8) return cpu_regs[reg];
+ return "noreg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ if (0 <= reg && reg < 8) return xmm_regs[reg];
+ return "noxmmreg";
+}
+
+
// The default converter cannot read names embedded in generated code.
const char* NameConverter::NameInCode(byte* addr) const {
  // IA32 does not embed debug strings at the moment.
  UNREACHABLE();
  return "";
}
+
+
+//------------------------------------------------------------------------------
+
// Shared converter used by Disassembler instances constructed without
// an explicit NameConverter.
static NameConverter defaultConverter;

// Default constructor: use the shared default converter.
Disassembler::Disassembler() : converter_(defaultConverter) {}
+
+
// Use the caller-supplied converter; only a reference is kept, so the
// caller is responsible for keeping it alive and deallocating it.
Disassembler::Disassembler(const NameConverter& converter)
    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
// Decode a single instruction at 'instruction' into 'buffer' and return
// the instruction length in bytes.  Unimplemented opcodes are reported
// in the output text instead of aborting (note the 'false' flag).
int Disassembler::InstructionDecode(char* buffer,
                                    const int buffer_size,
                                    byte* instruction) {
  DisassemblerIA32 d(converter_, false /*do not crash if unimplemented*/);
  return d.InstructionDecode(buffer, buffer_size, instruction);
}
+
+
// Disassemble [begin, end) to f, one line per instruction showing the
// address, the raw instruction bytes in hex, and the decoded text.
/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
  Disassembler d;
  for (byte* pc = begin; pc < end;) {
    char buffer[128];
    buffer[0] = '\0';
    byte* prev_pc = pc;
    pc += d.InstructionDecode(buffer, sizeof buffer, pc);
    fprintf(f, "%p", prev_pc);
    fprintf(f, " ");

    // Hex dump of the instruction's bytes.
    for (byte* bp = prev_pc; bp < pc; bp++) {
      fprintf(f, "%02x", *bp);
    }
    // Pad short instructions so the decoded text starts in a fixed column.
    for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
      fprintf(f, " ");
    }
    fprintf(f, " %s\n", buffer);
  }
}
+
+
+} // namespace disasm
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DISASM_H_
+#define V8_DISASM_H_
+
+namespace disasm {
+
+typedef unsigned char byte;
+
+// Interface and default implementation for converting addresses and
+// register-numbers to text. The default implementation is machine
+// specific.
class NameConverter {
 public:
  virtual ~NameConverter() {}
  // Printable name for CPU register number 'reg'.
  virtual const char* NameOfCPURegister(int reg) const;
  // Printable name for XMM register number 'reg'.
  virtual const char* NameOfXMMRegister(int reg) const;
  // Textual form of a code address.
  virtual const char* NameOfAddress(byte* addr) const;
  // Textual form of a constant's address.
  virtual const char* NameOfConstant(byte* addr) const;
  // Name embedded in the code at 'addr', where supported.
  virtual const char* NameInCode(byte* addr) const;
};
+
+
+// A generic Disassembler interface
class Disassembler {
 public:
  // Uses default NameConverter.
  Disassembler();

  // Caller deallocates converter.
  explicit Disassembler(const NameConverter& converter);

  virtual ~Disassembler();

  // Writes one disassembled instruction into 'buffer' (0-terminated).
  // Returns the length of the disassembled machine instruction in bytes.
  int InstructionDecode(char* buffer, const int buffer_size, byte* instruction);

  // Write disassembly into specified file 'f' using specified NameConverter
  // (see constructor).
  static void Disassemble(FILE* f, byte* begin, byte* end);
 private:
  // Converter used for symbolic names; referenced, not owned.
  const NameConverter& converter_;
};
+
+} // namespace disasm
+
+#endif // V8_DISASM_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "string-stream.h"
+
+namespace v8 { namespace internal {
+
+#ifdef ENABLE_DISASSEMBLER
+
+void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
+ for (byte* pc = begin; pc < end; pc++) {
+ if (f == NULL) {
+ PrintF("%p %4d %02x\n", pc, pc - begin, *pc);
+ } else {
+ fprintf(f, "%p %4d %02x\n", pc, pc - begin, *pc);
+ }
+ }
+}
+
+
// NameConverter that resolves addresses using V8 metadata: builtin
// names, offsets within a Code object, and names embedded in code.
class V8NameConverter: public disasm::NameConverter {
 public:
  // 'code' may be NULL (e.g. when printing code stubs); then only the
  // builtin and plain-address lookups are available.
  explicit V8NameConverter(Code* code) : code_(code) {}
  virtual const char* NameOfAddress(byte* pc) const;
  virtual const char* NameInCode(byte* addr) const;
  Code* code() const { return code_; }
 private:
  Code* code_;
};
+
+
// Resolve 'pc' to the most informative name available: a builtin's name,
// an offset into the current Code object, or the plain address.
// NOTE: returns a shared static buffer, overwritten by the next call.
const char* V8NameConverter::NameOfAddress(byte* pc) const {
  static char buffer[128];

  // Prefer a builtin's symbolic name if pc points into one.
  const char* name = Builtins::Lookup(pc);
  if (name != NULL) {
    OS::SNPrintF(buffer, sizeof buffer, "%s (%p)", name, pc);
    return buffer;
  }

  if (code_ != NULL) {
    int offs = pc - code_->instruction_start();
    // print as code offset, if it seems reasonable
    if (0 <= offs && offs < code_->instruction_size()) {
      OS::SNPrintF(buffer, sizeof buffer, "%d (%p)", offs, pc);
      return buffer;
    }
  }

  // Fall back to the default plain-address formatting.
  return disasm::NameConverter::NameOfAddress(pc);
}
+
+
// When the V8NameConverter is used for well-known code we can "safely"
// dereference pointers in generated code and treat them as strings.
// Without a Code object we cannot, so return an empty name instead.
const char* V8NameConverter::NameInCode(byte* addr) const {
  return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
}
+
+
+static void DumpBuffer(FILE* f, char* buff) {
+ if (f == NULL) {
+ PrintF("%s", buff);
+ } else {
+ fprintf(f, "%s", buff);
+ }
+}
+
static const int kOutBufferSize = 1024;    // Capacity of one formatted output line.
static const int kRelocInfoPosition = 57;  // Column where reloc info is printed.
+
// Decode and print [begin, end) to f (or the log when f is NULL),
// interleaving relocation information from converter.code() when present.
// Recognizes embedded constant pools and prints them as data.
// Returns the number of bytes processed.
static int DecodeIt(FILE* f,
                    const V8NameConverter& converter,
                    byte* begin,
                    byte* end) {
  ExternalReferenceEncoder ref_encoder;
  char decode_buffer[128];
  char out_buffer[kOutBufferSize];
  const int sob = sizeof out_buffer;
  byte* pc = begin;
  disasm::Disassembler d(converter);
  RelocIterator* it = NULL;
  if (converter.code() != NULL) {
    it = new RelocIterator(converter.code());
  } else {
    // No relocation information when printing code stubs.
  }
  int constants = -1;  // no constants being decoded at the start

  while (pc < end) {
    // First decode instruction so that we know its length.
    byte* prev_pc = pc;
    if (constants > 0) {
      // Inside a constant pool: each 32-bit word is data, not code.
      OS::SNPrintF(decode_buffer, sizeof(decode_buffer), "%s", "constant");
      constants--;
      pc += 4;
    } else {
      int instruction_bits = *(reinterpret_cast<int*>(pc));
      if ((instruction_bits & 0xfff00000) == 0x03000000) {
        // Marker word introducing a constant pool; the low 16 bits hold
        // the number of constant words that follow.
        OS::SNPrintF(decode_buffer, sizeof(decode_buffer),
                     "%s", "constant pool begin");
        constants = instruction_bits & 0x0000ffff;
        pc += 4;
      } else {
        decode_buffer[0] = '\0';
        pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);
      }
    }

    // Collect RelocInfo for this instruction (prev_pc .. pc-1)
    List<const char*> comments(4);
    List<byte*> pcs(1);
    List<RelocMode> rmodes(1);
    List<intptr_t> datas(1);
    if (it != NULL) {
      while (!it->done() && it->rinfo()->pc() < pc) {
        if (is_comment(it->rinfo()->rmode())) {
          // For comments just collect the text.
          comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
        } else {
          // For other reloc info collect all data.
          pcs.Add(it->rinfo()->pc());
          rmodes.Add(it->rinfo()->rmode());
          datas.Add(it->rinfo()->data());
        }
        it->next();
      }
    }

    int outp = 0;  // pointer into out_buffer, implements append operation.

    // Comments.
    for (int i = 0; i < comments.length(); i++) {
      outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                           " %s\n", comments[i]);
    }

    // Write out comments, resets outp so that we can format the next line.
    if (outp > 0) {
      DumpBuffer(f, out_buffer);
      outp = 0;
    }

    // Instruction address and instruction offset.
    outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                         "%p %4d ", prev_pc, prev_pc - begin);

    // Instruction bytes.  Every branch above advances pc by exactly one
    // 32-bit word, so the whole instruction is printed as one word.
    ASSERT(pc - prev_pc == 4);
    outp += OS::SNPrintF(out_buffer + outp,
                         sob - outp,
                         "%08x",
                         *reinterpret_cast<intptr_t*>(prev_pc));

    // Pad so the decoded text starts in a fixed column.
    for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
      outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
    }
    outp += OS::SNPrintF(out_buffer + outp, sob - outp, " %s", decode_buffer);

    // Print all the reloc info for this instruction which are not comments.
    for (int i = 0; i < pcs.length(); i++) {
      // Put together the reloc info
      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);

      // Indent the printing of the reloc info.
      if (i == 0) {
        // The first reloc info is printed after the disassembled instruction.
        for (int p = outp; p < kRelocInfoPosition; p++) {
          outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
        }
      } else {
        // Additional reloc infos are printed on separate lines.
        outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
        for (int p = 0; p < kRelocInfoPosition; p++) {
          outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
        }
      }

      if (is_position(relocinfo.rmode())) {
        outp += OS::SNPrintF(out_buffer + outp,
                             sob - outp,
                             " ;; debug: statement %d",
                             relocinfo.data());
      } else if (relocinfo.rmode() == embedded_object) {
        // Print a short description of the embedded heap object.
        HeapStringAllocator allocator;
        StringStream accumulator(&allocator);
        relocinfo.target_object()->ShortPrint(&accumulator);
        SmartPointer<char> obj_name = accumulator.ToCString();
        outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                             " ;; object: %s",
                             *obj_name);
      } else if (relocinfo.rmode() == external_reference) {
        const char* reference_name =
            ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
        outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                             " ;; external reference (%s)",
                             reference_name);
      } else if (relocinfo.rmode() == code_target) {
        outp +=
            OS::SNPrintF(out_buffer + outp, sob - outp,
                         " ;; code target (%s)",
                         converter.NameOfAddress(relocinfo.target_address()));
      } else {
        // The symbolic mode name is only available in debug builds.
        outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                             " ;; %s%s",
#if defined(DEBUG)
                             RelocInfo::RelocModeName(relocinfo.rmode()),
#else
                             "reloc_info",
#endif
                             "");
      }
    }
    outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");

    if (outp > 0) {
      ASSERT(outp < kOutBufferSize);
      DumpBuffer(f, out_buffer);
      outp = 0;
    }
  }

  delete it;
  return pc - begin;
}
+
+
+int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
+ V8NameConverter defaultConverter(NULL);
+ return DecodeIt(f, defaultConverter, begin, end);
+}
+
+
+void Disassembler::Decode(FILE* f, Code* code) {
+ byte* begin = Code::cast(code)->instruction_start();
+ byte* end = begin + Code::cast(code)->instruction_size();
+ V8NameConverter v8NameConverter(code);
+ DecodeIt(f, v8NameConverter, begin, end);
+}
+
+#else // ENABLE_DISASSEMBLER
+
// Disassembler support is compiled out; these no-op stubs keep the
// public interface available to callers.
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
void Disassembler::Decode(FILE* f, Code* code) {}
+
+#endif // ENABLE_DISASSEMBLER
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "string-stream.h"
+
+namespace v8 { namespace internal {
+
+#ifdef ENABLE_DISASSEMBLER
+
// Raw byte dump of [begin, end): address, offset from begin, and byte
// value, one line each.
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
  for (byte* pc = begin; pc < end; pc++) {
    if (f == NULL) {
      // No file given: write to the V8 log instead.
      PrintF("%p %4d %02x\n", pc, pc - begin, *pc);
    } else {
      fprintf(f, "%p %4d %02x\n", pc, pc - begin, *pc);
    }
  }
}
+
+
// NameConverter that resolves addresses using V8 metadata: builtin
// names and offsets within a Code object.
class V8NameConverter: public disasm::NameConverter {
 public:
  // 'code' may be NULL (e.g. when printing code stubs); then only the
  // builtin and plain-address lookups are available.
  explicit V8NameConverter(Code* code) : code_(code) {}
  virtual const char* NameOfAddress(byte* pc) const;
  Code* code() const { return code_; }
 private:
  Code* code_;
};
+
+
// Resolve 'pc' to the most informative name available: a builtin's name,
// an offset into the current Code object, or the plain address.
// NOTE: returns a shared static buffer, overwritten by the next call.
const char* V8NameConverter::NameOfAddress(byte* pc) const {
  static char buffer[128];

  // Prefer a builtin's symbolic name if pc points into one.
  const char* name = Builtins::Lookup(pc);
  if (name != NULL) {
    OS::SNPrintF(buffer, sizeof buffer, "%s (%p)", name, pc);
    return buffer;
  }

  if (code_ != NULL) {
    int offs = pc - code_->instruction_start();
    // print as code offset, if it seems reasonable
    if (0 <= offs && offs < code_->instruction_size()) {
      OS::SNPrintF(buffer, sizeof buffer, "%d (%p)", offs, pc);
      return buffer;
    }
  }

  // Fall back to the default plain-address formatting.
  return disasm::NameConverter::NameOfAddress(pc);
}
+
+
+static void DumpBuffer(FILE* f, char* buff) {
+ if (f == NULL) PrintF("%s", buff);
+ else fprintf(f, "%s", buff);
+}
+
static const int kOutBufferSize = 1024;    // Capacity of one formatted output line.
static const int kRelocInfoPosition = 57;  // Column where reloc info is printed.
+
// Decode and print [begin, end) to f (or the log when f is NULL),
// interleaving relocation information from converter.code() when present.
// Returns the number of bytes processed.
static int DecodeIt(FILE* f,
                    const V8NameConverter& converter,
                    byte* begin,
                    byte* end) {
  // No handle allocation or GC may happen while raw code pointers are held.
  NoHandleAllocation ha;
  AssertNoAllocation no_alloc;
  ExternalReferenceEncoder ref_encoder;

  char decode_buffer[128];
  char out_buffer[kOutBufferSize];
  const int sob = sizeof out_buffer;
  byte* pc = begin;
  disasm::Disassembler d(converter);
  RelocIterator* it = NULL;
  if (converter.code() != NULL) {
    it = new RelocIterator(converter.code());
  } else {
    // No relocation information when printing code stubs.
  }

  while (pc < end) {
    // First decode instruction so that we know its length.
    byte* prev_pc = pc;
    decode_buffer[0] = '\0';
    pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);

    // Collect RelocInfo for this instruction (prev_pc .. pc-1)
    List<const char*> comments(4);
    List<byte*> pcs(1);
    List<RelocMode> rmodes(1);
    List<intptr_t> datas(1);
    if (it != NULL) {
      while (!it->done() && it->rinfo()->pc() < pc) {
        if (is_comment(it->rinfo()->rmode())) {
          // For comments just collect the text.
          comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
        } else {
          // For other reloc info collect all data.
          pcs.Add(it->rinfo()->pc());
          rmodes.Add(it->rinfo()->rmode());
          datas.Add(it->rinfo()->data());
        }
        it->next();
      }
    }

    int outp = 0;  // pointer into out_buffer, implements append operation.

    // Comments.
    for (int i = 0; i < comments.length(); i++) {
      outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                           " %s\n", comments[i]);
    }

    // Write out comments, resets outp so that we can format the next line.
    if (outp > 0) {
      DumpBuffer(f, out_buffer);
      outp = 0;
    }

    // Instruction address and instruction offset.
    outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                         "%p %4d ", prev_pc, prev_pc - begin);

    // Instruction bytes.
    for (byte* bp = prev_pc; bp < pc; bp++) {
      outp += OS::SNPrintF(out_buffer + outp, sob - outp, "%02x", *bp);
    }
    // Pad short instructions so the decoded text starts in a fixed column.
    for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
      outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
    }
    outp += OS::SNPrintF(out_buffer + outp, sob - outp, " %s", decode_buffer);

    // Print all the reloc info for this instruction which are not comments.
    for (int i = 0; i < pcs.length(); i++) {
      // Put together the reloc info
      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);

      // Indent the printing of the reloc info.
      if (i == 0) {
        // The first reloc info is printed after the disassembled instruction.
        for (int p = outp; p < kRelocInfoPosition; p++) {
          outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
        }
      } else {
        // Additional reloc infos are printed on separate lines.
        outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
        for (int p = 0; p < kRelocInfoPosition; p++) {
          outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
        }
      }

      if (is_position(relocinfo.rmode())) {
        outp += OS::SNPrintF(out_buffer + outp,
                             sob - outp,
                             " ;; debug: statement %d",
                             relocinfo.data());
      } else if (relocinfo.rmode() == embedded_object) {
        // Print a short description of the embedded heap object.
        HeapStringAllocator allocator;
        StringStream accumulator(&allocator);
        relocinfo.target_object()->ShortPrint(&accumulator);
        SmartPointer<char> obj_name = accumulator.ToCString();
        outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                             " ;; object: %s",
                             *obj_name);
      } else if (relocinfo.rmode() == external_reference) {
        const char* reference_name =
            ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
        outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                             " ;; external reference (%s)",
                             reference_name);
      } else {
        outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                             " ;; %s",
                             RelocInfo::RelocModeName(relocinfo.rmode()));
        if (is_code_target(relocinfo.rmode())) {
          // For code targets, append details about the target Code object.
          Code* code = Debug::GetCodeTarget(relocinfo.target_address());
          Code::Kind kind = code->kind();
          if (kind == Code::STUB) {
            // Reverse lookup required as the minor key cannot be retrieved
            // from the code object.
            Object* obj = Heap::code_stubs()->SlowReverseLookup(code);
            if (obj != Heap::undefined_value()) {
              ASSERT(obj->IsSmi());
              // Get the STUB key and extract major and minor key.
              uint32_t key = Smi::cast(obj)->value();
              CodeStub::Major major_key = code->major_key();
              uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
              ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
              outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                                   " (%s, %s, ",
                                   Code::Kind2String(kind),
                                   CodeStub::MajorName(code->major_key()));
              switch (code->major_key()) {
                case CodeStub::CallFunction:
                  outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                                       "argc = %d)",
                                       minor_key);
                  break;
                case CodeStub::Runtime: {
                  // For runtime stubs the minor key is the function id.
                  Runtime::FunctionId id =
                      static_cast<Runtime::FunctionId>(minor_key);
                  outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                                       "%s)",
                                       Runtime::FunctionForId(id)->name);
                  break;
                }
                default:
                  outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                                       "minor: %d)",
                                       minor_key);
              }
            }
          } else {
            outp += OS::SNPrintF(out_buffer + outp, sob - outp,
                                 " (%s)",
                                 Code::Kind2String(kind));
          }
        }
      }
    }
    outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");

    if (outp > 0) {
      ASSERT(outp < kOutBufferSize);
      DumpBuffer(f, out_buffer);
      outp = 0;
    }
  }

  delete it;
  return pc - begin;
}
+
+
+int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
+ V8NameConverter defaultConverter(NULL);
+ return DecodeIt(f, defaultConverter, begin, end);
+}
+
+
+// Called by Code::CodePrint
+void Disassembler::Decode(FILE* f, Code* code) {
+ byte* begin = Code::cast(code)->instruction_start();
+ byte* end = begin + Code::cast(code)->instruction_size();
+ V8NameConverter v8NameConverter(code);
+ DecodeIt(f, v8NameConverter, begin, end);
+}
+#else // ENABLE_DISASSEMBLER
+
// Disassembler support is compiled out; these no-op stubs keep the
// public interface available to callers.
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
void Disassembler::Decode(FILE* f, Code* code) {}
+
+#endif // ENABLE_DISASSEMBLER
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DISASSEMBLER_H_
+#define V8_DISASSEMBLER_H_
+
+namespace v8 { namespace internal {
+
class Disassembler : public AllStatic {
 public:
  // Print the bytes in the interval [begin, end) into f.
  static void Dump(FILE* f, byte* begin, byte* end);

  // Decode instructions in the interval [begin, end) and print the
  // code into f. Returns the number of bytes disassembled or 1 if no
  // instruction could be decoded.
  static int Decode(FILE* f, byte* begin, byte* end);

  // Decode instructions in code.
  static void Decode(FILE* f, Code* code);
 private:
  // Decode instruction at pc and print disassembled instruction into f.
  // Returns the instruction length in bytes, or 1 if the instruction could
  // not be decoded. The number of characters written is written into
  // the out parameter char_count.
  static int Decode(FILE* f, byte* pc, int* char_count);
};
+
+} } // namespace v8::internal
+
+#endif // V8_DISASSEMBLER_H_
--- /dev/null
+/*
+ * Copyright 2007-2008 Google, Inc. All Rights Reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Dtoa needs to have a particular environment set up for it so
+ * instead of using it directly you should use this file.
+ *
+ * The way it works is that when you link with it, its definitions
+ * of dtoa, strtod etc. override the default ones. So if you fail
+ * to link with this library everything will still work, it's just
+ * subtly wrong.
+ */
+
/* endian.h is glibc-specific; Mac OS X and Windows do not provide it. */
#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32)
#include <endian.h>
#endif
#include <math.h>
#include <float.h>

/* The floating point word order on ARM is big endian when floating point
 * emulation is used, even if the byte order is little endian */
#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
    __FLOAT_WORD_ORDER == __BIG_ENDIAN
#define IEEE_MC68k
#else
#define IEEE_8087
#endif

/* NOTE(review): defining the math.h include guard presumably stops
 * dtoa.c from pulling in its own copy of math.h -- confirm against
 * third_party/dtoa/dtoa.c. */
#define __MATH_H__
#if defined(__APPLE__) && defined(__MACH__)
/* stdlib.h on Apple's 10.5 and later SDKs will mangle the name of strtod.
 * If it's included after strtod is redefined as gay_strtod, it will mangle
 * the name of gay_strtod, which is unwanted. */
#include <stdlib.h>
#endif
/* Make sure we use the David M. Gay version of strtod(). On Linux, we
 * cannot use the same name (maybe the function does not have weak
 * linkage?). */
#define strtod gay_strtod
#include "third_party/dtoa/dtoa.c"
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "codegen-inl.h"
+
+#if defined(ARM) || defined (__arm__) || defined(__thumb__)
+#include "simulator-arm.h"
+#else // ia32
+#include "simulator-ia32.h"
+#endif
+
+namespace v8 { namespace internal {
+
+
// Shared implementation of Execution::Call and Execution::New: enters
// generated JavaScript code through the appropriate JS entry stub and
// invokes 'func' with the given receiver and arguments.  On return,
// *has_pending_exception reports whether the call threw.
static Handle<Object> Invoke(bool construct,
                             Handle<JSFunction> func,
                             Handle<Object> receiver,
                             int argc,
                             Object*** args,
                             bool* has_pending_exception) {
  // Make sure we have a real function, not a boilerplate function.
  ASSERT(!func->IsBoilerplate());

  // Entering JavaScript.
  VMState state(JS);

  // Guard the stack against too much recursion.
  StackGuard guard;

  // Placeholder for return value.
  Object* value = reinterpret_cast<Object*>(kZapValue);

  // Signature of the generated JS entry stub.
  typedef Object* (*JSEntryFunction)(
    byte* entry,
    Object* function,
    Object* receiver,
    int argc,
    Object*** args);

  // Select the construct-entry or the call-entry stub.
  Handle<Code> code;
  if (construct) {
    JSConstructEntryStub stub;
    code = stub.GetCode();
  } else {
    JSEntryStub stub;
    code = stub.GetCode();
  }

  { // Save and restore context around invocation and block the
    // allocation of handles without explicit handle scopes.
    SaveContext save;
    NoHandleAllocation na;
    JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());

    // Call the function through the right JS entry stub.
    value = CALL_GENERATED_CODE(entry, func->code()->entry(), *func,
                                *receiver, argc, args);
  }

#ifdef DEBUG
  value->Verify();
#endif

  // Update the pending exception flag and return the value.
  *has_pending_exception = value->IsException();
  ASSERT(*has_pending_exception == Top::has_pending_exception());

  // If the pending exception is OutOfMemoryException set out_of_memory in
  // the global context. Note: We have to mark the global context here
  // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
  // set it.
  if (*has_pending_exception) {
    if (Top::pending_exception() == Failure::OutOfMemoryException()) {
      Top::context()->mark_out_of_memory();
    }
  }

  return Handle<Object>(value);
}
+
+
+// Call |func| with |receiver| and |argc| arguments; thin wrapper over Invoke
+// with construct == false. *pending_exception reports whether an exception
+// is pending after the call.
+Handle<Object> Execution::Call(Handle<JSFunction> func,
+ Handle<Object> receiver,
+ int argc,
+ Object*** args,
+ bool* pending_exception) {
+ return Invoke(false, func, receiver, argc, args, pending_exception);
+}
+
+
+// Construct a new object from |func|; thin wrapper over Invoke with
+// construct == true. The global object is used as the receiver.
+Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
+ Object*** args, bool* pending_exception) {
+ return Invoke(true, func, Top::global(), argc, args, pending_exception);
+}
+
+
+// Like Call(), but silently catches any thrown exception. On exit
+// *caught_exception tells whether the call threw; if it did, the returned
+// handle is the exception object itself (reopened from the v8::TryCatch) and
+// the pending exception has been cleared from Top.
+Handle<Object> Execution::TryCall(Handle<JSFunction> func,
+ Handle<Object> receiver,
+ int argc,
+ Object*** args,
+ bool* caught_exception) {
+ // Enter a try-block while executing the JavaScript code. To avoid
+ // duplicate error printing it must be non-verbose.
+ v8::TryCatch catcher;
+ catcher.SetVerbose(false);
+
+ Handle<Object> result = Invoke(false, func, receiver, argc, args,
+ caught_exception);
+
+ if (*caught_exception) {
+ ASSERT(catcher.HasCaught());
+ ASSERT(Top::has_pending_exception());
+ ASSERT(Top::external_caught_exception());
+ // Keep the exception alive past the TryCatch destructor so it can be
+ // returned to the caller as a handle.
+ Top::optional_reschedule_exception(true);
+ result = v8::Utils::OpenHandle(*catcher.Exception());
+ }
+
+ // Leaving this function, no exception may remain pending.
+ ASSERT(!Top::has_pending_exception());
+ ASSERT(!Top::external_caught_exception());
+ return result;
+}
+
+
+// Command-line flag: when set, calling a RegExp object as a function invokes
+// its 'exec' property (SpiderMonkey-style behavior; off by default).
+DEFINE_bool(call_regexp, false, "allow calls to RegExp objects");
+
+// Return a function to be used in place of |object| when it is called as a
+// function, or undefined if the object is not callable. |object| must not
+// itself already be a JSFunction.
+Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
+ ASSERT(!object->IsJSFunction());
+
+ // If you return a function from here, it will be called when an
+ // attempt is made to call the given object as a function.
+
+ // The regular expression code here is really meant more as an
+ // example than anything else. KJS does not support calling regular
+ // expressions as functions, but SpiderMonkey does.
+ if (FLAG_call_regexp) {
+ // An object is a regexp instance if its map's constructor is the
+ // RegExp function.
+ bool is_regexp =
+ object->IsHeapObject() &&
+ (HeapObject::cast(*object)->map()->constructor() ==
+ *Top::regexp_function());
+
+ if (is_regexp) {
+ Handle<String> exec = Factory::exec_symbol();
+ return Handle<Object>(object->GetProperty(*exec));
+ }
+ }
+
+ // Objects created through the API can have an instance-call handler
+ // that should be used when calling the object as a function.
+ if (object->IsHeapObject() &&
+ HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+ return Handle<JSFunction>(
+ Top::global_context()->call_as_function_delegate());
+ }
+
+ return Factory::undefined_value();
+}
+
+
+// Static state for stack guards. Per-thread limits and interrupt flags; all
+// StackGuard data is static, so the constructor/destructor only track nesting.
+StackGuard::ThreadLocal StackGuard::thread_local_;
+
+
+// Entering a guarded region. The outermost StackGuard (nesting_ == 0)
+// installs real stack limits -- unless an interrupt limit is already in
+// place, in which case the pending interrupt request is preserved.
+StackGuard::StackGuard() {
+ ExecutionAccess access;
+ if (thread_local_.nesting_++ == 0 &&
+ thread_local_.jslimit_ != kInterruptLimit) {
+ // NOTE: We assume that the stack grows towards lower addresses.
+ ASSERT(thread_local_.jslimit_ == kIllegalLimit);
+ ASSERT(thread_local_.climit_ == kIllegalLimit);
+
+ thread_local_.initial_jslimit_ = thread_local_.jslimit_ =
+ GENERATED_CODE_STACK_LIMIT(kLimitSize);
+ // The C++ limit is derived from the address of this stack-allocated
+ // guard object, i.e. the current stack position.
+ thread_local_.initial_climit_ = thread_local_.climit_ =
+ reinterpret_cast<uintptr_t>(this) - kLimitSize;
+
+ // An interrupt was requested before any limits existed; arm it now.
+ if (thread_local_.interrupt_flags_ != 0) {
+ set_limits(kInterruptLimit, access);
+ }
+ }
+ // make sure we have proper limits setup
+ ASSERT(thread_local_.jslimit_ != kIllegalLimit &&
+ thread_local_.climit_ != kIllegalLimit);
+}
+
+
+// Leaving a guarded region; the outermost exit clears the limits back to
+// the illegal sentinel.
+StackGuard::~StackGuard() {
+ ExecutionAccess access;
+ if (--thread_local_.nesting_ == 0) {
+ set_limits(kIllegalLimit, access);
+ }
+}
+
+
+// A guard trap while no interrupt limits are installed must be a genuine
+// stack overflow (interrupts would have set both limits to kInterruptLimit).
+bool StackGuard::IsStackOverflow() {
+ ExecutionAccess access;
+ return (thread_local_.jslimit_ != kInterruptLimit &&
+ thread_local_.climit_ != kInterruptLimit);
+}
+
+
+// Re-arm the interrupt limits if any interrupt flags are still pending.
+void StackGuard::EnableInterrupts() {
+ ExecutionAccess access;
+ if (IsSet(access)) {
+ set_limits(kInterruptLimit, access);
+ }
+}
+
+
+// Install an externally supplied stack limit for both JS and C++ stacks.
+void StackGuard::SetStackLimit(uintptr_t limit) {
+ ExecutionAccess access;
+ // If the current limits are special (eg due to a pending interrupt) then
+ // leave them alone.
+ if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
+ thread_local_.jslimit_ = limit;
+ }
+ if (thread_local_.climit_ == thread_local_.initial_climit_) {
+ thread_local_.climit_ = limit;
+ }
+ // Record the new limit as the value to restore after interrupts.
+ thread_local_.initial_climit_ = limit;
+ thread_local_.initial_jslimit_ = limit;
+}
+
+
+// Drop any interrupt limits and restore the normal (initial) limits.
+void StackGuard::DisableInterrupts() {
+ ExecutionAccess access;
+ reset_limits(access);
+}
+
+
+// True if any interrupt flag is pending. Caller must hold the
+// ExecutionAccess lock (passed as evidence, not used).
+bool StackGuard::IsSet(const ExecutionAccess& lock) {
+ return thread_local_.interrupt_flags_ != 0;
+}
+
+
+// Query whether a plain interruption has been requested.
+bool StackGuard::IsInterrupted() {
+ ExecutionAccess access;
+ return thread_local_.interrupt_flags_ & INTERRUPT;
+}
+
+
+// Request an interruption. The interrupt limits are only armed when not
+// stopped at a break (see Top::is_break_no_lock).
+void StackGuard::Interrupt() {
+ ExecutionAccess access;
+ thread_local_.interrupt_flags_ |= INTERRUPT;
+ if (!Top::is_break_no_lock()) {
+ set_limits(kInterruptLimit, access);
+ }
+}
+
+
+// Query whether thread preemption has been requested.
+bool StackGuard::IsPreempted() {
+ ExecutionAccess access;
+ return thread_local_.interrupt_flags_ & PREEMPT;
+}
+
+
+// Request preemption of the executing thread; same arming rule as
+// Interrupt().
+void StackGuard::Preempt() {
+ ExecutionAccess access;
+ thread_local_.interrupt_flags_ |= PREEMPT;
+ if (!Top::is_break_no_lock()) {
+ set_limits(kInterruptLimit, access);
+ }
+}
+
+
+// Query whether a debugger break has been requested.
+bool StackGuard::IsDebugBreak() {
+ ExecutionAccess access;
+ return thread_local_.interrupt_flags_ & DEBUGBREAK;
+}
+
+// Request a debugger break. Unlike Interrupt()/Preempt(), the flag itself
+// is only set when not already stopped at a break.
+void StackGuard::DebugBreak() {
+ ExecutionAccess access;
+ if (!Top::is_break_no_lock()) {
+ thread_local_.interrupt_flags_ |= DEBUGBREAK;
+ set_limits(kInterruptLimit, access);
+ }
+}
+
+
+// Clear the given interrupt cause; once no causes remain, restore the
+// normal stack limits.
+void StackGuard::Continue(InterruptFlag after_what) {
+ ExecutionAccess access;
+ thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
+ if (thread_local_.interrupt_flags_ == 0) {
+ reset_limits(access);
+ }
+}
+
+
+// Number of bytes needed per thread to archive the stack-guard state.
+int StackGuard::ArchiveSpacePerThread() {
+ return sizeof(ThreadLocal);
+}
+
+
+// Copy the current guard state into |to| and reset the static state to a
+// default-constructed blank (so the outgoing thread's limits do not leak
+// into the next thread). Returns the advanced write pointer.
+char* StackGuard::ArchiveStackGuard(char* to) {
+ ExecutionAccess access;
+ memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ ThreadLocal blank;
+ thread_local_ = blank;
+ return to + sizeof(ThreadLocal);
+}
+
+
+// Restore previously archived guard state from |from|. Returns the advanced
+// read pointer.
+char* StackGuard::RestoreStackGuard(char* from) {
+ ExecutionAccess access;
+ memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ return from + sizeof(ThreadLocal);
+}
+
+
+// --- C a l l s t o n a t i v e s ---
+
+// Call the builtin JS function Top::<name>_fun() with the given arguments and
+// return its result. |argc| must be a compile-time constant and |argv| a
+// braced initializer list of Object** handle locations (it initializes a
+// local array of size argc).
+#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
+ do { \
+ Object** args[argc] = argv; \
+ ASSERT(has_pending_exception != NULL); \
+ return Call(Top::name##_fun(), Top::builtins(), argc, args, \
+ has_pending_exception); \
+ } while (false)
+
+
+// ECMA-262 ToBoolean, implemented directly in C++ (no JS call needed).
+Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
+ // See the similar code in runtime.js:ToBoolean.
+ if (obj->IsBoolean()) return obj;
+ bool result = true;
+ if (obj->IsString()) {
+ // The empty string is false; all other strings are true.
+ result = Handle<String>::cast(obj)->length() != 0;
+ } else if (obj->IsNull() || obj->IsUndefined()) {
+ result = false;
+ } else if (obj->IsNumber()) {
+ // +0, -0 and NaN are false; all other numbers are true.
+ double value = obj->Number();
+ result = !((value == 0) || isnan(value));
+ }
+ return Handle<Object>(Heap::ToBoolean(result));
+}
+
+
+// The remaining ECMA-262 conversions delegate to builtin JS functions via
+// RETURN_NATIVE_CALL; *exc reports whether the call threw.
+
+Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
+ RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
+ RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
+ RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
+ // Fast case: JS objects convert to themselves.
+ if (obj->IsJSObject()) return obj;
+ RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
+ RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
+ RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
+ RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
+}
+
+
+// Create a new Date object for the given time value (milliseconds).
+Handle<Object> Execution::NewDate(double time, bool* exc) {
+ Handle<Object> time_obj = Factory::NewNumber(time);
+ RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
+}
+
+
+#undef RETURN_NATIVE_CALL
+
+
+// Implements string[index] by calling the builtin charAt. Returns undefined
+// for out-of-range indices, a missing/non-function charAt builtin, or if the
+// call throws.
+Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
+ int int_index = static_cast<int>(index);
+ if (int_index < 0 || int_index >= string->length()) {
+ return Factory::undefined_value();
+ }
+
+ Handle<Object> char_at =
+ GetProperty(Top::builtins(), Factory::char_at_symbol());
+ if (!char_at->IsJSFunction()) {
+ return Factory::undefined_value();
+ }
+
+ bool caught_exception;
+ Handle<Object> index_object = Factory::NewNumberFromInt(int_index);
+ Object** index_arg[] = { index_object.location() };
+ // Exceptions thrown by charAt are swallowed; the caller just sees
+ // undefined.
+ Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
+ string,
+ ARRAY_SIZE(index_arg),
+ index_arg,
+ &caught_exception);
+ if (caught_exception) {
+ return Factory::undefined_value();
+ }
+ return result;
+}
+
+
+// Instantiate the function described by an API FunctionTemplateInfo,
+// memoized per context by serial number. Returns a null handle (with *exc
+// set) if the builtin instantiation call throws.
+Handle<JSFunction> Execution::InstantiateFunction(
+ Handle<FunctionTemplateInfo> data, bool* exc) {
+ // Fast case: see if the function has already been instantiated
+ int serial_number = Smi::cast(data->serial_number())->value();
+ Object* elm =
+ Top::global_context()->function_cache()->GetElement(serial_number);
+ if (!elm->IsUndefined()) return Handle<JSFunction>(JSFunction::cast(elm));
+ // The function has not yet been instantiated in this context; do it.
+ Object** args[1] = { Handle<Object>::cast(data).location() };
+ Handle<Object> result =
+ Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+ if (*exc) return Handle<JSFunction>::null();
+ return Handle<JSFunction>::cast(result);
+}
+
+
+// Instantiate an object from an API ObjectTemplateInfo. A template with a
+// constructor but no extra properties is handled directly in C++ by
+// instantiating the constructor and calling it with 'new'; everything else
+// goes through the JS 'instantiate' builtin. Returns a null handle (with
+// *exc set) on failure.
+Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
+ bool* exc) {
+ if (data->property_list()->IsUndefined() &&
+ !data->constructor()->IsUndefined()) {
+ Object* result;
+ {
+ // Inner scope so intermediate handles do not escape; only the raw
+ // result pointer is carried out.
+ HandleScope scope;
+ Handle<FunctionTemplateInfo> cons_template =
+ Handle<FunctionTemplateInfo>(
+ FunctionTemplateInfo::cast(data->constructor()));
+ Handle<JSFunction> cons = InstantiateFunction(cons_template, exc);
+ if (*exc) return Handle<JSObject>::null();
+ Handle<Object> value = New(cons, 0, NULL, exc);
+ if (*exc) return Handle<JSObject>::null();
+ result = *value;
+ }
+ ASSERT(!*exc);
+ return Handle<JSObject>(JSObject::cast(result));
+ } else {
+ Object** args[1] = { Handle<Object>::cast(data).location() };
+ Handle<Object> result =
+ Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+ if (*exc) return Handle<JSObject>::null();
+ return Handle<JSObject>::cast(result);
+ }
+}
+
+
+// Apply an instance template's configuration to |instance| by calling the
+// JS 'configure instance' builtin; *exc reports whether the call threw.
+void Execution::ConfigureInstance(Handle<Object> instance,
+ Handle<Object> instance_template,
+ bool* exc) {
+ Object** args[2] = { instance.location(), instance_template.location() };
+ Execution::Call(Top::configure_instance_fun(), Top::builtins(), 2, args, exc);
+}
+
+
+// Format one stack-trace line by calling the JS builtin; exceptions from the
+// builtin (and non-string results) degrade to the empty string rather than
+// propagating.
+Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
+ Handle<JSFunction> fun,
+ Handle<Object> pos,
+ Handle<Object> is_global) {
+ const int argc = 4;
+ Object** args[argc] = { recv.location(),
+ Handle<Object>::cast(fun).location(),
+ pos.location(),
+ is_global.location() };
+ bool caught_exception = false;
+ Handle<Object> result = TryCall(Top::get_stack_trace_line_fun(),
+ Top::builtins(), argc, args,
+ &caught_exception);
+ if (caught_exception || !result->IsString()) return Factory::empty_symbol();
+ return Handle<String>::cast(result);
+}
+
+
+// --- P r i n t E x t e n s i o n ---
+
+// JS source declaring the native function provided by this extension.
+const char* PrintExtension::kSource = "native function print();";
+
+
+// Hook 'print' (the only native declared in kSource) up to Print below.
+v8::Handle<v8::FunctionTemplate> PrintExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(PrintExtension::Print);
+}
+
+
+// print(...): converts each argument with ToString and writes the results to
+// stdout separated by spaces, followed by a newline. An empty ToString result
+// (i.e. a thrown exception) aborts the loop and propagates.
+v8::Handle<v8::Value> PrintExtension::Print(const v8::Arguments& args) {
+ for (int i = 0; i < args.Length(); i++) {
+ if (i != 0) printf(" ");
+ v8::HandleScope scope;
+ v8::Handle<v8::Value> arg = args[i];
+ v8::Handle<v8::String> string_obj = arg->ToString();
+ if (string_obj.IsEmpty()) return string_obj;
+ int length = string_obj->Length();
+ uint16_t* string = NewArray<uint16_t>(length + 1);
+ string_obj->Write(string);
+ // NOTE(review): printf("%lc") converts each UTF-16 unit through the
+ // current locale's wide-char handling -- output for non-ASCII text is
+ // locale-dependent; confirm this is acceptable for a debug extension.
+ for (int j = 0; j < length; j++)
+ printf("%lc", string[j]);
+ DeleteArray(string);
+ }
+ printf("\n");
+ return v8::Undefined();
+}
+
+
+// Register the extension with V8 at static-initialization time.
+static PrintExtension kPrintExtension;
+v8::DeclareExtension kPrintExtensionDeclaration(&kPrintExtension);
+
+
+// --- L o a d E x t e n s i o n ---
+
+// JS source declaring the native function provided by this extension.
+const char* LoadExtension::kSource = "native function load();";
+
+
+// Hook 'load' up to Load below.
+v8::Handle<v8::FunctionTemplate> LoadExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(LoadExtension::Load);
+}
+
+
+// load(path): reads the script at |path|, compiles it with the file's base
+// name as the script origin, runs it, and returns the script's result. On
+// any failure an exception is thrown/scheduled and an empty handle is
+// returned.
+v8::Handle<v8::Value> LoadExtension::Load(const v8::Arguments& args) {
+ v8::Handle<v8::String> path = args[0]->ToString();
+
+ // Create a handle for the result. Keep the result empty to be
+ // useful as the return value in case of exceptions.
+ v8::Handle<Value> result;
+
+ if (path.IsEmpty()) return result; // Exception was thrown in ToString.
+
+ // Check that the length of the file name is within bounds.
+ static const int kMaxPathLength = 255;
+ if (path->Length() > kMaxPathLength) {
+ v8::Handle<v8::String> message = v8::String::New("Path name too long");
+ v8::ThrowException(v8::Exception::RangeError(message));
+ return result;
+ }
+
+ // Convert the JavaScript string path into a C string and read the
+ // corresponding script from the file system.
+ char path_buffer[kMaxPathLength + 1];
+ path->WriteAscii(path_buffer);
+ bool exists;
+ // NOTE(review): |exists| is written by ReadFile but never read here; the
+ // empty-script check below covers the missing-file case.
+ Vector<const char> script = ReadFile(path_buffer, &exists, false);
+
+ // Find the base file name from the path.
+ char* file_name_buffer = path_buffer;
+ for (char* p = path_buffer; *p; p++) {
+ if (*p == '/' || *p == '\\') file_name_buffer = p + 1;
+ }
+
+ // Throw an exception in case the script couldn't be read.
+ if (script.is_empty()) {
+ static const char* kErrorPrefix = "Unable to read from file ";
+ static const size_t kErrorPrefixLength = 25; // strlen is not constant
+ ASSERT(strlen(kErrorPrefix) == kErrorPrefixLength);
+ static const int kMaxErrorLength = kMaxPathLength + kErrorPrefixLength;
+ char error_buffer[kMaxErrorLength + 1];
+ OS::SNPrintF(error_buffer, kMaxErrorLength, "%s%s",
+ kErrorPrefix, file_name_buffer);
+ v8::Handle<v8::String> error = v8::String::New(error_buffer);
+ v8::ThrowException(v8::Exception::Error(error));
+ return result;
+ }
+
+ // Convert the file name buffer into a script origin
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::New(file_name_buffer));
+
+ // Compile and run script.
+ v8::Handle<v8::String> source =
+ v8::String::New(script.start(), script.length());
+ v8::Handle<v8::Script> code =
+ v8::Script::Compile(source, &origin);
+
+ // Run the code if no exception occurred during the compilation. In
+ // case of syntax errors, the code is empty and the exception is
+ // scheduled and will be thrown when returning to JavaScript.
+ if (!code.IsEmpty()) result = code->Run();
+ script.Dispose();
+ return result;
+}
+
+
+// Register the extension with V8 at static-initialization time.
+static LoadExtension kLoadExtension;
+v8::DeclareExtension kLoadExtensionDeclaration(&kLoadExtension);
+
+
+// --- Q u i t E x t e n s i o n ---
+
+// JS source declaring the native function provided by this extension.
+const char* QuitExtension::kSource = "native function quit();";
+
+
+// Hook 'quit' up to Quit below.
+v8::Handle<v8::FunctionTemplate> QuitExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(QuitExtension::Quit);
+}
+
+
+// quit([code]): terminates the process with the given exit code (default 0).
+// The return statement is unreachable -- exit() does not return -- but
+// satisfies the callback signature.
+v8::Handle<v8::Value> QuitExtension::Quit(const v8::Arguments& args) {
+ exit(args.Length() == 0 ? 0 : args[0]->Int32Value());
+ return v8::Undefined();
+}
+
+
+// Register the extension with V8 at static-initialization time.
+static QuitExtension kQuitExtension;
+v8::DeclareExtension kQuitExtensionDeclaration(&kQuitExtension);
+
+
+// --- V e r s i o n E x t e n s i o n ---
+
+// Pure-JS extension: version() returns a hard-coded version number.
+static Extension kVersionExtension("v8/version",
+ "function version(){ return 150; }");
+v8::DeclareExtension kVersionExtensionDeclaration(&kVersionExtension);
+
+
+// --- G C E x t e n s i o n ---
+
+// JS source declaring the native function provided by this extension.
+const char* GCExtension::kSource = "native function gc();";
+
+
+// Hook 'gc' up to GC below.
+v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(GCExtension::GC);
+}
+
+
+// gc(): force a garbage collection from script, for testing.
+v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
+ // All allocation spaces other than NEW_SPACE have the same effect.
+ Heap::CollectGarbage(0, OLD_SPACE);
+ return v8::Undefined();
+}
+
+
+// Register the extension with V8 at static-initialization time.
+static GCExtension kGCExtension;
+v8::DeclareExtension kGCExtensionDeclaration(&kGCExtension);
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXECUTION_H_
+#define V8_EXECUTION_H_
+
+namespace v8 { namespace internal {
+
+
+// Flag used to set the interrupt causes. Values are single bits so multiple
+// pending causes can be OR-ed into StackGuard's interrupt_flags_.
+enum InterruptFlag {
+ INTERRUPT = 1 << 0, // plain interruption of running JS code
+ DEBUGBREAK = 1 << 1, // debugger break requested
+ PREEMPT = 1 << 2 // thread preemption requested
+};
+
+// Static entry points for invoking JavaScript from C++ (call, construct,
+// protected call) and for the ECMA-262 abstract conversions, most of which
+// are implemented by calling into builtin JS functions.
+class Execution : public AllStatic {
+ public:
+ // Call a function, the caller supplies a receiver and an array
+ // of arguments. Arguments are Object* type. After function returns,
+ // pointers in 'args' might be invalid.
+ //
+ // *pending_exception tells whether the invoke resulted in
+ // a pending exception.
+ //
+ static Handle<Object> Call(Handle<JSFunction> func,
+ Handle<Object> receiver,
+ int argc,
+ Object*** args,
+ bool* pending_exception);
+
+ // Construct object from function, the caller supplies an array of
+ // arguments. Arguments are Object* type. After function returns,
+ // pointers in 'args' might be invalid.
+ //
+ // *pending_exception tells whether the invoke resulted in
+ // a pending exception.
+ //
+ static Handle<Object> New(Handle<JSFunction> func,
+ int argc,
+ Object*** args,
+ bool* pending_exception);
+
+ // Call a function, just like Call(), but make sure to silently catch
+ // any thrown exceptions. The return value is either the result of
+ // calling the function (if caught exception is false) or the exception
+ // that occurred (if caught exception is true).
+ static Handle<Object> TryCall(Handle<JSFunction> func,
+ Handle<Object> receiver,
+ int argc,
+ Object*** args,
+ bool* caught_exception);
+
+ // ECMA-262 9.2
+ static Handle<Object> ToBoolean(Handle<Object> obj);
+
+ // ECMA-262 9.3
+ static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
+
+ // ECMA-262 9.4
+ static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
+
+ // ECMA-262 9.5
+ static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
+
+ // ECMA-262 9.6
+ static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
+
+ // ECMA-262 9.8
+ static Handle<Object> ToString(Handle<Object> obj, bool* exc);
+
+ // ECMA-262 9.8
+ static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
+
+ // ECMA-262 9.9
+ static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
+
+ // Create a new date object from 'time'.
+ static Handle<Object> NewDate(double time, bool* exc);
+
+ // Used to implement [] notation on strings (calls JS code)
+ static Handle<Object> CharAt(Handle<String> str, uint32_t index);
+
+ // NOTE(review): GetFunctionFor is declared but no definition appears in
+ // the accompanying execution.cc -- confirm it is defined elsewhere or
+ // remove the declaration.
+ static Handle<Object> GetFunctionFor();
+ // Instantiate API templates (memoized per context by serial number).
+ static Handle<JSFunction> InstantiateFunction(
+ Handle<FunctionTemplateInfo> data, bool* exc);
+ static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
+ bool* exc);
+ static void ConfigureInstance(Handle<Object> instance,
+ Handle<Object> data,
+ bool* exc);
+ // Format one line of a stack trace via a builtin JS function; failures
+ // degrade to the empty string.
+ static Handle<String> GetStackTraceLine(Handle<Object> recv,
+ Handle<JSFunction> fun,
+ Handle<Object> pos,
+ Handle<Object> is_global);
+
+ // Get a function delegate (or undefined) for the given non-function
+ // object. Used for support calling objects as functions.
+ static Handle<Object> GetFunctionDelegate(Handle<Object> object);
+};
+
+
+class ExecutionAccess;
+
+
+// Stack guards are used to limit the number of nested invocations of
+// JavaScript and the stack size used in each invocation.
+//
+// All state is static/per-process (see thread_local_); constructing a
+// StackGuard on the stack tracks nesting and, for the outermost guard,
+// installs limits derived from the current stack position. Setting both
+// limits to kInterruptLimit forces the generated code's stack check to
+// trap, which is how interrupts/preemption/debug breaks are delivered.
+class StackGuard BASE_EMBEDDED {
+ public:
+ StackGuard();
+
+ ~StackGuard();
+
+ // Install an externally supplied stack limit (unless special limits,
+ // e.g. a pending interrupt, are currently active).
+ static void SetStackLimit(uintptr_t limit);
+
+ // Address of the JS limit, for embedding in generated stack checks.
+ static Address address_of_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.jslimit_);
+ }
+
+ // Threading support.
+ static char* ArchiveStackGuard(char* to);
+ static char* RestoreStackGuard(char* from);
+ static int ArchiveSpacePerThread();
+
+ static bool IsStackOverflow();
+ static void EnableInterrupts();
+ static void DisableInterrupts();
+ static bool IsPreempted();
+ static void Preempt();
+ static bool IsInterrupted();
+ static void Interrupt();
+ static bool IsDebugBreak();
+ static void DebugBreak();
+ static void Continue(InterruptFlag after_what);
+
+ private:
+ // You should hold the ExecutionAccess lock when calling this method.
+ static bool IsSet(const ExecutionAccess& lock);
+
+ // This provides an asynchronous read of the stack limit for the current
+ // thread. There are no locks protecting this, but it is assumed that you
+ // have the global V8 lock if you are using multiple V8 threads.
+ static uintptr_t climit() {
+ return thread_local_.climit_;
+ }
+
+ // You should hold the ExecutionAccess lock when calling this method.
+ static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
+ thread_local_.jslimit_ = value;
+ thread_local_.climit_ = value;
+ }
+
+ // Reset limits to initial values. For example after handling interrupt.
+ // You should hold the ExecutionAccess lock when calling this method.
+ static void reset_limits(const ExecutionAccess& lock) {
+ if (thread_local_.nesting_ == 0) {
+ // No limits have been set yet.
+ set_limits(kIllegalLimit, lock);
+ } else {
+ thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+ thread_local_.climit_ = thread_local_.initial_climit_;
+ }
+ }
+
+ // Guard margin below the current stack position.
+ static const int kLimitSize = 512 * KB;
+ // Sentinel limits. NOTE(review): these are 32-bit literals; on a target
+ // with 64-bit uintptr_t they are not ~0 -- confirm the sentinels cannot
+ // collide with real stack addresses there.
+ static const uintptr_t kInterruptLimit = 0xfffffffe;
+ static const uintptr_t kIllegalLimit = 0xffffffff;
+
+ // Per-process guard state: current and saved limits, guard nesting
+ // depth, and the pending-interrupt bitmask (InterruptFlag values).
+ class ThreadLocal {
+ public:
+ ThreadLocal()
+ : initial_jslimit_(kIllegalLimit),
+ jslimit_(kIllegalLimit),
+ initial_climit_(kIllegalLimit),
+ climit_(kIllegalLimit),
+ nesting_(0),
+ interrupt_flags_(0) {}
+ uintptr_t initial_jslimit_;
+ uintptr_t jslimit_;
+ uintptr_t initial_climit_;
+ uintptr_t climit_;
+ int nesting_;
+ int interrupt_flags_;
+ };
+ static ThreadLocal thread_local_;
+ friend class StackLimitCheck;
+};
+
+
+// Support for checking for stack-overflows in C++ code.
+// Uses the address of this stack-allocated object as the current stack
+// position, consistent with StackGuard's assumption that the stack grows
+// towards lower addresses.
+class StackLimitCheck BASE_EMBEDDED {
+ public:
+ bool HasOverflowed() const {
+ return reinterpret_cast<uintptr_t>(this) < StackGuard::climit();
+ }
+};
+
+
+// Built-in debugging extensions. Each declares one native function in its
+// kSource script and maps it to a static callback via GetNativeFunction.
+
+// print(...): writes its arguments to stdout.
+class PrintExtension : public v8::Extension {
+ public:
+ PrintExtension() : v8::Extension("v8/print", kSource) { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> Print(const v8::Arguments& args);
+ private:
+ static const char* kSource;
+};
+
+
+// load(path): reads, compiles and runs a script from the file system.
+class LoadExtension : public v8::Extension {
+ public:
+ LoadExtension() : v8::Extension("v8/load", kSource) { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> Load(const v8::Arguments& args);
+ private:
+ static const char* kSource;
+};
+
+
+// quit([code]): terminates the process.
+class QuitExtension : public v8::Extension {
+ public:
+ QuitExtension() : v8::Extension("v8/quit", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> Quit(const v8::Arguments& args);
+ private:
+ static const char* kSource;
+};
+
+
+// gc(): forces a garbage collection.
+class GCExtension : public v8::Extension {
+ public:
+ GCExtension() : v8::Extension("v8/gc", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> GC(const v8::Arguments& args);
+ private:
+ static const char* kSource;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_EXECUTION_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "execution.h"
+#include "factory.h"
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+
+// Factory methods are thin handle-returning wrappers around Heap allocation
+// functions; CALL_HEAP_FUNCTION retries on allocation failure and wraps the
+// result in a handle of the given type.
+
+Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
+ ASSERT(0 <= size);
+ CALL_HEAP_FUNCTION(Heap::AllocateFixedArray(size, pretenure), FixedArray);
+}
+
+
+Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
+ ASSERT(0 <= number_of_descriptors);
+ CALL_HEAP_FUNCTION(DescriptorArray::Allocate(number_of_descriptors),
+ DescriptorArray);
+}
+
+
+// Symbols are created in the old generation (code space).
+Handle<String> Factory::LookupSymbol(Vector<const char> string) {
+ CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
+}
+
+
+// String factory wrappers: construct strings from various encodings and
+// representations (ascii, utf8, two-byte, cons, slice, external resource).
+
+Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(Heap::AllocateStringFromAscii(string, pretenure), String);
+}
+
+Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(Heap::AllocateStringFromUtf8(string, pretenure), String);
+}
+
+
+Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string) {
+ CALL_HEAP_FUNCTION(Heap::AllocateStringFromTwoByte(string), String);
+}
+
+
+// Uninitialized two-byte string of the given length; caller fills it in.
+Handle<String> Factory::NewRawTwoByteString(int length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(length, pretenure), String);
+}
+
+
+// Concatenation represented as a cons cell rather than a copy.
+Handle<String> Factory::NewConsString(Handle<String> first,
+ Handle<String> second) {
+ CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String);
+}
+
+
+// Substring [begin, end) sharing the backing of |str|.
+Handle<String> Factory::NewStringSlice(Handle<String> str, int begin, int end) {
+ CALL_HEAP_FUNCTION(str->Slice(begin, end), String);
+}
+
+
+// Strings backed by caller-owned external resources.
+Handle<String> Factory::NewExternalStringFromAscii(
+ ExternalAsciiString::Resource* resource) {
+ CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromAscii(resource), String);
+}
+
+
+Handle<String> Factory::NewExternalStringFromTwoByte(
+ ExternalTwoByteString::Resource* resource) {
+ CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromTwoByte(resource), String);
+}
+
+
+// Context factory wrappers.
+
+Handle<Context> Factory::NewGlobalContext() {
+ CALL_HEAP_FUNCTION(Heap::AllocateGlobalContext(), Context);
+}
+
+
+Handle<Context> Factory::NewFunctionContext(int length,
+ Handle<JSFunction> closure) {
+ CALL_HEAP_FUNCTION(Heap::AllocateFunctionContext(length, *closure), Context);
+}
+
+
+// Context for a 'with' statement, chaining |extension| onto |previous|.
+Handle<Context> Factory::NewWithContext(Handle<Context> previous,
+ Handle<JSObject> extension) {
+ CALL_HEAP_FUNCTION(Heap::AllocateWithContext(*previous, *extension), Context);
+}
+
+
+// Allocate a Struct subtype instance by InstanceType.
+Handle<Struct> Factory::NewStruct(InstanceType type) {
+ CALL_HEAP_FUNCTION(Heap::AllocateStruct(type), Struct);
+}
+
+
+Handle<AccessorInfo> Factory::NewAccessorInfo() {
+ Handle<AccessorInfo> info =
+ Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
+ info->set_flag(0); // Must clear the flag, it was initialized as undefined.
+ return info;
+}
+
+
+Handle<Script> Factory::NewScript(Handle<String> source) {
+ Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
+ script->set_source(*source);
+ script->set_name(Heap::undefined_value());
+ script->set_line_offset(Smi::FromInt(0));
+ script->set_column_offset(Smi::FromInt(0));
+ script->set_wrapper(*Factory::NewProxy(0, TENURED));
+ script->set_type(Smi::FromInt(SCRIPT_TYPE_NORMAL));
+ return script;
+}
+
+
+Handle<Proxy> Factory::NewProxy(Address addr, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(Heap::AllocateProxy(addr, pretenure), Proxy);
+}
+
+
+Handle<Proxy> Factory::NewProxy(const AccessorDescriptor* desc) {
+ return NewProxy((Address) desc, TENURED);
+}
+
+
+// Allocates a ByteArray of |length| bytes; |length| must be non-negative.
+Handle<ByteArray> Factory::NewByteArray(int length) {
+ ASSERT(0 <= length);
+ CALL_HEAP_FUNCTION(Heap::AllocateByteArray(length), ByteArray);
+}
+
+
+// Allocates a Map for instances of the given type and size.
+Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
+ CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
+}
+
+
+// Allocates the prototype object for |function|.
+Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(Heap::AllocateFunctionPrototype(*function), JSObject);
+}
+
+
+// Returns a copy of |src| (see Map::Copy).
+Handle<Map> Factory::CopyMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(src->Copy(), Map);
+}
+
+
+// Returns a shallow copy of |array|.
+Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
+ CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
+}
+
+
+Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
+ Handle<JSFunction> boilerplate,
+ Handle<Map> function_map) {
+ ASSERT(boilerplate->IsBoilerplate());
+ ASSERT(!boilerplate->has_initial_map());
+ ASSERT(!boilerplate->has_prototype());
+ ASSERT(boilerplate->properties() == Heap::empty_fixed_array());
+ ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
+ CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
+ boilerplate->shared(),
+ Heap::the_hole_value()),
+ JSFunction);
+}
+
+
+// Instantiates a closure from |boilerplate| in |context|, giving it a
+// fresh (tenured) literals array when the boilerplate declares literals.
+Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
+ Handle<JSFunction> boilerplate,
+ Handle<Context> context) {
+ Handle<JSFunction> result =
+ BaseNewFunctionFromBoilerplate(boilerplate, Top::function_map());
+ result->set_context(*context);
+ int number_of_literals = boilerplate->literals()->length();
+ if (number_of_literals > 0) {
+ Handle<FixedArray> literals =
+ Factory::NewFixedArray(number_of_literals, TENURED);
+ result->set_literals(*literals);
+ }
+ ASSERT(!result->IsBoilerplate());
+ return result;
+}
+
+
+// Creates a number object from |value| (a Smi when possible, otherwise a
+// heap number — see Heap::NumberFromDouble).
+Handle<Object> Factory::NewNumber(double value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(Heap::NumberFromDouble(value, pretenure), Object);
+}
+
+
+// Creates a number object from a 32-bit integer.
+Handle<Object> Factory::NewNumberFromInt(int value) {
+ CALL_HEAP_FUNCTION(Heap::NumberFromInt32(value), Object);
+}
+
+
+// Allocates an environment-independent object used by the API layer
+// (allocated with the dedicated neander map).
+Handle<JSObject> Factory::NewNeanderObject() {
+ CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(Heap::neander_map()),
+ JSObject);
+}
+
+
+// Builds a TypeError via the JavaScript MakeTypeError factory.
+Handle<Object> Factory::NewTypeError(const char* type,
+ Vector< Handle<Object> > args) {
+ return NewError("MakeTypeError", type, args);
+}
+
+
+// Builds a TypeError by invoking the $TypeError constructor with |message|.
+Handle<Object> Factory::NewTypeError(Handle<String> message) {
+ return NewError("$TypeError", message);
+}
+
+
+// Builds a RangeError via the JavaScript MakeRangeError factory.
+Handle<Object> Factory::NewRangeError(const char* type,
+ Vector< Handle<Object> > args) {
+ return NewError("MakeRangeError", type, args);
+}
+
+
+// Builds a RangeError by invoking the $RangeError constructor with |message|.
+Handle<Object> Factory::NewRangeError(Handle<String> message) {
+ return NewError("$RangeError", message);
+}
+
+
+// Builds a SyntaxError via the JavaScript MakeSyntaxError factory.
+Handle<Object> Factory::NewSyntaxError(const char* type, Handle<JSArray> args) {
+ return NewError("MakeSyntaxError", type, args);
+}
+
+
+// Builds a SyntaxError by invoking the $SyntaxError constructor.
+Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
+ return NewError("$SyntaxError", message);
+}
+
+
+// Builds a ReferenceError via the JavaScript MakeReferenceError factory.
+Handle<Object> Factory::NewReferenceError(const char* type,
+ Vector< Handle<Object> > args) {
+ return NewError("MakeReferenceError", type, args);
+}
+
+
+// Builds a ReferenceError by invoking the $ReferenceError constructor.
+Handle<Object> Factory::NewReferenceError(Handle<String> message) {
+ return NewError("$ReferenceError", message);
+}
+
+
+// Packs |args| into a JSArray and delegates to the JSArray overload.
+// The intermediate handles are allocated in a local scope; only the
+// result handle escapes it.
+Handle<Object> Factory::NewError(const char* maker, const char* type,
+ Vector< Handle<Object> > args) {
+ HandleScope scope;
+ Handle<JSArray> array = NewJSArray(args.length());
+ for (int i = 0; i < args.length(); i++)
+ SetElement(array, i, args[i]);
+ Handle<Object> result = NewError(maker, type, array);
+ return result.EscapeFrom(&scope);
+}
+
+
+// Builds an EvalError via the JavaScript MakeEvalError factory.
+Handle<Object> Factory::NewEvalError(const char* type,
+ Vector< Handle<Object> > args) {
+ return NewError("MakeEvalError", type, args);
+}
+
+
+// Builds a generic Error via the JavaScript MakeError factory.
+Handle<Object> Factory::NewError(const char* type,
+ Vector< Handle<Object> > args) {
+ return NewError("MakeError", type, args);
+}
+
+
+// Looks up the JavaScript error-maker function named |maker| in the
+// security-context builtins and calls it with (type, args). If the maker
+// itself throws, the thrown exception is returned as the result (hence
+// |caught_exception| is deliberately not inspected).
+Handle<Object> Factory::NewError(const char* maker,
+ const char* type,
+ Handle<JSArray> args) {
+ Handle<String> make_str = Factory::LookupAsciiSymbol(maker);
+ Handle<JSFunction> fun =
+ Handle<JSFunction>(
+ JSFunction::cast(
+ Top::security_context_builtins()->GetProperty(*make_str)));
+ Handle<Object> type_obj = Factory::LookupAsciiSymbol(type);
+ Object** argv[2] = { type_obj.location(),
+ Handle<Object>::cast(args).location() };
+
+ // Invoke the JavaScript factory method. If an exception is thrown while
+ // running the factory method, use the exception as the result.
+ bool caught_exception;
+ Handle<Object> result = Execution::TryCall(fun,
+ Top::security_context_builtins(),
+ 2,
+ argv,
+ &caught_exception);
+ return result;
+}
+
+
+// Builds a plain Error by invoking the $Error constructor with |message|.
+Handle<Object> Factory::NewError(Handle<String> message) {
+ return NewError("$Error", message);
+}
+
+
+// Looks up the error constructor named |constructor| in the security-context
+// builtins and calls it with |message|. As with the maker-based overload,
+// an exception thrown during the call becomes the result.
+Handle<Object> Factory::NewError(const char* constructor,
+ Handle<String> message) {
+ Handle<String> constr = Factory::LookupAsciiSymbol(constructor);
+ Handle<JSFunction> fun =
+ Handle<JSFunction>(
+ JSFunction::cast(
+ Top::security_context_builtins()->GetProperty(*constr)));
+ Object** argv[1] = { Handle<Object>::cast(message).location() };
+
+ // Invoke the JavaScript factory method. If an exception is thrown while
+ // running the factory method, use the exception as the result.
+ bool caught_exception;
+ Handle<Object> result = Execution::TryCall(fun,
+ Top::security_context_builtins(),
+ 1,
+ argv,
+ &caught_exception);
+ return result;
+}
+
+
+// Creates a function named |name| running |code|. An initial map (with a
+// fresh prototype object) is attached when forced, or whenever instances
+// would not be plain minimal JSObjects.
+Handle<JSFunction> Factory::NewFunction(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<Code> code,
+ bool force_initial_map) {
+ // Allocate the function
+ Handle<JSFunction> function = NewFunction(name, the_hole_value());
+ function->set_code(*code);
+
+ if (force_initial_map ||
+ type != JS_OBJECT_TYPE ||
+ instance_size != JSObject::kHeaderSize) {
+ Handle<Map> initial_map = NewMap(type, instance_size);
+ Handle<JSObject> prototype = NewFunctionPrototype(function);
+ initial_map->set_prototype(*prototype);
+ function->set_initial_map(*initial_map);
+ initial_map->set_constructor(*function);
+ } else {
+ ASSERT(!function->has_initial_map());
+ ASSERT(!function->has_prototype());
+ }
+
+ return function;
+}
+
+
+// Creates a boilerplate function with |code| and a literals array of the
+// requested size (tenured), or the shared empty array when none are needed.
+Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name,
+ int number_of_literals,
+ Handle<Code> code) {
+ Handle<JSFunction> function = NewFunctionBoilerplate(name);
+ function->set_code(*code);
+ if (number_of_literals > 0) {
+ Handle<FixedArray> literals =
+ Factory::NewFixedArray(number_of_literals, TENURED);
+ function->set_literals(*literals);
+ } else {
+ function->set_literals(Heap::empty_fixed_array());
+ }
+ ASSERT(!function->has_initial_map());
+ ASSERT(!function->has_prototype());
+ return function;
+}
+
+
+// Creates a bare boilerplate function: fresh SharedFunctionInfo, the
+// boilerplate function map, and a hole prototype.
+Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name) {
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
+ CALL_HEAP_FUNCTION(Heap::AllocateFunction(Heap::boilerplate_function_map(),
+ *shared,
+ Heap::the_hole_value()),
+ JSFunction);
+}
+
+
+// Like the five-argument NewFunction, but uses the caller-supplied
+// |prototype| instead of allocating one, wires function.prototype, and adds
+// a non-enumerable 'constructor' back-reference on the prototype.
+Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<JSObject> prototype,
+ Handle<Code> code,
+ bool force_initial_map) {
+ // Allocate the function
+ Handle<JSFunction> function = NewFunction(name, prototype);
+
+ function->set_code(*code);
+
+ if (force_initial_map ||
+ type != JS_OBJECT_TYPE ||
+ instance_size != JSObject::kHeaderSize) {
+ Handle<Map> initial_map = NewMap(type, instance_size);
+ function->set_initial_map(*initial_map);
+ initial_map->set_constructor(*function);
+ }
+
+ // Set function.prototype and give the prototype a constructor
+ // property that refers to the function.
+ SetPrototypeProperty(function, prototype);
+ SetProperty(prototype, Factory::constructor_symbol(), function, DONT_ENUM);
+ return function;
+}
+
+// Creates a Code object from an assembled code descriptor.
+Handle<Code> Factory::NewCode(const CodeDesc& desc, ScopeInfo<>* sinfo,
+ Code::Flags flags) {
+ CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags), Code);
+}
+
+
+// Returns a copy of |code|.
+Handle<Code> Factory::CopyCode(Handle<Code> code) {
+ CALL_HEAP_FUNCTION(Heap::CopyCode(*code), Code);
+}
+
+
+// Collects garbage in the space requested by a failed allocation; aborts
+// the process if the collection itself fails.
+#define CALL_GC(RETRY) \
+ do { \
+ if (!Heap::CollectGarbage(Failure::cast(RETRY)->requested(), \
+ Failure::cast(RETRY)->allocation_space())) { \
+ /* TODO(1181417): Fix this. */ \
+ V8::FatalProcessOutOfMemory("Factory CALL_GC"); \
+ } \
+ } while (false)
+
+
+// Allocate the new array. We cannot use the CALL_HEAP_FUNCTION macro here,
+// because the stack-allocated CallbacksDescriptor instance is not GC safe.
+Handle<DescriptorArray> Factory::CopyAppendProxyDescriptor(
+ Handle<DescriptorArray> array,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ GC_GREEDY_CHECK();
+ CallbacksDescriptor desc(*key, *value, attributes);
+ Object* obj = array->CopyInsert(&desc);
+ if (obj->IsRetryAfterGC()) {
+ CALL_GC(obj);
+ // Rebuild the descriptor after GC: the collection may have moved the
+ // objects *key and *value pointed at.
+ CallbacksDescriptor desc(*key, *value, attributes);
+ obj = array->CopyInsert(&desc);
+ if (obj->IsFailure()) {
+ // TODO(1181417): Fix this.
+ V8::FatalProcessOutOfMemory("CopyAppendProxyDescriptor");
+ }
+ }
+ return Handle<DescriptorArray>(DescriptorArray::cast(obj));
+}
+
+#undef CALL_GC
+
+
+// Returns the symbol (interned string) matching |value|.
+Handle<String> Factory::SymbolFromString(Handle<String> value) {
+ CALL_HEAP_FUNCTION(Heap::LookupSymbol(*value), String);
+}
+
+
+// Returns a sorted descriptor array containing the entries of |array| plus
+// callback descriptors built from |descriptors| (a NeanderArray of
+// AccessorInfo). Later callbacks shadow earlier ones with the same name,
+// and names already present in |array| are skipped; if any duplicates were
+// skipped the result is compacted into an exactly-sized array.
+Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
+ Handle<DescriptorArray> array,
+ Handle<Object> descriptors) {
+ v8::NeanderArray callbacks(descriptors);
+ int nof_callbacks = callbacks.length();
+ Handle<DescriptorArray> result =
+ NewDescriptorArray(array->number_of_descriptors() + nof_callbacks);
+
+ // Number of descriptors added to the result so far.
+ int descriptor_count = 0;
+
+ // Copy the descriptors from the array.
+ DescriptorWriter w(*result);
+ for (DescriptorReader r(*array); !r.eos(); r.advance()) {
+ w.WriteFrom(&r);
+ descriptor_count++;
+ }
+
+ // Number of duplicates detected.
+ int duplicates = 0;
+
+ // Fill in new callback descriptors. Process the callbacks from
+ // back to front so that the last callback with a given name takes
+ // precedence over previously added callbacks with that name.
+ for (int i = nof_callbacks - 1; i >= 0; i--) {
+ Handle<AccessorInfo> entry =
+ Handle<AccessorInfo>(AccessorInfo::cast(callbacks.get(i)));
+ // Ensure the key is a symbol before writing into the instance descriptor.
+ Handle<String> key =
+ SymbolFromString(Handle<String>(String::cast(entry->name())));
+ // Check if a descriptor with this name already exists before writing.
+ if (result->BinarySearch(*key, 0, descriptor_count - 1) ==
+ DescriptorArray::kNotFound) {
+ CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
+ w.Write(&desc);
+ descriptor_count++;
+ } else {
+ duplicates++;
+ }
+ }
+
+ // If duplicates were detected, allocate a result of the right size
+ // and transfer the elements.
+ if (duplicates > 0) {
+ Handle<DescriptorArray> new_result =
+ NewDescriptorArray(result->number_of_descriptors() - duplicates);
+ DescriptorWriter w(*new_result);
+ DescriptorReader r(*result);
+ while (!w.eos()) {
+ w.WriteFrom(&r);
+ r.advance();
+ }
+ result = new_result;
+ }
+
+ // Sort the result before returning.
+ result->Sort();
+ return result;
+}
+
+
+// Allocates a JSObject whose map comes from |constructor|'s initial map.
+Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(Heap::AllocateJSObject(*constructor, pretenure), JSObject);
+}
+
+
+// Allocates a tenured object for an object literal. The Object map is
+// copied so this literal gets its own descriptors, and the expected
+// property count is pre-reserved as unused property fields.
+Handle<JSObject> Factory::NewObjectLiteral(int expected_number_of_properties) {
+ Handle<Map> map = Handle<Map>(Top::object_function()->initial_map());
+ map = Factory::CopyMap(map);
+ map->set_instance_descriptors(
+ DescriptorArray::cast(Heap::empty_fixed_array()));
+ map->set_unused_property_fields(expected_number_of_properties);
+ CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(*map, TENURED),
+ JSObject);
+}
+
+
+// Allocates a tenured JSArray of |length| elements for an array literal.
+Handle<JSArray> Factory::NewArrayLiteral(int length) {
+ return NewJSArrayWithElements(NewFixedArray(length), TENURED);
+}
+
+
+// Allocates a JSArray via the Array function and initializes it to |length|.
+Handle<JSArray> Factory::NewJSArray(int length,
+ PretenureFlag pretenure) {
+ Handle<JSObject> obj = NewJSObject(Top::array_function(), pretenure);
+ CALL_HEAP_FUNCTION(Handle<JSArray>::cast(obj)->Initialize(length), JSArray);
+}
+
+
+// Allocates a JSArray whose backing store is the given elements array.
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
+ PretenureFlag pretenure) {
+ Handle<JSArray> result =
+ Handle<JSArray>::cast(NewJSObject(Top::array_function(), pretenure));
+ result->SetContent(*elements);
+ return result;
+}
+
+
+// Allocates a SharedFunctionInfo named |name|.
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
+ CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
+ SharedFunctionInfo);
+}
+
+
+// Inserts |value| at numeric |key|; returns the (possibly reallocated)
+// dictionary.
+Handle<Dictionary> Factory::DictionaryAtNumberPut(Handle<Dictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), Dictionary);
+}
+
+
+// Allocates a JSFunction with a fresh SharedFunctionInfo and the given
+// prototype; the context is set by the public NewFunction wrapper.
+Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
+ Handle<Object> prototype) {
+ Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
+ CALL_HEAP_FUNCTION(Heap::AllocateFunction(*Top::function_map(),
+ *function_share,
+ *prototype),
+ JSFunction);
+}
+
+
+// Allocates a JSFunction and binds it to the current global context.
+Handle<JSFunction> Factory::NewFunction(Handle<String> name,
+ Handle<Object> prototype) {
+ Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
+ fun->set_context(Top::context()->global_context());
+ return fun;
+}
+
+
+// Converts |object| to a JSObject per ES ToObject in |global_context|.
+Handle<Object> Factory::ToObject(Handle<Object> object,
+ Handle<Context> global_context) {
+ CALL_HEAP_FUNCTION(object->ToObject(*global_context), Object);
+}
+
+
+// Allocates an arguments object for |callee| with the given length.
+Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
+ int length) {
+ CALL_HEAP_FUNCTION(Heap::AllocateArgumentsObject(*callee, length), JSObject);
+}
+
+
+// Builds the JSFunction for an API function template: instances run the
+// HandleApiCall builtin, reserve space for the template's internal fields,
+// and the initial map is configured from the template's flags (undetectable,
+// hidden prototype, access checks, interceptors, call-as-function). Accessor
+// descriptors are accumulated up the parent-template chain, with child
+// templates taking precedence (see CopyAppendCallbackDescriptors).
+Handle<JSFunction> Factory::CreateApiFunction(
+ Handle<FunctionTemplateInfo> obj,
+ bool is_global) {
+ Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::HandleApiCall));
+
+ int internal_field_count = Smi::cast(obj->internal_field_count())->value();
+ int size = kPointerSize * internal_field_count;
+ if (is_global) {
+ size += JSGlobalObject::kSize;
+ } else {
+ size += JSObject::kHeaderSize;
+ }
+
+ InstanceType type = is_global ? JS_GLOBAL_OBJECT_TYPE : JS_OBJECT_TYPE;
+
+ Handle<JSFunction> result =
+ Factory::NewFunction(Factory::empty_symbol(), type, size, code, true);
+ // Set class name.
+ Handle<Object> class_name = Handle<Object>(obj->class_name());
+ if (class_name->IsString()) {
+ result->shared()->set_instance_class_name(*class_name);
+ result->shared()->set_name(*class_name);
+ }
+
+ Handle<Map> map = Handle<Map>(result->initial_map());
+
+ // Mark as undetectable if needed.
+ if (obj->undetectable()) {
+ map->set_is_undetectable();
+ }
+
+ // Mark as hidden for the __proto__ accessor if needed.
+ if (obj->hidden_prototype()) {
+ map->set_is_hidden_prototype();
+ }
+
+ // Mark as needs_access_check if needed.
+ if (obj->needs_access_check()) {
+ map->set_needs_access_check();
+ }
+
+ // If the function template info specifies a lookup handler the
+ // initial_map must have set the bit has_special_lookup.
+ if (obj->lookup_callback()->IsProxy()) {
+ ASSERT(!map->has_special_lookup());
+ map->set_special_lookup();
+ }
+
+ // Set interceptor information in the map.
+ if (!obj->named_property_handler()->IsUndefined()) {
+ map->set_has_named_interceptor();
+ }
+ if (!obj->indexed_property_handler()->IsUndefined()) {
+ map->set_has_indexed_interceptor();
+ }
+
+ // Set instance call-as-function information in the map.
+ if (!obj->instance_call_handler()->IsUndefined()) {
+ map->set_has_instance_call_handler();
+ }
+
+ result->shared()->set_function_data(*obj);
+
+ // Recursively copy parent templates' accessors, 'data' may be modified.
+ Handle<DescriptorArray> array =
+ Handle<DescriptorArray>(map->instance_descriptors());
+ while (true) {
+ Handle<Object> props = Handle<Object>(obj->property_accessors());
+ if (!props->IsUndefined()) {
+ array = Factory::CopyAppendCallbackDescriptors(array, props);
+ }
+ Handle<Object> parent = Handle<Object>(obj->parent_template());
+ if (parent->IsUndefined()) break;
+ obj = Handle<FunctionTemplateInfo>::cast(parent);
+ }
+ if (array->length() > 0) {
+ map->set_instance_descriptors(*array);
+ }
+
+ return result;
+}
+
+
+// Applies the properties declared by |desc|'s instance template to
+// |instance|. When the template declares none, reports no pending
+// exception and does nothing.
+void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
+ Handle<JSObject> instance,
+ bool* pending_exception) {
+ // Configure the instance by adding the properties specified by the
+ // instance template.
+ Handle<Object> instance_template = Handle<Object>(desc->instance_template());
+ if (!instance_template->IsUndefined()) {
+ Execution::ConfigureInstance(instance,
+ instance_template,
+ pending_exception);
+ } else {
+ *pending_exception = false;
+ }
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FACTORY_H_
+#define V8_FACTORY_H_
+
+#include "heap.h"
+
+namespace v8 { namespace internal {
+
+
+// Interface for handle based allocation.
+
+// Interface for handle-based allocation: every allocator returns a Handle
+// and (via CALL_HEAP_FUNCTION in the implementation) retries after GC on
+// allocation failure instead of returning a Failure to the caller.
+class Factory : public AllStatic {
+ public:
+ // Allocate a new fixed array.
+ static Handle<FixedArray> NewFixedArray(
+ int size,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+
+ static Handle<String> LookupSymbol(Vector<const char> str);
+ static Handle<String> LookupAsciiSymbol(const char* str) {
+ return LookupSymbol(CStrVector(str));
+ }
+
+
+ // String creation functions. Most of the string creation functions take
+ // a Heap::PretenureFlag argument to optionally request that they be
+ // allocated in the old generation. The pretenure flag defaults to
+ // NOT_TENURED.
+ //
+ // Creates a new String object. There are two String encodings: ASCII and
+ // two byte. One should choose between the three string factory functions
+ // based on the encoding of the string buffer that the string is
+ // initialized from.
+ // - ...FromAscii initializes the string from a buffer that is ASCII
+ // encoded (it does not check that the buffer is ASCII encoded) and
+ // the result will be ASCII encoded.
+ // - ...FromUtf8 initializes the string from a buffer that is UTF-8
+ // encoded. If the characters are all single-byte characters, the
+ // result will be ASCII encoded, otherwise it will be converted to two
+ // byte.
+ // - ...FromTwoByte initializes the string from a buffer that is two
+ // byte encoded. If the characters are all single-byte characters,
+ // the result will be converted to ASCII, otherwise it will be left as
+ // two byte.
+ //
+ // ASCII strings are pretenured when used as keys in the SourceCodeCache.
+ static Handle<String> NewStringFromAscii(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // UTF8 strings are pretenured when used for regexp literal patterns and
+ // flags in the parser.
+ static Handle<String> NewStringFromUtf8(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ static Handle<String> NewStringFromTwoByte(Vector<const uc16> str);
+
+ // Allocates and partially initializes a TwoByte String. The characters of
+ // the string are uninitialized. Currently used in regexp code only, where
+ // they are pretenured.
+ static Handle<String> NewRawTwoByteString(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Create a new cons string object which consists of a pair of strings.
+ static Handle<String> NewConsString(Handle<String> first,
+ Handle<String> second);
+
+ // Create a new sliced string object which represents a substring of a
+ // backing string.
+ static Handle<String> NewStringSlice(Handle<String> str, int begin, int end);
+
+ // Creates a new external String object. There are two String encodings
+ // in the system: ASCII and two byte. Unlike other String types, it does
+ // not make sense to have a UTF-8 factory function for external strings,
+ // because we cannot change the underlying buffer.
+ static Handle<String> NewExternalStringFromAscii(
+ ExternalAsciiString::Resource* resource);
+ static Handle<String> NewExternalStringFromTwoByte(
+ ExternalTwoByteString::Resource* resource);
+
+ // Create a global (but otherwise uninitialized) context.
+ static Handle<Context> NewGlobalContext();
+
+ // Create a function context.
+ static Handle<Context> NewFunctionContext(int length,
+ Handle<JSFunction> closure);
+
+ // Create a 'with' context.
+ static Handle<Context> NewWithContext(Handle<Context> previous,
+ Handle<JSObject> extension);
+
+ // Return the Symbol matching the passed in string.
+ static Handle<String> SymbolFromString(Handle<String> value);
+
+ // Allocate a new struct. The struct is pretenured (allocated directly in
+ // the old generation).
+ static Handle<Struct> NewStruct(InstanceType type);
+
+ static Handle<AccessorInfo> NewAccessorInfo();
+
+ static Handle<Script> NewScript(Handle<String> source);
+
+ // Proxies are pretenured when allocated by the bootstrapper.
+ static Handle<Proxy> NewProxy(Address addr,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate a new proxy. The proxy is pretenured (allocated directly in
+ // the old generation).
+ static Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
+
+ static Handle<ByteArray> NewByteArray(int length);
+
+ static Handle<Map> NewMap(InstanceType type, int instance_size);
+
+ static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
+
+ static Handle<Map> CopyMap(Handle<Map> map);
+
+ static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+
+ // Numbers (eg, literals) are pretenured by the parser.
+ static Handle<Object> NewNumber(double value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ static Handle<Object> NewNumberFromInt(int value);
+
+ // These objects are used by the api to create env-independent data
+ // structures in the heap.
+ static Handle<JSObject> NewNeanderObject();
+
+ static Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
+
+ // JS objects are pretenured when allocated by the bootstrapper and
+ // runtime.
+ static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate a JS object representing an object literal. The object is
+ // pretenured (allocated directly in the old generation).
+ static Handle<JSObject> NewObjectLiteral(int expected_number_of_properties);
+
+ // Allocate a JS array representing an array literal. The array is
+ // pretenured (allocated directly in the old generation).
+ static Handle<JSArray> NewArrayLiteral(int length);
+
+ // JS arrays are pretenured when allocated by the parser.
+ static Handle<JSArray> NewJSArray(int init_length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ static Handle<JSArray> NewJSArrayWithElements(
+ Handle<FixedArray> elements,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ static Handle<JSFunction> NewFunction(Handle<String> name,
+ Handle<Object> prototype);
+
+ static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
+
+ static Handle<JSFunction> NewFunctionFromBoilerplate(
+ Handle<JSFunction> boilerplate,
+ Handle<Context> context);
+
+ static Handle<Code> NewCode(const CodeDesc& desc, ScopeInfo<>* sinfo,
+ Code::Flags flags);
+
+ static Handle<Code> CopyCode(Handle<Code> code);
+
+ static Handle<Object> ToObject(Handle<Object> object,
+ Handle<Context> global_context);
+
+ // Interface for creating error objects.
+
+ static Handle<Object> NewError(const char* maker, const char* type,
+ Handle<JSArray> args);
+ static Handle<Object> NewError(const char* maker, const char* type,
+ Vector< Handle<Object> > args);
+ static Handle<Object> NewError(const char* type,
+ Vector< Handle<Object> > args);
+ static Handle<Object> NewError(Handle<String> message);
+ static Handle<Object> NewError(const char* constructor,
+ Handle<String> message);
+
+ static Handle<Object> NewTypeError(const char* type,
+ Vector< Handle<Object> > args);
+ static Handle<Object> NewTypeError(Handle<String> message);
+
+ static Handle<Object> NewRangeError(const char* type,
+ Vector< Handle<Object> > args);
+ static Handle<Object> NewRangeError(Handle<String> message);
+
+ static Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
+ static Handle<Object> NewSyntaxError(Handle<String> message);
+
+ static Handle<Object> NewReferenceError(const char* type,
+ Vector< Handle<Object> > args);
+ static Handle<Object> NewReferenceError(Handle<String> message);
+
+ static Handle<Object> NewEvalError(const char* type,
+ Vector< Handle<Object> > args);
+
+
+ static Handle<JSFunction> NewFunction(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<Code> code,
+ bool force_initial_map);
+
+ static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name,
+ int number_of_literals,
+ Handle<Code> code);
+
+ static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name);
+
+ static Handle<JSFunction> NewFunction(Handle<Map> function_map,
+ Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
+
+
+ static Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<JSObject> prototype,
+ Handle<Code> code,
+ bool force_initial_map);
+
+ static Handle<DescriptorArray> CopyAppendProxyDescriptor(
+ Handle<DescriptorArray> array,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static Handle<JSFunction> CreateApiFunction(Handle<FunctionTemplateInfo> data,
+ bool is_global = false);
+
+ static Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
+
+ // Installs interceptors on the instance. 'desc' is a function template,
+ // and instance is an object instance created by the function of this
+ // function template.
+ static void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
+ Handle<JSObject> instance,
+ bool* pending_exception);
+
+#define ROOT_ACCESSOR(type, name) \
+ static Handle<type> name() { return Handle<type>(&Heap::name##_); }
+ ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, str) \
+ static Handle<String> name() { return Handle<String>(&Heap::name##_); }
+ SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+ static Handle<DescriptorArray> empty_descriptor_array() {
+ return Handle<DescriptorArray>::cast(empty_fixed_array());
+ }
+
+ static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
+
+ static Handle<Dictionary> DictionaryAtNumberPut(Handle<Dictionary>,
+ uint32_t key,
+ Handle<Object> value);
+
+ private:
+ static Handle<JSFunction> NewFunctionHelper(Handle<String> name,
+ Handle<Object> prototype);
+
+ static Handle<DescriptorArray> CopyAppendCallbackDescriptors(
+ Handle<DescriptorArray> array,
+ Handle<Object> descriptors);
+
+ static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
+ Handle<JSFunction> boilerplate,
+ Handle<Map> function_map);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_FACTORY_H_
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FLAGS_INL_H_
+#define V8_FLAGS_INL_H_
+
+namespace v8 { namespace internal {
+
+// Typed view of the flag's variable; only valid for BOOL flags.
+bool* Flag::bool_variable() const {
+ ASSERT(type_ == BOOL);
+ return &variable_->b;
+}
+
+// Typed view of the flag's variable; only valid for INT flags.
+int* Flag::int_variable() const {
+ ASSERT(type_ == INT);
+ return &variable_->i;
+}
+
+// Typed view of the flag's variable; only valid for FLOAT flags.
+double* Flag::float_variable() const {
+ ASSERT(type_ == FLOAT);
+ return &variable_->f;
+}
+
+// Typed view of the flag's variable; only valid for STRING flags.
+const char** Flag::string_variable() const {
+ ASSERT(type_ == STRING);
+ return &variable_->s;
+}
+
+// Default value of a BOOL flag.
+bool Flag::bool_default() const {
+ ASSERT(type_ == BOOL);
+ return default_.b;
+}
+
+// Default value of an INT flag.
+int Flag::int_default() const {
+ ASSERT(type_ == INT);
+ return default_.i;
+}
+
+// Default value of a FLOAT flag.
+double Flag::float_default() const {
+ ASSERT(type_ == FLOAT);
+ return default_.f;
+}
+
+// Default value of a STRING flag.
+const char* Flag::string_default() const {
+ ASSERT(type_ == STRING);
+ return default_.s;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_FLAGS_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <ctype.h>
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
+// -----------------------------------------------------------------------------
+// Helpers
+
+// Maps '_' to '-' so flag names may be written with either separator.
+static inline char NormalizeChar(char ch) {
+ if (ch == '_') return '-';
+ return ch;
+}
+
+
+// Returns a heap-allocated copy of name with '_' mapped to '-'.
+// The copy includes the terminating '\0' and is never freed (flags
+// live for the entire process).
+static const char* NormalizeName(const char* name) {
+ int len = strlen(name);
+ char* copy = NewArray<char>(len + 1);
+ int i = 0;
+ do {
+ copy[i] = NormalizeChar(name[i]);
+ } while (name[i++] != '\0');
+ return copy;
+}
+
+
+// Compares two flag names, treating '-' and '_' as the same character.
+static bool EqualNames(const char* a, const char* b) {
+ int i = 0;
+ while (NormalizeChar(a[i]) == NormalizeChar(b[i])) {
+ if (a[i] == '\0') return true;
+ i++;
+ }
+ return false;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Flag
+
+// Constructs a flag descriptor and registers it with the global
+// FlagList. Invoked by the static Flag_##name objects created by
+// DEFINE_FLAG, i.e. during static initialization.
+Flag::Flag(const char* file, const char* name, const char* comment,
+ Type type, void* variable, FlagValue default_) {
+ file_ = file;
+ // NormalizeName() returns a heap copy with '_' mapped to '-'; the
+ // copy is never freed since flags live for the whole process.
+ name_ = NormalizeName(name);
+ comment_ = comment;
+ type_ = type;
+ // The flag variable is overlaid with the FlagValue union; see the
+ // comment in SetToDefault() for why this cast is safe.
+ variable_ = reinterpret_cast<FlagValue*>(variable);
+ this->default_ = default_;
+ FlagList::Register(this);
+}
+
+
+// Resets the overlaid flag variable to its registered default value.
+void Flag::SetToDefault() {
+ // Note that we cannot simply do '*variable_ = default_;' since
+ // flag variables are not really of type FlagValue and thus may
+ // be smaller! The FlagValue union is simply 'overlayed' on top
+ // of a flag variable for convenient access. Since union members
+ // are guaranteed to be aligned at the beginning, this works.
+ switch (type_) {
+ case Flag::BOOL:
+ variable_->b = default_.b;
+ return;
+ case Flag::INT:
+ variable_->i = default_.i;
+ return;
+ case Flag::FLOAT:
+ variable_->f = default_.f;
+ return;
+ case Flag::STRING:
+ variable_->s = default_.s;
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+// Returns true if the current value equals the default. String flags
+// compare by content when both pointers are non-NULL, otherwise by
+// pointer identity (so two NULL strings compare equal).
+bool Flag::IsDefault() const {
+ switch (type_) {
+ case Flag::BOOL:
+ return variable_->b == default_.b;
+ case Flag::INT:
+ return variable_->i == default_.i;
+ case Flag::FLOAT:
+ return variable_->f == default_.f;
+ case Flag::STRING:
+ if (variable_->s && default_.s) {
+ return strcmp(variable_->s, default_.s) == 0;
+ } else {
+ return variable_->s == default_.s;
+ }
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+// Human-readable name of a flag type, used in help output and in
+// error messages.
+static const char* Type2String(Flag::Type type) {
+ if (type == Flag::BOOL) return "bool";
+ if (type == Flag::INT) return "int";
+ if (type == Flag::FLOAT) return "float";
+ if (type == Flag::STRING) return "string";
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// Formats the value of a flag variable as a freshly allocated,
+// 0-terminated string. The caller must dispose of the result with
+// DeleteArray().
+static char* ToString(Flag::Type type, FlagValue* variable) {
+ char* value = NULL;
+ switch (type) {
+ case Flag::BOOL:
+ // 6 bytes: "false" plus terminating '\0'.
+ value = NewArray<char>(6);
+ OS::SNPrintF(value, 6, "%s", (variable->b ? "true" : "false"));
+ break;
+ case Flag::INT:
+ // 12 bytes: enough for a 32-bit int with sign and '\0'.
+ value = NewArray<char>(12);
+ OS::SNPrintF(value, 12, "%d", variable->i);
+ break;
+ case Flag::FLOAT:
+ value = NewArray<char>(20);
+ OS::SNPrintF(value, 20, "%f", variable->f);
+ break;
+ case Flag::STRING:
+ if (variable->s) {
+ int length = strlen(variable->s) + 1;
+ value = NewArray<char>(length);
+ OS::SNPrintF(value, length, "%s", variable->s);
+ } else {
+ // A NULL string value prints as the literal "NULL".
+ value = NewArray<char>(5);
+ OS::SNPrintF(value, 5, "NULL");
+ }
+ break;
+ }
+ ASSERT(value != NULL);
+ return value;
+}
+
+
+// Prints the value of *variable to stdout (no trailing newline).
+static void PrintFlagValue(Flag::Type type, FlagValue* variable) {
+ char* formatted = ToString(type, variable);
+ printf("%s", formatted);
+ DeleteArray(formatted);
+}
+
+
+// Returns the current flag value formatted as a string; the caller is
+// responsible for disposing it with DeleteArray().
+char* Flag::StringValue() const {
+ return ToString(type_, variable_);
+}
+
+
+// Prints one help line for this flag: name, comment, type, and default
+// value. The current value is appended only if print_current_value is
+// set.
+void Flag::Print(bool print_current_value) {
+ printf(" --%s (%s) type: %s default: ", name_, comment_,
+ Type2String(type_));
+ PrintFlagValue(type_, &default_);
+ if (print_current_value) {
+ printf(" current value: ");
+ PrintFlagValue(type_, variable_);
+ }
+ printf("\n");
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of FlagList
+
+// Head of the singly-linked list of all registered flags.
+Flag* FlagList::list_ = NULL;
+
+
+// Builds an argv-style list of "--flag" / "--noflag" strings (plus a
+// separate value string for non-bool flags) for every flag whose value
+// differs from its default. The caller owns the list and the strings
+// it contains.
+List<char *>* FlagList::argv() {
+ List<char *>* args = new List<char*>(8);
+ for (Flag* f = list_; f != NULL; f = f->next()) {
+ if (!f->IsDefault()) {
+ char* cmdline_flag;
+ if (f->type() != Flag::BOOL || *(f->bool_variable())) {
+ // +2 for the "--" prefix, +1 for the terminating '\0'.
+ int length = strlen(f->name()) + 2 + 1;
+ cmdline_flag = NewArray<char>(length);
+ OS::SNPrintF(cmdline_flag, length, "--%s", f->name());
+ } else {
+ // Bool flag set to false: emit the negated "--no" form.
+ int length = strlen(f->name()) + 4 + 1;
+ cmdline_flag = NewArray<char>(length);
+ OS::SNPrintF(cmdline_flag, length, "--no%s", f->name());
+ }
+ args->Add(cmdline_flag);
+ if (f->type() != Flag::BOOL) {
+ args->Add(f->StringValue());
+ }
+ }
+ }
+ return args;
+}
+
+
+// Prints help for all registered flags, restricted to the given file
+// if file != NULL, grouped by the file of registration.
+// NOTE(review): 'file == f->file()' compares pointers, not contents,
+// so callers must pass the identical __FILE__ string -- confirm.
+void FlagList::Print(const char* file, bool print_current_value) {
+ // Since flag registration is likely by file (= C++ file),
+ // we don't need to sort by file and still get grouped output.
+ const char* current = NULL;
+ for (Flag* f = list_; f != NULL; f = f->next()) {
+ if (file == NULL || file == f->file()) {
+ if (current != f->file()) {
+ printf("Flags from %s:\n", f->file());
+ current = f->file();
+ }
+ f->Print(print_current_value);
+ }
+ }
+}
+
+
+// Finds a flag by (normalized) name via linear search; the number of
+// flags is small. Returns NULL if no flag matches.
+Flag* FlagList::Lookup(const char* name) {
+ for (Flag* f = list_; f != NULL; f = f->next()) {
+ if (EqualNames(name, f->name())) return f;
+ }
+ return NULL;
+}
+
+
+// Splits arg of the form "-name", "--name", "--noname" or
+// "--name=value" into a name and an optional value. *name and *value
+// point into arg, except that when a '=' is present the name is copied
+// into buffer so it can be 0-terminated. All outputs are NULL/false
+// when arg is not a flag.
+void FlagList::SplitArgument(const char* arg,
+ char* buffer,
+ int buffer_size,
+ const char** name,
+ const char** value,
+ bool* is_bool) {
+ *name = NULL;
+ *value = NULL;
+ *is_bool = false;
+
+ if (*arg == '-') {
+ // find the begin of the flag name
+ arg++; // remove 1st '-'
+ if (*arg == '-')
+ arg++; // remove 2nd '-'
+ // NOTE(review): any name starting with "no" is treated as a
+ // negated bool here, so a flag whose real name begins with "no"
+ // cannot be addressed directly -- confirm this is intended.
+ if (arg[0] == 'n' && arg[1] == 'o') {
+ arg += 2; // remove "no"
+ *is_bool = true;
+ }
+ *name = arg;
+
+ // find the end of the flag name
+ while (*arg != '\0' && *arg != '=')
+ arg++;
+
+ // get the value if any
+ if (*arg == '=') {
+ // make a copy so we can NUL-terminate flag name
+ int n = arg - *name;
+ CHECK(n < buffer_size); // buffer is too small
+ memcpy(buffer, *name, n);
+ buffer[n] = '\0';
+ *name = buffer;
+ // get the value
+ *value = arg + 1;
+ }
+ }
+}
+
+
+// Parses flags from (argc, argv), skipping argv[0]. Returns 0 on
+// success, or the argv index of the offending argument on error; in
+// that case (argc, argv) are left unchanged. If remove_flags is set,
+// recognized flags and their values are removed from argv and argc is
+// adjusted accordingly.
+int FlagList::SetFlagsFromCommandLine(int* argc,
+ char** argv,
+ bool remove_flags) {
+ // parse arguments
+ for (int i = 1; i < *argc;) {
+ int j = i; // j > 0, index of the argument currently being parsed
+ const char* arg = argv[i++];
+
+ // split arg into flag components
+ char buffer[1*KB];
+ const char* name;
+ const char* value;
+ bool is_bool;
+ SplitArgument(arg, buffer, sizeof buffer, &name, &value, &is_bool);
+
+ if (name != NULL) {
+ // lookup the flag
+ Flag* flag = Lookup(name);
+ if (flag == NULL) {
+ fprintf(stderr, "Error: unrecognized flag %s\n", arg);
+ return j;
+ }
+
+ // if we still need a flag value, use the next argument if available
+ if (flag->type() != Flag::BOOL && value == NULL) {
+ if (i < *argc) {
+ value = argv[i++];
+ } else {
+ fprintf(stderr, "Error: missing value for flag %s of type %s\n",
+ arg, Type2String(flag->type()));
+ return j;
+ }
+ }
+
+ // set the flag
+ // endp is advanced by strtol/strtod; pointing it at "" keeps the
+ // trailing-garbage check below a no-op for bool/string flags.
+ char* endp = const_cast<char*>(""); // *endp is only read
+ switch (flag->type()) {
+ case Flag::BOOL:
+ *flag->bool_variable() = !is_bool;
+ break;
+ case Flag::INT:
+ *flag->int_variable() = strtol(value, &endp, 10);
+ break;
+ case Flag::FLOAT:
+ *flag->float_variable() = strtod(value, &endp);
+ break;
+ case Flag::STRING:
+ *flag->string_variable() = value;
+ break;
+ }
+
+ // handle errors
+ if ((flag->type() == Flag::BOOL && value != NULL) ||
+ (flag->type() != Flag::BOOL && is_bool) ||
+ *endp != '\0') {
+ fprintf(stderr, "Error: illegal value for flag %s of type %s\n",
+ arg, Type2String(flag->type()));
+ return j;
+ }
+
+ // remove the flag & value from the command
+ if (remove_flags)
+ while (j < i)
+ argv[j++] = NULL;
+ }
+ }
+
+ // shrink the argument list
+ if (remove_flags) {
+ int j = 1;
+ for (int i = 1; i < *argc; i++) {
+ if (argv[i] != NULL)
+ argv[j++] = argv[i];
+ }
+ *argc = j;
+ }
+
+ // parsed all flags successfully
+ return 0;
+}
+
+
+// Advances p past white space and returns the new position.
+// The unsigned char cast avoids undefined behavior: isspace() takes an
+// int that must be representable as unsigned char (or EOF), and a
+// plain char may be signed and hold negative values for non-ASCII
+// input.
+static char* SkipWhiteSpace(char* p) {
+ while (*p != '\0' && isspace(static_cast<unsigned char>(*p)) != 0) p++;
+ return p;
+}
+
+
+// Advances p past non-white-space ("black space") and returns the new
+// position. The unsigned char cast avoids undefined behavior when
+// isspace() is handed a negative char value (see SkipWhiteSpace).
+static char* SkipBlackSpace(char* p) {
+ while (*p != '\0' && isspace(static_cast<unsigned char>(*p)) == 0) p++;
+ return p;
+}
+
+
+// Splits str (len bytes, not necessarily 0-terminated) at white space
+// into an argv-style array and feeds it to SetFlagsFromCommandLine().
+// Returns that function's result.
+int FlagList::SetFlagsFromString(const char* str, int len) {
+ // make a 0-terminated copy of str
+ char* copy0 = NewArray<char>(len + 1);
+ memcpy(copy0, str, len);
+ copy0[len] = '\0';
+
+ // strip leading white space
+ char* copy = SkipWhiteSpace(copy0);
+
+ // count the number of 'arguments'
+ int argc = 1; // be compatible with SetFlagsFromCommandLine()
+ for (char* p = copy; *p != '\0'; argc++) {
+ p = SkipBlackSpace(p);
+ p = SkipWhiteSpace(p);
+ }
+
+ // allocate argument array
+ char** argv = NewArray<char*>(argc);
+
+ // split the flags string into arguments
+ argc = 1; // be compatible with SetFlagsFromCommandLine()
+ for (char* p = copy; *p != '\0'; argc++) {
+ argv[argc] = p;
+ p = SkipBlackSpace(p);
+ if (*p != '\0') *p++ = '\0'; // 0-terminate argument
+ p = SkipWhiteSpace(p);
+ }
+
+ // set the flags
+ int result = SetFlagsFromCommandLine(&argc, argv, false);
+
+ // cleanup
+ DeleteArray(argv);
+ // don't delete copy0 since the substrings
+ // may be pointed to by FLAG variables!
+ // (this is a memory leak, but it's minor since this
+ // code is only used for debugging, or perhaps once
+ // during initialization).
+
+ return result;
+}
+
+
+// Prepends flag to the global list. Registering two flags with the
+// same (normalized) name is a fatal error. Runs during static
+// initialization, hence deliberately not thread-safe.
+void FlagList::Register(Flag* flag) {
+ ASSERT(flag != NULL && strlen(flag->name()) > 0);
+ if (Lookup(flag->name()) != NULL)
+ V8_Fatal(flag->file(), 0, "flag %s declared twice", flag->name());
+ flag->next_ = list_;
+ list_ = flag;
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_FLAGS_H_
+#define V8_FLAGS_H_
+
+namespace v8 { namespace internal {
+
+// Internal use only.
+union FlagValue {
+ static FlagValue New_BOOL(bool b) {
+ FlagValue v;
+ v.b = b;
+ return v;
+ }
+ static FlagValue New_INT(int i) {
+ FlagValue v;
+ v.i = i;
+ return v;
+ }
+ // Takes a double -- the type of the 'f' member and of FLAG_ variables
+ // created by DEFINE_float. Taking float here would silently narrow
+ // double defaults to float precision before they are stored back
+ // into the double member.
+ static FlagValue New_FLOAT(double f) {
+ FlagValue v;
+ v.f = f;
+ return v;
+ }
+ static FlagValue New_STRING(const char* s) {
+ FlagValue v;
+ v.s = s;
+ return v;
+ }
+
+ bool b;
+ int i;
+ double f;
+ const char* s;
+};
+
+
+// Each flag can be accessed programmatically via a Flag object.
+// Each flag can be accessed programmatically via a Flag object.
+class Flag {
+ public:
+ enum Type { BOOL, INT, FLOAT, STRING };
+
+ // Internal use only.
+ Flag(const char* file, const char* name, const char* comment,
+ Type type, void* variable, FlagValue default_);
+
+ // General flag information
+ const char* file() const { return file_; }
+ const char* name() const { return name_; }
+ const char* comment() const { return comment_; }
+
+ // Flag type
+ Type type() const { return type_; }
+
+ // Flag variables. The accessor matching type() must be used; the
+ // others fail an assertion.
+ inline bool* bool_variable() const;
+ inline int* int_variable() const;
+ inline double* float_variable() const;
+ inline const char** string_variable() const;
+
+ // Default values
+ inline bool bool_default() const;
+ inline int int_default() const;
+ inline double float_default() const;
+ inline const char* string_default() const;
+
+ // Resets a flag to its default value
+ void SetToDefault();
+
+ // True if a flag is set to its default value
+ bool IsDefault() const;
+
+ // Iteration support
+ Flag* next() const { return next_; }
+
+ // Prints flag information. The current flag value is only printed
+ // if print_current_value is set.
+ void Print(bool print_current_value);
+
+
+ // Returns the string formatted value of the flag. The caller is responsible
+ // for disposing the string.
+ char* StringValue() const;
+
+ private:
+ const char* file_; // file of registration (__FILE__)
+ const char* name_; // normalized name ('_' mapped to '-')
+ const char* comment_;
+
+ Type type_;
+ FlagValue* variable_; // overlaid on the actual flag variable
+ FlagValue default_;
+
+ Flag* next_; // next flag in FlagList's linked list
+
+ friend class FlagList; // accesses next_
+};
+
+
+// Internal use only.
+#define DEFINE_FLAG(type, c_type, name, default, comment) \
+ /* define and initialize the flag */ \
+ c_type FLAG_##name = (default); \
+ /* register the flag */ \
+ static v8::internal::Flag Flag_##name(__FILE__, \
+ #name, \
+ (comment), \
+ v8::internal::Flag::type, \
+ &FLAG_##name, \
+ v8::internal::FlagValue::New_##type(default))
+
+// The static Flag_##name object above registers the flag during
+// static initialization via the Flag constructor.
+
+// Internal use only.
+#define DECLARE_FLAG(c_type, name) \
+ /* declare the external flag */ \
+ extern c_type FLAG_##name
+
+
+// Use the following macros to define a new flag:
+#define DEFINE_bool(name, default, comment) \
+ DEFINE_FLAG(BOOL, bool, name, default, comment)
+#define DEFINE_int(name, default, comment) \
+ DEFINE_FLAG(INT, int, name, default, comment)
+#define DEFINE_float(name, default, comment) \
+ DEFINE_FLAG(FLOAT, double, name, default, comment)
+#define DEFINE_string(name, default, comment) \
+ DEFINE_FLAG(STRING, const char*, name, default, comment)
+
+
+// Use the following macros to declare a flag defined elsewhere:
+#define DECLARE_bool(name) DECLARE_FLAG(bool, name)
+#define DECLARE_int(name) DECLARE_FLAG(int, name)
+#define DECLARE_float(name) DECLARE_FLAG(double, name)
+#define DECLARE_string(name) DECLARE_FLAG(const char*, name)
+
+
+// The global list of all flags.
+// The global list of all flags.
+class FlagList {
+ public:
+ // The NULL-terminated list of all flags. Traverse with Flag::next().
+ static Flag* list() { return list_; }
+
+ // The list of all flags with a value different from the default
+ // and their values. The format of the list is like the format of the
+ // argv array passed to the main function, e.g.
+ // ("--prof", "--log-file", "v8.prof", "--nolazy").
+ //
+ // The caller is responsible for disposing the list.
+ static List<char *>* argv();
+
+ // If file != NULL, prints information for all flags defined in file;
+ // otherwise prints information for all flags in all files. The current
+ // flag value is only printed if print_current_value is set.
+ static void Print(const char* file, bool print_current_value);
+
+ // Lookup a flag by name. Returns the matching flag or NULL.
+ static Flag* Lookup(const char* name);
+
+ // Helper function to parse flags: Takes an argument arg and splits it into
+ // a flag name and flag value (or NULL if they are missing). is_bool is set
+ // if the arg started with "-no" or "--no". The buffer may be used to NUL-
+ // terminate the name, it must be large enough to hold any possible name.
+ static void SplitArgument(const char* arg,
+ char* buffer,
+ int buffer_size,
+ const char** name,
+ const char** value,
+ bool* is_bool);
+
+ // Set the flag values by parsing the command line. If remove_flags is
+ // set, the flags and associated values are removed from (argc,
+ // argv). Returns 0 if no error occurred. Otherwise, returns the argv
+ // index > 0 for the argument where an error occurred. In that case,
+ // (argc, argv) will remain unchanged independent of the remove_flags
+ // value, and no assumptions about flag settings should be made.
+ //
+ // The following syntax for flags is accepted (both '-' and '--' are ok):
+ //
+ // --flag (bool flags only)
+ // --noflag (bool flags only)
+ // --flag=value (non-bool flags only, no spaces around '=')
+ // --flag value (non-bool flags only)
+ static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags);
+
+ // Set the flag values by parsing the string str. Splits string into argc
+ // substrings argv[], each of which consisting of non-white-space chars,
+ // and then calls SetFlagsFromCommandLine() and returns its result.
+ static int SetFlagsFromString(const char* str, int len);
+
+ // Registers a new flag. Called during program initialization. Not
+ // thread-safe.
+ static void Register(Flag* flag);
+
+ private:
+ // Head of the singly-linked registration list.
+ static Flag* list_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_FLAGS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "assembler-arm-inl.h"
+
+
+namespace v8 { namespace internal {
+
+
+// Classifies the stack frame described by *state. A NULL pp marks an
+// entry frame (C-to-JS transition); the construct mark slot then
+// distinguishes ENTRY_CONSTRUCT from plain ENTRY. Otherwise a frame
+// whose function slot holds a Smi is an internal frame rather than a
+// real JavaScript frame (see the kFunctionOffset comment in
+// frames-arm.h).
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (state->pp == NULL) {
+ if (Memory::Address_at(state->fp +
+ EntryFrameConstants::kConstructMarkOffset) != 0) {
+ return ENTRY_CONSTRUCT;
+ } else {
+ return ENTRY;
+ }
+ } else if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
+ } else if (
+ Memory::Object_at(state->fp +
+ StandardFrameConstants::kFunctionOffset)->IsSmi()) {
+ return INTERNAL;
+ } else {
+ return JAVA_SCRIPT;
+ }
+}
+
+
+// Fills in *state for the exit frame with frame pointer fp and returns
+// its type. Returns NONE for a 0 fp (no exit frame present).
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ // Compute frame type and stack pointer.
+ Address sp = fp + ExitFrameConstants::kSPDisplacement;
+ Type type;
+ if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+ // Debug exit frames additionally save the JS caller-saved
+ // registers below the regular frame contents.
+ type = EXIT_DEBUG;
+ sp -= kNumJSCallerSaved * kPointerSize;
+ } else {
+ type = EXIT;
+ }
+ // Fill in the state.
+ state->sp = sp;
+ state->fp = fp;
+ state->pp = fp + ExitFrameConstants::kPPDisplacement;
+ // The return address is stored just below sp.
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ return type;
+}
+
+
+// Visits the JS callee-saved register values stored in this exit frame
+// so the GC can update any heap pointers they contain.
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ // Traverse pointers in the callee-saved registers.
+ const int offset = ExitFrameConstants::kSavedRegistersOffset;
+ Object** base = &Memory::Object_at(fp() + offset);
+ Object** limit = base + kNumJSCalleeSaved;
+ v->VisitPointers(base, limit);
+}
+
+
+// Copies the kNumJSCalleeSaved register values saved in this exit
+// frame into buffer.
+void ExitFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
+ // The callee-saved registers in an exit frame are pointed to by the
+ // frame pointer. See the implementations of C entry runtime stubs.
+ const int offset = ExitFrameConstants::kSavedRegistersOffset;
+ memcpy(buffer, fp() + offset, kNumJSCalleeSaved * kPointerSize);
+}
+
+
+// Returns the number of arguments the caller actually provided, read
+// from the args_length slot of the frame.
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ const int offset = JavaScriptFrameConstants::kArgsLengthOffset;
+ int result = Memory::int_at(fp() + offset);
+ // We never remove extra parameters provided on the stack; we only
+ // fill in undefined values for parameters not provided.
+ ASSERT(0 <= result && result <= ComputeParametersCount());
+ return result;
+}
+
+
+// On ARM, the caller's stack pointer coincides with this frame's
+// parameter pointer (pp).
+Address JavaScriptFrame::GetCallerStackPointer() const {
+ return state_.pp;
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ // Argument adaptor frames aren't used on ARM (yet).
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+// Internal frames share the standard layout, so the caller's stack
+// pointer is the frame's parameter pointer (pp).
+Address InternalFrame::GetCallerStackPointer() const {
+ return state_.pp;
+}
+
+
+// Decodes (or fetches from cache) the set of JS callee-saved registers
+// pushed by this frame's prologue. The result is cached in the
+// prologue-pc stack slot, tagged with bit 0 set so it cannot be
+// mistaken for a pc value.
+RegList JavaScriptFrame::FindCalleeSavedRegisters() const {
+ const unsigned kRegListTag = 1; // pc values have bit 0 cleared (no thumb)
+ const unsigned kRegListTagSize = 1;
+ const unsigned kRegListTagMask = (1 << kRegListTagSize) - 1;
+
+ // The prologue pc (or the cached register list) is available as a
+ // slot in the fixed part of the stack frame.
+ const int offset = +4 * kPointerSize;
+
+ // Once the register list has been calculated for a frame, it is
+ // cached in the prologue pc stack slot. Check the cache before
+ // doing the more expensive instruction decoding.
+ uint32_t cache = Memory::int_at(fp() + offset);
+ if ((cache & kRegListTagMask) == kRegListTag) {
+ return static_cast<RegList>(cache >> kRegListTagSize);
+ }
+
+ // If we can't find the register list in the instruction stream, we
+ // assume it's the empty list. [NOTE: Is this really a smart thing
+ // to do? Don't all JavaScript frames have the instruction?]
+ RegList result = 0;
+
+ // Compute the address of the stm (store multiple) instruction.
+ Address stm_address = AddressFrom<Address>(cache - PcStoreOffset());
+ ASSERT((Memory::int32_at(stm_address) & 0xffffcc00) == 0xe92dcc00);
+
+ // Fetch the instruction preceeding the stm - if it is also a stm
+ // instruction we read the register list from there.
+ uint32_t instruction = Memory::int32_at(stm_address - 4);
+ if ((instruction & 0xfffffc00) == 0xe92d0000) {
+ // The register list shouldn't be empty and must consist only of JS
+ // callee-saved registers. The low 16 bits of a stm encoding hold
+ // the register list.
+ result = instruction & 0xffff;
+ ASSERT(result != 0 && (result & ~kJSCalleeSaved) == 0);
+ }
+
+ // Cache the result in the prologue pc stack slot before returning
+ // it. This way future access to the register list is a bit faster.
+ Memory::int_at(fp() + offset) = (result << kRegListTagSize) | kRegListTag;
+ return result;
+}
+
+
+// Copies the callee-saved register values stored in this frame's fixed
+// part into buffer; only the registers actually clobbered by the
+// function (per FindCalleeSavedRegisters()) are copied.
+void JavaScriptFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
+ // The callee-saved registers in java script frames are in the fixed
+ // part of the frame below the frame pointer.
+ const int n = NumRegs(FindCalleeSavedRegisters());
+ // Use the named constant instead of a magic '5' so this stays in
+ // sync with the frame layout in frames-arm.h.
+ const int offset = JavaScriptFrameConstants::kSavedRegistersOffset;
+ memcpy(buffer, fp() + offset, n * kPointerSize);
+}
+
+
+// Returns the code object for this frame, computing it from the pc and
+// caching it in the frame's code slot on first use.
+Code* JavaScriptFrame::FindCode() const {
+ const int offset = StandardFrameConstants::kCodeOffset;
+ Object* code = Memory::Object_at(fp() + offset);
+ if (code == NULL) {
+ // The code object isn't set; find it and set it.
+ code = Heap::FindCodeObject(pc());
+ ASSERT(!code->IsFailure());
+ Memory::Object_at(fp() + offset) = code;
+ }
+ ASSERT(code != NULL);
+ return Code::cast(code);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAMES_ARM_H_
+#define V8_FRAMES_ARM_H_
+
+namespace v8 { namespace internal {
+
+
+// The ARM ABI does not specify the usage of register r9, which may be reserved
+// as the static base or thread register on some platforms, in which case we
+// leave it alone. Adjust the value of kR9Available accordingly:
+static const int kR9Available = 1; // 1 if available to us, 0 if reserved
+
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+static const int kNumRegs = 16;
+
+
+// Caller-saved/arguments registers
+static const RegList kJSCallerSaved =
+ 1 << 0 | // r0 a1
+ 1 << 1 | // r1 a2
+ 1 << 2 | // r2 a3
+ 1 << 3; // r3 a4
+
+// Must equal the number of bits set in kJSCallerSaved.
+static const int kNumJSCallerSaved = 4;
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedReg(0) returns r0.code() == 0
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers available for variable allocation in JavaScript code
+// (r8/cp, r10/pp and r11/fp are excluded here; see kCalleeSaved below).
+static const RegList kJSCalleeSaved =
+ 1 << 4 | // r4 v1
+ 1 << 5 | // r5 v2
+ 1 << 6 | // r6 v3
+ 1 << 7 | // r7 v4
+ kR9Available << 9 ; // r9 v6
+
+// Must equal the number of bits set in kJSCalleeSaved.
+static const int kNumJSCalleeSaved = 4 + kR9Available;
+
+
+typedef Object* JSCalleeSavedBuffer[kNumJSCalleeSaved];
+
+
+// Callee-saved registers preserved when switching from C to JavaScript
+static const RegList kCalleeSaved = kJSCalleeSaved |
+ 1 << 8 | // r8 v5 (cp in JavaScript code)
+ 1 << 10 | // r10 v7 (pp in JavaScript code)
+ 1 << 11 ; // r11 v8 (fp in JavaScript code)
+
+static const int kNumCalleeSaved = kNumJSCalleeSaved + 3;
+
+
+// ----------------------------------------------------
+
+
+// Layout of a stack handler, relative to the handler's stack pointer
+// (see the handler frame diagram at the end of this file).
+class StackHandlerConstants : public AllStatic {
+ public:
+ // TODO(1233780): Get rid of the code slot in stack handlers.
+ static const int kCodeOffset = 0 * kPointerSize;
+ static const int kNextOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kPPOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
+ static const int kPCOffset = 5 * kPointerSize;
+
+ static const int kAddressDisplacement = -1 * kPointerSize;
+ static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+// FP-relative offsets specific to entry (C-to-JS) frames.
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset = -2 * kPointerSize;
+ static const int kConstructMarkOffset = -1 * kPointerSize;
+};
+
+
+// FP-relative offsets and displacements for exit (JS-to-C) frames.
+class ExitFrameConstants : public AllStatic {
+ public:
+ // Exit frames have a debug marker on the stack.
+ static const int kSPDisplacement = -1 * kPointerSize;
+
+ // The debug marker is just above the frame pointer.
+ static const int kDebugMarkOffset = -1 * kPointerSize;
+
+ static const int kSavedRegistersOffset = 0 * kPointerSize;
+
+ // Let the parameters pointer for exit frames point just below the
+ // frame structure on the stack (includes callee saved registers).
+ static const int kPPDisplacement = (4 + kNumJSCalleeSaved) * kPointerSize;
+
+ // The frame pointer for exit frames points to the JavaScript callee
+ // saved registers. The caller fields are below those on the stack.
+ static const int kCallerPPOffset = (0 + kNumJSCalleeSaved) * kPointerSize;
+ static const int kCallerFPOffset = (1 + kNumJSCalleeSaved) * kPointerSize;
+ static const int kCallerPCOffset = (3 + kNumJSCalleeSaved) * kPointerSize;
+};
+
+
+// FP-relative offsets shared by all standard (JS/internal) frames; see
+// the frame diagram at the end of this file.
+class StandardFrameConstants : public AllStatic {
+ public:
+ static const int kExpressionsOffset = -4 * kPointerSize;
+ static const int kCodeOffset = -3 * kPointerSize;
+ static const int kContextOffset = -2 * kPointerSize;
+ static const int kCallerPPOffset = 0 * kPointerSize;
+ static const int kCallerFPOffset = +1 * kPointerSize;
+ static const int kCallerPCOffset = +3 * kPointerSize;
+
+ // TODO(1233523): This is - of course - faked. The ARM port does not
+ // yet pass the callee function in a register, but the
+ // StackFrame::ComputeType code uses the field to figure out if a
+ // frame is a real JavaScript frame or an internal frame.
+ static const int kFunctionOffset = kContextOffset;
+};
+
+
+// Offsets specific to JavaScript frames; FP-relative slots cover the
+// fixed frame part, PP-relative slots cover the parameter area.
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kArgsLengthOffset = -1 * kPointerSize;
+ static const int kSPOnExitOffset = +2 * kPointerSize;
+ static const int kSavedRegistersOffset = +5 * kPointerSize;
+
+ // PP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+ static const int kFunctionOffset = 0 * kPointerSize;
+};
+
+
+// Internal frames reuse the standard frame's code slot.
+class InternalFrameConstants : public AllStatic {
+ public:
+ static const int kCodeOffset = StandardFrameConstants::kCodeOffset;
+};
+
+
+// Reads the caller's parameter pointer from the fixed frame part.
+inline Address StandardFrame::caller_pp() const {
+ return Memory::Address_at(fp() + StandardFrameConstants::kCallerPPOffset);
+}
+
+
+// Reads the callee function from the parameter area (PP-relative).
+inline Object* JavaScriptFrame::function() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(pp() + offset);
+}
+
+
+// Returns scratch storage for the JS callee-saved registers used while
+// iterating frames.
+// NOTE(review): the buffer is function-static and therefore shared by
+// all iterators -- presumably single-threaded use only; confirm.
+inline Object** StackFrameIterator::register_buffer() const {
+ static Object* buffer[kNumJSCalleeSaved];
+ return buffer;
+}
+
+
+// ----------------------------------------------------
+
+
+
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // | JS frame |
+ // | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer)
+ // | function |
+ // +-------------+
+ // | |
+ // | expressions |
+ // | |
+ // +-------------+
+ // | |
+ // a | locals |
+ // c | |
+ // t +- - - - - - -+ <---
+ // i -4 | local0 | ^
+ // v +-------------+ |
+ // a -3 | code | |
+ // t +-------------+ | kLocal0Offset
+ // i -2 | context | |
+ // o +-------------+ |
+ // n -1 | args_length | v
+ // +-------------+ <--- fp (frame pointer)
+ // 0 | caller_pp |
+ // f +-------------+
+ // r 1 | caller_fp |
+ // a +-------------+
+ // m 2 | sp_on_exit | (pp if return, caller_sp if no return)
+ // e +-------------+
+ // 3 | caller_pc |
+ // +-------------+
+ // 4 | prolog_pc | (used to find list of callee-saved regs)
+ // +-------------+
+ // 5 | |
+ // |callee-saved | (only saved if clobbered by this function,
+ // | regs | must be traversed during GC)
+ // | |
+ // +-------------+ <--- caller_sp (incl. parameters)
+ // | |
+ // | parameters |
+ // | |
+ // +- - - - - - -+ <---
+ // -2 | parameter0 | ^
+ // +-------------+ | kParam0Offset
+ // -1 | receiver | v
+ // ----------- +=============+ <--- pp (parameter pointer, r10)
+ // 0 | function |
+ // +-------------+
+ // | |
+ // |caller-saved | (must be valid JS values, traversed during GC)
+ // | regs |
+ // | |
+ // +-------------+
+ // | |
+ // | caller |
+ // higher | expressions |
+ // addresses | |
+ // | |
+ // | JS frame |
+
+
+
+ // Handler frames (part of expressions of JS frames):
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // h | expressions |
+ // a | |
+ // n +-------------+
+ // d -1 | code |
+ // l +-------------+ <--- handler sp
+ // e 0 | next_sp | link to next handler (next handler's sp)
+ // r +-------------+
+ // 1 | state |
+ // f +-------------+
+ // r 2 | pp |
+ // a +-------------+
+ // m 3 | fp |
+ // e +-------------+
+ // 4 | pc |
+ // +-------------+
+ // | |
+ // higher | expressions |
+ // addresses | |
+
+
+
+ // JS entry frames: When calling from C to JS, we construct two extra
+ // frames: An entry frame (C) and a trampoline frame (JS). The
+ // following picture shows the two frames:
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // | JS frame |
+ // | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer)
+ // | |
+ // | parameters |
+ // t | |
+ // r +- - - - - - -+
+ // a | parameter0 |
+ // m +-------------+
+ // p | receiver |
+ // o +-------------+
+ // l | function |
+ // i +-------------+
+ // n -3 | code |
+ // e +-------------+
+ // -2 | NULL | context is always NULL
+ // +-------------+
+ // f -1 | 0 | args_length is always zero
+ // r +-------------+ <--- fp (frame pointer)
+ // a 0 | NULL | caller pp is always NULL for entries
+ // m +-------------+
+ // e 1 | caller_fp |
+ // +-------------+
+ // 2 | sp_on_exit | (caller_sp)
+ // +-------------+
+ // 3 | caller_pc |
+ // ----------- +=============+ <--- caller_sp == pp
+ // . ^
+ // . | try-handler, fake, not GC'ed
+ // . v
+ // +-------------+ <---
+ // -2 | next top pp |
+ // +-------------+
+ // -1 | next top fp |
+ // +-------------+ <--- fp
+ // | r4 | r4-r9 holding non-JS values must be preserved
+ // +-------------+
+ // J | r5 | before being initialized not to confuse GC
+ // S +-------------+
+ // | r6 |
+ // +-------------+
+ // e | r7 |
+ // n +-------------+
+ // t | r8 |
+ // r +-------------+
+ // y [ | r9 | ] only if r9 available
+ // +-------------+
+ // | r10 |
+ // f +-------------+
+ // r | r11 |
+ // a +-------------+
+ // m | caller_sp |
+ // e +-------------+
+ // | caller_pc |
+ // +-------------+ <--- caller_sp
+ // | argv | passed on stack from C code
+ // +-------------+
+ // | |
+ // higher | |
+ // addresses | C frame |
+
+
+ // The first 4 args are passed from C in r0-r3 and are not spilled on entry:
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // [sp+0]: argv
+
+
+ // C entry frames: When calling from JS to C, we construct one extra
+ // frame:
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // | C frame |
+ // | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer)
+ // | |
+ // | parameters | (first 4 args are passed in r0-r3)
+ // | |
+ // +-------------+ <--- fp (frame pointer)
+ // C 0 | r4 | r4-r7, r9 are potentially holding JS locals
+ // +-------------+
+ // 1 | r5 | and must be traversed by the GC for proper
+ // e +-------------+
+ // n 2 | r6 | relocation
+ // t +-------------+
+ // r 3 | r7 |
+ // y +-------------+
+ // [ 4 | r9 | ] only if r9 available
+ // +-------------+
+ // f 4/5 | caller_fp |
+ // r +-------------+
+ // a 5/6 | sp_on_exit | (pp)
+ // m +-------------+
+ // e 6/7 | caller_pc |
+ // +-------------+ <--- caller_sp (incl. parameters)
+ // 7/8 | |
+ // | parameters |
+ // | |
+ // +- - - - - - -+ <---
+ // -2 | parameter0 | ^
+ // +-------------+ | kParam0Offset
+ // -1 | receiver | v
+ // ----------- +=============+ <--- pp (parameter pointer, r10)
+ // 0 | function |
+ // +-------------+
+ // | |
+ // |caller-saved |
+ // | regs |
+ // | |
+ // +-------------+
+ // | |
+ // | caller |
+ // | expressions |
+ // | |
+ // higher | |
+ // addresses | JS frame |
+
+
+} } // namespace v8::internal
+
+#endif // V8_FRAMES_ARM_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+
+namespace v8 { namespace internal {
+
+
+// Classify the frame described by *state. The frame pointer in *state
+// must be valid (non-NULL).
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
+ }
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+// Fill in *state from the exit frame rooted at fp and return its type.
+// Returns NONE when fp is 0, i.e. when there is no C entry frame.
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ // Compute the stack pointer.
+ Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+ // Fill in the state. The return address sits one slot above sp.
+ state->fp = fp;
+ state->sp = sp;
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ // Determine frame type: a non-zero debug mark means a debug exit frame.
+ if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+ return EXIT_DEBUG;
+ } else {
+ return EXIT;
+ }
+}
+
+
+// GC visitation: exit frames themselves own no heap pointers.
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ // Exit frames on IA-32 do not contain any pointers. The arguments
+ // are traversed as part of the expression stack of the calling
+ // frame.
+}
+
+
+// No-op on IA-32: there are no JS callee-saved registers to restore
+// (kNumJSCalleeSaved == 0 in frames-ia32.h).
+void ExitFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
+ // Do nothing.
+}
+
+
+// Without an arguments adaptor, the provided argument count is simply
+// the formal parameter count computed from the function.
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
+}
+
+
+// Compute the caller's stack pointer: the caller-SP slot plus the space
+// occupied by this frame's arguments (including the receiver).
+Address JavaScriptFrame::GetCallerStackPointer() const {
+ int arguments;
+ if (Heap::gc_state() != Heap::NOT_IN_GC) {
+ // The arguments for cooked frames are traversed as if they were
+ // expression stack elements of the calling frame. The reason for
+ // this rather strange decision is that we cannot access the
+ // function during mark-compact GCs when the stack is cooked.
+ // In fact accessing heap objects (like function->shared() below)
+ // at all during GC is problematic.
+ arguments = 0;
+ } else {
+ // Compute the number of arguments by getting the number of formal
+ // parameters of the function. We must remember to take the
+ // receiver into account (+1).
+ JSFunction* function = JSFunction::cast(this->function());
+ arguments = function->shared()->formal_parameter_count() + 1;
+ }
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments * kPointerSize);
+}
+
+
+// Adaptor frames record the actual argument count as expression 0;
+// +1 accounts for the receiver.
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ const int arguments = Smi::cast(GetExpression(0))->value();
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+// IA-32 JS code clobbers no callee-saved registers, so none are recorded.
+RegList JavaScriptFrame::FindCalleeSavedRegisters() const {
+ return 0;
+}
+
+
+// No-op on IA-32: there are no JS callee-saved registers to restore.
+void JavaScriptFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
+ // Do nothing.
+}
+
+
+// The code for a JavaScript frame is the code of its function's
+// shared function info.
+Code* JavaScriptFrame::FindCode() const {
+ JSFunction* function = JSFunction::cast(this->function());
+ return function->shared()->code();
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAMES_IA32_H_
+#define V8_FRAMES_IA32_H_
+
+namespace v8 { namespace internal {
+
+
+// Register lists
+// Note that the bit values must match those used in actual instruction encoding
+// Number of general-purpose registers on IA-32.
+static const int kNumRegs = 8;
+
+
+// Caller-saved registers
+static const RegList kJSCallerSaved =
+ 1 << 0 | // eax
+ 1 << 1 | // ecx
+ 1 << 2 | // edx
+ 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
+ 1 << 7; // edi - callee function
+
+static const int kNumJSCallerSaved = 5;
+
+// Buffer sized to hold one Object* per caller-saved register.
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// Callee-saved registers available for variable allocation in JavaScript code
+// (none on IA-32 — see the empty register_buffer() below).
+static const RegList kJSCalleeSaved = 0;
+
+static const int kNumJSCalleeSaved = 0;
+
+
+// ----------------------------------------------------
+
+
+// Byte offsets of the fields of a stack handler, relative to the
+// handler's address (see StackHandler::address in frames-inl.h).
+class StackHandlerConstants : public AllStatic {
+ public:
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kPPOffset = 1 * kPointerSize;
+ static const int kFPOffset = 2 * kPointerSize;
+
+ // TODO(1233780): Get rid of the code slot in stack handlers.
+ static const int kCodeOffset = 3 * kPointerSize;
+
+ static const int kStateOffset = 4 * kPointerSize;
+ static const int kPCOffset = 5 * kPointerSize;
+
+ // Displacement from the handler's stack slot to its logical address.
+ static const int kAddressDisplacement = -1 * kPointerSize;
+ // Total size of a handler record on the stack.
+ static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+// FP-relative offsets for JS entry frames (C-to-JS transitions).
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset = -6 * kPointerSize;
+
+ // Arguments passed from C code (see the JS entry frame picture below).
+ static const int kFunctionArgOffset = +3 * kPointerSize;
+ static const int kReceiverArgOffset = +4 * kPointerSize;
+ static const int kArgcOffset = +5 * kPointerSize;
+ static const int kArgvOffset = +6 * kPointerSize;
+};
+
+
+// FP-relative offsets for exit frames (JS-to-C transitions).
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kDebugMarkOffset = -3 * kPointerSize;
+ static const int kSPOffset = -2 * kPointerSize;
+
+ // Let the parameters pointer for exit frames point just below the
+ // frame structure on the stack (frame pointer and return address).
+ static const int kPPDisplacement = +2 * kPointerSize;
+
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+};
+
+
+// FP-relative offsets shared by all standard (JS/internal) frames.
+class StandardFrameConstants : public AllStatic {
+ public:
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ // Marker slot: holds a smi frame-type marker, or the function for
+ // JavaScript frames (the two overlap — see StackFrame::ComputeType).
+ static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kContextOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerSPOffset = +2 * kPointerSize;
+};
+
+
+// Offsets specific to JavaScript frames.
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+ // PP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+// Offsets specific to arguments adaptor frames.
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // Actual argument count, stored as the first expression slot.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+// Offsets specific to internal frames.
+class InternalFrameConstants : public AllStatic {
+ public:
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+// Return the JSFunction of this frame. On IA-32 the function lives in
+// the FP-relative marker slot (cf. the PP-relative slot on ARM).
+inline Object* JavaScriptFrame::function() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ Object* result = Memory::Object_at(fp() + offset);
+ ASSERT(result->IsJSFunction());
+ return result;
+}
+
+
+// No register buffer on IA-32: there are no JS callee-saved registers
+// (kNumJSCalleeSaved == 0 above), so callers must never dereference
+// the result. Declared inline — like every other definition in this
+// header and the ARM counterpart — so that including this header from
+// multiple translation units does not violate the one-definition rule.
+inline Object** StackFrameIterator::register_buffer() const {
+ ASSERT(kNumJSCalleeSaved == 0);
+ return NULL;
+}
+
+// ----------------------------------------------------
+
+
+
+
+ // C Entry frames:
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // +-------------+
+ // | entry_pc |
+ // +-------------+ <--+ entry_sp
+ // . |
+ // . |
+ // . |
+ // +-------------+ |
+ // -3 | entry_sp --+----+
+ // e +-------------+
+ // n -2 | C function |
+ // t +-------------+
+ // r -1 | caller_pp |
+ // y +-------------+ <--- fp (frame pointer, ebp)
+ // 0 | caller_fp |
+ // f +-------------+
+ // r 1 | caller_pc |
+ // a +-------------+ <--- caller_sp (stack pointer, esp)
+ // m 2 | |
+ // e | arguments |
+ // | |
+ // +- - - - - - -+
+ // | argument0 |
+ // +=============+
+ // | |
+ // | caller |
+ // higher | expressions |
+ // addresses | |
+
+
+ // Proper JS frames:
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer, esp)
+ // | function |
+ // +-------------+
+ // | |
+ // | expressions |
+ // | |
+ // +-------------+
+ // a | |
+ // c | locals |
+ // t | |
+ // i +- - - - - - -+ <---
+ // v -4 | local0 | ^
+ // a +-------------+ |
+ // t -3 | code | |
+ // i +-------------+ |
+ // o -2 | context | | kLocal0Offset
+ // n +-------------+ |
+ // -1 | caller_pp | v
+ // f +-------------+ <--- fp (frame pointer, ebp)
+ // r 0 | caller_fp |
+ // a +-------------+
+ // m 1 | caller_pc |
+ // e +-------------+ <--- caller_sp (incl. parameters)
+ // 2 | |
+ // | parameters |
+ // | |
+ // +- - - - - - -+ <---
+ // -2 | parameter0 | ^
+ // +-------------+ | kParam0Offset
+ // -1 | receiver | v
+ // ----------- +=============+ <--- pp (parameter pointer, edi)
+ // 0 | function |
+ // +-------------+
+ // | |
+ // | caller |
+ // higher | expressions |
+ // addresses | |
+
+
+ // JS entry frames: When calling from C to JS, we construct two extra
+ // frames: An entry frame (C) and a trampoline frame (JS). The
+ // following picture shows the two frames:
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer, esp)
+ // | |
+ // | parameters |
+ // t | |
+ // r +- - - - - - -+
+ // a | parameter0 |
+ // m +-------------+
+ // p | receiver |
+ // o +-------------+ <---
+ // l | function | ^
+ // i +-------------+ |
+ // n -3 | code | | kLocal0Offset
+ // e +-------------+
+ // -2 | NULL | context is always NULL
+ // +-------------+
+ // f -1 | NULL | caller pp is always NULL for entry frames
+ // r +-------------+ <--- fp (frame pointer, ebp)
+ // a 0 | caller fp |
+ // m +-------------+
+ // e 1 | caller pc |
+ // +-------------+ <--- caller_sp (incl. parameters)
+ // | 0 |
+ // ----------- +=============+ <--- pp (parameter pointer, edi)
+ // | 0 |
+ // +-------------+ <---
+ // . ^
+ // . | try-handler (HandlerOffsets::kSize)
+ // . v
+ // +-------------+ <---
+ // -5 | next top pp |
+ // +-------------+
+ // e -4 | next top fp |
+ // n +-------------+ <---
+ // t -3 | ebx | ^
+ // r +-------------+ |
+ // y -2 | esi | | callee-saved registers
+ // +-------------+ |
+ // -1 | edi | v
+ // f +-------------+ <--- fp
+ // r 0 | caller fp |
+ // a +-------------+ pp == NULL (parameter pointer)
+ // m 1 | caller pc |
+ // e +-------------+ <--- caller sp
+ // 2 | code entry | ^
+ // +-------------+ |
+ // 3 | function | |
+ // +-------------+ | arguments passed from C code
+ // 4 | receiver | |
+ // +-------------+ |
+ // 5 | argc | |
+ // +-------------+ |
+ // 6 | argv | v
+ // +-------------+ <---
+ // | |
+ // higher | |
+ // addresses | |
+
+
+} } // namespace v8::internal
+
+#endif // V8_FRAMES_IA32_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAMES_INL_H_
+#define V8_FRAMES_INL_H_
+
+#include "frames.h"
+#if defined(ARM) || defined (__arm__) || defined(__thumb__)
+#include "frames-arm.h"
+#else
+#include "frames-ia32.h"
+#endif
+
+
+namespace v8 { namespace internal {
+
+
+// Logical address of this handler on the stack: the object pointer
+// adjusted by the (negative) address displacement.
+inline Address StackHandler::address() const {
+ // NOTE: There's an obvious problem with the address of the NULL
+ // stack handler. Right now, it benefits us that the subtraction
+ // leads to a very high address (above everything else on the
+ // stack), but maybe we should stop relying on it?
+ const int displacement = StackHandlerConstants::kAddressDisplacement;
+ Address address = reinterpret_cast<Address>(const_cast<StackHandler*>(this));
+ return address + displacement;
+}
+
+
+// Follow the link field to the next (older) handler in the chain.
+inline StackHandler* StackHandler::next() const {
+ const int offset = StackHandlerConstants::kNextOffset;
+ return FromAddress(Memory::Address_at(address() + offset));
+}
+
+
+// True if the given stack address falls within this handler's record
+// (both boundaries inclusive).
+inline bool StackHandler::includes(Address address) const {
+ Address start = this->address();
+ Address end = start + StackHandlerConstants::kSize;
+ return start <= address && address <= end;
+}
+
+
+inline void StackHandler::Iterate(ObjectVisitor* v) const {
+ // Stack handlers do not contain any pointers that need to be
+ // traversed. The only field we have to worry about is the code
+ // field which is unused and should always be uninitialized.
+#ifdef DEBUG
+ const int offset = StackHandlerConstants::kCodeOffset;
+ Object* code = Memory::Object_at(address() + offset);
+ ASSERT(Smi::cast(code)->value() == StackHandler::kCodeNotPresent);
+#endif
+}
+
+
+// Reinterpret a raw stack address as a StackHandler*.
+inline StackHandler* StackHandler::FromAddress(Address address) {
+ return reinterpret_cast<StackHandler*>(address);
+}
+
+
+// Read the handler's state field (stored as an int on the stack).
+inline StackHandler::State StackHandler::state() const {
+ const int offset = StackHandlerConstants::kStateOffset;
+ return static_cast<State>(Memory::int_at(address() + offset));
+}
+
+
+// Read the saved program counter of this handler.
+inline Address StackHandler::pc() const {
+ const int offset = StackHandlerConstants::kPCOffset;
+ return Memory::Address_at(address() + offset);
+}
+
+
+// Overwrite the saved program counter (used by Cook/Uncook).
+inline void StackHandler::set_pc(Address value) {
+ const int offset = StackHandlerConstants::kPCOffset;
+ Memory::Address_at(address() + offset) = value;
+}
+
+
+// Innermost handler at the time this frame was reached by the iterator.
+inline StackHandler* StackFrame::top_handler() const {
+ return iterator_->handler();
+}
+
+
+// Callee-saved register buffer owned by the iterator that produced us.
+inline Object** StackFrame::top_register_buffer() const {
+ return iterator_->register_buffer();
+}
+
+
+// Read expression-stack slot `index` of this frame.
+inline Object* StandardFrame::GetExpression(int index) const {
+ return Memory::Object_at(GetExpressionAddress(index));
+}
+
+
+// Overwrite expression-stack slot `index` of this frame.
+inline void StandardFrame::SetExpression(int index, Object* value) {
+ Memory::Object_at(GetExpressionAddress(index)) = value;
+}
+
+
+// The context stored in this frame's context slot.
+inline Object* StandardFrame::context() const {
+ const int offset = StandardFrameConstants::kContextOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+// The caller's stack pointer coincides with our parameter pointer.
+inline Address StandardFrame::caller_sp() const {
+ return pp();
+}
+
+
+// Read the caller's frame pointer from the saved-FP slot.
+inline Address StandardFrame::caller_fp() const {
+ return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
+}
+
+
+// Read the caller's program counter (return address).
+inline Address StandardFrame::caller_pc() const {
+ return Memory::Address_at(ComputePCAddress(fp()));
+}
+
+
+// Address of the slot holding the return address, given a frame pointer.
+inline Address StandardFrame::ComputePCAddress(Address fp) {
+ return fp + StandardFrameConstants::kCallerPCOffset;
+}
+
+
+// An arguments adaptor frame is identified by a sentinel value stored
+// in its context slot instead of a real context.
+inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
+ int context = Memory::int_at(fp + StandardFrameConstants::kContextOffset);
+ return context == ArgumentsAdaptorFrame::SENTINEL;
+}
+
+
+// A construct trampoline frame is identified by its pc lying inside
+// the JSConstructCall builtin.
+inline bool StandardFrame::IsConstructTrampolineFrame(Address pc) {
+ return Builtins::builtin(Builtins::JSConstructCall)->contains(pc);
+}
+
+
+// Read the receiver from its PP-relative slot.
+inline Object* JavaScriptFrame::receiver() const {
+ const int offset = JavaScriptFrameConstants::kReceiverOffset;
+ return Memory::Object_at(pp() + offset);
+}
+
+
+// Overwrite the receiver slot of this frame.
+inline void JavaScriptFrame::set_receiver(Object* value) {
+ const int offset = JavaScriptFrameConstants::kReceiverOffset;
+ Memory::Object_at(pp() + offset) = value;
+}
+
+
+// True if the caller is an arguments adaptor frame, i.e. the actual
+// argument count differs from the formal parameter count.
+inline bool JavaScriptFrame::has_adapted_arguments() const {
+ return IsArgumentsAdaptorFrame(caller_fp());
+}
+
+
+inline bool InternalFrame::is_construct_trampoline() const {
+ // TODO(1233795): This doesn't work when the stack frames have been
+ // cooked. We need to find another way of identifying construct
+ // trampoline frames possibly by manipulating the context field like
+ // we do for argument adaptor frames.
+ return IsConstructTrampolineFrame(pc());
+}
+
+
+inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
+ // TODO(1233797): The frame hierarchy needs to change. It's
+ // problematic that we can't use the safe-cast operator to cast to
+ // the JavaScript frame type, because we may encounter arguments
+ // adaptor frames.
+ StackFrame* frame = iterator_.frame();
+ ASSERT(frame->is_java_script() || frame->is_arguments_adaptor());
+ return static_cast<JavaScriptFrame*>(frame);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_FRAMES_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "scopeinfo.h"
+#include "string-stream.h"
+#include "top.h"
+#include "zone-inl.h"
+
+namespace v8 { namespace internal {
+
+
+DEFINE_int(max_stack_trace_source_length, 300,
+ "maximum length of function source code printed in a stack trace.");
+
+
+// -------------------------------------------------------------------------
+
+
+// Iterator that supports traversing the stack handlers of a
+// particular frame. Needs to know the top of the handler chain.
+// Iterator that supports traversing the stack handlers of a
+// particular frame. Needs to know the top of the handler chain.
+class StackHandlerIterator BASE_EMBEDDED {
+ public:
+ StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
+ : limit_(frame->fp()), handler_(handler) {
+ // Make sure the handler has already been unwound to this frame.
+ ASSERT(frame->sp() <= handler->address());
+ }
+
+ // Current handler; valid only while !done().
+ StackHandler* handler() const { return handler_; }
+
+ // A handler above the frame pointer belongs to an older frame.
+ bool done() { return handler_->address() > limit_; }
+ void Advance() {
+ ASSERT(!done());
+ handler_ = handler_->next();
+ }
+
+ private:
+ const Address limit_; // fp of the frame whose handlers we walk
+ StackHandler* handler_; // current position in the handler chain
+};
+
+
+// -------------------------------------------------------------------------
+
+
+// Initialize one frame singleton per frame type; each singleton keeps
+// a back pointer to this iterator.
+#define INITIALIZE_SINGLETON(type, field) field##_(this),
+// Iterate the stack of the current thread.
+StackFrameIterator::StackFrameIterator()
+ : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+ frame_(NULL), handler_(NULL), thread(Top::GetCurrentThread()) {
+ Reset();
+}
+// Iterate the stack recorded for the given thread.
+StackFrameIterator::StackFrameIterator(ThreadLocalTop* t)
+ : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+ frame_(NULL), handler_(NULL), thread(t) {
+ Reset();
+}
+#undef INITIALIZE_SINGLETON
+
+
+// Move to the calling frame, restoring callee-saved registers and
+// unwinding the handler chain past the current frame as we go.
+void StackFrameIterator::Advance() {
+ ASSERT(!done());
+ // Compute the state of the calling frame before restoring
+ // callee-saved registers and unwinding handlers. This allows the
+ // frame code that computes the caller state to access the top
+ // handler and the value of any callee-saved register if needed.
+ StackFrame::State state;
+ StackFrame::Type type = frame_->GetCallerState(&state);
+
+ // Restore any callee-saved registers to the register buffer. Avoid
+ // the virtual call if the platform doesn't have any callee-saved
+ // registers.
+ if (kNumJSCalleeSaved > 0) {
+ frame_->RestoreCalleeSavedRegisters(register_buffer());
+ }
+
+ // Unwind handlers corresponding to the current frame.
+ StackHandlerIterator it(frame_, handler_);
+ while (!it.done()) it.Advance();
+ handler_ = it.handler();
+
+ // Advance to the calling frame.
+ frame_ = SingletonFor(type, &state);
+
+ // When we're done iterating over the stack frames, the handler
+ // chain must have been completely unwound.
+ ASSERT(!done() || handler_ == NULL);
+}
+
+
+// Restart iteration from the innermost C entry frame of the thread.
+void StackFrameIterator::Reset() {
+ Address fp = Top::c_entry_fp(thread);
+ StackFrame::State state;
+ StackFrame::Type type = ExitFrame::GetStateForFramePointer(fp, &state);
+ frame_ = SingletonFor(type, &state);
+ handler_ = StackHandler::FromAddress(Top::handler(thread));
+ // Zap the register buffer in debug mode.
+ if (kDebug) {
+ Object** buffer = register_buffer();
+ for (int i = 0; i < kNumJSCalleeSaved; i++) {
+ buffer[i] = reinterpret_cast<Object*>(kZapValue);
+ }
+ }
+}
+
+
+// Copy the callee-saved registers seen at the frame holding the top
+// handler into `buffer` and return it.
+Object** StackFrameIterator::RestoreCalleeSavedForTopHandler(Object** buffer) {
+ ASSERT(kNumJSCalleeSaved > 0);
+ // Traverse the frames until we find the frame containing the top
+ // handler. Such a frame is guaranteed to always exist by the
+ // callers of this function.
+ for (StackFrameIterator it; true; it.Advance()) {
+ StackHandlerIterator handlers(it.frame(), it.handler());
+ if (!handlers.done()) {
+ memcpy(buffer, it.register_buffer(), kNumJSCalleeSaved * kPointerSize);
+ return buffer;
+ }
+ }
+}
+
+
+// Return the per-iterator singleton frame object for `type`, primed
+// with *state. Returns NULL for StackFrame::NONE.
+StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
+ StackFrame::State* state) {
+#define FRAME_TYPE_CASE(type, field) \
+ case StackFrame::type: result = &field##_; break;
+
+ StackFrame* result = NULL;
+ switch (type) {
+ case StackFrame::NONE: return NULL;
+ STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
+ default: break;
+ }
+ ASSERT(result != NULL);
+ result->state_ = *state;
+ return result;
+
+#undef FRAME_TYPE_CASE
+}
+
+
+// -------------------------------------------------------------------------
+
+
+// Advance until the frame with the given id is reached. The id must
+// belong to a JavaScript frame on the stack, otherwise this loops.
+JavaScriptFrameIterator::JavaScriptFrameIterator(StackFrame::Id id) {
+ while (true) {
+ Advance();
+ if (frame()->id() == id) return;
+ }
+}
+
+
+// Step the underlying iterator until the next JavaScript frame (or done).
+void JavaScriptFrameIterator::Advance() {
+ do {
+ iterator_.Advance();
+ } while (!iterator_.done() && !iterator_.frame()->is_java_script());
+}
+
+
+// If the current frame's arguments were adapted, step onto the
+// arguments adaptor frame that holds the actual arguments.
+void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
+ if (!frame()->has_adapted_arguments()) return;
+ iterator_.Advance();
+ ASSERT(iterator_.frame()->is_arguments_adaptor());
+}
+
+
+// Restart at the innermost JavaScript frame.
+void JavaScriptFrameIterator::Reset() {
+ iterator_.Reset();
+ Advance();
+}
+
+
+// -------------------------------------------------------------------------
+
+
+// Convert the handler's pc to an offset within `code` so it survives
+// code relocation during GC.
+void StackHandler::Cook(Code* code) {
+ ASSERT(code->contains(pc()));
+ set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
+}
+
+
+// Inverse of Cook: turn the stored offset back into an absolute pc.
+void StackHandler::Uncook(Code* code) {
+ set_pc(code->instruction_start() + OffsetFrom(pc()));
+ ASSERT(code->contains(pc()));
+}
+
+
+// -------------------------------------------------------------------------
+
+
+// True if at least one stack handler belongs to this frame.
+bool StackFrame::HasHandler() const {
+ StackHandlerIterator it(this, top_handler());
+ return !it.done();
+}
+
+
+// Cook every frame of the thread's stack (pcs become code offsets).
+// Must not be called on an already-cooked stack.
+void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
+ ASSERT(!thread->stack_is_cooked());
+ for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
+ it.frame()->Cook();
+ }
+ thread->set_stack_is_cooked(true);
+}
+
+
+// Inverse of CookFramesForThread; requires a cooked stack.
+void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
+ ASSERT(thread->stack_is_cooked());
+ for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
+ it.frame()->Uncook();
+ }
+ thread->set_stack_is_cooked(false);
+}
+
+
+// Cook this frame: make its pc and the pcs of all its stack handlers
+// relative to the frame's code object so they survive code relocation.
+void StackFrame::Cook() {
+  Code* code = FindCode();
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    it.handler()->Cook(code);
+  }
+  ASSERT(code->contains(pc()));
+  set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
+}
+
+
+// Inverse of Cook: restore the absolute pc of this frame and of all of
+// its stack handlers from the stored code-relative offsets.
+void StackFrame::Uncook() {
+  Code* code = FindCode();
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    it.handler()->Uncook(code);
+  }
+  set_pc(code->instruction_start() + OffsetFrom(pc()));
+  ASSERT(code->contains(pc()));
+}
+
+
+// Entry frames always run the well-known JS entry stub.
+Code* EntryFrame::FindCode() const {
+  return Heap::js_entry_code();
+}
+
+
+// The caller of an entry frame is always an exit frame (or nothing);
+// read the saved caller fp from the entry frame and let the exit frame
+// code compute the full caller state from it.
+StackFrame::Type EntryFrame::GetCallerState(State* state) const {
+  const int offset = EntryFrameConstants::kCallerFPOffset;
+  Address fp = Memory::Address_at(this->fp() + offset);
+  return ExitFrame::GetStateForFramePointer(fp, state);
+}
+
+
+// Construct-entry frames run the construct variant of the entry stub.
+Code* EntryConstructFrame::FindCode() const {
+  return Heap::js_construct_entry_code();
+}
+
+
+// Exit frames always run the C entry stub.
+Code* ExitFrame::FindCode() const {
+  return Heap::c_entry_code();
+}
+
+
+// Fill in the caller's register state from the slots the exit frame
+// saved on entry to C code, and classify the caller frame's type.
+StackFrame::Type ExitFrame::GetCallerState(State* state) const {
+  // Setup the caller state.
+  state->sp = pp();
+  state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
+#ifdef USE_OLD_CALLING_CONVENTIONS
+  state->pp = Memory::Address_at(fp() + ExitFrameConstants::kCallerPPOffset);
+#endif
+  // Point at (not read) the saved pc slot so callers can rewrite it.
+  state->pc_address
+      = reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
+  return ComputeType(state);
+}
+
+
+// The caller's stack pointer sits at a fixed displacement from the
+// exit frame's frame pointer.
+Address ExitFrame::GetCallerStackPointer() const {
+  return fp() + ExitFrameConstants::kPPDisplacement;
+}
+
+
+// Debug exit frames run the debug-break variant of the C entry stub.
+Code* ExitDebugFrame::FindCode() const {
+  return Heap::c_entry_debug_break_code();
+}
+
+
+// Report which callee-saved registers this frame holds.
+RegList ExitFrame::FindCalleeSavedRegisters() const {
+  // Exit frames save all - if any - callee-saved registers.
+  return kJSCalleeSaved;
+}
+
+
+// Return the address of the n'th expression stack slot. The first
+// kNumJSCalleeSaved "expressions" live in the register buffer rather
+// than on the stack; the rest are addressed relative to fp.
+Address StandardFrame::GetExpressionAddress(int n) const {
+  ASSERT(0 <= n && n < ComputeExpressionsCount());
+  if (kNumJSCalleeSaved > 0 && n < kNumJSCalleeSaved) {
+    return reinterpret_cast<Address>(top_register_buffer() + n);
+  } else {
+    const int offset = StandardFrameConstants::kExpressionsOffset;
+    // Skip the register-allocated slots when indexing into the stack.
+    return fp() + offset - (n - kNumJSCalleeSaved) * kPointerSize;
+  }
+}
+
+
+// Count the expression stack elements in this frame: everything between
+// sp and the fixed part of the frame, plus any register-allocated locals.
+int StandardFrame::ComputeExpressionsCount() const {
+  const int offset =
+      StandardFrameConstants::kExpressionsOffset + kPointerSize;
+  Address base = fp() + offset;
+  Address limit = sp();
+  ASSERT(base >= limit);  // stack grows downwards
+  // Include register-allocated locals in number of expressions.
+  return (base - limit) / kPointerSize + kNumJSCalleeSaved;
+}
+
+
+// Read the caller's sp/fp (and pp under the old calling conventions)
+// from the fixed part of this frame and classify the caller frame type.
+StackFrame::Type StandardFrame::GetCallerState(State* state) const {
+  state->sp = caller_sp();
+  state->fp = caller_fp();
+#ifdef USE_OLD_CALLING_CONVENTIONS
+  state->pp = caller_pp();
+#endif
+  state->pc_address = reinterpret_cast<Address*>(ComputePCAddress(fp()));
+  return ComputeType(state);
+}
+
+
+// Returns true if the n'th expression stack slot lies within one of the
+// frame's stack handlers. Requires a linear walk over all handlers.
+bool StandardFrame::IsExpressionInsideHandler(int n) const {
+  Address address = GetExpressionAddress(n);
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    if (it.handler()->includes(address)) return true;
+  }
+  return false;
+}
+
+
+// Return the index'th actual parameter. Parameters are laid out at
+// decreasing addresses from the caller's stack pointer (pp).
+Object* JavaScriptFrame::GetParameter(int index) const {
+  ASSERT(index >= 0 && index < ComputeParametersCount());
+  const int offset = JavaScriptFrameConstants::kParam0Offset;
+  return Memory::Object_at(pp() + offset - (index * kPointerSize));
+}
+
+
+// Count the actual parameters of this frame. The parameter slots span
+// the area between the saved-registers region and the receiver slot.
+int JavaScriptFrame::ComputeParametersCount() const {
+  Address receiver_slot = pp() + JavaScriptFrameConstants::kReceiverOffset;
+  Address saved_regs = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
+  int count = (receiver_slot - saved_regs) / kPointerSize;
+  // On platforms with JS callee-saved registers the span also covers the
+  // registers this frame saved; exclude those slots from the count.
+  if (kNumJSCalleeSaved > 0) {
+    count -= NumRegs(FindCalleeSavedRegisters());
+  }
+  return count;
+}
+
+
+// Check if this frame was invoked through 'new'. If the arguments were
+// adapted, the construct trampoline pc must be read one frame further
+// down (through the arguments adaptor frame).
+bool JavaScriptFrame::IsConstructor() const {
+  Address pc = has_adapted_arguments()
+      ? Memory::Address_at(ComputePCAddress(caller_fp()))
+      : caller_pc();
+  return IsConstructTrampolineFrame(pc);
+}
+
+
+// Arguments adaptor frames always run the adaptor trampoline builtin.
+Code* ArgumentsAdaptorFrame::FindCode() const {
+  return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
+}
+
+
+// Return the code object for this internal frame, looking it up from
+// the current pc and caching it in the frame's code slot on first use.
+Code* InternalFrame::FindCode() const {
+  const int offset = InternalFrameConstants::kCodeOffset;
+  Object* code = Memory::Object_at(fp() + offset);
+  if (code == NULL) {
+    // The code object isn't set; find it and set it.
+    code = Heap::FindCodeObject(pc());
+    ASSERT(!code->IsFailure());
+    // Cache the result so subsequent lookups are cheap.
+    Memory::Object_at(fp() + offset) = code;
+  }
+  ASSERT(code != NULL);
+  return Code::cast(code);
+}
+
+
+// Print the frame index prefix: a fixed-width column in overview mode,
+// a bracketed index in details mode.
+void StackFrame::PrintIndex(StringStream* accumulator,
+                            PrintMode mode,
+                            int index) {
+  const char* format = (mode == OVERVIEW) ? "%5d: " : "[%d]: ";
+  accumulator->Add(format, index);
+}
+
+
+// Print a human-readable description of this JavaScript frame into the
+// accumulator. OVERVIEW mode prints a single line; DETAILS mode also
+// prints parameters, locals, the expression stack, and source code.
+void JavaScriptFrame::Print(StringStream* accumulator,
+                            PrintMode mode,
+                            int index) const {
+  HandleScope scope;
+  Object* receiver = this->receiver();
+  Object* function = this->function();
+
+  accumulator->PrintSecurityTokenIfChanged(function);
+  PrintIndex(accumulator, mode, index);
+  // PrintFunction fills in the function's code object when available.
+  Code* code = NULL;
+  if (IsConstructor()) accumulator->Add("new ");
+  accumulator->PrintFunction(function, receiver, &code);
+  accumulator->Add("(this=%o", receiver);
+
+  // Get scope information for nicer output, if possible. If code is
+  // NULL, or doesn't contain scope info, info will return 0 for the
+  // number of parameters, stack slots, or context slots.
+  ScopeInfo<PreallocatedStorage> info(code);
+
+  // Print the parameters.
+  int parameters_count = ComputeParametersCount();
+  for (int i = 0; i < parameters_count; i++) {
+    accumulator->Add(",");
+    // If we have a name for the parameter we print it. Nameless
+    // parameters are either because we have more actual parameters
+    // than formal parameters or because we have no scope information.
+    if (i < info.number_of_parameters()) {
+      accumulator->PrintName(*info.parameter_name(i));
+      accumulator->Add("=");
+    }
+    accumulator->Add("%o", GetParameter(i));
+  }
+
+  accumulator->Add(")");
+  if (mode == OVERVIEW) {
+    accumulator->Add("\n");
+    return;
+  }
+  accumulator->Add(" {\n");
+
+  // Compute the number of locals and expression stack elements.
+  int stack_locals_count = info.number_of_stack_slots();
+  int heap_locals_count = info.number_of_context_slots();
+  int expressions_count = ComputeExpressionsCount();
+
+  // Print stack-allocated local variables.
+  if (stack_locals_count > 0) {
+    accumulator->Add("  // stack-allocated locals\n");
+  }
+  for (int i = 0; i < stack_locals_count; i++) {
+    accumulator->Add("  var ");
+    accumulator->PrintName(*info.stack_slot_name(i));
+    accumulator->Add(" = ");
+    if (i < expressions_count) {
+      accumulator->Add("%o", GetExpression(i));
+    } else {
+      accumulator->Add("// no expression found - inconsistent frame?");
+    }
+    accumulator->Add("\n");
+  }
+
+  // Try to get hold of the context of this frame.
+  Context* context = NULL;
+  if (this->context() != NULL && this->context()->IsContext()) {
+    context = Context::cast(this->context());
+  }
+
+  // Print heap-allocated local variables.
+  if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
+    accumulator->Add("  // heap-allocated locals\n");
+  }
+  // Slots below MIN_CONTEXT_SLOTS are reserved context metadata.
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
+    accumulator->Add("  var ");
+    accumulator->PrintName(*info.context_slot_name(i));
+    accumulator->Add(" = ");
+    if (context != NULL) {
+      if (i < context->length()) {
+        accumulator->Add("%o", context->get(i));
+      } else {
+        accumulator->Add(
+            "// warning: missing context slot - inconsistent frame?");
+      }
+    } else {
+      accumulator->Add("// warning: no context found - inconsistent frame?");
+    }
+    accumulator->Add("\n");
+  }
+
+  // Print the expression stack. Slots already printed as stack locals
+  // (or held in callee-saved registers) are not repeated here.
+  int expressions_start = Max(stack_locals_count, kNumJSCalleeSaved);
+  if (expressions_start < expressions_count) {
+    accumulator->Add("  // expression stack (top to bottom)\n");
+  }
+  for (int i = expressions_count - 1; i >= expressions_start; i--) {
+    if (IsExpressionInsideHandler(i)) continue;
+    accumulator->Add("  [%02d] : %o\n", i, GetExpression(i));
+  }
+
+  // Print details about the function.
+  if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
+    SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
+    accumulator->Add("--------- s o u r c e   c o d e ---------\n");
+    shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
+    accumulator->Add("\n-----------------------------------------\n");
+  }
+
+  accumulator->Add("}\n\n");
+}
+
+
+// Print a description of this arguments adaptor frame, showing the
+// actual vs. expected argument counts and, in DETAILS mode, each actual
+// argument (marking those not passed on to the callee).
+void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
+                                  PrintMode mode,
+                                  int index) const {
+  int actual = ComputeParametersCount();
+  // -1 means the expected count is unknown (function is not a JSFunction).
+  int expected = -1;
+  Object* function = this->function();
+  if (function->IsJSFunction()) {
+    expected = JSFunction::cast(function)->shared()->formal_parameter_count();
+  }
+
+  PrintIndex(accumulator, mode, index);
+  accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
+  if (mode == OVERVIEW) {
+    accumulator->Add("\n");
+    return;
+  }
+  accumulator->Add(" {\n");
+
+  // Print actual arguments.
+  if (actual > 0) accumulator->Add("  // actual arguments\n");
+  for (int i = 0; i < actual; i++) {
+    accumulator->Add("  [%02d] : %o", i, GetParameter(i));
+    if (expected != -1 && i >= expected) {
+      accumulator->Add("  // not passed to callee");
+    }
+    accumulator->Add("\n");
+  }
+
+  accumulator->Add("}\n\n");
+}
+
+
+// GC support: visit the pointers held by the entry frame's single
+// stack handler.
+void EntryFrame::Iterate(ObjectVisitor* v) const {
+  StackHandlerIterator it(this, top_handler());
+  ASSERT(!it.done());
+  StackHandler* handler = it.handler();
+  ASSERT(handler->is_entry());
+  handler->Iterate(v);
+  // Make sure the entry frame does not contain more than one stack
+  // handler.
+  if (kDebug) {
+    it.Advance();
+    ASSERT(it.done());
+  }
+}
+
+
+// GC support: visit all object pointers on the expression stack between
+// sp and the context slot, handing each embedded stack handler the
+// chance to visit its own pointers.
+void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
+  const int offset = StandardFrameConstants::kContextOffset;
+  Object** base = &Memory::Object_at(sp());
+  Object** limit = &Memory::Object_at(fp() + offset) + 1;
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    StackHandler* handler = it.handler();
+    // Traverse pointers down to - but not including - the next
+    // handler in the handler chain. Update the base to skip the
+    // handler and allow the handler to traverse its own pointers.
+    const Address address = handler->address();
+    v->VisitPointers(base, reinterpret_cast<Object**>(address));
+    base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
+    // Traverse the pointers in the handler itself.
+    handler->Iterate(v);
+  }
+  // Visit whatever remains above the last handler.
+  v->VisitPointers(base, limit);
+}
+
+
+// GC support: visit the expression stack plus the callee-saved
+// registers, receiver, and parameters of this frame.
+void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
+  IterateExpressions(v);
+
+  // Traverse callee-saved registers, receiver, and parameters.
+  const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
+  const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
+  Object** base = &Memory::Object_at(fp() + kBaseOffset);
+  Object** limit = &Memory::Object_at(pp() + kLimitOffset) + 1;
+  v->VisitPointers(base, limit);
+}
+
+
+// GC support for internal frames.
+void InternalFrame::Iterate(ObjectVisitor* v) const {
+  // Internal frames only have object pointers on the expression stack
+  // as they never have any arguments.
+  IterateExpressions(v);
+}
+
+
+// -------------------------------------------------------------------------
+
+
+// Find the n'th JavaScript frame on the stack (0-based, counted from
+// the top). The caller must guarantee that such a frame exists.
+JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
+  ASSERT(n >= 0);
+  for (int remaining = n; ; remaining--) {
+    // Skip any non-JavaScript frames before the next candidate.
+    while (!iterator_.frame()->is_java_script()) iterator_.Advance();
+    if (remaining == 0) return JavaScriptFrame::cast(iterator_.frame());
+    iterator_.Advance();
+  }
+}
+
+
+// -------------------------------------------------------------------------
+
+
+// Count the registers present in the given register list by repeatedly
+// clearing the lowest set bit (Kernighan's population-count method).
+int NumRegs(RegList reglist) {
+  int count = 0;
+  for (RegList bits = reglist; bits != 0; bits &= bits - 1) {
+    count++;
+  }
+  return count;
+}
+
+
+// Return the code of the n'th JS caller-saved register. The mapping
+// from index to register code is computed lazily on first call.
+// NOTE(review): the lazy initialization is not thread-safe - presumably
+// first use happens during single-threaded startup; confirm.
+int JSCallerSavedCode(int n) {
+  static int reg_code[kNumJSCallerSaved];
+  static bool initialized = false;
+  if (!initialized) {
+    initialized = true;
+    int i = 0;
+    // Collect the register codes in ascending order from the bitmask.
+    for (int r = 0; r < kNumRegs; r++)
+      if ((kJSCallerSaved & (1 << r)) != 0)
+        reg_code[i++] = r;
+
+    ASSERT(i == kNumJSCallerSaved);
+  }
+  ASSERT(0 <= n && n < kNumJSCallerSaved);
+  return reg_code[n];
+}
+
+
+// Return the code of the n'th JS callee-saved register. Mirrors
+// JSCallerSavedCode but for the callee-saved register set.
+int JSCalleeSavedCode(int n) {
+  static int reg_code[kNumJSCalleeSaved + 1];  // avoid zero-size array error
+  static bool initialized = false;
+  if (!initialized) {
+    initialized = true;
+    int i = 0;
+    // Collect the register codes in ascending order from the bitmask.
+    for (int r = 0; r < kNumRegs; r++)
+      if ((kJSCalleeSaved & (1 << r)) != 0)
+        reg_code[i++] = r;
+
+    ASSERT(i == kNumJSCalleeSaved);
+  }
+  ASSERT(0 <= n && n < kNumJSCalleeSaved);
+  return reg_code[n];
+}
+
+
+// Return the register list containing the first n JS callee-saved
+// registers. reg_list[k] is the cumulative mask of the first k
+// registers, built lazily on first call.
+RegList JSCalleeSavedList(int n) {
+  // avoid zero-size array error
+  static RegList reg_list[kNumJSCalleeSaved + 1];
+  static bool initialized = false;
+  if (!initialized) {
+    initialized = true;
+    reg_list[0] = 0;
+    // Each entry adds the next callee-saved register's bit to the mask.
+    for (int i = 0; i < kNumJSCalleeSaved; i++)
+      reg_list[i+1] = reg_list[i] + (1 << JSCalleeSavedCode(i));
+  }
+  ASSERT(0 <= n && n <= kNumJSCalleeSaved);
+  return reg_list[n];
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAMES_H_
+#define V8_FRAMES_H_
+
+namespace v8 { namespace internal {
+
+typedef uint32_t RegList;
+
+// Get the number of registers in a given register list.
+int NumRegs(RegList list);
+
+// Return the code of the n-th saved register available to JavaScript.
+int JSCallerSavedCode(int n);
+int JSCalleeSavedCode(int n);
+
+// Return the list of the first n callee-saved registers available to
+// JavaScript.
+RegList JSCalleeSavedList(int n);
+
+
+// Forward declarations.
+class StackFrameIterator;
+class Top;
+class ThreadLocalTop;
+
+
+// A stack handler describes one entry in the chain of try-handlers
+// (entry, try/catch, try/finally) embedded in the stack. Instances are
+// views over raw stack memory, never heap-allocated.
+class StackHandler BASE_EMBEDDED {
+ public:
+  enum State {
+    ENTRY,
+    TRY_CATCH,
+    TRY_FINALLY
+  };
+
+  // Get the address of this stack handler.
+  inline Address address() const;
+
+  // Get the next stack handler in the chain.
+  inline StackHandler* next() const;
+
+  // Tells whether the given address is inside this handler.
+  inline bool includes(Address address) const;
+
+  // Garbage collection support.
+  inline void Iterate(ObjectVisitor* v) const;
+
+  // Conversion support.
+  static inline StackHandler* FromAddress(Address address);
+
+  // Testers
+  bool is_entry() { return state() == ENTRY; }
+  bool is_try_catch() { return state() == TRY_CATCH; }
+  bool is_try_finally() { return state() == TRY_FINALLY; }
+
+  // Garbage collection support: pc cooking/uncooking (see frames.cc).
+  void Cook(Code* code);
+  void Uncook(Code* code);
+
+  // TODO(1233780): Get rid of the code slot in stack handlers.
+  static const int kCodeNotPresent = 0;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
+
+  // Accessors.
+  inline State state() const;
+
+  inline Address pc() const;
+  inline void set_pc(Address value);
+};
+
+
+// X-macro listing every concrete stack frame type as (enum name, class
+// name) pairs. Used to generate the StackFrame::Type enum and the
+// per-type singletons in StackFrameIterator.
+#define STACK_FRAME_TYPE_LIST(V)              \
+  V(ENTRY,             EntryFrame)            \
+  V(ENTRY_CONSTRUCT,   EntryConstructFrame)   \
+  V(EXIT,              ExitFrame)             \
+  V(EXIT_DEBUG,        ExitDebugFrame)        \
+  V(JAVA_SCRIPT,       JavaScriptFrame)       \
+  V(INTERNAL,          InternalFrame)         \
+  V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+
+
+// Abstract base class for all stack frames.
+class StackFrame BASE_EMBEDDED {
+ public:
+#define DECLARE_TYPE(type, ignore) type,
+ enum Type {
+ NONE = 0,
+ STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
+ NUMBER_OF_TYPES
+ };
+#undef DECLARE_TYPE
+
+ // Opaque data type for identifying stack frames. Used extensively
+ // by the debugger.
+ enum Id { NO_ID = 0 };
+
+ // Type testers.
+ bool is_entry() const { return type() == ENTRY; }
+ bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
+ bool is_exit() const { return type() == EXIT; }
+ bool is_exit_debug() const { return type() == EXIT_DEBUG; }
+ bool is_java_script() const { return type() == JAVA_SCRIPT; }
+ bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
+ bool is_internal() const { return type() == INTERNAL; }
+ virtual bool is_standard() const { return false; }
+
+ // Accessors.
+ Address sp() const { return state_.sp; }
+ Address fp() const { return state_.fp; }
+ Address pp() const { return GetCallerStackPointer(); }
+
+ Address pc() const { return *pc_address(); }
+ void set_pc(Address pc) { *pc_address() = pc; }
+
+ Address* pc_address() const { return state_.pc_address; }
+
+ // Get the id of this stack frame.
+ Id id() const { return static_cast<Id>(OffsetFrom(pp())); }
+
+ // Checks if this frame includes any stack handlers.
+ bool HasHandler() const;
+
+ // Get the type of this frame.
+ virtual Type type() const = 0;
+
+ // Get the code associated with this frame.
+ virtual Code* FindCode() const = 0;
+
+ // Garbage collection support.
+ static void CookFramesForThread(ThreadLocalTop* thread);
+ static void UncookFramesForThread(ThreadLocalTop* thread);
+
+ virtual void Iterate(ObjectVisitor* v) const { }
+
+ // Printing support.
+ enum PrintMode { OVERVIEW, DETAILS };
+ virtual void Print(StringStream* accumulator,
+ PrintMode mode,
+ int index) const { }
+
+ protected:
+ struct State {
+ Address sp;
+ Address fp;
+#ifdef USE_OLD_CALLING_CONVENTIONS
+ Address pp;
+#endif
+ Address* pc_address;
+ };
+
+ explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
+ virtual ~StackFrame() { }
+
+ // Compute the stack pointer for the calling frame.
+ virtual Address GetCallerStackPointer() const = 0;
+
+ // Printing support.
+ static void PrintIndex(StringStream* accumulator,
+ PrintMode mode,
+ int index);
+
+ // Find callee-saved registers for this frame.
+ virtual RegList FindCalleeSavedRegisters() const { return 0; }
+
+ // Restore state of callee-saved registers to the provided buffer.
+ virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const { }
+
+ // Get the top handler from the current stack iterator.
+ inline StackHandler* top_handler() const;
+ inline Object** top_register_buffer() const;
+
+ // Compute the stack frame type for the given state.
+ static Type ComputeType(State* state);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrame);
+
+ protected:
+ // TODO(1233523): Once the ARM code uses the new calling
+ // conventions, we should be able to make state_ private again.
+ State state_;
+
+ private:
+ const StackFrameIterator* iterator_;
+
+ // Get the type and the state of the calling frame.
+ virtual Type GetCallerState(State* state) const = 0;
+
+ // Cooking/uncooking support.
+ void Cook();
+ void Uncook();
+
+ friend class StackFrameIterator;
+ friend class StackHandlerIterator;
+};
+
+
+// Entry frames are used to enter JavaScript execution from C.
+class EntryFrame: public StackFrame {
+ public:
+ virtual Type type() const { return ENTRY; }
+
+ virtual Code* FindCode() const;
+
+ // Garbage collection support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ static EntryFrame* cast(StackFrame* frame) {
+ ASSERT(frame->is_entry());
+ return static_cast<EntryFrame*>(frame);
+ }
+
+ protected:
+ explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+
+ // The caller stack pointer for entry frames is always zero. The
+ // real information about the caller frame is available through the
+ // link to the top exit frame.
+ virtual Address GetCallerStackPointer() const { return 0; }
+
+ private:
+ virtual Type GetCallerState(State* state) const;
+
+ friend class StackFrameIterator;
+};
+
+
+// Entry frames used when entering JavaScript construction from C;
+// identical to EntryFrame except for its type and code object.
+class EntryConstructFrame: public EntryFrame {
+ public:
+  virtual Type type() const { return ENTRY_CONSTRUCT; }
+
+  virtual Code* FindCode() const;
+
+  // Checked downcast from a generic StackFrame.
+  static EntryConstructFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_entry_construct());
+    return static_cast<EntryConstructFrame*>(frame);
+  }
+
+ protected:
+  explicit EntryConstructFrame(StackFrameIterator* iterator)
+      : EntryFrame(iterator) { }
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+// Exit frames are used to exit JavaScript execution and go to C.
+class ExitFrame: public StackFrame {
+ public:
+ virtual Type type() const { return EXIT; }
+
+ virtual Code* FindCode() const;
+
+ // Garbage colletion support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ static ExitFrame* cast(StackFrame* frame) {
+ ASSERT(frame->is_exit());
+ return static_cast<ExitFrame*>(frame);
+ }
+
+ // Compute the state and type of an exit frame given a frame
+ // pointer. Used when constructing the first stack frame seen by an
+ // iterator and the frames following entry frames.
+ static Type GetStateForFramePointer(Address fp, State* state);
+
+ protected:
+ explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+
+ virtual Address GetCallerStackPointer() const;
+
+ virtual RegList FindCalleeSavedRegisters() const;
+ virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const;
+
+ private:
+ virtual Type GetCallerState(State* state) const;
+
+ friend class StackFrameIterator;
+};
+
+
+// Exit frames created by the debugger's debug-break stub; identical to
+// ExitFrame except for its type and code object.
+class ExitDebugFrame: public ExitFrame {
+ public:
+  virtual Type type() const { return EXIT_DEBUG; }
+
+  virtual Code* FindCode() const;
+
+  // Checked downcast from a generic StackFrame.
+  static ExitDebugFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_exit_debug());
+    return static_cast<ExitDebugFrame*>(frame);
+  }
+
+ protected:
+  explicit ExitDebugFrame(StackFrameIterator* iterator)
+      : ExitFrame(iterator) { }
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+// Base class for frames with the standard layout (context slot, saved
+// fp, expression stack): JavaScript, arguments adaptor, and internal
+// frames.
+class StandardFrame: public StackFrame {
+ public:
+  // Testers.
+  virtual bool is_standard() const { return true; }
+
+  // Accessors.
+  inline Object* context() const;
+
+  // Access the expressions in the stack frame including locals.
+  inline Object* GetExpression(int index) const;
+  inline void SetExpression(int index, Object* value);
+  int ComputeExpressionsCount() const;
+
+  // Checked downcast from a generic StackFrame.
+  static StandardFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_standard());
+    return static_cast<StandardFrame*>(frame);
+  }
+
+ protected:
+  explicit StandardFrame(StackFrameIterator* iterator)
+      : StackFrame(iterator) { }
+
+  virtual Type GetCallerState(State* state) const;
+
+  // Accessors for the caller's saved registers in this frame.
+  inline Address caller_sp() const;
+  inline Address caller_fp() const;
+#ifdef USE_OLD_CALLING_CONVENTIONS
+  inline Address caller_pp() const;
+#endif
+  inline Address caller_pc() const;
+
+  // Computes the address of the PC field in the standard frame given
+  // by the provided frame pointer.
+  static inline Address ComputePCAddress(Address fp);
+
+  // Iterate over expression stack including stack handlers, locals,
+  // and parts of the fixed part including context and code fields.
+  void IterateExpressions(ObjectVisitor* v) const;
+
+  // Returns the address of the n'th expression stack element.
+  Address GetExpressionAddress(int n) const;
+
+  // Determines if the n'th expression stack element is in a stack
+  // handler or not. Requires traversing all handlers in this frame.
+  bool IsExpressionInsideHandler(int n) const;
+
+  // Determines if the standard frame for the given frame pointer is
+  // an arguments adaptor frame.
+  static inline bool IsArgumentsAdaptorFrame(Address fp);
+
+  // Determines if the standard frame for the given program counter is
+  // a construct trampoline.
+  static inline bool IsConstructTrampolineFrame(Address pc);
+
+ private:
+  friend class StackFrame;
+};
+
+
+// Frames pushed for ordinary JavaScript function invocations.
+class JavaScriptFrame: public StandardFrame {
+ public:
+  virtual Type type() const { return JAVA_SCRIPT; }
+
+  // Accessors.
+  inline Object* function() const;
+  inline Object* receiver() const;
+  inline void set_receiver(Object* value);
+
+  // Access the parameters.
+  Object* GetParameter(int index) const;
+  int ComputeParametersCount() const;
+
+  // Temporary way of getting access to the number of parameters
+  // passed on the stack by the caller. Once argument adaptor frames
+  // has been introduced on ARM, this number will always match the
+  // computed parameters count.
+  int GetProvidedParametersCount() const;
+
+  // Check if this frame is a constructor frame invoked through
+  // 'new'. The operation may involve digging through a few stack
+  // frames to account for arguments adaptors.
+  bool IsConstructor() const;
+
+  // Check if this frame has "adapted" arguments in the sense that the
+  // actual passed arguments are available in an arguments adaptor
+  // frame below it on the stack.
+  inline bool has_adapted_arguments() const;
+
+  // Garbage collection support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  // Printing support.
+  virtual void Print(StringStream* accumulator,
+                     PrintMode mode,
+                     int index) const;
+
+  // Determine the code for the frame.
+  virtual Code* FindCode() const;
+
+  // Checked downcast from a generic StackFrame.
+  static JavaScriptFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_java_script());
+    return static_cast<JavaScriptFrame*>(frame);
+  }
+
+ protected:
+  explicit JavaScriptFrame(StackFrameIterator* iterator)
+      : StandardFrame(iterator) { }
+
+  virtual Address GetCallerStackPointer() const;
+
+  // Find the callee-saved registers for this JavaScript frame. This
+  // may require traversing the instruction stream and decoding
+  // certain instructions.
+  virtual RegList FindCalleeSavedRegisters() const;
+
+  // Restore callee-saved registers.
+  virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const;
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+// Arguments adaptor frames are automatically inserted below
+// JavaScript frames when the actual number of parameters does not
+// match the formal number of parameters.
+// Arguments adaptor frames are automatically inserted below
+// JavaScript frames when the actual number of parameters does not
+// match the formal number of parameters.
+class ArgumentsAdaptorFrame: public JavaScriptFrame {
+ public:
+  // This sentinel value is temporarily used to distinguish arguments
+  // adaptor frames from ordinary JavaScript frames. If a frame has
+  // the sentinel as its context, it is an arguments adaptor frame. It
+  // must be tagged as a small integer to avoid GC issues. Crud.
+  enum {
+    SENTINEL = (1 << kSmiTagSize) | kSmiTag
+  };
+
+  virtual Type type() const { return ARGUMENTS_ADAPTOR; }
+
+  // Determine the code for the frame.
+  virtual Code* FindCode() const;
+
+  // Checked downcast from a generic StackFrame.
+  static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_arguments_adaptor());
+    return static_cast<ArgumentsAdaptorFrame*>(frame);
+  }
+
+  // Printing support.
+  virtual void Print(StringStream* accumulator,
+                     PrintMode mode,
+                     int index) const;
+ protected:
+  explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
+      : JavaScriptFrame(iterator) { }
+
+  virtual Address GetCallerStackPointer() const;
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+// Frames pushed by the runtime for internal (non-JavaScript) calls;
+// they carry no arguments, only an expression stack.
+class InternalFrame: public StandardFrame {
+ public:
+  virtual Type type() const { return INTERNAL; }
+
+  // Returns if this frame is a special trampoline frame introduced by
+  // the construct trampoline. NOTE: We should consider introducing a
+  // special stack frame type for this.
+  inline bool is_construct_trampoline() const;
+
+  // Garbage collection support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  // Determine the code for the frame.
+  virtual Code* FindCode() const;
+
+  // Checked downcast from a generic StackFrame.
+  static InternalFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_internal());
+    return static_cast<InternalFrame*>(frame);
+  }
+
+ protected:
+  explicit InternalFrame(StackFrameIterator* iterator)
+      : StandardFrame(iterator) { }
+
+  virtual Address GetCallerStackPointer() const;
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+// Iterator over all frames of a stack. Owns one pre-allocated frame
+// object per frame type (via STACK_FRAME_TYPE_LIST) and hands out the
+// matching singleton for each frame it visits.
+class StackFrameIterator BASE_EMBEDDED {
+ public:
+  // An iterator that iterates over the current thread's stack.
+  StackFrameIterator();
+
+  // An iterator that iterates over a given thread's stack.
+  explicit StackFrameIterator(ThreadLocalTop* thread);
+
+  StackFrame* frame() const {
+    ASSERT(!done());
+    return frame_;
+  }
+
+  bool done() const { return frame_ == NULL; }
+  void Advance();
+
+  // Go back to the first frame.
+  void Reset();
+
+  // Computes the state of the callee-saved registers for the top
+  // stack handler structure. Used for restoring register state when
+  // unwinding due to thrown exceptions.
+  static Object** RestoreCalleeSavedForTopHandler(Object** buffer);
+
+ private:
+#define DECLARE_SINGLETON(ignore, type) type type##_;
+  STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
+#undef DECLARE_SINGLETON
+  StackFrame* frame_;
+  StackHandler* handler_;
+  // NOTE(review): unlike frame_/handler_ this member is missing the
+  // trailing-underscore convention; renaming would touch uses outside
+  // this view, so it is only flagged here.
+  ThreadLocalTop* thread;
+
+  StackHandler* handler() const {
+    ASSERT(!done());
+    return handler_;
+  }
+
+  // Get the type-specific frame singleton in a given state.
+  StackFrame* SingletonFor(StackFrame::Type type, StackFrame::State* state);
+
+  // The register buffer contains the state of callee-saved registers
+  // for the current frame. It is computed as the stack frame
+  // iterators advances through stack frames.
+  inline Object** register_buffer() const;
+
+  friend class StackFrame;
+  DISALLOW_EVIL_CONSTRUCTORS(StackFrameIterator);
+};
+
+
+// Iterator that supports iterating through all JavaScript frames.
+class JavaScriptFrameIterator BASE_EMBEDDED {
+ public:
+ JavaScriptFrameIterator() { if (!done()) Advance(); }
+
+ explicit JavaScriptFrameIterator(ThreadLocalTop* thread) : iterator_(thread) {
+ if (!done()) Advance();
+ }
+
+ // Skip frames until the frame with the given id is reached.
+ explicit JavaScriptFrameIterator(StackFrame::Id id);
+
+ inline JavaScriptFrame* frame() const;
+
+ bool done() const { return iterator_.done(); }
+ void Advance();
+
+ // Advance to the frame holding the arguments for the current
+ // frame. This only affects the current frame if it has adapted
+ // arguments.
+ void AdvanceToArgumentsFrame();
+
+ // Go back to the first frame.
+ void Reset();
+
+ private:
+ StackFrameIterator iterator_;
+};
+
+
+// Helper for locating JavaScript frames by index on the current stack.
+class StackFrameLocator BASE_EMBEDDED {
+ public:
+  // Find the nth JavaScript frame on the stack. The caller must
+  // guarantee that such a frame exists.
+  JavaScriptFrame* FindJavaScriptFrame(int n);
+
+ private:
+  StackFrameIterator iterator_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_FRAMES_H_
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "global-handles.h"
+
+namespace v8 { namespace internal {
+
+// A node holds the storage for a single global handle plus the state
+// needed to track its lifetime across garbage collections. Nodes are
+// linked into GlobalHandles' single list; DESTROYED nodes are also
+// threaded onto a free list for reuse.
+class GlobalHandles::Node : public Malloced {
+ public:
+
+  // Reset this node to a NORMAL handle referring to object. The list
+  // linkage (next_) is deliberately left untouched so that nodes taken
+  // from the free list keep their position in the global list.
+  void Initialize(Object* object) {
+    // Set the initial value of the handle.
+    object_ = object;
+    state_ = NORMAL;
+    parameter_or_next_free_.parameter = NULL;
+    callback_ = NULL;
+  }
+
+  explicit Node(Object* object) {
+    Initialize(object);
+    // Initialize link structure.
+    next_ = NULL;
+  }
+
+  ~Node() {
+    if (state_ != DESTROYED) Destroy();
+#ifdef DEBUG
+    // Zap the values for eager trapping.
+    object_ = NULL;
+    next_ = NULL;
+    parameter_or_next_free_.next_free = NULL;
+#endif
+  }
+
+  // Move the node to the DESTROYED state, keeping the global weak
+  // handle counters in sync. The node stays in the list until the next
+  // PostGarbageCollectionProcessing deallocates it.
+  void Destroy() {
+    if (state_ == WEAK || IsNearDeath()) {
+      GlobalHandles::number_of_weak_handles_--;
+      if (object_->IsJSGlobalObject()) {
+        GlobalHandles::number_of_global_object_weak_handles_--;
+      }
+    }
+    state_ = DESTROYED;
+  }
+
+  // Accessors for next_.
+  Node* next() { return next_; }
+  void set_next(Node* value) { next_ = value; }
+  Node** next_addr() { return &next_; }
+
+  // Accessors for next free node in the free list. Only valid in the
+  // DESTROYED state because the slot is shared with the weak-callback
+  // parameter.
+  Node* next_free() {
+    ASSERT(state_ == DESTROYED);
+    return parameter_or_next_free_.next_free;
+  }
+  void set_next_free(Node* value) {
+    ASSERT(state_ == DESTROYED);
+    parameter_or_next_free_.next_free = value;
+  }
+
+  // Returns a link from the handle. Relies on object_ being the first
+  // field so a handle location can be cast directly back to its node.
+  static Node* FromLocation(Object** location) {
+    ASSERT(OFFSET_OF(Node, object_) == 0);
+    return reinterpret_cast<Node*>(location);
+  }
+
+  // Returns the handle.
+  Handle<Object> handle() { return Handle<Object>(&object_); }
+
+  // Make this handle weak. The counters are only bumped when coming
+  // from a strong state so repeated MakeWeak calls do not double-count.
+  void MakeWeak(void* parameter, WeakReferenceCallback callback) {
+    LOG(HandleEvent("GlobalHandle::MakeWeak", handle().location()));
+    if (state_ != WEAK && !IsNearDeath()) {
+      GlobalHandles::number_of_weak_handles_++;
+      if (object_->IsJSGlobalObject()) {
+        GlobalHandles::number_of_global_object_weak_handles_++;
+      }
+    }
+    state_ = WEAK;
+    set_parameter(parameter);
+    callback_ = callback;
+  }
+
+  // Revert a weak (or near-death) handle back to a normal handle.
+  void ClearWeakness() {
+    LOG(HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
+    if (state_ == WEAK || IsNearDeath()) {
+      GlobalHandles::number_of_weak_handles_--;
+      if (object_->IsJSGlobalObject()) {
+        GlobalHandles::number_of_global_object_weak_handles_--;
+      }
+    }
+    state_ = NORMAL;
+    set_parameter(NULL);
+  }
+
+  bool IsNearDeath() {
+    // Check for PENDING to ensure correct answer when processing callbacks.
+    return state_ == PENDING || state_ == NEAR_DEATH;
+  }
+
+  bool IsWeak() {
+    return state_ == WEAK;
+  }
+
+  // Accessors for the parameter of a weak handle. The slot is shared
+  // with the free-list link, so it must not be used once DESTROYED.
+  void set_parameter(void* parameter) {
+    ASSERT(state_ != DESTROYED);
+    parameter_or_next_free_.parameter = parameter;
+  }
+  void* parameter() {
+    ASSERT(state_ != DESTROYED);
+    return parameter_or_next_free_.parameter;
+  }
+
+  // Returns the callback for this weak handle.
+  WeakReferenceCallback callback() { return callback_; }
+
+  // Invoke the weak callback of a PENDING node. The state is moved to
+  // NEAR_DEATH before the call so IsNearDeath() answers correctly while
+  // the callback runs.
+  void PostGarbageCollectionProcessing() {
+    if (state_ != Node::PENDING) return;
+    LOG(HandleEvent("GlobalHandle::Processing", handle().location()));
+    void* par = parameter();
+    state_ = NEAR_DEATH;
+    set_parameter(NULL);
+    // The callback function is resolved as late as possible to preserve old
+    // behavior.
+    WeakReferenceCallback func = callback();
+    if (func != NULL) {
+      func(v8::Persistent<v8::Object>(ToApi<v8::Object>(handle())), par);
+    }
+  }
+
+  // Place the handle address first to avoid offset computation.
+  Object* object_;  // Storage for object pointer.
+
+  // Transition diagram:
+  // NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, DESTROYED }
+  enum State {
+    NORMAL,      // Normal global handle.
+    WEAK,        // Flagged as weak but not yet finalized.
+    PENDING,     // Has been recognized as only reachable by weak handles.
+    NEAR_DEATH,  // Callback has informed the handle is near death.
+    DESTROYED
+  };
+  State state_;
+
+ private:
+  // Handle specific callback.
+  WeakReferenceCallback callback_;
+  // Provided data for callback. In DESTROYED state, this is used for
+  // the free list link.
+  union {
+    void* parameter;
+    Node* next_free;
+  } parameter_or_next_free_;
+
+  // Linkage for the list.
+  Node* next_;
+
+ public:
+  TRACK_MEMORY("GlobalHandles::Node")
+};
+
+
+// Allocates a new global handle for value, preferring to recycle a
+// previously destroyed node from the free list over heap allocation.
+Handle<Object> GlobalHandles::Create(Object* value) {
+  Counters::global_handles.Increment();
+  Node* node = first_free();
+  if (node != NULL) {
+    // Recycle the first node on the free list.
+    set_first_free(node->next_free());
+    node->Initialize(value);
+  } else {
+    // Free list is empty; allocate a fresh node and prepend it.
+    node = new Node(value);
+    node->set_next(head());
+    set_head(node);
+  }
+  return node->handle();
+}
+
+
+// Destroys the global handle at location and puts its node on the free
+// list for reuse. Destroying a NULL location is a no-op.
+void GlobalHandles::Destroy(Object** location) {
+  // Only count handles that are actually destroyed; the original code
+  // decremented before the NULL check, unbalancing the counter against
+  // Create(), which always increments.
+  if (location == NULL) return;
+  Counters::global_handles.Decrement();
+  Node* node = Node::FromLocation(location);
+  node->Destroy();
+  // Link the destroyed node into the free list.
+  node->set_next_free(first_free());
+  set_first_free(node);
+}
+
+
+// Flags the handle at location as weak; callback will be invoked with
+// parameter once the object is only reachable through weak handles.
+void GlobalHandles::MakeWeak(Object** location, void* parameter,
+                             WeakReferenceCallback callback) {
+  ASSERT(callback != NULL);
+  Node* node = Node::FromLocation(location);
+  node->MakeWeak(parameter, callback);
+}
+
+
+// Turns a weak global handle back into a strong one.
+void GlobalHandles::ClearWeakness(Object** location) {
+  Node* node = Node::FromLocation(location);
+  node->ClearWeakness();
+}
+
+
+// Returns whether the handle at location is pending or near death.
+bool GlobalHandles::IsNearDeath(Object** location) {
+  Node* node = Node::FromLocation(location);
+  return node->IsNearDeath();
+}
+
+
+// Returns whether the handle at location is currently flagged weak.
+bool GlobalHandles::IsWeak(Object** location) {
+  Node* node = Node::FromLocation(location);
+  return node->IsWeak();
+}
+
+
+// Visits the object slot of every handle in a weak-related state.
+void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
+  // Traversal of GC roots in the global handle list that are marked as
+  // WEAK, PENDING or NEAR_DEATH.
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->state_ == Node::WEAK
+        || current->state_ == Node::PENDING
+        || current->state_ == Node::NEAR_DEATH) {
+      // "¤t" was mojibake for the HTML entity "&curren;" eating
+      // "&current"; the visitor needs the address of the slot.
+      v->VisitPointer(&current->object_);
+    }
+  }
+}
+
+
+// Asks callback f which WEAK handles refer to otherwise-unreachable
+// objects and moves those handles to the PENDING state.
+void GlobalHandles::MarkWeakRoots(WeakSlotCallback f) {
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->state_ == Node::WEAK) {
+      // "¤t" was mojibake ("&curren;") for "&current"; f takes the
+      // address of the handle slot.
+      if (f(&current->object_)) {
+        current->state_ = Node::PENDING;
+        LOG(HandleEvent("GlobalHandle::Pending", current->handle().location()));
+      }
+    }
+  }
+}
+
+
+// Runs pending weak callbacks and deallocates all DESTROYED nodes.
+void GlobalHandles::PostGarbageCollectionProcessing() {
+  // Process weak global handle callbacks. This must be done after the
+  // GC is completely done, because the callbacks may invoke arbitrary
+  // API functions.
+  // At the same time deallocate all DESTROYED nodes.
+  ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
+  Node** p = &head_;
+  while (*p != NULL) {
+    (*p)->PostGarbageCollectionProcessing();
+    if ((*p)->state_ == Node::DESTROYED) {
+      // Unlink and delete the node.
+      Node* node = *p;
+      *p = node->next();  // Update the link.
+      delete node;
+    } else {
+      p = (*p)->next_addr();
+    }
+  }
+  // Every DESTROYED node has been deleted, so the free list (which only
+  // ever holds DESTROYED nodes) must be emptied as well.
+  set_first_free(NULL);
+}
+
+
+// Visits the object slot of every strong (NORMAL) handle.
+void GlobalHandles::IterateRoots(ObjectVisitor* v) {
+  // Traversal of global handles marked as NORMAL. (The old comment
+  // claimed NEAR_DEATH handles were visited too, but the code only ever
+  // checked NORMAL.)
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->state_ == Node::NORMAL) {
+      // "¤t" was mojibake ("&curren;") for "&current".
+      v->VisitPointer(&current->object_);
+    }
+  }
+}
+
+// Deletes every node in the global handle list and resets the list
+// head and the free list.
+void GlobalHandles::TearDown() {
+  // Walk the list, remembering the successor before deleting each node.
+  for (Node* current = head_; current != NULL; ) {
+    Node* next = current->next();
+    delete current;
+    current = next;
+  }
+  // Reset the head and free_list.
+  set_head(NULL);
+  set_first_free(NULL);
+}
+
+
+// Definitions of the process-wide counters and list heads declared in
+// global-handles.h.
+int GlobalHandles::number_of_weak_handles_ = 0;
+int GlobalHandles::number_of_global_object_weak_handles_ = 0;
+
+GlobalHandles::Node* GlobalHandles::head_ = NULL;
+GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
+
+#ifdef DEBUG
+
+// Prints aggregate per-state handle counts (debug builds only).
+void GlobalHandles::PrintStats() {
+  int total = 0;
+  int weak = 0;
+  int pending = 0;
+  int near_death = 0;
+  int destroyed = 0;
+
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    total++;
+    if (current->state_ == Node::WEAK) weak++;
+    if (current->state_ == Node::PENDING) pending++;
+    if (current->state_ == Node::NEAR_DEATH) near_death++;
+    if (current->state_ == Node::DESTROYED) destroyed++;
+  }
+
+  PrintF("Global Handle Statistics:\n");
+  // sizeof() yields size_t; cast so the argument matches the %d
+  // conversion instead of passing a size_t through varargs.
+  PrintF("  allocated memory = %dB\n", static_cast<int>(sizeof(Node)) * total);
+  PrintF("  # weak       = %d\n", weak);
+  PrintF("  # pending    = %d\n", pending);
+  PrintF("  # near_death = %d\n", near_death);
+  PrintF("  # destroyed  = %d\n", destroyed);
+  PrintF("  # total      = %d\n", total);
+}
+
+// Dumps every handle's location, referent, and weakness (debug only).
+void GlobalHandles::Print() {
+  PrintF("Global handles:\n");
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    PrintF("  handle %p to %p (weak=%d)\n", current->handle().location(),
+           *current->handle(), current->state_ == Node::WEAK);
+  }
+}
+
+#endif
+
+// Backing store for the registered object groups (initial capacity 4).
+List<ObjectGroup*> GlobalHandles::object_groups_(4);
+
+// Adds handle to the object group identified by id, creating the group
+// on first use of that id.
+void GlobalHandles::AddToGroup(void* id, Object** handle) {
+  // Append to an existing group with a matching id, if there is one.
+  for (int i = 0; i < object_groups_.length(); i++) {
+    ObjectGroup* group = object_groups_[i];
+    if (group->id_ == id) {
+      group->objects_.Add(handle);
+      return;
+    }
+  }
+
+  // No group with this id yet; create one.
+  ObjectGroup* group = new ObjectGroup(id);
+  group->objects_.Add(handle);
+  object_groups_.Add(group);
+}
+
+
+// Disposes of every registered object group and empties the list.
+void GlobalHandles::RemoveObjectGroups() {
+  int count = object_groups_.length();
+  for (int i = 0; i < count; i++) {
+    delete object_groups_[i];
+  }
+  object_groups_.Clear();
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_GLOBAL_HANDLES_H_
+#define V8_GLOBAL_HANDLES_H_
+
+#include "list-inl.h"
+
+namespace v8 { namespace internal {
+
+// Structure for tracking global handles.
+// A single list keeps all the allocated global handles.
+// Destroyed handles stay in the list but are added to the free list.
+// At GC the destroyed global handles are removed from the free list
+// and deallocated.
+
+// Callback function on handling weak global handles.
+// typedef bool (*WeakSlotCallback)(Object** pointer);
+
+// An object group is indexed by an id. An object group is treated like
+// a single JS object: if one object in the group is alive,
+// all objects in the same group are considered alive.
+// An object group is used to simulate object relationships in a DOM tree.
+class ObjectGroup : public Malloced {
+ public:
+  explicit ObjectGroup(void* id) : id_(id), objects_(4) {}
+
+  void* id_;                // Identifier shared by all handles in the group.
+  List<Object**> objects_;  // Handle locations belonging to this group.
+};
+
+
+class GlobalHandles : public AllStatic {
+ public:
+  // Creates a new global handle that is alive until Destroy is called.
+  static Handle<Object> Create(Object* value);
+
+  // Destroy a global handle.
+  static void Destroy(Object** location);
+
+  // Make the global handle weak and set the callback parameter for the
+  // handle. When the garbage collector recognizes that only weak global
+  // handles point to an object the handles are cleared and the callback
+  // function is invoked (for each handle) with the handle and corresponding
+  // parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
+  // reason is that Smi::FromInt(0) does not change during garbage collection.
+  static void MakeWeak(Object** location,
+                       void* parameter,
+                       WeakReferenceCallback callback);
+
+  // Returns the current number of weak handles.
+  static int NumberOfWeakHandles() { return number_of_weak_handles_; }
+
+  // Returns the current number of weak handles to global objects.
+  // These handles are also included in NumberOfWeakHandles().
+  static int NumberOfGlobalObjectWeakHandles() {
+    return number_of_global_object_weak_handles_;
+  }
+
+  // Clear the weakness of a global handle.
+  static void ClearWeakness(Object** location);
+
+  // Tells whether global handle is near death.
+  static bool IsNearDeath(Object** location);
+
+  // Tells whether global handle is weak.
+  static bool IsWeak(Object** location);
+
+  // Process pending weak handles.
+  static void PostGarbageCollectionProcessing();
+
+  // Iterates over all strong (NORMAL) handles.
+  static void IterateRoots(ObjectVisitor* v);
+
+  // Iterates over all weak roots in heap.
+  static void IterateWeakRoots(ObjectVisitor* v);
+
+  // Mark the weak pointers based on the callback.
+  static void MarkWeakRoots(WeakSlotCallback f);
+
+  // Add an object to a group indexed by an id.
+  // Should only be used in a GC callback function before a collection.
+  // All groups are destroyed after a mark-compact collection.
+  static void AddToGroup(void* id, Object** location);
+
+  // Returns the object groups.
+  static List<ObjectGroup*>& ObjectGroups() {
+    return object_groups_;
+  }
+
+  // Remove all object groups; this should only happen after GC.
+  static void RemoveObjectGroups();
+
+  // Tear down the global handle structure.
+  static void TearDown();
+
+#ifdef DEBUG
+  static void PrintStats();
+  static void Print();
+#endif
+ private:
+  // Internal node structure, one for each global handle.
+  class Node;
+
+  // Field always containing the number of weak and near-death handles.
+  static int number_of_weak_handles_;
+
+  // Field always containing the number of weak and near-death handles
+  // to global objects. These objects are also included in
+  // number_of_weak_handles_.
+  static int number_of_global_object_weak_handles_;
+
+  // Global handles are kept in a single linked list pointed to by head_.
+  static Node* head_;
+  static Node* head() { return head_; }
+  static void set_head(Node* value) { head_ = value; }
+
+  // Free list for DESTROYED global handles not yet deallocated.
+  static Node* first_free_;
+  static Node* first_free() { return first_free_; }
+  static void set_first_free(Node* value) { first_free_ = value; }
+
+  // A list of object groups.
+  static List<ObjectGroup*> object_groups_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_GLOBAL_HANDLES_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// -----------------------------------------------------------------------------
+// Types
+// Windows is missing the stdint.h header file. Instead we define standard
+// integer types for Windows here.
+
+#ifdef WIN32
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t;
+typedef unsigned short uint16_t;
+typedef int int32_t;
+typedef unsigned int uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#else
+#include <stdint.h> // for intptr_t
+#endif
+
+
+// TODO(1233523): Get rid of this code that conditionally introduces a
+// macro to allow us to check for platforms that use the old
+// non-adapted arguments calling conventions.
+#if defined(ARM) || defined(__arm__) || defined(__thumb__)
+#define USE_OLD_CALLING_CONVENTIONS
+#endif
+
+namespace v8 { namespace internal {
+
+// Support for alternative bool type. This is only enabled if the code is
+// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
+// For instance, 'bool b = "false";' results in b == true! This is a hidden
+// source of bugs.
+// However, redefining the bool type does have some negative impact on some
+// platforms. It gives rise to compiler warnings (i.e. with
+// MSVC) in the API header files when mixing code that uses the standard
+// bool with code that uses the redefined version.
+// This does not actually belong in the platform code, but needs to be
+// defined here because the platform code uses bool, and platform.h is
+// included very early in the main include file.
+
+#ifndef V8_GLOBALS_H_
+#define V8_GLOBALS_H_
+
+#ifdef USE_MYBOOL
+typedef unsigned int __my_bool__;
+#define bool __my_bool__ // use 'indirection' to avoid name clashes
+#endif
+
+typedef uint8_t byte;
+typedef byte* Address;
+
+// Code-point values in Unicode 4.0 are 21 bits wide.
+typedef uint16_t uc16;
+typedef signed int uc32;
+
+
+// -----------------------------------------------------------------------------
+// Constants
+
+#ifdef DEBUG
+const bool kDebug = true;
+#else
+const bool kDebug = false;
+#endif // DEBUG
+
+const int KB = 1024;
+const int MB = KB * KB;
+const int GB = KB * KB * KB;
+const int kMaxInt = 0x7FFFFFFF;
+const int kMinInt = -kMaxInt - 1;
+
+const int kCharSize = sizeof(char);
+const int kShortSize = sizeof(short);
+const int kIntSize = sizeof(int);
+const int kDoubleSize = sizeof(double);
+const int kPointerSize = sizeof(void*);
+
+const int kPointerSizeLog2 = 2;
+
+const int kObjectAlignmentBits = 2;
+const int kObjectAlignmentMask = (1 << kObjectAlignmentBits) - 1;
+const int kObjectAlignment = 1 << kObjectAlignmentBits;
+
+
+// Tag information for HeapObject.
+const int kHeapObjectTag = 1;
+const int kHeapObjectTagSize = 2;
+const int kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+
+
+// Tag information for Smi.
+const int kSmiTag = 0;
+const int kSmiTagSize = 1;
+const int kSmiTagMask = (1 << kSmiTagSize) - 1;
+
+
+// Tag information for Failure.
+const int kFailureTag = 3;
+const int kFailureTagSize = 2;
+const int kFailureTagMask = (1 << kFailureTagSize) - 1;
+
+
+const int kBitsPerByte = 8;
+const int kBitsPerByteLog2 = 3;
+const int kBitsPerPointer = kPointerSize * kBitsPerByte;
+const int kBitsPerInt = kIntSize * kBitsPerByte;
+
+// Bits used by the mark-compact collector, PLEASE READ.
+//
+// The first word of a heap object is a map pointer. The last two bits are
+// tagged as '01' (kHeapObjectTag). We reuse the last two bits to mark an
+// object as live and/or overflowed:
+// last bit = 0, marked as alive
+// second bit = 1, overflowed
+// An object is only marked as overflowed when it is marked as live while
+// the marking stack is overflowed.
+
+const int kMarkingBit = 0; // marking bit
+const int kMarkingMask = (1 << kMarkingBit); // marking mask
+const int kOverflowBit = 1; // overflow bit
+const int kOverflowMask = (1 << kOverflowBit); // overflow mask
+
+
+// Zap-value: The value used for zapping dead objects. Should be a recognizable
+// illegal heap object pointer.
+const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
+const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
+const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
+
+// -----------------------------------------------------------------------------
+// Forward declarations for frequently used classes
+// (sorted alphabetically)
+
+class AccessorInfo;
+class Allocation;
+class Assembler;
+class BreakableStatement;
+class Code;
+class CodeGenerator;
+class CodeStub;
+class Context;
+class Debug;
+class Debugger;
+class DebugInfo;
+class Descriptor;
+class DescriptorArray;
+class Expression;
+class ExternalReference;
+class FixedArray;
+class FunctionEntry;
+class FunctionLiteral;
+class FunctionTemplateInfo;
+class Dictionary;
+class FreeStoreAllocationPolicy;
+template <typename T> class Handle;
+class Heap;
+class HeapObject;
+class IC;
+class InterceptorInfo;
+class IterationStatement;
+class JSArray;
+class JSFunction;
+class JSObject;
+class LabelCollector;
+class LargeObjectSpace;
+template <typename T, class P = FreeStoreAllocationPolicy> class List;
+class LookupResult;
+class MacroAssembler;
+class Map;
+class MapSpace;
+class MarkCompactCollector;
+class NewSpace;
+class Object;
+class OldSpace;
+class Property;
+class Proxy;
+class Scope;
+template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+class Script;
+class Slot;
+class Smi;
+class Statement;
+class String;
+class Struct;
+class SwitchStatement;
+class Visitor;
+class Variable;
+class VariableProxy;
+class RelocInfo;
+class Deserializer;
+class MessageLocation;
+class ObjectGroup;
+struct TickSample;
+class VirtualMemory;
+class Mutex;
+
+typedef bool (*WeakSlotCallback)(Object** pointer);
+
+// -----------------------------------------------------------------------------
+// Miscellaneous
+
+// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
+// consecutive and that NEW_SPACE is the first.
+enum AllocationSpace {
+ NEW_SPACE,
+ OLD_SPACE,
+ CODE_SPACE,
+ MAP_SPACE,
+ LO_SPACE,
+ LAST_SPACE = LO_SPACE
+};
+const int kSpaceTagSize = 3;
+const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
+
+// A flag that indicates whether objects should be pretenured when
+// allocated (allocated directly into the old generation) or not
+// (allocated in the young generation if the object size and type
+// allows).
+enum PretenureFlag { NOT_TENURED, TENURED };
+
+enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
+
+
+// A CodeDesc describes a buffer holding instructions and relocation
+// information. The instructions start at the beginning of the buffer
+// and grow forward, the relocation information starts at the end of
+// the buffer and grows backward.
+//
+// |<--------------- buffer_size ---------------->|
+// |<-- instr_size -->| |<-- reloc_size -->|
+// +==================+========+==================+
+// | instructions | free | reloc info |
+// +==================+========+==================+
+// ^
+// |
+// buffer
+
+struct CodeDesc {
+  byte* buffer;     // Start of the backing buffer.
+  int buffer_size;  // Total size of the buffer.
+  int instr_size;   // Bytes of instructions, from the buffer start.
+  int reloc_size;   // Bytes of relocation info, at the buffer end.
+};
+
+
+// Callback function on object slots, used for iterating heap object slots in
+// HeapObjects, global pointers to heap objects, etc. The callback allows the
+// callback function to change the value of the slot.
+typedef void (*ObjectSlotCallback)(HeapObject** pointer);
+
+
+// Callback function used for iterating objects in heap spaces,
+// for example, scanning heap objects.
+typedef int (*HeapObjectCallback)(HeapObject* obj);
+
+
+// Callback function used for checking constraints when copying/relocating
+// objects. Returns true if an object can be copied/relocated from its
+// old_addr to a new_addr.
+typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
+
+
+// Callback function on inline caches, used for iterating over inline caches
+// in compiled code.
+typedef void (*InlineCacheCallback)(Code* code, Address ic);
+
+
+// State for inline cache call sites. Aliased as IC::State.
+enum InlineCacheState {
+  // Has never been executed.
+  UNINITIALIZED,
+  // Has been executed but monomorphic state has been delayed.
+  PREMONOMORPHIC,
+  // Has been executed and only one receiver type has been seen.
+  MONOMORPHIC,
+  // Like MONOMORPHIC but check failed due to prototype.
+  MONOMORPHIC_PROTOTYPE_FAILURE,
+  // Multiple receiver types have been seen.
+  MEGAMORPHIC,
+  // Special states for debug break or step in prepare stubs.
+  DEBUG_BREAK,
+  DEBUG_PREPARE_STEP_IN
+};
+
+
+// Type of properties.
+enum PropertyType {
+ NORMAL = 0, // only in slow mode
+ MAP_TRANSITION = 1, // only in fast mode
+ CONSTANT_FUNCTION = 2, // only in fast mode
+ FIELD = 3, // only in fast mode
+ CALLBACKS = 4,
+ CONSTANT_TRANSITION = 5, // only in fast mode
+ INTERCEPTOR = 6
+};
+
+
+// Union used for fast testing of specific double values.
+union DoubleRepresentation {
+  double value;
+  int64_t bits;  // Raw bit pattern aliasing value.
+  // NOTE(review): the constructor is implicit, so doubles convert to
+  // DoubleRepresentation silently — presumably intentional for terse
+  // call sites; confirm before marking it explicit.
+  DoubleRepresentation(double x) { value = x; }
+};
+
+
+// AccessorCallback
+struct AccessorDescriptor {
+  Object* (*getter)(Object* object, void* data);
+  Object* (*setter)(JSObject* object, Object* value, void* data);
+  void* data;  // Opaque data passed through to getter/setter.
+};
+
+
+// Logging and profiling.
+// A StateTag represents a possible state of the VM. When compiled with
+// ENABLE_LOGGING_AND_PROFILING, the logger maintains a stack of these.
+// Creating a VMState object enters a state by pushing on the stack, and
+// destroying a VMState object leaves a state by popping the current state
+// from the stack.
+
+#define STATE_TAG_LIST(V) \
+ V(JS) \
+ V(GC) \
+ V(COMPILER) \
+ V(OTHER)
+
+enum StateTag {
+#define DEF_STATE_TAG(name) name,
+ STATE_TAG_LIST(DEF_STATE_TAG)
+#undef DEF_STATE_TAG
+ // Pseudo-types.
+ state_tag_count
+};
+
+
+// -----------------------------------------------------------------------------
+// Macros
+
+// Testers for test.
+
+#define HAS_SMI_TAG(value) \
+ ((reinterpret_cast<int>(value) & kSmiTagMask) == kSmiTag)
+
+#define HAS_FAILURE_TAG(value) \
+ ((reinterpret_cast<int>(value) & kFailureTagMask) == kFailureTag)
+
+#define HAS_HEAP_OBJECT_TAG(value) \
+ ((reinterpret_cast<int>(value) & kHeapObjectTagMask) == kHeapObjectTag)
+
+// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
+#define OBJECT_SIZE_ALIGN(value) \
+ ((value + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+
+
+// The expression OFFSET_OF(type, field) computes the byte-offset
+// of the specified field relative to the containing type. This
+// corresponds to 'offsetof' (in stddef.h), except that it doesn't
+// use 0 or NULL, which causes a problem with the compiler warnings
+// we have enabled (which is also why 'offsetof' doesn't seem to work).
+// Here we simply use the non-zero value 4, which seems to work.
+#define OFFSET_OF(type, field) \
+ (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
+
+
+// The expression ARRAY_SIZE(a) is a compile-time constant of type
+// size_t which represents the number of elements of the given
+// array. You should only use ARRAY_SIZE on statically allocated
+// arrays.
+#define ARRAY_SIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+
+// The USE(x) template is used to silence C++ compiler warnings
+// issued for (yet) unused variables (typically parameters).
+template <typename T>
+static inline void USE(T) { }
+
+
+// FUNCTION_ADDR(f) gets the address of a C function f.
+#define FUNCTION_ADDR(f) \
+ (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
+
+
+// FUNCTION_CAST<F>(addr) casts an address into a function
+// of type F. Used to invoke generated code from within C.
+template <typename F>
+F FUNCTION_CAST(Address addr) {
+ return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
+}
+
+
+// A macro to disallow the evil copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName(); \
+ DISALLOW_EVIL_CONSTRUCTORS(TypeName)
+
+
+// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
+// inside a C++ class and new and delete will be overloaded so logging is
+// performed.
+// This file (globals.h) is included before log.h, so we use direct calls to
+// the Logger rather than the LOG macro.
+#ifdef DEBUG
+#define TRACK_MEMORY(name) \
+ void* operator new(size_t size) { \
+ void* result = ::operator new(size); \
+ Logger::NewEvent(name, result, size); \
+ return result; \
+ } \
+ void operator delete(void* object) { \
+ Logger::DeleteEvent(name, object); \
+ ::operator delete(object); \
+ }
+#else
+#define TRACK_MEMORY(name)
+#endif
+
+// define used for helping GCC to make better inlining.
+#ifdef __GNUC__
+#if (__GNUC__ >= 4)
+#define INLINE(header) inline header __attribute__((always_inline))
+#else
+#define INLINE(header) inline __attribute__((always_inline)) header
+#endif
+#else
+#define INLINE(header) inline header
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_GLOBALS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef V8_HANDLES_INL_H_
+#define V8_HANDLES_INL_H_
+
+#include "handles.h"
+#include "api.h"
+
+namespace v8 { namespace internal {
+
+// Creating a handle from a raw object pointer allocates a fresh cell in
+// the current HandleScope and stores the pointer there.
+template<class T>
+Handle<T>::Handle(T* obj) {
+  location_ = reinterpret_cast<T**>(HandleScope::CreateHandle(obj));
+}
+
+
+// Dereference: returns the raw object pointer stored in the handle's cell.
+// The zap-value check catches (debug-mode) use of a handle whose scope has
+// already been closed.
+template <class T>
+inline T* Handle<T>::operator*() const {
+  ASSERT(location_ != NULL);
+  ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
+  return *location_;
+}
+
+
+#ifdef DEBUG
+// Enters a region in which handle allocation is forbidden: saves the
+// current scope's extension count, then shrinks the scope so any handle
+// allocation without an explicit new scope will fail.
+inline NoHandleAllocation::NoHandleAllocation() {
+  ImplementationUtilities::HandleScopeData* current =
+      ImplementationUtilities::CurrentHandleScope();
+  extensions_ = current->extensions;
+  // Shrink the current handle scope to make it impossible to do
+  // handle allocations without an explicit handle scope.
+  current->limit = current->next;
+  current->extensions = -1;
+}
+
+
+// Leaves the no-allocation region by restoring the saved extension count.
+inline NoHandleAllocation::~NoHandleAllocation() {
+  // Restore state in current handle scope to re-enable handle
+  // allocations.
+  ImplementationUtilities::CurrentHandleScope()->extensions = extensions_;
+}
+#endif
+
+
+} } // namespace v8::internal
+
+#endif // V8_HANDLES_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "natives.h"
+#include "runtime.h"
+
+namespace v8 { namespace internal {
+
+DECLARE_bool(allow_natives_syntax);
+
+#ifdef DEBUG
+DECLARE_bool(gc_greedy);
+#endif
+
+// CALL_GC(RESULT): given a Failure result from a heap allocation, triggers
+// a garbage collection sized for the failed request; aborts the process if
+// even the collection cannot make room.
+#define CALL_GC(RESULT) \
+  { \
+    Failure* __failure__ = Failure::cast(RESULT); \
+    if (!Heap::CollectGarbage(__failure__->requested(), \
+                              __failure__->allocation_space())) { \
+      /* TODO(1181417): Fix this. */ \
+      V8::FatalProcessOutOfMemory("Handles"); \
+    } \
+  }
+
+
+// Don't use the following names: __object__, __failure__.
+// CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL): evaluates a heap call that may
+// fail. On a retry-after-GC failure it collects garbage and re-evaluates
+// the call once; if the retry fails with another retry-after-GC failure
+// the process is aborted. Any other failure makes the enclosing (void)
+// function return early. Intended as the body of a void wrapper function.
+#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
+  GC_GREEDY_CHECK(); \
+  Object* __object__ = FUNCTION_CALL; \
+  if (__object__->IsFailure()) { \
+    if (__object__->IsRetryAfterGC()) { \
+      Failure* __failure__ = Failure::cast(__object__); \
+      if (!Heap::CollectGarbage(__failure__->requested(), \
+                                __failure__->allocation_space())) { \
+        /* TODO(1181417): Fix this. */ \
+        V8::FatalProcessOutOfMemory("Handles"); \
+      } \
+      __object__ = FUNCTION_CALL; \
+      if (__object__->IsFailure()) { \
+        if (__object__->IsRetryAfterGC()) { \
+          /* TODO(1181417): Fix this. */ \
+          V8::FatalProcessOutOfMemory("Handles"); \
+        } \
+        return; \
+      } \
+    } else { \
+      return; \
+    } \
+  }
+
+
+
+
+// Handle-based wrapper for FixedArray::AddKeysFromJSArray; retries the
+// allocation after a GC if needed (via CALL_HEAP_FUNCTION).
+Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
+                                      Handle<JSArray> array) {
+  CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
+}
+
+
+// Handle-based wrapper for FixedArray::UnionOfKeys; GC-safe.
+Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
+                               Handle<FixedArray> second) {
+  CALL_HEAP_FUNCTION(first->UnionOfKeys(*second), FixedArray);
+}
+
+
+// Reinitializes the given global object from the constructor, retrying
+// after a GC on allocation failure.
+Handle<JSGlobalObject> ReinitializeJSGlobalObject(
+    Handle<JSFunction> constructor,
+    Handle<JSGlobalObject> global) {
+  CALL_HEAP_FUNCTION(Heap::ReinitializeJSGlobalObject(*constructor, *global),
+                     JSGlobalObject);
+}
+
+
+// Sets the expected number of properties for instances of func. If an
+// initial map has already been created, a copy with the updated number of
+// unused property fields is installed in its place.
+void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
+  func->shared()->set_expected_nof_properties(nof);
+  if (!func->has_initial_map()) return;
+  Handle<Map> updated_map =
+      Factory::CopyMap(Handle<Map>(func->initial_map()));
+  updated_map->set_unused_property_fields(nof);
+  func->set_initial_map(*updated_map);
+}
+
+
+// Sets func's prototype; GC-safe wrapper for JSFunction::SetPrototype.
+void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
+  CALL_HEAP_FUNCTION_VOID(func->SetPrototype(*value));
+}
+
+
+// Derives the expected property count of shared from a compiler estimate.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+                                          int estimate) {
+  // TODO(1231235): We need dynamic feedback to estimate the number
+  // of expected properties in an object. The static hack below
+  // is barely a solution.
+  shared->set_expected_nof_properties(estimate + 2);
+}
+
+
+// JSFunction variant: derives the expected property count from a compiler
+// estimate and applies it via SetExpectedNofProperties.
+void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
+                                          int estimate) {
+  // TODO(1231235): We need dynamic feedback to estimate the number
+  // of expected properties in an object. The static hack below
+  // is barely a solution.
+  SetExpectedNofProperties(func, estimate + 2);
+}
+
+
+// GC-safe wrapper for JSObject::NormalizeProperties.
+void NormalizeProperties(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties());
+}
+
+
+// GC-safe wrapper for JSObject::NormalizeElements.
+void NormalizeElements(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION_VOID(object->NormalizeElements());
+}
+
+
+// GC-safe wrapper for JSObject::TransformToFastProperties.
+void TransformToFastProperties(Handle<JSObject> object,
+                               int unused_property_fields) {
+  CALL_HEAP_FUNCTION_VOID(
+      object->TransformToFastProperties(unused_property_fields));
+}
+
+
+// Flattens the string in place; no-op if it is already flat. GC-safe.
+void FlattenString(Handle<String> string) {
+  if (string->IsFlat()) return;
+  CALL_HEAP_FUNCTION_VOID(String::cast(*string)->Flatten());
+  ASSERT(string->IsFlat());
+}
+
+
+// Sets function's prototype through the Accessors machinery; GC-safe.
+Handle<Object> SetPrototype(Handle<JSFunction> function,
+                            Handle<Object> prototype) {
+  CALL_HEAP_FUNCTION(Accessors::FunctionSetPrototype(*function,
+                                                     *prototype,
+                                                     NULL),
+                     Object);
+}
+
+
+// Adds a new property to object; GC-safe wrapper for JSObject::AddProperty.
+void AddProperty(Handle<JSObject> object,
+                 Handle<String> key,
+                 Handle<Object> value,
+                 PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION_VOID(object->AddProperty(*key, *value, attributes));
+}
+
+// Sets a named property on a JSObject; GC-safe wrapper.
+Handle<Object> SetProperty(Handle<JSObject> object,
+                           Handle<String> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes), Object);
+}
+
+
+// Generic variant: sets a property with an arbitrary object key, routed
+// through the runtime. GC-safe.
+Handle<Object> SetProperty(Handle<Object> object,
+                           Handle<Object> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(Runtime::SetObjectProperty(object, key, value, attributes),
+                     Object);
+}
+
+
+// Sets a property through the object's named-property interceptor; GC-safe.
+Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
+                                          Handle<String> key,
+                                          Handle<Object> value,
+                                          PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(object->SetPropertyWithInterceptor(*key,
+                                                        *value,
+                                                        attributes),
+                     Object);
+}
+
+
+// Looks up the property named by the C string (interned as an ASCII
+// symbol) on obj. GC-safe.
+Handle<Object> GetProperty(Handle<JSObject> obj,
+                           const char* name) {
+  Handle<String> str = Factory::LookupAsciiSymbol(name);
+  CALL_HEAP_FUNCTION(obj->GetProperty(*str), Object);
+}
+
+
+// Generic variant: reads a property with an arbitrary object key, routed
+// through the runtime. GC-safe.
+Handle<Object> GetProperty(Handle<Object> obj,
+                           Handle<Object> key) {
+  CALL_HEAP_FUNCTION(Runtime::GetObjectProperty(obj, *key), Object);
+}
+
+
+// Reads a property through holder's named-property interceptor on behalf
+// of receiver; the resulting attributes are stored in *attributes. GC-safe.
+Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
+                                          Handle<JSObject> holder,
+                                          Handle<String> name,
+                                          PropertyAttributes* attributes) {
+  CALL_HEAP_FUNCTION(holder->GetPropertyWithInterceptor(*receiver,
+                                                        *name,
+                                                        attributes),
+                     Object);
+}
+
+
+// Returns a fresh handle to obj's prototype.
+Handle<Object> GetPrototype(Handle<Object> obj) {
+  return Handle<Object>(obj->GetPrototype());
+}
+
+
+// Deletes the indexed element from obj; GC-safe wrapper.
+Handle<Object> DeleteElement(Handle<JSObject> obj,
+                             uint32_t index) {
+  CALL_HEAP_FUNCTION(obj->DeleteElement(index), Object);
+}
+
+
+// Deletes the named property from obj; GC-safe wrapper.
+Handle<Object> DeleteProperty(Handle<JSObject> obj,
+                              Handle<String> prop) {
+  CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop), Object);
+}
+
+
+// Returns the [start, end) slice of str; GC-safe wrapper for String::Slice.
+Handle<String> SubString(Handle<String> str, int start, int end) {
+  CALL_HEAP_FUNCTION(str->Slice(start, end), String);
+}
+
+
+// Sets object[index] = value and returns value. On an allocation failure
+// a GC is run and the store is retried once; if the retry also fails the
+// process is aborted.
+Handle<Object> SetElement(Handle<JSObject> object,
+                          uint32_t index,
+                          Handle<Object> value) {
+  GC_GREEDY_CHECK();
+  Object* obj = object->SetElement(index, *value);
+  // If you set an element then the object may need to get a new map
+  // which will cause it to grow, which will cause an allocation.
+  // If you know that the object will not grow then perhaps this check
+  // does not apply and you may have to split this method into two
+  // versions.
+  ASSERT(Heap::IsAllocationAllowed());
+  if (obj->IsFailure()) {
+    CALL_GC(obj);
+    obj = object->SetElement(index, *value);
+    if (obj->IsFailure()) {
+      V8::FatalProcessOutOfMemory("Handles");  // TODO(1181417): Fix this.
+    }
+  }
+  return value;
+}
+
+
+// Allocates a copy of obj via JSObject::Copy (pretenured if requested);
+// GC-safe wrapper.
+Handle<JSObject> Copy(Handle<JSObject> obj, PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(obj->Copy(pretenure), JSObject);
+}
+
+
+// Wrappers for scripts are kept alive and cached in weak global
+// handles referred from proxy objects held by the scripts as long as
+// they are used. When they are not used anymore, the garbage
+// collector will call the weak callback on the global handle
+// associated with the wrapper and get rid of both the wrapper and the
+// handle.
+// Weak-handle callback: invoked by the GC when a script wrapper becomes
+// unreachable. Clears the script's cached wrapper pointer and destroys
+// the global handle that backed the cache.
+static void ClearWrapperCache(Persistent<v8::Object> handle, void*) {
+  Handle<Object> cache = Utils::OpenHandle(*handle);
+  JSValue* wrapper = JSValue::cast(*cache);
+  Proxy* proxy = Script::cast(wrapper->value())->wrapper();
+  ASSERT(proxy->proxy() == reinterpret_cast<Address>(cache.location()));
+  proxy->set_proxy(0);
+  GlobalHandles::Destroy(cache.location());
+  Counters::script_wrappers.Decrement();
+}
+
+
+// Returns the JSValue wrapper for script. On first use a wrapper object
+// is constructed and cached in a weak global handle whose address is
+// stored on the script's wrapper proxy (cleared by ClearWrapperCache).
+Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
+  // A zero proxy address yields a null handle here, i.e. a cache miss.
+  Handle<Object> cache(reinterpret_cast<Object**>(script->wrapper()->proxy()));
+  if (!cache.is_null()) {
+    // Return the script wrapper directly from the cache.
+    return Handle<JSValue>(JSValue::cast(*cache));
+  }
+
+  // Construct a new script wrapper.
+  Counters::script_wrappers.Increment();
+  Handle<JSFunction> constructor = Top::script_function();
+  Handle<JSValue> result =
+      Handle<JSValue>::cast(Factory::NewJSObject(constructor));
+  result->set_value(*script);
+
+  // Create a new weak global handle and use it to cache the wrapper
+  // for future use. The cache will automatically be cleared by the
+  // garbage collector when it is not used anymore.
+  Handle<Object> handle = GlobalHandles::Create(*result);
+  GlobalHandles::MakeWeak(handle.location(), NULL, &ClearWrapperCache);
+  script->wrapper()->set_proxy(reinterpret_cast<Address>(handle.location()));
+  return result;
+}
+
+
+#undef CALL_HEAP_FUNCTION
+#undef CALL_GC
+
+
+// Compute the property keys from the interceptor.
+// Computes the property names reported by object's named-property
+// interceptor by invoking its enumerator callback (if it has one) with
+// receiver as the access-check context. Returns an empty handle when no
+// enumerator is installed or the callback produces nothing.
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+                                                 Handle<JSObject> object) {
+  Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
+  Handle<Object> data(interceptor->data());
+  v8::AccessorInfo info(
+      v8::Utils::ToLocal(receiver),
+      v8::Utils::ToLocal(data),
+      v8::Utils::ToLocal(object));
+  v8::Handle<v8::Array> keys;
+  if (interceptor->enumerator()->IsUndefined()) return keys;
+  v8::NamedPropertyEnumerator enum_fun =
+      v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
+  LOG(ApiObjectAccess("interceptor-named-enum", *object));
+  {
+    // Leaving JavaScript.
+    VMState state(OTHER);
+    keys = enum_fun(info);
+  }
+  return keys;
+}
+
+
+// Compute the element keys from the interceptor.
+// Computes the element indices reported by object's indexed-property
+// interceptor by invoking its enumerator callback (if it has one).
+// Returns an empty handle when no enumerator is installed or the
+// callback produces nothing.
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+                                                   Handle<JSObject> object) {
+  Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+  Handle<Object> data(interceptor->data());
+  v8::AccessorInfo info(
+      v8::Utils::ToLocal(receiver),
+      v8::Utils::ToLocal(data),
+      v8::Utils::ToLocal(object));
+  v8::Handle<v8::Array> keys;
+  if (interceptor->enumerator()->IsUndefined()) return keys;
+  v8::IndexedPropertyEnumerator enum_fun =
+      v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
+  LOG(ApiObjectAccess("interceptor-indexed-enum", *object));
+  {
+    // Leaving JavaScript.
+    VMState state(OTHER);
+    keys = enum_fun(info);
+  }
+  return keys;
+}
+
+
+// Collects the enumerable keys of object and its prototype chain — named
+// properties, element indices, and interceptor-reported keys — into one
+// fixed array. Returns the empty array if an access check fails. Keys
+// are not collected for arguments objects.
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
+  Handle<FixedArray> content = Factory::empty_fixed_array();
+
+  // Check access rights if required.
+  if (object->IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(*object, Heap::undefined_value(), v8::ACCESS_KEYS)) {
+    Top::ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+    return content;
+  }
+
+  // An object is recognized as an arguments object by comparing its
+  // constructor with the arguments boilerplate's constructor.
+  JSObject* arguments_boilerplate =
+      Top::context()->global_context()->arguments_boilerplate();
+  JSFunction* arguments_function =
+      JSFunction::cast(arguments_boilerplate->map()->constructor());
+  bool allow_enumeration = (object->map()->constructor() != arguments_function);
+
+  // Only collect keys if access is permitted.
+  if (allow_enumeration) {
+    // Walk the prototype chain, merging each object's keys into content.
+    for (Handle<Object> p = object;
+         *p != Heap::null_value();
+         p = Handle<Object>(p->GetPrototype())) {
+      Handle<JSObject> current(JSObject::cast(*p));
+
+      // Compute the property keys.
+      content = UnionOfKeys(content, GetEnumPropertyKeys(current));
+
+      // Add the property keys from the interceptor.
+      if (current->HasNamedInterceptor()) {
+        v8::Handle<v8::Array> result =
+            GetKeysForNamedInterceptor(object, current);
+        if (!result.IsEmpty())
+          content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+      }
+
+      // Compute the element keys.
+      Handle<FixedArray> element_keys =
+          Factory::NewFixedArray(current->NumberOfEnumElements());
+      current->GetEnumElementKeys(*element_keys);
+      content = UnionOfKeys(content, element_keys);
+
+      // Add the element keys from the interceptor.
+      if (current->HasIndexedInterceptor()) {
+        v8::Handle<v8::Array> result =
+            GetKeysForIndexedInterceptor(object, current);
+        if (!result.IsEmpty())
+          content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+      }
+    }
+  }
+  return content;
+}
+
+
+// Computes the enumerable keys of object (used for for-in) and returns
+// them wrapped in a freshly allocated JSArray.
+Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
+  Counters::for_in.Increment();
+
+  Handle<FixedArray> content = GetKeysInFixedArrayFor(object);
+
+  // Allocate the JSArray with the result. Factory::NewJSArray already
+  // yields a Handle<JSArray>, so no casts are needed here (the original
+  // code redundantly re-cast the handle to its own type twice).
+  Handle<JSArray> result = Factory::NewJSArray(content->length());
+  result->SetContent(*content);
+  return result;
+}
+
+
+// Computes the enumerable named property keys of object, sorted by
+// descriptor details index (presumably property-addition order — TODO
+// confirm). For fast-mode objects the result is cached in the map's
+// descriptor array enum cache and reused on subsequent calls.
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
+  int index = 0;
+  if (object->HasFastProperties()) {
+    // Fast path: serve the keys from the enum cache when present.
+    if (object->map()->instance_descriptors()->HasEnumCache()) {
+      Counters::enum_cache_hits.Increment();
+      DescriptorArray* desc = object->map()->instance_descriptors();
+      return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()));
+    }
+    Counters::enum_cache_misses.Increment();
+    // Collect the enumerable keys (skipping transitions and DontEnum
+    // properties) together with their sort indices.
+    int num_enum = object->NumberOfEnumProperties();
+    Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
+    Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+    for (DescriptorReader r(object->map()->instance_descriptors());
+         !r.eos();
+         r.advance()) {
+      if (!r.IsTransition() && !r.IsDontEnum()) {
+        (*storage)->set(index, r.GetKey());
+        (*sort_array)->set(index, Smi::FromInt(r.GetDetails().index()));
+        index++;
+      }
+    }
+    (*storage)->SortPairs(*sort_array);
+    // Install the freshly computed keys in the map's enum cache.
+    Handle<FixedArray> bridge_storage =
+        Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
+    DescriptorArray* desc = object->map()->instance_descriptors();
+    desc->SetEnumCache(*bridge_storage, *storage);
+    ASSERT(storage->length() == index);
+    return storage;
+  } else {
+    // Slow (dictionary) properties: copy the keys out of the dictionary.
+    int num_enum = object->NumberOfEnumProperties();
+    Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
+    Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+    object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
+    return storage;
+  }
+}
+
+
+// Compiles the code for shared (which must not yet be compiled). Returns
+// false on failure; in that case a pending exception has been set (see
+// the ASSERT) and is cleared when flag is CLEAR_EXCEPTION.
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+                       ClearExceptionFlag flag) {
+  // Compile the source information to a code object.
+  ASSERT(!shared->is_compiled());
+  bool result = Compiler::CompileLazy(shared);
+  ASSERT(result != Top::has_pending_exception());
+  if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
+  return result;
+}
+
+
+// Lazily compiles function by delegating to CompileLazyShared on its
+// shared function info.
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
+  Handle<SharedFunctionInfo> shared_info(function->shared());
+  return CompileLazyShared(shared_info, flag);
+}
+
+
+// When the condition holds and the object currently has fast properties,
+// normalizes its properties up front so that adding many properties in a
+// row avoids quadratic behavior; the destructor transforms the object
+// back to fast properties afterwards.
+OptimizedObjectForAddingMultipleProperties::
+OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
+                                           bool condition) {
+  object_ = object;
+  has_been_transformed_ = condition && object_->HasFastProperties();
+  if (has_been_transformed_) {
+    // Remember the unused field count so the destructor can restore it.
+    unused_property_fields_ = object->map()->unused_property_fields();
+    NormalizeProperties(object_);
+  }
+}
+
+
+// If the constructor normalized the object, transform it back to fast
+// properties, restoring the captured number of unused property fields.
+OptimizedObjectForAddingMultipleProperties::
+~OptimizedObjectForAddingMultipleProperties() {
+  // Reoptimize the object to allow fast property access.
+  if (has_been_transformed_) {
+    TransformToFastProperties(object_, unused_property_fields_);
+  }
+}
+
+
+// Loads a lazily loaded native script for fun: compiles its source (with
+// natives syntax enabled, caching the boilerplate), then runs it using
+// the contexts recorded by SetupLazy. On failure *pending_exception is
+// set and fun's lazy-load data is restored so a later call can retry.
+void LoadLazy(Handle<JSFunction> fun, bool* pending_exception) {
+  HandleScope scope;
+  // Unpack the state stored by SetupLazy: script index plus the compile,
+  // function and security contexts.
+  Handle<FixedArray> info(FixedArray::cast(fun->shared()->lazy_load_data()));
+  int index = Smi::cast(info->get(0))->value();
+  ASSERT(index >= 0);
+  Handle<Context> compile_context(Context::cast(info->get(1)));
+  Handle<Context> function_context(Context::cast(info->get(2)));
+  Handle<Context> security_context(Context::cast(info->get(3)));
+  Handle<Object> receiver(compile_context->global()->builtins());
+
+  Vector<const char> name = Natives::GetScriptName(index);
+
+  Handle<JSFunction> boilerplate;
+
+  if (!Bootstrapper::NativesCacheLookup(name, &boilerplate)) {
+    Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+    Handle<String> script_name = Factory::NewStringFromAscii(name);
+    // Temporarily enable natives syntax while compiling the native script.
+    bool allow_natives_syntax = FLAG_allow_natives_syntax;
+    FLAG_allow_natives_syntax = true;
+    boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+    FLAG_allow_natives_syntax = allow_natives_syntax;
+    // If the compilation failed (possibly due to stack overflows), we
+    // should never enter the result in the natives cache. Instead we
+    // return from the function without marking the function as having
+    // been lazily loaded.
+    if (boilerplate.is_null()) {
+      *pending_exception = true;
+      return;
+    }
+    Bootstrapper::NativesCacheAdd(name, boilerplate);
+  }
+
+  // We shouldn't get here if compiling the script failed.
+  ASSERT(!boilerplate.is_null());
+
+  // When the debugger running in its own context touches lazy loaded
+  // functions loading can be triggered. In that case ensure that the
+  // execution of the boilerplate is in the correct context.
+  SaveContext save;
+  if (!Debug::debug_context().is_null() &&
+      Top::context() == *Debug::debug_context()) {
+    Top::set_context(*compile_context);
+    Top::set_security_context(*security_context);
+  }
+
+  // Reset the lazy load data before running the script to make sure
+  // not to get recursive lazy loading.
+  fun->shared()->set_lazy_load_data(Heap::undefined_value());
+
+  // Run the script.
+  Handle<JSFunction> script_fun(
+      Factory::NewFunctionFromBoilerplate(boilerplate, function_context));
+  Execution::Call(script_fun, receiver, 0, NULL, pending_exception);
+
+  // If lazy loading failed, restore the unloaded state of fun.
+  if (*pending_exception) fun->shared()->set_lazy_load_data(*info);
+}
+
+
+// Marks fun as lazily loaded: records the native-script index and the
+// contexts needed to compile and run it later (consumed by LoadLazy) in
+// a fixed array stored as the shared function info's lazy load data.
+void SetupLazy(Handle<JSFunction> fun,
+               int index,
+               Handle<Context> compile_context,
+               Handle<Context> function_context,
+               Handle<Context> security_context) {
+  Handle<FixedArray> arr = Factory::NewFixedArray(4);
+  arr->set(0, Smi::FromInt(index));
+  arr->set(1, *compile_context);  // Compile in this context
+  arr->set(2, *function_context);  // Set function context to this
+  arr->set(3, *security_context);  // Receiver for call
+  fun->shared()->set_lazy_load_data(*arr);
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HANDLES_H_
+#define V8_HANDLES_H_
+
+namespace v8 { namespace internal {
+
+// ----------------------------------------------------------------------------
+// A Handle provides a reference to an object that survives relocation by
+// the garbage collector.
+// Handles are only valid within a HandleScope.
+// When a handle is created for an object a cell is allocated in the heap.
+
+template<class T>
+class Handle {
+ public:
+  // Wraps an existing handle cell; does not allocate.
+  INLINE(Handle(T** location)) { location_ = location; }
+  // Allocates a new cell in the current HandleScope (see handles-inl.h).
+  INLINE(explicit Handle(T* obj));
+
+  // Creates a null handle (no cell; is_null() returns true).
+  INLINE(Handle()) : location_(NULL) {}
+
+  // Constructor for handling automatic up casting.
+  // Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
+  template <class S> Handle(Handle<S> handle) {
+#ifdef DEBUG
+    T* a = NULL;
+    S* b = NULL;
+    a = b;  // Fake assignment to enforce type checks.
+    USE(a);
+#endif
+    location_ = reinterpret_cast<T**>(handle.location());
+  }
+
+  INLINE(T* operator ->() const) { return operator*(); }
+
+  // Check if this handle refers to the exact same object as the other handle.
+  bool is_identical_to(const Handle<T> other) const {
+    return operator*() == *other;
+  }
+
+  // Provides the C++ dereference operator.
+  INLINE(T* operator*() const);
+
+  // Returns the address to where the raw pointer is stored.
+  T** location() const {
+    ASSERT(location_ == NULL ||
+           reinterpret_cast<Address>(*location_) != kZapValue);
+    return location_;
+  }
+
+  // Downcast: T::cast(*that) performs the type test before rewrapping.
+  template <class S> static Handle<T> cast(Handle<S> that) {
+    T::cast(*that);
+    return Handle<T>(reinterpret_cast<T**>(that.location()));
+  }
+
+  static Handle<T> null() { return Handle<T>(); }
+  bool is_null() {return location_ == NULL; }
+
+  // Closes the given scope, but lets this handle escape. See
+  // implementation in api.h.
+  inline Handle<T> EscapeFrom(HandleScope* scope);
+
+ private:
+  T** location_;  // Address of the cell holding the raw object pointer.
+};
+
+
+// ----------------------------------------------------------------------------
+// Handle operations.
+// They might invoke garbage collection. The result is a handle to
+// an object of expected type, or the handle is an error if running out
+// of space or encountering an internal error.
+
+void NormalizeProperties(Handle<JSObject> object);
+void NormalizeElements(Handle<JSObject> object);
+void TransformToFastProperties(Handle<JSObject> object,
+ int unused_property_fields);
+void FlattenString(Handle<String> str);
+
+void AddProperty(Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+Handle<Object> SetProperty(Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+Handle<Object> SetProperty(Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+Handle<Object> SetElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value);
+
+Handle<Object> GetProperty(Handle<JSObject> obj,
+ const char* name);
+
+Handle<Object> GetProperty(Handle<Object> obj,
+ Handle<Object> key);
+
+Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ PropertyAttributes* attributes);
+
+Handle<Object> GetPrototype(Handle<Object> obj);
+
+Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
+Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
+
+Handle<JSObject> Copy(Handle<JSObject> obj, PretenureFlag = NOT_TENURED);
+
+// Get the JS object corresponding to the given script; create it
+// if none exists.
+Handle<JSValue> GetScriptWrapper(Handle<Script> script);
+
+// Computes the enumerable keys from interceptors. Used for debug mirrors and
+// by GetKeysInFixedArrayFor below.
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> object);
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> object);
+// Computes the enumerable keys for a JSObject. Used for implementing
+// "for (n in object) { }".
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object);
+Handle<JSArray> GetKeysFor(Handle<JSObject> object);
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
+
+// Computes the union of keys and return the result.
+// Used for implementing "for (n in object) { }"
+Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
+ Handle<FixedArray> second);
+
+Handle<String> SubString(Handle<String> str, int start, int end);
+
+
+// Sets the expected number of properties for the function's instances.
+void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
+
+// Sets the prototype property for a function instance.
+void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value);
+
+// Sets the expected number of properties based on estimate from compiler.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+ int estimate);
+void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
+ int estimate);
+
+
+Handle<JSGlobalObject> ReinitializeJSGlobalObject(
+ Handle<JSFunction> constructor,
+ Handle<JSGlobalObject> global);
+
+Handle<Object> SetPrototype(Handle<JSFunction> function,
+ Handle<Object> prototype);
+
+
+// Do lazy compilation of the given function. Returns true on success
+// and false if the compilation resulted in a stack overflow.
+enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag);
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
+
+// These deal with lazily loaded properties.
+void SetupLazy(Handle<JSFunction> fun,
+ int index,
+ Handle<Context> compile_context,
+ Handle<Context> function_context,
+ Handle<Context> security_context);
+void LoadLazy(Handle<JSFunction> fun, bool* pending_exception);
+
+// Scope marker that forbids handle allocation while it is alive. The
+// restriction is enforced in debug builds only; in release builds the
+// class is a no-op (see handles-inl.h for the debug implementation).
+class NoHandleAllocation BASE_EMBEDDED {
+ public:
+#ifndef DEBUG
+  NoHandleAllocation() {}
+  ~NoHandleAllocation() {}
+#else
+  inline NoHandleAllocation();
+  inline ~NoHandleAllocation();
+ private:
+  int extensions_;  // Saved handle scope extension count, restored on exit.
+#endif
+};
+
+
+// ----------------------------------------------------------------------------
+
+
+// Stack-allocated wrapper class for optimizing the addition of multiple
+// properties to an object.
+class OptimizedObjectForAddingMultipleProperties BASE_EMBEDDED {
+ public:
+  // Normalizes the object's properties (when condition holds and the
+  // object has fast properties) so repeated additions stay cheap; the
+  // destructor transforms the object back to fast properties.
+  OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
+                                             bool condition = true);
+  ~OptimizedObjectForAddingMultipleProperties();
+ private:
+  bool has_been_transformed_;  // Tells whether the object has been transformed.
+  int unused_property_fields_;  // Captures the number of unused fields.
+  Handle<JSObject> object_;  // The object being optimized.
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HANDLES_H_
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "hashmap.h"
+
+namespace v8 { namespace internal {
+
+
+// Returns true if x is a power of 2 (x must be nonzero). A power of two
+// has exactly one bit set, so clearing the lowest set bit yields zero.
+static inline bool IsPowerOf2(uint32_t x) {
+  ASSERT(x != 0);
+  return (x & (x - 1)) == 0;
+}
+
+
+// Shared Allocator instance (presumably the default for the allocator
+// parameter declared in hashmap.h — confirm there).
+Allocator HashMap::DefaultAllocator;
+
+
+// Dummy constructor: clears allocator_ and match_ so the destructor is
+// safe, but allocates no backing store. NOTE(review): map_, capacity_
+// and occupancy_ remain uninitialized — a map built this way must not be
+// used for lookups.
+HashMap::HashMap() {
+  allocator_ = NULL;
+  match_ = NULL;
+}
+
+
+// Creates a hash map with the given key-equality function and allocator.
+// initial_capacity must be a power of 2 (checked in Initialize).
+HashMap::HashMap(MatchFun match,
+                 Allocator* allocator,
+                 uint32_t initial_capacity) {
+  allocator_ = allocator;
+  match_ = match;
+  Initialize(initial_capacity);
+}
+
+
+// Frees the backing array. Safe on a default-constructed map because
+// allocator_ is NULL in that case and no delete is attempted.
+HashMap::~HashMap() {
+  if (allocator_) {
+    allocator_->Delete(map_);
+  }
+}
+
+
+// Finds the entry for (key, hash). When the key is absent: inserts a
+// fresh entry if insert is true (growing the table at >= 80% occupancy),
+// otherwise returns NULL.
+HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
+  Entry* entry = Probe(key, hash);
+  if (entry->key != NULL) return entry;  // Found an existing entry.
+
+  if (!insert) return NULL;  // Absent and the caller did not ask to add it.
+
+  // Claim the free slot the probe stopped at.
+  entry->key = key;
+  entry->value = NULL;
+  entry->hash = hash;
+  occupancy_++;
+
+  // Grow the map if we reached >= 80% occupancy; resizing moves the
+  // entry, so probe again for its new position.
+  if (occupancy_ + occupancy_ / 4 >= capacity_) {
+    Resize();
+    entry = Probe(key, hash);
+  }
+
+  return entry;
+}
+
+
+// Empties the map in place: a NULL key marks a slot as unused.
+void HashMap::Clear() {
+  const Entry* limit = map_end();
+  for (Entry* entry = map_; entry < limit; entry++) {
+    entry->key = NULL;
+  }
+  occupancy_ = 0;
+}
+
+
+// Returns the first non-empty entry for iteration, or NULL when the map
+// is empty. Delegates to Next() scanning from just before the table.
+HashMap::Entry* HashMap::Start() const {
+  return Next(map_ - 1);
+}
+
+
+// Returns the first non-empty entry strictly after p, or NULL when the
+// end of the table is reached. p may be map_ - 1 (see Start()).
+HashMap::Entry* HashMap::Next(Entry* p) const {
+  const Entry* limit = map_end();
+  ASSERT(map_ - 1 <= p && p < limit);
+  p++;
+  while (p < limit) {
+    if (p->key != NULL) return p;
+    p++;
+  }
+  return NULL;
+}
+
+
+// Open addressing with linear probing: returns either the entry whose
+// key matches (same hash and match_ returns true) or the first empty
+// slot on the probe path, where the key would be inserted.
+HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
+  ASSERT(key != NULL);
+
+  // capacity_ is a power of 2, so the hash reduces with a bit mask.
+  ASSERT(IsPowerOf2(capacity_));
+  Entry* p = map_ + (hash & (capacity_ - 1));
+  const Entry* end = map_end();
+  ASSERT(map_ <= p && p < end);
+
+  ASSERT(occupancy_ < capacity_);  // guarantees loop termination
+  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
+    p++;
+    if (p >= end) {
+      // Wrap around to the start of the table.
+      p = map_;
+    }
+  }
+
+  return p;
+}
+
+
+// Allocates a fresh table of the given capacity (must be a nonzero power
+// of 2) and marks every slot empty. Dies with a fatal out-of-memory
+// error if the allocator cannot provide the storage.
+void HashMap::Initialize(uint32_t capacity) {
+  ASSERT(IsPowerOf2(capacity));
+  map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
+  if (map_ == NULL) V8::FatalProcessOutOfMemory("HashMap::Initialize");
+  capacity_ = capacity;
+  Clear();
+}
+
+
+// Doubles the capacity and rehashes every live entry into the new table,
+// then frees the old storage. Initialize() resets occupancy_ to 0; the
+// Lookup(..., true) calls below rebuild it as entries are reinserted.
+void HashMap::Resize() {
+  Entry* map = map_;
+  uint32_t n = occupancy_;
+
+  // Allocate larger map.
+  Initialize(capacity_ * 2);
+
+  // Rehash all current entries. n counts down the remaining live
+  // entries, so the scan stops without needing the old capacity.
+  for (Entry* p = map; n > 0; p++) {
+    if (p->key != NULL) {
+      Lookup(p->key, p->hash, true)->value = p->value;
+      n--;
+    }
+  }
+
+  // Delete old map.
+  allocator_->Delete(map);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HASHMAP_H_
+#define V8_HASHMAP_H_
+
+namespace v8 { namespace internal {
+
+
+// Allocator defines the memory allocator interface
+// used by HashMap and implements a default allocator.
+class Allocator BASE_EMBEDDED {
+ public:
+ virtual ~Allocator() {}
+ virtual void* New(size_t size) { return Malloced::New(size); }
+ virtual void Delete(void* p) { Malloced::Delete(p); }
+};
+
+
+// Hash table mapping void* keys to void* values, using open addressing
+// with linear probing (see hashmap.cc). Not thread-safe.
+class HashMap {
+ public:
+  static Allocator DefaultAllocator;
+
+  // Key equality predicate; called only for entries whose full hash
+  // values already match.
+  typedef bool (*MatchFun) (void* key1, void* key2);
+
+  // Dummy constructor. This constructor doesn't set up the hash
+  // map properly so don't use it unless you have good reason.
+  HashMap();
+
+  // initial_capacity is the size of the initial hash map;
+  // it must be a power of 2 (and thus must not be 0).
+  HashMap(MatchFun match,
+          Allocator* allocator = &DefaultAllocator,
+          uint32_t initial_capacity = 8);
+
+  ~HashMap();
+
+  // HashMap entries are (key, value, hash) triplets.
+  // Some clients may not need to use the value slot
+  // (e.g. implementers of sets, where the key is the value).
+  struct Entry {
+    void* key;
+    void* value;
+    uint32_t hash;  // the full hash value for key
+  };
+
+  // If an entry with matching key is found, Lookup()
+  // returns that entry. If no matching entry is found,
+  // but insert is set, a new entry is inserted with
+  // corresponding key, key hash, and NULL value.
+  // Otherwise, NULL is returned.
+  Entry* Lookup(void* key, uint32_t hash, bool insert);
+
+  // Empties the hash map (occupancy() == 0).
+  void Clear();
+
+  // The number of (non-empty) entries in the table.
+  uint32_t occupancy() const { return occupancy_; }
+
+  // The capacity of the table. The implementation
+  // makes sure that occupancy is at most 80% of
+  // the table capacity.
+  uint32_t capacity() const { return capacity_; }
+
+  // Iteration
+  //
+  // for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
+  //   ...
+  // }
+  //
+  // If entries are inserted during iteration, the effect of
+  // calling Next() is undefined.
+  Entry* Start() const;
+  Entry* Next(Entry* p) const;
+
+ private:
+  Allocator* allocator_;  // supplies/releases the table storage
+  MatchFun match_;        // key equality predicate
+  Entry* map_;            // the table; a slot is empty iff key == NULL
+  uint32_t capacity_;     // always a power of 2
+  uint32_t occupancy_;    // number of non-empty slots
+
+  // One past the last table slot.
+  Entry* map_end() const { return map_ + capacity_; }
+  Entry* Probe(void* key, uint32_t hash);
+  void Initialize(uint32_t capacity);
+  void Resize();
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HASHMAP_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_INL_H_
+#define V8_HEAP_INL_H_
+
+#include "log.h"
+#include "v8-counters.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+DECLARE_bool(gc_greedy);
+DECLARE_int(gc_interval);
+#endif
+
+
+// Largest object size allocatable in the paged spaces; larger objects
+// must go to the large object space.
+int Heap::MaxHeapObjectSize() {
+  return Page::kMaxHeapObjectSize;
+}
+
+
+// Allocates size_in_bytes of raw, uninitialized memory in the given
+// space. On exhaustion returns a Failure object rather than raising;
+// callers must check the result with IsFailure().
+Object* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space) {
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+#ifdef DEBUG
+  // With --gc-interval set, force a periodic allocation failure so GC
+  // stress paths get exercised deterministically.
+  if (FLAG_gc_interval >= 0 &&
+      !disallow_allocation_failure_ &&
+      Heap::allocation_timeout_-- <= 0) {
+    return Failure::RetryAfterGC(size_in_bytes, space);
+  }
+  Counters::objs_since_last_full.Increment();
+  Counters::objs_since_last_young.Increment();
+#endif
+  if (NEW_SPACE == space) {
+    return new_space_->AllocateRaw(size_in_bytes);
+  }
+
+  Object* result;
+  if (OLD_SPACE == space) {
+    result = old_space_->AllocateRaw(size_in_bytes);
+  } else if (CODE_SPACE == space) {
+    result = code_space_->AllocateRaw(size_in_bytes);
+  } else if (LO_SPACE == space) {
+    result = lo_space_->AllocateRaw(size_in_bytes);
+  } else {
+    ASSERT(MAP_SPACE == space);
+    result = map_space_->AllocateRaw(size_in_bytes);
+  }
+  // A failed old-generation allocation biases the next collector choice
+  // towards mark-compact (see SelectGarbageCollector).
+  if (result->IsFailure()) old_gen_exhausted_ = true;
+  return result;
+}
+
+
+// Returns a Number holding value: a Smi when it fits, otherwise a newly
+// allocated HeapNumber (which may itself be a Failure on exhaustion).
+Object* Heap::NumberFromInt32(int32_t value) {
+  if (Smi::IsValid(value)) return Smi::FromInt(value);
+  // Bypass NumberFromDouble to avoid various redundant checks.
+  return AllocateHeapNumber(FastI2D(value));
+}
+
+
+// Returns a Number holding the given uint32 value: a Smi when the value
+// is representable as a non-negative int32 and fits the Smi range,
+// otherwise a newly allocated HeapNumber (may be a Failure on
+// exhaustion). The repeated C-style casts are replaced by a single
+// static_cast, matching the C++ cast style used elsewhere in this file.
+Object* Heap::NumberFromUint32(uint32_t value) {
+  int32_t signed_value = static_cast<int32_t>(value);
+  if (signed_value >= 0 && Smi::IsValid(signed_value)) {
+    return Smi::FromInt(signed_value);
+  }
+  // Bypass NumberFromDouble to avoid various redundant checks.
+  return AllocateHeapNumber(FastUI2D(value));
+}
+
+
+// Allocates raw memory for a Map object directly in map space. Returns
+// a Failure on exhaustion; callers must check IsFailure().
+Object* Heap::AllocateRawMap(int size_in_bytes) {
+#ifdef DEBUG
+  Counters::objs_since_last_full.Increment();
+  Counters::objs_since_last_young.Increment();
+#endif
+  Object* result = map_space_->AllocateRaw(size_in_bytes);
+  // Failure biases the next collector choice towards mark-compact.
+  if (result->IsFailure()) old_gen_exhausted_ = true;
+  return result;
+}
+
+
+// True if object lies anywhere in the young generation.
+bool Heap::InNewSpace(Object* object) {
+  return new_space_->Contains(object);
+}
+
+
+// True if object lies in the from semispace of the young generation.
+bool Heap::InFromSpace(Object* object) {
+  return new_space_->FromSpaceContains(object);
+}
+
+
+// True if object lies in the to semispace of the young generation.
+bool Heap::InToSpace(Object* object) {
+  return new_space_->ToSpaceContains(object);
+}
+
+
+bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+  // Promote an object that has already survived a scavenge: such
+  // objects lie below the age mark.
+  if (old_address < new_space_->age_mark()) return true;
+  // Also promote when to space would reach at least 25% full.
+  return (new_space_->Size() + object_size) >= (new_space_->Capacity() >> 2);
+}
+
+
+// Records a pointer store at address + offset in the remembered set.
+// Writes into objects that live in new space need no remembered set
+// entry, so those are skipped.
+void Heap::RecordWrite(Address address, int offset) {
+  if (new_space_->Contains(address)) return;
+  ASSERT(!new_space_->FromSpaceContains(address));
+  SLOW_ASSERT(Contains(address + offset));
+  Page::SetRSet(address, offset);
+}
+
+
+// Allocates backing storage for the unused in-object property fields
+// described by map; shares the canonical empty fixed array when the map
+// has none.
+Object* Heap::AllocatePropertyStorageForMap(Map* map) {
+  int unused = map->unused_property_fields();
+  if (unused <= 0) return Heap::empty_fixed_array();
+  return AllocateFixedArray(unused);
+}
+
+
+// Debug aid: with --gc-greedy, force a new-space collection before the
+// allocation (unless allocation failure is disallowed) to shake out
+// GC-unsafe code paths.
+#define GC_GREEDY_CHECK() \
+  ASSERT(!FLAG_gc_greedy \
+         || v8::internal::Heap::disallow_allocation_failure() \
+         || v8::internal::Heap::CollectGarbage(0, NEW_SPACE))
+
+
+// Do not use the identifier __object__ in a call to this macro.
+//
+// Call the function FUNCTION_CALL. If it fails with a RetryAfterGC
+// failure, call the garbage collector and retry the function. If the
+// garbage collector cannot reclaim the required space or the second
+// call fails with a RetryAfterGC failure, fail with out of memory.
+// If there is any other failure, return a null handle. If either
+// call succeeds, return a handle to the functions return value.
+//
+// Note that this macro always returns or raises a fatal error.
+// Implementation note: FUNCTION_CALL may be evaluated twice (initial
+// attempt plus the retry after GC), so it must be safe to repeat.
+#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)                             \
+  do {                                                                      \
+    GC_GREEDY_CHECK();                                                      \
+    Object* __object__ = FUNCTION_CALL;                                     \
+    if (__object__->IsFailure()) {                                          \
+      if (__object__->IsRetryAfterGC()) {                                   \
+        if (!Heap::CollectGarbage(                                          \
+                Failure::cast(__object__)->requested(),                     \
+                Failure::cast(__object__)->allocation_space())) {           \
+          /* TODO(1181417): Fix this. */                                    \
+          v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION");  \
+        }                                                                   \
+        __object__ = FUNCTION_CALL;                                         \
+        if (__object__->IsFailure()) {                                      \
+          if (__object__->IsRetryAfterGC()) {                               \
+            /* TODO(1181417): Fix this. */                                  \
+            v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \
+          }                                                                 \
+          return Handle<TYPE>();                                            \
+        }                                                                   \
+      } else {                                                              \
+        return Handle<TYPE>();                                              \
+      }                                                                     \
+    }                                                                       \
+    return Handle<TYPE>(TYPE::cast(__object__));                            \
+  } while (false)
+
+
+#ifdef DEBUG
+
+// Debug helper: sets whether heap allocation is permitted and returns
+// the previous setting so callers can restore it afterwards.
+inline bool Heap::allow_allocation(bool new_state) {
+  bool previous = allocation_allowed_;
+  allocation_allowed_ = new_state;
+  return previous;
+}
+
+#endif
+
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "global-handles.h"
+#include "jsregexp.h"
+#include "mark-compact.h"
+#include "natives.h"
+#include "scanner.h"
+#include "scopeinfo.h"
+#include "v8threads.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+// Debug-only diagnostics flags.
+DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations");
+DEFINE_bool(gc_verbose, false, "print stuff during garbage collection");
+DEFINE_bool(heap_stats, false, "report heap statistics before and after GC");
+DEFINE_bool(code_stats, false, "report code statistics after GC");
+DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC");
+DEFINE_bool(print_handles, false, "report handles after GC");
+DEFINE_bool(print_global_handles, false, "report global handles after GC");
+DEFINE_bool(print_rset, false, "print remembered sets before GC");
+#endif
+
+// Heap sizing flags (0 means: use the built-in defaults).
+DEFINE_int(new_space_size, 0, "size of (each semispace in) the new generation");
+DEFINE_int(old_space_size, 0, "size of the old generation");
+
+// Collector behavior flags.
+DEFINE_bool(gc_global, false, "always perform global GCs");
+DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations");
+DEFINE_bool(trace_gc, false,
+            "print one trace line following each garbage collection");
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+DECLARE_bool(log_gc);
+#endif
+
+
+// Storage definitions for the per-root, per-struct-map and per-symbol
+// static members declared via the list macros in heap.h.
+#define ROOT_ALLOCATION(type, name) type* Heap::name##_;
+  ROOT_LIST(ROOT_ALLOCATION)
+#undef ROOT_ALLOCATION
+
+
+#define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_;
+  STRUCT_LIST(STRUCT_ALLOCATION)
+#undef STRUCT_ALLOCATION
+
+
+#define SYMBOL_ALLOCATION(name, string) String* Heap::name##_;
+  SYMBOL_LIST(SYMBOL_ALLOCATION)
+#undef SYMBOL_ALLOCATION
+
+
+// The spaces; all NULL until Setup creates them (see HasBeenSetup).
+NewSpace* Heap::new_space_ = NULL;
+OldSpace* Heap::old_space_ = NULL;
+OldSpace* Heap::code_space_ = NULL;
+MapSpace* Heap::map_space_ = NULL;
+LargeObjectSpace* Heap::lo_space_ = NULL;
+
+int Heap::promoted_space_limit_ = 0;
+// NOTE(review): declared int but initialized from a bool -- presumably
+// matches the declaration in heap.h; confirm.
+int Heap::old_gen_exhausted_ = false;
+
+// semispace_size_ should be a power of 2 and old_generation_size_ should be
+// a multiple of Page::kPageSize.
+int Heap::semispace_size_ = 1*MB;
+int Heap::old_generation_size_ = 512*MB;
+int Heap::initial_semispace_size_ = 256*KB;
+
+GCCallback Heap::global_gc_prologue_callback_ = NULL;
+GCCallback Heap::global_gc_epilogue_callback_ = NULL;
+
+// Variables set based on semispace_size_ and old_generation_size_ in
+// ConfigureHeap.
+int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.
+
+// Double the new space after this many scavenge collections.
+int Heap::new_space_growth_limit_ = 8;
+int Heap::scavenge_count_ = 0;
+Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
+
+#ifdef DEBUG
+bool Heap::allocation_allowed_ = true;
+int Heap::mc_count_ = 0;
+int Heap::gc_count_ = 0;
+
+int Heap::allocation_timeout_ = 0;
+bool Heap::disallow_allocation_failure_ = false;
+#endif  // DEBUG
+
+
+// Total capacity of all paged spaces plus new space, in bytes; 0 before
+// the heap has been set up.
+int Heap::Capacity() {
+  if (!HasBeenSetup()) return 0;
+
+  int total = new_space_->Capacity();
+  total += old_space_->Capacity();
+  total += code_space_->Capacity();
+  total += map_space_->Capacity();
+  return total;
+}
+
+
+// Bytes still available across the same spaces Capacity() covers; 0
+// before the heap has been set up.
+int Heap::Available() {
+  if (!HasBeenSetup()) return 0;
+
+  int total = new_space_->Available();
+  total += old_space_->Available();
+  total += code_space_->Available();
+  total += map_space_->Available();
+  return total;
+}
+
+
+// The heap is usable only once every space has been created.
+bool Heap::HasBeenSetup() {
+  if (new_space_ == NULL) return false;
+  if (old_space_ == NULL) return false;
+  if (code_space_ == NULL) return false;
+  if (map_space_ == NULL) return false;
+  return lo_space_ != NULL;
+}
+
+
+// Chooses between the scavenger (young generation only) and the
+// mark-compact collector (full GC). Each taken branch increments the
+// matching "caused by" counter for diagnostics.
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
+  // Is global GC requested?
+  if (space != NEW_SPACE || FLAG_gc_global) {
+    Counters::gc_compactor_caused_by_request.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Is enough data promoted to justify a global GC?
+  if (PromotedSpaceSize() > promoted_space_limit_) {
+    Counters::gc_compactor_caused_by_promoted_data.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Have allocation in OLD and LO failed?
+  if (old_gen_exhausted_) {
+    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Is there enough space left in OLD to guarantee that a scavenge can
+  // succeed?
+  //
+  // Note that old_space_->MaxAvailable() undercounts the memory available
+  // for object promotion. It counts only the bytes that the memory
+  // allocator has not yet allocated from the OS and assigned to any space,
+  // and does not count available bytes already in the old space or code
+  // space. Undercounting is safe---we may get an unrequested full GC when
+  // a scavenge would have succeeded.
+  if (old_space_->MaxAvailable() <= new_space_->Size()) {
+    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Default
+  return SCAVENGER;
+}
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+// Emits heap statistics before a collection, honoring --heap-stats and
+// --log-gc without reporting new-space statistics twice.
+void Heap::ReportStatisticsBeforeGC() {
+  // Heap::ReportHeapStatistics will also log NewSpace statistics when
+  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
+  // following logic is used to avoid double logging.
+#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
+  if (FLAG_heap_stats) {
+    ReportHeapStatistics("Before GC");
+  } else if (FLAG_log_gc) {
+    new_space_->ReportStatistics();
+  }
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
+#elif defined(DEBUG)
+  if (FLAG_heap_stats) {
+    new_space_->CollectStatistics();
+    ReportHeapStatistics("Before GC");
+    new_space_->ClearHistograms();
+  }
+#elif defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_log_gc) {
+    new_space_->CollectStatistics();
+    new_space_->ReportStatistics();
+    new_space_->ClearHistograms();
+  }
+#endif
+}
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+// Emits heap statistics after a collection; the counterpart of
+// ReportStatisticsBeforeGC (histograms were already cleared there).
+void Heap::ReportStatisticsAfterGC() {
+  // Similar to the before GC, we use some complicated logic to ensure that
+  // NewSpace statistics are logged exactly once when --log-gc is turned on.
+#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_heap_stats) {
+    ReportHeapStatistics("After GC");
+  } else if (FLAG_log_gc) {
+    new_space_->ReportStatistics();
+  }
+#elif defined(DEBUG)
+  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
+#elif defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_log_gc) new_space_->ReportStatistics();
+#endif
+}
+#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+
+
+// Runs before every collection: notifies the regexp subsystem, and in
+// debug builds forbids allocation, optionally verifies/prints the heap,
+// and dumps remembered sets.
+void Heap::GarbageCollectionPrologue() {
+  RegExpImpl::NewSpaceCollectionPrologue();
+#ifdef DEBUG
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+  allow_allocation(false);
+  gc_count_++;
+
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+
+  if (FLAG_gc_verbose) Print();
+
+  if (FLAG_print_rset) {
+    // By definition, code space does not have remembered set bits that we
+    // care about.
+    old_space_->PrintRSet();
+    map_space_->PrintRSet();
+    lo_space_->PrintRSet();
+  }
+#endif
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  ReportStatisticsBeforeGC();
+#endif
+}
+
+// Total bytes occupied by objects across every space.
+int Heap::SizeOfObjects() {
+  int total = new_space_->Size();
+  total += old_space_->Size();
+  total += code_space_->Size();
+  total += map_space_->Size();
+  total += lo_space_->Size();
+  return total;
+}
+
+// Runs after every collection: re-enables allocation (debug), performs
+// optional verification/printing, and refreshes heap/symbol counters.
+void Heap::GarbageCollectionEpilogue() {
+#ifdef DEBUG
+  allow_allocation(true);
+  // Poison from space so stale pointers into it are caught early.
+  ZapFromSpace();
+
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+
+  if (FLAG_print_global_handles) GlobalHandles::Print();
+  if (FLAG_print_handles) PrintHandles();
+  if (FLAG_gc_verbose) Print();
+  if (FLAG_code_stats) ReportCodeStatistics("After GC");
+#endif
+
+  Counters::alive_after_last_gc.Set(SizeOfObjects());
+
+  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table_);
+  Counters::symbol_table_capacity.Set(symbol_table->Capacity());
+  Counters::number_of_symbols.Set(symbol_table->NumberOfElements());
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  ReportStatisticsAfterGC();
+#endif
+}
+
+
+// GCTracer collects and prints ONE line after each garbage collector
+// invocation IFF --trace_gc is used.
+
+// Scoped tracer: records heap size and time at construction and prints
+// a single summary line at destruction when --trace-gc is set.
+class GCTracer BASE_EMBEDDED {
+ public:
+  GCTracer() : start_time_(0.0), start_size_(0.0) {
+    if (!FLAG_trace_gc) return;
+    start_time_ = OS::TimeCurrentMillis();
+    start_size_ = SizeOfHeapObjects();
+  }
+
+  ~GCTracer() {
+    if (!FLAG_trace_gc) return;
+    // Printf ONE line iff flag is set.
+    PrintF("%s %.1f -> %.1f MB, %d ms.\n",
+           CollectorString(),
+           start_size_, SizeOfHeapObjects(),
+           static_cast<int>(OS::TimeCurrentMillis() - start_time_));
+  }
+
+  // Sets the collector. NOTE(review): collector_ has no initializer, so
+  // the destructor's CollectorString() assumes this was called -- confirm
+  // all GCTracer users call set_collector before scope exit.
+  void set_collector(GarbageCollector collector) {
+    collector_ = collector;
+  }
+
+ private:
+
+  // Returns a string matching the collector.
+  const char* CollectorString() {
+    switch (collector_) {
+      case SCAVENGER:
+        return "Scavenge";
+      case MARK_COMPACTOR:
+        return MarkCompactCollector::HasCompacted() ? "Mark-compact"
+                                                    : "Mark-sweep";
+    }
+    return "Unknown GC";
+  }
+
+  // Returns size of object in heap (in MB).
+  double SizeOfHeapObjects() {
+    return (static_cast<double>(Heap::SizeOfObjects())) / MB;
+  }
+
+  double start_time_;  // Timestamp set in the constructor.
+  double start_size_;  // Size of objects in heap set in constructor.
+  GarbageCollector collector_;  // Type of collector.
+};
+
+
+
+// Performs one garbage collection appropriate for the given space, then
+// reports whether requested_size bytes are now available in that space.
+bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
+  // The VM is in the GC state until exiting this function.
+  VMState state(GC);
+
+#ifdef DEBUG
+  // Reset the allocation timeout to the GC interval, but make sure to
+  // allow at least a few allocations after a collection. The reason
+  // for this is that we have a lot of allocation sequences and we
+  // assume that a garbage collection will allow the subsequent
+  // allocation attempts to go through.
+  allocation_timeout_ = Max(6, FLAG_gc_interval);
+#endif
+
+  // Tracer scope: prints one summary line on exit when --trace-gc is on.
+  { GCTracer tracer;
+    GarbageCollectionPrologue();
+
+    GarbageCollector collector = SelectGarbageCollector(space);
+    tracer.set_collector(collector);
+
+    StatsRate* rate = (collector == SCAVENGER)
+        ? &Counters::gc_scavenger
+        : &Counters::gc_compactor;
+    rate->Start();
+    PerformGarbageCollection(space, collector);
+    rate->Stop();
+
+    GarbageCollectionEpilogue();
+  }
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log_gc) HeapProfiler::WriteSample();
+#endif
+
+  switch (space) {
+    case NEW_SPACE:
+      return new_space_->Available() >= requested_size;
+    case OLD_SPACE:
+      return old_space_->Available() >= requested_size;
+    case CODE_SPACE:
+      return code_space_->Available() >= requested_size;
+    case MAP_SPACE:
+      return map_space_->Available() >= requested_size;
+    case LO_SPACE:
+      return lo_space_->Available() >= requested_size;
+  }
+  // Unreachable for valid spaces; keeps the compiler satisfied.
+  return false;
+}
+
+
+// Runs the chosen collector, updates promotion limits after a full GC,
+// invokes the user GC callbacks (full GCs only), and processes weak
+// handles afterwards.
+void Heap::PerformGarbageCollection(AllocationSpace space,
+                                    GarbageCollector collector) {
+  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
+    ASSERT(!allocation_allowed_);
+    global_gc_prologue_callback_();
+  }
+
+  if (collector == MARK_COMPACTOR) {
+    MarkCompact();
+
+    // Raise the promotion limit to 135% of the post-GC promoted size
+    // (at least +2MB) before the next full collection triggers.
+    int promoted_space_size = PromotedSpaceSize();
+    promoted_space_limit_ =
+        promoted_space_size + Max(2 * MB, (promoted_space_size/100) * 35);
+    old_gen_exhausted_ = false;
+
+    // If we have used the mark-compact collector to collect the new
+    // space, and it has not compacted the new space, we force a
+    // separate scavenge collection. THIS IS A HACK. It covers the
+    // case where (1) a new space collection was requested, (2) the
+    // collector selection policy selected the mark-compact collector,
+    // and (3) the mark-compact collector policy selected not to
+    // compact the new space. In that case, there is no more (usable)
+    // free space in the new space after the collection compared to
+    // before.
+    if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) {
+      Scavenge();
+    }
+  } else {
+    Scavenge();
+  }
+  Counters::objs_since_last_young.Set(0);
+
+  // Process weak handles post gc.
+  GlobalHandles::PostGarbageCollectionProcessing();
+
+  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
+    ASSERT(!allocation_allowed_);
+    global_gc_epilogue_callback_();
+  }
+}
+
+
+// Full collection: runs the mark-compact collector between its prologue
+// and epilogue hooks, then shrinks the spaces and resets counters.
+void Heap::MarkCompact() {
+  gc_state_ = MARK_COMPACT;
+#ifdef DEBUG
+  mc_count_++;
+#endif
+  LOG(ResourceEvent("markcompact", "begin"));
+
+  MarkCompactPrologue();
+
+  MarkCompactCollector::CollectGarbage();
+
+  MarkCompactEpilogue();
+
+  LOG(ResourceEvent("markcompact", "end"));
+
+  gc_state_ = NOT_IN_GC;
+
+  Shrink();
+
+  Counters::objs_since_last_full.Set(0);
+}
+
+
+// Notifies subsystems that cache heap pointers before a full GC.
+void Heap::MarkCompactPrologue() {
+  RegExpImpl::OldSpaceCollectionPrologue();
+  Top::MarkCompactPrologue();
+  ThreadManager::MarkCompactPrologue();
+}
+
+
+// Counterpart of MarkCompactPrologue, run after the full GC finishes.
+void Heap::MarkCompactEpilogue() {
+  Top::MarkCompactEpilogue();
+  ThreadManager::MarkCompactEpilogue();
+}
+
+
+// Finds the code object containing address a, checking code space first
+// and falling back to the large object space. The result of the second
+// lookup (possibly a Failure) is returned as-is.
+Object* Heap::FindCodeObject(Address a) {
+  Object* result = code_space_->FindObject(a);
+  if (!result->IsFailure()) return result;
+  return lo_space_->FindObject(a);
+}
+
+
+// Helper class for copying HeapObjects
+class CopyVisitor: public ObjectVisitor {
+ public:
+
+ void VisitPointer(Object** p) {
+ CopyObject(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Copy all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) CopyObject(p);
+ }
+
+ private:
+ void CopyObject(Object** p) {
+ if (!Heap::InFromSpace(*p)) return;
+ Heap::CopyObject(reinterpret_cast<HeapObject**>(p));
+ }
+};
+
+
+// Shared state read by the scavenge collector and set by CopyObject.
+// Addresses of promoted objects grow downward from the top of to space;
+// see the sweeping loop in Heap::Scavenge.
+static Address promoted_top = NULL;
+
+
+#ifdef DEBUG
+// Visitor class to verify pointers in code space do not point into
+// new space.
+// Visitor that asserts no heap pointer in the visited range refers into
+// new space (code space must not hold such pointers).
+class VerifyCodeSpacePointersVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->IsHeapObject()) {
+        ASSERT(!Heap::InNewSpace(HeapObject::cast(obj)));
+      }
+    }
+  }
+};
+#endif
+
+// Young-generation collection using Cheney's copying algorithm: live
+// objects are copied within new space or promoted to old space, and the
+// remembered set is rebuilt for promoted objects.
+void Heap::Scavenge() {
+#ifdef DEBUG
+  // With slow asserts, first verify that code space holds no pointers
+  // into new space. IC targets are temporarily converted to objects so
+  // that they are visited too.
+  if (FLAG_enable_slow_asserts) {
+    VerifyCodeSpacePointersVisitor v;
+    HeapObjectIterator it(code_space_);
+    while (it.has_next()) {
+      HeapObject* object = it.next();
+      if (object->IsCode()) {
+        Code::cast(object)->ConvertICTargetsFromAddressToObject();
+      }
+      object->Iterate(&v);
+      if (object->IsCode()) {
+        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+      }
+    }
+  }
+#endif
+
+  gc_state_ = SCAVENGE;
+
+  // Implements Cheney's copying algorithm
+  LOG(ResourceEvent("scavenge", "begin"));
+
+  scavenge_count_++;
+  if (new_space_->Capacity() < new_space_->MaximumCapacity() &&
+      scavenge_count_ > new_space_growth_limit_) {
+    // Double the size of the new space, and double the limit. The next
+    // doubling attempt will occur after the current new_space_growth_limit_
+    // more collections.
+    // TODO(1240712): NewSpace::Double has a return value which is
+    // ignored here.
+    new_space_->Double();
+    new_space_growth_limit_ *= 2;
+  }
+
+  // Flip the semispaces. After flipping, to space is empty, from space has
+  // live objects.
+  new_space_->Flip();
+  new_space_->ResetAllocationInfo();
+
+  // We need to sweep newly copied objects which can be in either the to space
+  // or the old space. For to space objects, we use a mark. Newly copied
+  // objects lie between the mark and the allocation top. For objects
+  // promoted to old space, we write their addresses downward from the top of
+  // the new space. Sweeping newly promoted objects requires an allocation
+  // pointer and a mark. Note that the allocation pointer 'top' actually
+  // moves downward from the high address in the to space.
+  //
+  // There is guaranteed to be enough room at the top of the to space for the
+  // addresses of promoted objects: every object promoted frees up its size in
+  // bytes from the top of the new space, and objects are at least one pointer
+  // in size. Using the new space to record promoted addresses makes the
+  // scavenge collector agnostic to the allocation strategy (eg, linear or
+  // free-list) used in old space.
+  Address new_mark = new_space_->ToSpaceLow();
+  Address promoted_mark = new_space_->ToSpaceHigh();
+  promoted_top = new_space_->ToSpaceHigh();
+
+  CopyVisitor copy_visitor;
+  // Copy roots.
+  IterateRoots(&copy_visitor);
+
+  // Copy objects reachable from the old generation. By definition, there
+  // are no intergenerational pointers in code space.
+  IterateRSet(old_space_, &CopyObject);
+  IterateRSet(map_space_, &CopyObject);
+  lo_space_->IterateRSet(&CopyObject);
+
+  bool has_processed_weak_pointers = false;
+
+  // Transitive closure: alternate between draining the copy work lists
+  // and (once) the weak roots, until no new objects are discovered.
+  while (true) {
+    ASSERT(new_mark <= new_space_->top());
+    ASSERT(promoted_mark >= promoted_top);
+
+    // Copy objects reachable from newly copied objects.
+    while (new_mark < new_space_->top() || promoted_mark > promoted_top) {
+      // Sweep newly copied objects in the to space. The allocation pointer
+      // can change during sweeping.
+      Address previous_top = new_space_->top();
+      SemiSpaceIterator new_it(new_space_, new_mark);
+      while (new_it.has_next()) {
+        new_it.next()->Iterate(&copy_visitor);
+      }
+      new_mark = previous_top;
+
+      // Sweep newly copied objects in the old space. The promotion 'top'
+      // pointer could change during sweeping.
+      previous_top = promoted_top;
+      for (Address current = promoted_mark - kPointerSize;
+           current >= previous_top;
+           current -= kPointerSize) {
+        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
+        object->Iterate(&copy_visitor);
+        UpdateRSet(object);
+      }
+      promoted_mark = previous_top;
+    }
+
+    if (has_processed_weak_pointers) break;  // We are done.
+    // Copy objects reachable from weak pointers.
+    GlobalHandles::IterateWeakRoots(&copy_visitor);
+    has_processed_weak_pointers = true;
+  }
+
+  // Set age mark.
+  new_space_->set_age_mark(new_mark);
+
+  LOG(ResourceEvent("scavenge", "end"));
+
+  gc_state_ = NOT_IN_GC;
+}
+
+
+// Clears the remembered set bits covering [start, start + size_in_bytes).
+// NOTE(review): start_bit/end_bit appear to be single-bit masks within
+// their respective rset words (the mask arithmetic below relies on
+// that) -- confirm against Page::ComputeRSetBitPosition.
+void Heap::ClearRSetRange(Address start, int size_in_bytes) {
+  uint32_t start_bit;
+  Address start_word_address =
+      Page::ComputeRSetBitPosition(start, 0, &start_bit);
+  uint32_t end_bit;
+  Address end_word_address =
+      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
+                                   0,
+                                   &end_bit);
+
+  // We want to clear the bits in the starting word starting with the
+  // first bit, and in the ending word up to and including the last
+  // bit. Build a pair of bitmasks to do that.
+  uint32_t start_bitmask = start_bit - 1;
+  uint32_t end_bitmask = ~((end_bit << 1) - 1);
+
+  // If the start address and end address are the same, we mask that
+  // word once, otherwise mask the starting and ending word
+  // separately and all the ones in between.
+  if (start_word_address == end_word_address) {
+    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
+  } else {
+    Memory::uint32_at(start_word_address) &= start_bitmask;
+    Memory::uint32_at(end_word_address) &= end_bitmask;
+    start_word_address += kIntSize;
+    // Zero the whole words strictly between the two boundary words.
+    memset(start_word_address, 0, end_word_address - start_word_address);
+  }
+}
+
+
+// Visitor that sets the remembered-set bit for every visited slot that
+// holds a pointer into new space.  The bit for each slot must already be
+// clear (asserted below).
+class UpdateRSetVisitor: public ObjectVisitor {
+ public:
+
+ void VisitPointer(Object** p) {
+ UpdateRSet(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Update a store into slots [start, end), used (a) to update remembered
+ // set when promoting a young object to old space or (b) to rebuild
+ // remembered sets after a mark-compact collection.
+ for (Object** p = start; p < end; p++) UpdateRSet(p);
+ }
+ private:
+
+ // Set the rset bit for slot p if *p points into new space.
+ void UpdateRSet(Object** p) {
+ // The remembered set should not be set. It should be clear for objects
+ // newly copied to old space, and it is cleared before rebuilding in the
+ // mark-compact collector.
+ ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
+ if (Heap::InNewSpace(*p)) {
+ Page::SetRSet(reinterpret_cast<Address>(p), 0);
+ }
+ }
+};
+
+
+// Set remembered-set bits for every slot of obj that points into new
+// space and return the object's size.  The object's rset bits must be
+// clear on entry (asserted here and in UpdateRSetVisitor).
+int Heap::UpdateRSet(HeapObject* obj) {
+ ASSERT(!InNewSpace(obj));
+ // Special handling of fixed arrays to iterate the body based on the start
+ // address and offset. Just iterating the pointers as in UpdateRSetVisitor
+ // will not work because Page::SetRSet needs to have the start of the
+ // object.
+ if (obj->IsFixedArray()) {
+ FixedArray* array = FixedArray::cast(obj);
+ int length = array->length();
+ for (int i = 0; i < length; i++) {
+ int offset = FixedArray::kHeaderSize + i * kPointerSize;
+ ASSERT(!Page::IsRSetSet(obj->address(), offset));
+ if (Heap::InNewSpace(array->get(i))) {
+ Page::SetRSet(obj->address(), offset);
+ }
+ }
+ } else if (!obj->IsCode()) {
+ // Skip code object, we know it does not contain inter-generational
+ // pointers.
+ UpdateRSetVisitor v;
+ obj->Iterate(&v);
+ }
+ return obj->Size();
+}
+
+
+// Rebuild the remembered sets for the map, old, and large object
+// spaces from scratch.  Code space needs no remembered set by
+// definition, so it is skipped.
+void Heap::RebuildRSets() {
+ map_space_->ClearRSet();
+ RebuildRSets(map_space_);
+
+ old_space_->ClearRSet();
+ RebuildRSets(old_space_);
+
+ lo_space_->ClearRSet();
+ RebuildRSets(lo_space_);
+}
+
+
+// Rebuild the remembered set for every object in a paged space.
+void Heap::RebuildRSets(PagedSpace* space) {
+ HeapObjectIterator iterator(space);
+ while (iterator.has_next()) {
+ UpdateRSet(iterator.next());
+ }
+}
+
+
+// Rebuild the remembered set for every object in the large object space.
+void Heap::RebuildRSets(LargeObjectSpace* space) {
+ LargeObjectIterator iterator(space);
+ while (iterator.has_next()) {
+ UpdateRSet(iterator.next());
+ }
+}
+
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+// Record new-space allocation/promotion statistics for an object that
+// was just copied by the scavenger, when the heap-stats or GC-logging
+// flags are set.
+void Heap::RecordCopiedObject(HeapObject* obj) {
+ bool should_record = false;
+#ifdef DEBUG
+ should_record = FLAG_heap_stats;
+#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ should_record = should_record || FLAG_log_gc;
+#endif
+ if (should_record) {
+ // Objects still in new space were copied within it; everything else
+ // was promoted out of it.
+ if (new_space_->Contains(obj)) {
+ new_space_->RecordAllocation(obj);
+ } else {
+ new_space_->RecordPromotion(obj);
+ }
+ }
+}
+#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+
+
+// Copy the body of *source_p (size bytes) into target word by word and
+// leave a forwarding pointer to target in the old object's map word.
+// Returns target.
+HeapObject* Heap::MigrateObject(HeapObject** source_p,
+ HeapObject* target,
+ int size) {
+ void** src = reinterpret_cast<void**>((*source_p)->address());
+ void** dst = reinterpret_cast<void**>(target->address());
+ // Copies size/kPointerSize words: the do-while executes counter + 1 times.
+ int counter = size/kPointerSize - 1;
+ do {
+ *dst++ = *src++;
+ } while (counter-- > 0);
+
+ // Set forwarding pointers, cannot use Map::cast because it asserts
+ // the value type to be Map.
+ (*source_p)->set_map(reinterpret_cast<Map*>(target));
+
+ // Update NewSpace stats if necessary.
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ RecordCopiedObject(target);
+#endif
+
+ return target;
+}
+
+
+// Scavenge one slot: if *p was already copied, replace *p with the
+// forwarding pointer; otherwise copy the object (promoting it out of
+// new space when ShouldBePromoted says so) and patch *p.  Promoted
+// objects that may contain pointers are pushed on the promoted-object
+// stack so their slots get swept later.
+void Heap::CopyObject(HeapObject** p) {
+ ASSERT(InFromSpace(*p));
+
+ HeapObject* object = *p;
+
+ // We use the first word (where the map pointer usually is) of a
+ // HeapObject to record the forwarding pointer. A forwarding pointer can
+ // point to the old space, the code space, or the to space of the new
+ // generation.
+ HeapObject* first_word = object->map();
+
+ // If the first word (where the map pointer is) is not a map pointer, the
+ // object has already been copied. We do not use first_word->IsMap()
+ // because we know that first_word always has the heap object tag.
+ if (first_word->map()->instance_type() != MAP_TYPE) {
+ *p = first_word;
+ return;
+ }
+
+ // Optimization: Bypass ConsString objects where the right-hand side is
+ // Heap::empty_string(). We do not use object->IsConsString because we
+ // already know that object has the heap object tag.
+ InstanceType type = Map::cast(first_word)->instance_type();
+ if (type < FIRST_NONSTRING_TYPE &&
+ String::cast(object)->representation_tag() == kConsStringTag &&
+ ConsString::cast(object)->second() == Heap::empty_string()) {
+ object = HeapObject::cast(ConsString::cast(object)->first());
+ *p = object;
+ // After patching *p we have to repeat the checks that object is in the
+ // active semispace of the young generation and not already copied.
+ if (!InFromSpace(object)) return;
+ first_word = object->map();
+ if (first_word->map()->instance_type() != MAP_TYPE) {
+ *p = first_word;
+ return;
+ }
+ type = Map::cast(first_word)->instance_type();
+ }
+
+ int object_size = object->SizeFromMap(Map::cast(first_word));
+ Object* result;
+ // If the object should be promoted, we try to copy it to old space.
+ if (ShouldBePromoted(object->address(), object_size)) {
+ // Heap numbers and sequential strings are promoted to code space, all
+ // other object types are promoted to old space. We do not use
+ // object->IsHeapNumber() and object->IsSeqString() because we already
+ // know that object has the heap object tag.
+ bool has_pointers =
+ type != HEAP_NUMBER_TYPE &&
+ (type >= FIRST_NONSTRING_TYPE ||
+ String::cast(object)->representation_tag() != kSeqStringTag);
+ if (has_pointers) {
+ result = old_space_->AllocateRaw(object_size);
+ } else {
+ result = code_space_->AllocateRaw(object_size);
+ }
+
+ if (!result->IsFailure()) {
+ *p = MigrateObject(p, HeapObject::cast(result), object_size);
+ if (has_pointers) {
+ // Record the object's address at the top of the to space, to allow
+ // it to be swept by the scavenger.
+ // The promoted-object stack grows downward from promoted_top.
+ promoted_top -= kPointerSize;
+ Memory::Object_at(promoted_top) = *p;
+ } else {
+#ifdef DEBUG
+ // Objects promoted to the code space should not have pointers to
+ // new space.
+ VerifyCodeSpacePointersVisitor v;
+ (*p)->Iterate(&v);
+#endif
+ }
+ return;
+ }
+ }
+
+ // The object should remain in new space or the old space allocation failed.
+ result = new_space_->AllocateRaw(object_size);
+ // Failed allocation at this point is utterly unexpected.
+ ASSERT(!result->IsFailure());
+ *p = MigrateObject(p, HeapObject::cast(result), object_size);
+}
+
+
+// Allocate a map whose fields are only partially initialized.  Used
+// during bootstrapping, before the objects needed to fill in the
+// remaining fields exist; CreateInitialMaps patches them afterwards.
+Object* Heap::AllocatePartialMap(InstanceType instance_type,
+ int instance_size) {
+ Object* result = AllocateRawMap(Map::kSize);
+ if (result->IsFailure()) return result;
+
+ // Map::cast cannot be used due to uninitialized map field.
+ reinterpret_cast<Map*>(result)->set_map(meta_map());
+ reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
+ reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+ reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
+ return result;
+}
+
+
+// Allocate and fully initialize a fresh map with the given instance
+// type and size.  Prototype and constructor start as null; descriptors
+// and code cache start empty.
+Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
+ Object* result = AllocateRawMap(Map::kSize);
+ if (result->IsFailure()) return result;
+
+ Map* map = reinterpret_cast<Map*>(result);
+ map->set_map(meta_map());
+ map->set_instance_type(instance_type);
+ map->set_prototype(null_value());
+ map->set_constructor(null_value());
+ map->set_instance_size(instance_size);
+ map->set_instance_descriptors(DescriptorArray::cast(empty_fixed_array()));
+ map->set_code_cache(empty_fixed_array());
+ map->set_unused_property_fields(0);
+ map->set_bit_field(0);
+ return map;
+}
+
+
+// Bootstrap the map hierarchy.  First create partial maps for the meta
+// map, fixed arrays, and oddballs; allocate the empty fixed array and
+// the null value with them; patch the partial maps' descriptor, code
+// cache, prototype, and constructor fields; then allocate all remaining
+// well-known maps with the fully functional AllocateMap.  The statement
+// order is load-bearing.  Returns false on any allocation failure.
+bool Heap::CreateInitialMaps() {
+ Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
+ if (obj->IsFailure()) return false;
+
+ // Map::cast cannot be used due to uninitialized map field.
+ meta_map_ = reinterpret_cast<Map*>(obj);
+ meta_map()->set_map(meta_map());
+
+ obj = AllocatePartialMap(FIXED_ARRAY_TYPE, Array::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ fixed_array_map_ = Map::cast(obj);
+
+ obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
+ if (obj->IsFailure()) return false;
+ oddball_map_ = Map::cast(obj);
+
+ // Allocate the empty array
+ obj = AllocateEmptyFixedArray();
+ if (obj->IsFailure()) return false;
+ empty_fixed_array_ = FixedArray::cast(obj);
+
+ obj = Allocate(oddball_map(), CODE_SPACE);
+ if (obj->IsFailure()) return false;
+ null_value_ = obj;
+
+ // Fix the instance_descriptors for the existing maps.
+ DescriptorArray* empty_descriptors =
+ DescriptorArray::cast(empty_fixed_array());
+
+ meta_map()->set_instance_descriptors(empty_descriptors);
+ meta_map()->set_code_cache(empty_fixed_array());
+
+ fixed_array_map()->set_instance_descriptors(empty_descriptors);
+ fixed_array_map()->set_code_cache(empty_fixed_array());
+
+ oddball_map()->set_instance_descriptors(empty_descriptors);
+ oddball_map()->set_code_cache(empty_fixed_array());
+
+ // Fix prototype object for existing maps.
+ meta_map()->set_prototype(null_value());
+ meta_map()->set_constructor(null_value());
+
+ fixed_array_map()->set_prototype(null_value());
+ fixed_array_map()->set_constructor(null_value());
+ oddball_map()->set_prototype(null_value());
+ oddball_map()->set_constructor(null_value());
+
+ obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
+ if (obj->IsFailure()) return false;
+ heap_number_map_ = Map::cast(obj);
+
+ obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
+ if (obj->IsFailure()) return false;
+ proxy_map_ = Map::cast(obj);
+
+#define ALLOCATE_STRING_MAP(type, size, name) \
+ obj = AllocateMap(type, size); \
+ if (obj->IsFailure()) return false; \
+ name##_map_ = Map::cast(obj);
+ STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
+#undef ALLOCATE_STRING_MAP
+
+ // Undetectable string maps: allocated separately and flagged so.
+ obj = AllocateMap(SHORT_STRING_TYPE, TwoByteString::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ undetectable_short_string_map_ = Map::cast(obj);
+ undetectable_short_string_map_->set_is_undetectable();
+
+ obj = AllocateMap(MEDIUM_STRING_TYPE, TwoByteString::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ undetectable_medium_string_map_ = Map::cast(obj);
+ undetectable_medium_string_map_->set_is_undetectable();
+
+ obj = AllocateMap(LONG_STRING_TYPE, TwoByteString::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ undetectable_long_string_map_ = Map::cast(obj);
+ undetectable_long_string_map_->set_is_undetectable();
+
+ obj = AllocateMap(SHORT_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ undetectable_short_ascii_string_map_ = Map::cast(obj);
+ undetectable_short_ascii_string_map_->set_is_undetectable();
+
+ obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ undetectable_medium_ascii_string_map_ = Map::cast(obj);
+ undetectable_medium_ascii_string_map_->set_is_undetectable();
+
+ obj = AllocateMap(LONG_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ undetectable_long_ascii_string_map_ = Map::cast(obj);
+ undetectable_long_ascii_string_map_->set_is_undetectable();
+
+ obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ byte_array_map_ = Map::cast(obj);
+
+ obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ code_map_ = Map::cast(obj);
+
+ obj = AllocateMap(FILLER_TYPE, kPointerSize);
+ if (obj->IsFailure()) return false;
+ one_word_filler_map_ = Map::cast(obj);
+
+ obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
+ if (obj->IsFailure()) return false;
+ two_word_filler_map_ = Map::cast(obj);
+
+#define ALLOCATE_STRUCT_MAP(NAME, Name, name) \
+ obj = AllocateMap(NAME##_TYPE, Name::kSize); \
+ if (obj->IsFailure()) return false; \
+ name##_map_ = Map::cast(obj);
+ STRUCT_LIST(ALLOCATE_STRUCT_MAP)
+#undef ALLOCATE_STRUCT_MAP
+
+ obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
+ if (obj->IsFailure()) return false;
+ hash_table_map_ = Map::cast(obj);
+
+ obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
+ if (obj->IsFailure()) return false;
+ context_map_ = Map::cast(obj);
+
+ obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
+ if (obj->IsFailure()) return false;
+ global_context_map_ = Map::cast(obj);
+
+ obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ if (obj->IsFailure()) return false;
+ boilerplate_function_map_ = Map::cast(obj);
+
+ obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
+ if (obj->IsFailure()) return false;
+ shared_function_info_map_ = Map::cast(obj);
+
+ return true;
+}
+
+
+// Allocate a heap number holding value.  Tenured numbers are placed in
+// the code space (used here for pointer-free objects), others in new
+// space.
+Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate heap numbers in paged
+ // spaces.
+ STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+ AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+ Object* result = AllocateRaw(HeapNumber::kSize, space);
+ if (result->IsFailure()) return result;
+
+ HeapObject::cast(result)->set_map(heap_number_map());
+ HeapNumber::cast(result)->set_value(value);
+ return result;
+}
+
+
+// Fast-path heap number allocation directly in new space, for callers
+// that need no pretenuring decision.
+Object* Heap::AllocateHeapNumber(double value) {
+ // This version of AllocateHeapNumber is optimized for
+ // allocation in new space.
+ STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+ ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ Object* result = new_space_->AllocateRaw(HeapNumber::kSize);
+ if (result->IsFailure()) return result;
+ HeapObject::cast(result)->set_map(heap_number_map());
+ HeapNumber::cast(result)->set_value(value);
+ return result;
+}
+
+
+// Allocate an oddball with the given map in the code space and
+// initialize its to_string/to_number fields.
+Object* Heap::CreateOddball(Map* map,
+ const char* to_string,
+ Object* to_number) {
+ Object* obj = Allocate(map, CODE_SPACE);
+ if (obj->IsFailure()) return obj;
+ return Oddball::cast(obj)->Initialize(to_string, to_number);
+}
+
+
+// Create the internal JSObjects used by the API implementation: the
+// message listener list and the debug event listener list.  Each is a
+// JSObject with the bare 'neander' map holding a 2-element array whose
+// slot 0 is initialized to Smi 0 (presumably a count; verify against
+// the users of these lists).  Returns false on allocation failure.
+bool Heap::CreateApiObjects() {
+ Object* obj;
+
+ obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (obj->IsFailure()) return false;
+ neander_map_ = Map::cast(obj);
+
+ obj = Heap::AllocateJSObjectFromMap(neander_map_);
+ if (obj->IsFailure()) return false;
+ Object* elements = AllocateFixedArray(2);
+ if (elements->IsFailure()) return false;
+ FixedArray::cast(elements)->set(0, Smi::FromInt(0));
+ JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
+ message_listeners_ = JSObject::cast(obj);
+
+ obj = Heap::AllocateJSObjectFromMap(neander_map_);
+ if (obj->IsFailure()) return false;
+ elements = AllocateFixedArray(2);
+ if (elements->IsFailure()) return false;
+ FixedArray::cast(elements)->set(0, Smi::FromInt(0));
+ JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
+ debug_event_listeners_ = JSObject::cast(obj);
+
+ return true;
+}
+
+// Generate the fixed stubs (C entry, debug break, JS entry, JS
+// construct entry) and store their code objects as roots.
+void Heap::CreateFixedStubs() {
+ // Here we create roots for fixed stubs. They are needed at GC
+ // for cooking and uncooking (check out frames.cc).
+ // This eliminates the need for doing dictionary lookup in the
+ // stub cache for these stubs.
+ HandleScope scope;
+ {
+ CEntryStub stub;
+ c_entry_code_ = *stub.GetCode();
+ }
+ {
+ CEntryDebugBreakStub stub;
+ c_entry_debug_break_code_ = *stub.GetCode();
+ }
+ {
+ JSEntryStub stub;
+ js_entry_code_ = *stub.GetCode();
+ }
+ {
+ JSConstructEntryStub stub;
+ js_construct_entry_code_ = *stub.GetCode();
+ }
+}
+
+
+// Create the well-known heap objects: canonical numbers (-0, NaN,
+// infinities, max/min), the oddballs (undefined, true, false, hole),
+// the empty string, the predefined symbols, and the various caches.
+// Statement order matters (e.g. -0 before NumberFromDouble works).
+// Returns false on any allocation failure.
+bool Heap::CreateInitialObjects() {
+ Object* obj;
+
+ // The -0 value must be set before NumberFromDouble works.
+ obj = AllocateHeapNumber(-0.0, TENURED);
+ if (obj->IsFailure()) return false;
+ minus_zero_value_ = obj;
+ ASSERT(signbit(minus_zero_value_->Number()) != 0);
+
+ obj = AllocateHeapNumber(OS::nan_value(), TENURED);
+ if (obj->IsFailure()) return false;
+ nan_value_ = obj;
+
+ obj = NumberFromDouble(INFINITY, TENURED);
+ if (obj->IsFailure()) return false;
+ infinity_value_ = obj;
+
+ obj = NumberFromDouble(-INFINITY, TENURED);
+ if (obj->IsFailure()) return false;
+ negative_infinity_value_ = obj;
+
+ obj = NumberFromDouble(DBL_MAX, TENURED);
+ if (obj->IsFailure()) return false;
+ number_max_value_ = obj;
+
+ // C++ doesn't provide a constant for the smallest denormalized
+ // double (approx. 5e-324) but only the smallest normalized one
+ // which is somewhat bigger (approx. 2e-308). So we have to do
+ // this raw conversion hack.
+ uint64_t min_value_bits = 1L;
+ double min_value = *reinterpret_cast<double*>(&min_value_bits);
+ obj = NumberFromDouble(min_value, TENURED);
+ if (obj->IsFailure()) return false;
+ number_min_value_ = obj;
+
+ obj = Allocate(oddball_map(), CODE_SPACE);
+ if (obj->IsFailure()) return false;
+ undefined_value_ = obj;
+ ASSERT(!InNewSpace(undefined_value()));
+
+ // Allocate initial symbol table.
+ obj = SymbolTable::Allocate(kInitialSymbolTableSize);
+ if (obj->IsFailure()) return false;
+ symbol_table_ = obj;
+
+ // Assign the print strings for oddballs after creating the symbol table.
+ Object* symbol = LookupAsciiSymbol("undefined");
+ if (symbol->IsFailure()) return false;
+ Oddball::cast(undefined_value_)->set_to_string(String::cast(symbol));
+ Oddball::cast(undefined_value_)->set_to_number(nan_value_);
+
+ // Assign the print strings for oddballs after creating the symbol table.
+ symbol = LookupAsciiSymbol("null");
+ if (symbol->IsFailure()) return false;
+ Oddball::cast(null_value_)->set_to_string(String::cast(symbol));
+ Oddball::cast(null_value_)->set_to_number(Smi::FromInt(0));
+
+ // Initialize the null value (already allocated in CreateInitialMaps).
+ obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
+ if (obj->IsFailure()) return false;
+
+ obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
+ if (obj->IsFailure()) return false;
+ true_value_ = obj;
+
+ obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
+ if (obj->IsFailure()) return false;
+ false_value_ = obj;
+
+ obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
+ if (obj->IsFailure()) return false;
+ the_hole_value_ = obj;
+
+ // Allocate the empty string.
+ obj = AllocateRawAsciiString(0, TENURED);
+ if (obj->IsFailure()) return false;
+ empty_string_ = String::cast(obj);
+
+#define SYMBOL_INITIALIZE(name, string) \
+ obj = LookupAsciiSymbol(string); \
+ if (obj->IsFailure()) return false; \
+ (name##_) = String::cast(obj);
+ SYMBOL_LIST(SYMBOL_INITIALIZE)
+#undef SYMBOL_INITIALIZE
+
+ // Allocate the proxy for __proto__.
+ obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
+ if (obj->IsFailure()) return false;
+ prototype_accessors_ = Proxy::cast(obj);
+
+ // Allocate the code_stubs dictionary.
+ obj = Dictionary::Allocate(4);
+ if (obj->IsFailure()) return false;
+ code_stubs_ = Dictionary::cast(obj);
+
+ // Allocate the non_monomorphic_cache used in stub-cache.cc
+ obj = Dictionary::Allocate(4);
+ if (obj->IsFailure()) return false;
+ non_monomorphic_cache_ = Dictionary::cast(obj);
+
+ CreateFixedStubs();
+
+ // Allocate the number->string conversion cache
+ obj = AllocateFixedArray(kNumberStringCacheSize * 2);
+ if (obj->IsFailure()) return false;
+ number_string_cache_ = FixedArray::cast(obj);
+
+ // Allocate cache for single character strings.
+ obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
+ if (obj->IsFailure()) return false;
+ single_character_string_cache_ = FixedArray::cast(obj);
+
+ // Allocate cache for external strings pointing to native source code.
+ obj = AllocateFixedArray(Natives::GetBuiltinsCount());
+ if (obj->IsFailure()) return false;
+ natives_source_cache_ = FixedArray::cast(obj);
+
+ return true;
+}
+
+
+// Hash a double into the number-string cache index range by xor-ing
+// the low and high 32-bit halves of its bit pattern.
+static inline int double_get_hash(double d) {
+ DoubleRepresentation rep(d);
+ int low = static_cast<int>(rep.bits);
+ int high = static_cast<int>(rep.bits >> 32);
+ return (low ^ high) & (Heap::kNumberStringCacheSize - 1);
+}
+
+
+// Hash a small integer into the number-string cache index range.
+static inline int smi_get_hash(Smi* smi) {
+ return smi->value() & (Heap::kNumberStringCacheSize - 1);
+}
+
+
+
+// Look up 'number' in the number->string cache.  Returns the cached
+// string, or undefined on a miss.  Smi keys match by identity; heap
+// number keys match by double value.
+Object* Heap::GetNumberStringCache(Object* number) {
+ int hash;
+ if (number->IsSmi()) {
+ hash = smi_get_hash(Smi::cast(number));
+ } else {
+ hash = double_get_hash(number->Number());
+ }
+ // The cache stores (key, string) pairs at indices (hash*2, hash*2+1).
+ Object* key = number_string_cache_->get(hash * 2);
+ if (key == number) {
+ return String::cast(number_string_cache_->get(hash * 2 + 1));
+ } else if (key->IsHeapNumber() &&
+ number->IsHeapNumber() &&
+ key->Number() == number->Number()) {
+ return String::cast(number_string_cache_->get(hash * 2 + 1));
+ }
+ return undefined_value();
+}
+
+
+// Store the number->string mapping in the cache, overwriting whatever
+// occupies the hash slot.  Smi keys need no write barrier.
+void Heap::SetNumberStringCache(Object* number, String* string) {
+ int hash;
+ if (number->IsSmi()) {
+ hash = smi_get_hash(Smi::cast(number));
+ number_string_cache_->set(hash * 2, number, FixedArray::SKIP_WRITE_BARRIER);
+ } else {
+ hash = double_get_hash(number->Number());
+ number_string_cache_->set(hash * 2, number);
+ }
+ number_string_cache_->set(hash * 2 + 1, string);
+}
+
+
+// Convert a double to a Smi when exactly representable, otherwise to a
+// heap number.  -0.0 and NaN are detected by bit pattern; when
+// new_object is false the canonical preallocated -0.0/NaN objects are
+// returned instead of fresh allocations.
+Object* Heap::SmiOrNumberFromDouble(double value,
+ bool new_object,
+ PretenureFlag pretenure) {
+ // We need to distinguish the minus zero value and this cannot be
+ // done after conversion to int. Doing this by comparing bit
+ // patterns is faster than using fpclassify() et al.
+ static const DoubleRepresentation plus_zero(0.0);
+ static const DoubleRepresentation minus_zero(-0.0);
+ static const DoubleRepresentation nan(OS::nan_value());
+ ASSERT(minus_zero_value_ != NULL);
+ ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
+
+ DoubleRepresentation rep(value);
+ if (rep.bits == plus_zero.bits) return Smi::FromInt(0); // not uncommon
+ if (rep.bits == minus_zero.bits) {
+ return new_object ? AllocateHeapNumber(-0.0, pretenure)
+ : minus_zero_value_;
+ }
+ if (rep.bits == nan.bits) {
+ return new_object
+ ? AllocateHeapNumber(OS::nan_value(), pretenure)
+ : nan_value_;
+ }
+
+ // Try to represent the value as a tagged small integer.
+ int int_value = FastD2I(value);
+ if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+ return Smi::FromInt(int_value);
+ }
+
+ // Materialize the value in the heap.
+ return AllocateHeapNumber(value, pretenure);
+}
+
+
+// Return a number for 'value', always allocating a fresh heap number
+// when one is needed (never one of the canonical preallocated ones).
+Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
+ const bool force_new_object = true; // number object must be new
+ return SmiOrNumberFromDouble(value, force_new_object, pretenure);
+}
+
+
+// Return a number for 'value', reusing the canonical preallocated NaN
+// and -0.0 objects where possible.
+Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
+ const bool force_new_object = false; // use preallocated NaN, -0.0
+ return SmiOrNumberFromDouble(value, force_new_object, pretenure);
+}
+
+
+// Allocate a Proxy object wrapping the given raw address.  Tenured
+// proxies go to old space, others to new space.
+Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate proxies in paged spaces.
+ STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
+ AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+ Object* result = Allocate(proxy_map(), space);
+ if (result->IsFailure()) return result;
+
+ Proxy::cast(result)->set_proxy(proxy);
+ return result;
+}
+
+
+// Allocate a SharedFunctionInfo in new space, initialized with the
+// given name, the Illegal builtin as its code, and zero/undefined
+// defaults for the remaining fields.
+Object* Heap::AllocateSharedFunctionInfo(Object* name) {
+ Object* result = Allocate(shared_function_info_map(), NEW_SPACE);
+ if (result->IsFailure()) return result;
+
+ SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
+ share->set_name(name);
+ Code* illegal = Builtins::builtin(Builtins::Illegal);
+ share->set_code(illegal);
+ share->set_expected_nof_properties(0);
+ share->set_length(0);
+ share->set_formal_parameter_count(0);
+ share->set_instance_class_name(Object_symbol());
+ share->set_function_data(undefined_value());
+ share->set_lazy_load_data(undefined_value());
+ share->set_script(undefined_value());
+ share->set_start_position_and_type(0);
+ share->set_debug_info(undefined_value());
+ return result;
+}
+
+
+// Concatenate two strings.  Short results are flattened into a fresh
+// sequential string; longer results get a ConsString whose map is
+// chosen by length class and ascii-ness.
+Object* Heap::AllocateConsString(String* first, String* second) {
+ int length = first->length() + second->length();
+ bool is_ascii = first->is_ascii() && second->is_ascii();
+
+ // If the resulting string is small make a flat string.
+ if (length < ConsString::kMinLength) {
+ Object* result = is_ascii
+ ? AllocateRawAsciiString(length)
+ : AllocateRawTwoByteString(length);
+ if (result->IsFailure()) return result;
+ // Copy the characters into the new object.
+ String* string_result = String::cast(result);
+ int first_length = first->length();
+ // Copy the content of the first string.
+ for (int i = 0; i < first_length; i++) {
+ string_result->Set(i, first->Get(i));
+ }
+ int second_length = second->length();
+ // Copy the content of the second string.
+ for (int i = 0; i < second_length; i++) {
+ string_result->Set(first_length + i, second->Get(i));
+ }
+ return result;
+ }
+
+ Map* map;
+ if (length <= String::kMaxShortStringSize) {
+ map = is_ascii ? short_cons_ascii_string_map()
+ : short_cons_string_map();
+ } else if (length <= String::kMaxMediumStringSize) {
+ map = is_ascii ? medium_cons_ascii_string_map()
+ : medium_cons_string_map();
+ } else {
+ map = is_ascii ? long_cons_ascii_string_map()
+ : long_cons_string_map();
+ }
+
+ Object* result = Allocate(map, NEW_SPACE);
+ if (result->IsFailure()) return result;
+
+ ConsString* cons_string = ConsString::cast(result);
+ cons_string->set_first(first);
+ cons_string->set_second(second);
+ cons_string->set_length(length);
+
+ return result;
+}
+
+
+// Make a string for the range [start, end) of 'buffer'.  Short ranges
+// are copied out via AllocateSubString; longer ones get a SlicedString
+// that shares the buffer, with the map chosen by length class and
+// ascii-ness.
+Object* Heap::AllocateSlicedString(String* buffer, int start, int end) {
+ int length = end - start;
+
+ // If the resulting string is small make a sub string.
+ if (end - start <= SlicedString::kMinLength) {
+ return Heap::AllocateSubString(buffer, start, end);
+ }
+
+ Map* map;
+ if (length <= String::kMaxShortStringSize) {
+ map = buffer->is_ascii() ? short_sliced_ascii_string_map()
+ : short_sliced_string_map();
+ } else if (length <= String::kMaxMediumStringSize) {
+ map = buffer->is_ascii() ? medium_sliced_ascii_string_map()
+ : medium_sliced_string_map();
+ } else {
+ map = buffer->is_ascii() ? long_sliced_ascii_string_map()
+ : long_sliced_string_map();
+ }
+
+ Object* result = Allocate(map, NEW_SPACE);
+ if (result->IsFailure()) return result;
+
+ SlicedString* sliced_string = SlicedString::cast(result);
+ sliced_string->set_buffer(buffer);
+ sliced_string->set_start(start);
+ sliced_string->set_length(length);
+
+ return result;
+}
+
+
+// Copy the characters [start, end) of 'buffer' into a fresh sequential
+// string of matching ascii-ness.
+Object* Heap::AllocateSubString(String* buffer, int start, int end) {
+ int length = end - start;
+
+ // Make an attempt to flatten the buffer to reduce access time.
+ buffer->TryFlatten();
+
+ Object* result = buffer->is_ascii()
+ ? AllocateRawAsciiString(length)
+ : AllocateRawTwoByteString(length);
+ if (result->IsFailure()) return result;
+
+ // Copy the characters into the new object.
+ String* string_result = String::cast(result);
+ for (int i = 0; i < length; i++) {
+ string_result->Set(i, buffer->Get(start + i));
+ }
+ return result;
+}
+
+
+// Allocate an external ascii string backed by the given resource; the
+// map is chosen by the resource's length class.
+Object* Heap::AllocateExternalStringFromAscii(
+ ExternalAsciiString::Resource* resource) {
+ Map* map;
+ int length = resource->length();
+ if (length <= String::kMaxShortStringSize) {
+ map = short_external_ascii_string_map();
+ } else if (length <= String::kMaxMediumStringSize) {
+ map = medium_external_ascii_string_map();
+ } else {
+ map = long_external_ascii_string_map();
+ }
+
+ Object* result = Allocate(map, NEW_SPACE);
+ if (result->IsFailure()) return result;
+
+ ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
+ external_string->set_length(length);
+ external_string->set_resource(resource);
+
+ return result;
+}
+
+
+// Allocate an external two-byte string backed by the given resource;
+// the map is chosen by the resource's length class.
+Object* Heap::AllocateExternalStringFromTwoByte(
+ ExternalTwoByteString::Resource* resource) {
+ Map* map;
+ int length = resource->length();
+ if (length <= String::kMaxShortStringSize) {
+ map = short_external_string_map();
+ } else if (length <= String::kMaxMediumStringSize) {
+ map = medium_external_string_map();
+ } else {
+ map = long_external_string_map();
+ }
+
+ Object* result = Allocate(map, NEW_SPACE);
+ if (result->IsFailure()) return result;
+
+ ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
+ external_string->set_length(length);
+ external_string->set_resource(resource);
+
+ return result;
+}
+
+
+// Return a one-character string for 'code'.  Ascii codes are served
+// from (and added to) the single character string cache; other codes
+// get a fresh uncached two-byte string.
+Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
+ if (code > String::kMaxAsciiCharCode) {
+ // Non-ascii character: allocate an uncached two-byte string.
+ Object* string = Heap::AllocateRawTwoByteString(1);
+ if (string->IsFailure()) return string;
+ String::cast(string)->Set(0, code);
+ return string;
+ }
+
+ FixedArray* cache = Heap::single_character_string_cache();
+ Object* cached = cache->get(code);
+ if (cached != Heap::undefined_value()) return cached;
+
+ Object* string = Heap::AllocateRawAsciiString(1);
+ if (string->IsFailure()) return string;
+ String::cast(string)->Set(0, code);
+ cache->set(code, string);
+ return string;
+}
+
+
+// Allocate a byte array of the given length; arrays too big for a
+// page go to the large object space, others to new space.
+Object* Heap::AllocateByteArray(int length) {
+ int size = ByteArray::SizeFor(length);
+ AllocationSpace space = size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
+
+ Object* result = AllocateRaw(size, space);
+ if (result->IsFailure()) return result;
+
+ reinterpret_cast<Array*>(result)->set_map(byte_array_map());
+ reinterpret_cast<Array*>(result)->set_length(length);
+ return result;
+}
+
+
+// Allocate and initialize a Code object from the assembler's CodeDesc
+// and optional scope info, copy and relocate the generated
+// instructions into it, and flush the instruction cache.
+Object* Heap::CreateCode(const CodeDesc& desc,
+ ScopeInfo<>* sinfo,
+ Code::Flags flags) {
+ // Compute size
+ int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
+ int sinfo_size = 0;
+ if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
+ int obj_size = Code::SizeFor(body_size, sinfo_size);
+ AllocationSpace space =
+ (obj_size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
+
+ Object* result = AllocateRaw(obj_size, space);
+ if (result->IsFailure()) return result;
+
+ // Initialize the object
+ HeapObject::cast(result)->set_map(code_map());
+ Code* code = Code::cast(result);
+ code->set_instruction_size(desc.instr_size);
+ code->set_relocation_size(desc.reloc_size);
+ code->set_sinfo_size(sinfo_size);
+ code->set_flags(flags);
+ code->set_ic_flag(Code::IC_TARGET_IS_ADDRESS);
+ code->CopyFrom(desc); // migrate generated code
+ if (sinfo != NULL) sinfo->Serialize(code); // write scope info
+
+#ifdef DEBUG
+ code->Verify();
+#endif
+
+ CPU::FlushICache(code->instruction_start(), code->instruction_size());
+
+ return code;
+}
+
+
+// Allocate a byte-for-byte copy of a code object, relocate the copy to
+// its new address, and flush the instruction cache for it.
+Object* Heap::CopyCode(Code* code) {
+ // Allocate an object the same size as the code object.
+ int obj_size = code->Size();
+ AllocationSpace space =
+ (obj_size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
+ Object* result = AllocateRaw(obj_size, space);
+ if (result->IsFailure()) return result;
+
+ // Copy code object.
+ Address old_addr = code->address();
+ Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
+ memcpy(new_addr, old_addr, obj_size);
+
+ // Relocate the copy.
+ Code* new_code = Code::cast(result);
+ new_code->Relocate(new_addr - old_addr);
+
+ CPU::FlushICache(new_code->instruction_start(), new_code->instruction_size());
+
+ return new_code;
+}
+
+
+// Allocate an object of the size dictated by 'map' in the given space
+// and install the map.  Maps themselves must be allocated with
+// AllocateMap instead (asserted).
+Object* Heap::Allocate(Map* map, AllocationSpace space) {
+ ASSERT(gc_state_ == NOT_IN_GC);
+ ASSERT(map->instance_type() != MAP_TYPE);
+ Object* obj = AllocateRaw(map->instance_size(), space);
+ if (obj->IsFailure()) return obj;
+ HeapObject::cast(obj)->set_map(map);
+ return obj;
+}
+
+
+// Initialize a freshly allocated JSFunction: empty properties,
+// elements, and literals; undefined context; the given shared info and
+// prototype (or initial map).
+Object* Heap::InitializeFunction(JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype) {
+ ASSERT(!prototype->IsMap());
+ function->initialize_properties();
+ function->initialize_elements();
+ function->set_shared(shared);
+ function->set_prototype_or_initial_map(prototype);
+ function->set_context(undefined_value());
+ function->set_literals(empty_fixed_array());
+ return function;
+}
+
+
+// Allocate the default prototype object for 'function': a plain
+// JSObject whose non-enumerable 'constructor' property points back to
+// the function.
+Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
+ // Allocate the prototype.
+ Object* prototype =
+ AllocateJSObject(Top::context()->global_context()->object_function());
+ if (prototype->IsFailure()) return prototype;
+ // When creating the prototype for the function we must set its
+ // constructor to the function.
+ Object* result =
+ JSObject::cast(prototype)->SetProperty(constructor_symbol(),
+ function,
+ DONT_ENUM);
+ if (result->IsFailure()) return result;
+ return prototype;
+}
+
+
+// Allocate a JSFunction in old space with the given map and initialize
+// it from the shared function info and prototype.
+Object* Heap::AllocateFunction(Map* function_map,
+                               SharedFunctionInfo* shared,
+                               Object* prototype) {
+  Object* allocation = Allocate(function_map, OLD_SPACE);
+  if (allocation->IsFailure()) return allocation;
+  return InitializeFunction(JSFunction::cast(allocation), shared, prototype);
+}
+
+
+// Allocate an arguments object for |callee| with room for |length|
+// elements by copying the pre-built arguments boilerplate object.
+Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
+  // This allocation is odd since allocate an argument object
+  // based on the arguments_boilerplate.
+  // We do this to ensure fast allocation and map sharing.
+
+  // This calls Copy directly rather than using Heap::AllocateRaw so we
+  // duplicate the check here.
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+
+  JSObject* boilerplate =
+      Top::context()->global_context()->arguments_boilerplate();
+  Object* result = boilerplate->Copy();
+  if (result->IsFailure()) return result;
+
+  // Fill in the callee and length slots of the copy's property array.
+  // NOTE(review): this assumes Copy() gave the copy its own properties
+  // array (not shared with the boilerplate) — confirm, or these writes
+  // would clobber the boilerplate.
+  Object* obj = JSObject::cast(result)->properties();
+  FixedArray::cast(obj)->set(arguments_callee_index, callee);
+  FixedArray::cast(obj)->set(arguments_length_index, Smi::FromInt(length));
+
+  // Allocate the fixed array.
+  obj = Heap::AllocateFixedArray(length);
+  if (obj->IsFailure()) return obj;
+  JSObject::cast(result)->set_elements(FixedArray::cast(obj));
+
+  // Check the state of the object
+  ASSERT(JSObject::cast(result)->HasFastProperties());
+  ASSERT(JSObject::cast(result)->HasFastElements());
+
+  return result;
+}
+
+
+// Create the initial map for objects constructed by |fun|, using the
+// function's existing instance prototype or allocating a fresh one.
+Object* Heap::AllocateInitialMap(JSFunction* fun) {
+  ASSERT(!fun->has_initial_map());
+
+  // First create a new map.
+  Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+  if (map_obj->IsFailure()) return map_obj;
+
+  // Fetch or allocate prototype.
+  Object* prototype;
+  if (fun->has_instance_prototype()) {
+    prototype = fun->instance_prototype();
+  } else {
+    prototype = AllocateFunctionPrototype(fun);
+    if (prototype->IsFailure()) return prototype;
+  }
+  Map* map = Map::cast(map_obj);
+  // Reserve property slots based on the parser's estimate for this function.
+  map->set_unused_property_fields(fun->shared()->expected_nof_properties());
+  map->set_prototype(prototype);
+  return map;
+}
+
+
+// Set up a freshly allocated JSObject whose map is already installed:
+// attach the properties backing store, empty elements, and fill the
+// in-object body.
+void Heap::InitializeJSObjectFromMap(JSObject* obj,
+                                     FixedArray* properties,
+                                     Map* map) {
+  obj->set_properties(properties);
+  obj->initialize_elements();
+  // TODO(1240798): Initialize the object's body using valid initial values
+  // according to the object's initial map. For example, if the map's
+  // instance type is JS_ARRAY_TYPE, the length field should be initialized
+  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
+  // verification code has to cope with (temporarily) invalid objects. See
+  // for example, JSArray::JSArrayVerify).
+  obj->InitializeBody(map->instance_size());
+}
+
+
+// Allocate and initialize a JSObject described by |map|.  Pretenuring
+// selects old space; instances too large for a regular page go to the
+// large object space regardless.
+Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
+  // JSFunctions should be allocated using AllocateFunction to be
+  // properly initialized.
+  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
+
+  // Allocate the backing storage for the properties.
+  Object* properties = AllocatePropertyStorageForMap(map);
+  if (properties->IsFailure()) return properties;
+
+  // Allocate the JSObject.
+  AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+  if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
+  Object* obj = Allocate(map, space);
+  if (obj->IsFailure()) return obj;
+
+  // Initialize the JSObject.
+  InitializeJSObjectFromMap(JSObject::cast(obj),
+                            FixedArray::cast(properties),
+                            map);
+  return obj;
+}
+
+
+// Allocate a JSObject as if constructed by |constructor|, lazily creating
+// and caching the constructor's initial map on first use.
+Object* Heap::AllocateJSObject(JSFunction* constructor,
+                               PretenureFlag pretenure) {
+  // Allocate the initial map if absent.
+  if (!constructor->has_initial_map()) {
+    Object* initial_map = AllocateInitialMap(constructor);
+    if (initial_map->IsFailure()) return initial_map;
+    constructor->set_initial_map(Map::cast(initial_map));
+    Map::cast(initial_map)->set_constructor(constructor);
+  }
+  // Allocate the object based on the constructors initial map.
+  return AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
+}
+
+
+// Re-initialize an already-allocated global object in place from
+// |constructor|'s initial map.  The existing object must have the same
+// instance size as objects produced by the constructor.
+Object* Heap::ReinitializeJSGlobalObject(JSFunction* constructor,
+                                         JSGlobalObject* object) {
+  // Allocate initial map if absent.
+  if (!constructor->has_initial_map()) {
+    Object* initial_map = AllocateInitialMap(constructor);
+    if (initial_map->IsFailure()) return initial_map;
+    constructor->set_initial_map(Map::cast(initial_map));
+    Map::cast(initial_map)->set_constructor(constructor);
+  }
+
+  Map* map = constructor->initial_map();
+
+  // Check that the already allocated object has the same size as
+  // objects allocated using the constructor.
+  ASSERT(map->instance_size() == object->map()->instance_size());
+
+  // Allocate the backing storage for the properties.
+  Object* properties = AllocatePropertyStorageForMap(map);
+  if (properties->IsFailure()) return properties;
+
+  // Reset the map for the object.
+  object->set_map(constructor->initial_map());
+
+  // Reinitialize the object from the constructor map.
+  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
+  return object;
+}
+
+
+// Allocate an ASCII string and copy the given characters into it.
+Object* Heap::AllocateStringFromAscii(Vector<const char> string,
+                                      PretenureFlag pretenure) {
+  Object* result = AllocateRawAsciiString(string.length(), pretenure);
+  if (result->IsFailure()) return result;
+
+  // Copy the characters into the new object.
+  AsciiString* string_result = AsciiString::cast(result);
+  for (int i = 0; i < string.length(); i++) {
+    string_result->AsciiStringSet(i, string[i]);
+  }
+  return result;
+}
+
+
+// Allocate a string from UTF-8 input, choosing the ASCII representation
+// when every decoded character fits, otherwise a two-byte string.
+Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
+                                     PretenureFlag pretenure) {
+  // Count the number of characters in the UTF-8 string and check if
+  // it is an ASCII string.
+  Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
+  decoder->Reset(string.start(), string.length());
+  int chars = 0;
+  bool is_ascii = true;
+  while (decoder->has_more()) {
+    uc32 r = decoder->GetNext();
+    if (r > String::kMaxAsciiCharCode) is_ascii = false;
+    chars++;
+  }
+
+  // If the string is ascii, we do not need to convert the characters
+  // since UTF8 is backwards compatible with ascii.
+  if (is_ascii) return AllocateStringFromAscii(string, pretenure);
+
+  Object* result = AllocateRawTwoByteString(chars, pretenure);
+  if (result->IsFailure()) return result;
+
+  // Convert and copy the characters into the new object.  The decoder is
+  // reset so the second pass revisits the input from the start.
+  String* string_result = String::cast(result);
+  decoder->Reset(string.start(), string.length());
+  for (int i = 0; i < chars; i++) {
+    uc32 r = decoder->GetNext();
+    string_result->Set(i, r);
+  }
+  return result;
+}
+
+
+// Allocate a string from two-byte input, using the more compact ASCII
+// representation when every character fits in the ASCII range.
+Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
+                                        PretenureFlag pretenure) {
+  // Check if the string is an ASCII string: scan for the first character
+  // outside the ASCII range.
+  int i = 0;
+  while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
+
+  Object* result;
+  if (i == string.length()) {  // It's an ASCII string.
+    result = AllocateRawAsciiString(string.length(), pretenure);
+  } else {  // It's not an ASCII string.
+    result = AllocateRawTwoByteString(string.length(), pretenure);
+  }
+  if (result->IsFailure()) return result;
+
+  // Copy the characters into the new object, which may be either ASCII or
+  // UTF-16.  The copy index is named distinctly so it does not shadow the
+  // scan index above.
+  String* string_result = String::cast(result);
+  for (int pos = 0; pos < string.length(); pos++) {
+    string_result->Set(pos, string[pos]);
+  }
+  return result;
+}
+
+
+// Find the symbol map matching the given string's representation so the
+// string can be converted to a symbol in place.  Returns NULL when no
+// in-place conversion is possible (eg, the string is in new space).
+Map* Heap::SymbolMapForString(String* string) {
+  // If the string is in new space it cannot be used as a symbol.
+  if (InNewSpace(string)) return NULL;
+
+  // Find the corresponding symbol map for strings.
+  Map* map = string->map();
+
+  if (map == short_ascii_string_map()) return short_ascii_symbol_map();
+  if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
+  if (map == long_ascii_string_map()) return long_ascii_symbol_map();
+
+  if (map == short_string_map()) return short_symbol_map();
+  if (map == medium_string_map()) return medium_symbol_map();
+  if (map == long_string_map()) return long_symbol_map();
+
+  if (map == short_cons_string_map()) return short_cons_symbol_map();
+  if (map == medium_cons_string_map()) return medium_cons_symbol_map();
+  if (map == long_cons_string_map()) return long_cons_symbol_map();
+
+  if (map == short_cons_ascii_string_map()) {
+    return short_cons_ascii_symbol_map();
+  }
+  if (map == medium_cons_ascii_string_map()) {
+    return medium_cons_ascii_symbol_map();
+  }
+  if (map == long_cons_ascii_string_map()) {
+    return long_cons_ascii_symbol_map();
+  }
+
+  // Fixed: the medium and long sliced cases previously returned the
+  // *short* sliced symbol maps (copy-paste error), which would give the
+  // symbol a map whose length class disagrees with the string's length.
+  if (map == short_sliced_string_map()) return short_sliced_symbol_map();
+  if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
+  if (map == long_sliced_string_map()) return long_sliced_symbol_map();
+
+  if (map == short_sliced_ascii_string_map()) {
+    return short_sliced_ascii_symbol_map();
+  }
+  if (map == medium_sliced_ascii_string_map()) {
+    return medium_sliced_ascii_symbol_map();
+  }
+  if (map == long_sliced_ascii_string_map()) {
+    return long_sliced_ascii_symbol_map();
+  }
+
+  // NOTE(review): the external cases return the string map itself rather
+  // than a symbol map — presumably external strings double as symbols
+  // without a map change; confirm this is intentional before changing.
+  if (map == short_external_string_map()) return short_external_string_map();
+  if (map == medium_external_string_map()) return medium_external_string_map();
+  if (map == long_external_string_map()) return long_external_string_map();
+
+  if (map == short_external_ascii_string_map()) {
+    return short_external_ascii_string_map();
+  }
+  if (map == medium_external_ascii_string_map()) {
+    return medium_external_ascii_string_map();
+  }
+  if (map == long_external_ascii_string_map()) {
+    return long_external_ascii_string_map();
+  }
+
+  // No match found.
+  return NULL;
+}
+
+
+// Allocate a symbol from a character stream.  |chars| is the character
+// count (must match the buffer) and |hash| is the precomputed combined
+// hash/length field value.
+Object* Heap::AllocateSymbol(unibrow::CharacterStream* buffer,
+                             int chars,
+                             int hash) {
+  // Ensure the chars matches the number of characters in the buffer.
+  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
+  // Determine whether the string is ascii.
+  bool is_ascii = true;
+  while (buffer->has_more()) {
+    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
+  }
+  // The scan consumed the buffer; rewind for the copy pass below.
+  buffer->Rewind();
+
+  // Compute map and object size.
+  int size;
+  Map* map;
+
+  if (is_ascii) {
+    if (chars <= String::kMaxShortStringSize) {
+      map = short_ascii_symbol_map();
+    } else if (chars <= String::kMaxMediumStringSize) {
+      map = medium_ascii_symbol_map();
+    } else {
+      map = long_ascii_symbol_map();
+    }
+    size = AsciiString::SizeFor(chars);
+  } else {
+    if (chars <= String::kMaxShortStringSize) {
+      map = short_symbol_map();
+    } else if (chars <= String::kMaxMediumStringSize) {
+      map = medium_symbol_map();
+    } else {
+      map = long_symbol_map();
+    }
+    size = TwoByteString::SizeFor(chars);
+  }
+
+  // Allocate string.
+  // NOTE(review): symbols go to code space (or LO space when too large)
+  // rather than old space — presumably deliberate; confirm before changing.
+  AllocationSpace space = (size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
+  Object* result = AllocateRaw(size, space);
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<HeapObject*>(result)->set_map(map);
+  // The hash value contains the length of the string.
+  String::cast(result)->set_length_field(hash);
+
+  ASSERT_EQ(size, String::cast(result)->Size());
+
+  // Fill in the characters.
+  for (int i = 0; i < chars; i++) {
+    String::cast(result)->Set(i, buffer->GetNext());
+  }
+  return result;
+}
+
+
+// Allocate an uninitialized ASCII string of the given length; the caller
+// fills in the characters.  Only the map and length are set here.
+Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
+  // NOTE(review): tenured strings go to code space rather than old space —
+  // presumably deliberate in this allocator; confirm before changing.
+  AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+  int size = AsciiString::SizeFor(length);
+  if (size > MaxHeapObjectSize()) {
+    space = LO_SPACE;
+  }
+
+  // Use AllocateRaw rather than Allocate because the object's size cannot be
+  // determined from the map.
+  Object* result = AllocateRaw(size, space);
+  if (result->IsFailure()) return result;
+
+  // Determine the map based on the string's length.
+  Map* map;
+  if (length <= String::kMaxShortStringSize) {
+    map = short_ascii_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = medium_ascii_string_map();
+  } else {
+    map = long_ascii_string_map();
+  }
+
+  // Partially initialize the object.
+  HeapObject::cast(result)->set_map(map);
+  String::cast(result)->set_length(length);
+  ASSERT_EQ(size, HeapObject::cast(result)->Size());
+  return result;
+}
+
+
+// Allocate an uninitialized two-byte string of the given length; the
+// caller fills in the characters.  Only the map and length are set here.
+Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
+  // NOTE(review): tenured strings go to code space rather than old space —
+  // mirrors AllocateRawAsciiString; confirm before changing.
+  AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+  int size = TwoByteString::SizeFor(length);
+  if (size > MaxHeapObjectSize()) {
+    space = LO_SPACE;
+  }
+
+  // Use AllocateRaw rather than Allocate because the object's size cannot be
+  // determined from the map.
+  Object* result = AllocateRaw(size, space);
+  if (result->IsFailure()) return result;
+
+  // Determine the map based on the string's length.
+  Map* map;
+  if (length <= String::kMaxShortStringSize) {
+    map = short_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = medium_string_map();
+  } else {
+    map = long_string_map();
+  }
+
+  // Partially initialize the object.
+  HeapObject::cast(result)->set_map(map);
+  String::cast(result)->set_length(length);
+  ASSERT_EQ(size, HeapObject::cast(result)->Size());
+  return result;
+}
+
+
+// Allocate the canonical zero-length fixed array.
+// NOTE(review): placed in code space — presumably so it stays put;
+// confirm before changing the space.
+Object* Heap::AllocateEmptyFixedArray() {
+  int size = FixedArray::SizeFor(0);
+  Object* result = AllocateRaw(size, CODE_SPACE);
+  if (result->IsFailure()) return result;
+  // Initialize the object.
+  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
+  reinterpret_cast<Array*>(result)->set_length(0);
+  return result;
+}
+
+
+// Allocate a fixed array of the given length with all elements set to
+// undefined.  Length zero returns the shared empty fixed array.
+Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+  ASSERT(empty_fixed_array()->IsFixedArray());
+  if (length == 0) return empty_fixed_array();
+
+  int size = FixedArray::SizeFor(length);
+  Object* result;
+  if (size > MaxHeapObjectSize()) {
+    // Too big for a regular page; use the large object space allocator.
+    result = lo_space_->AllocateRawFixedArray(size);
+  } else {
+    AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+    result = AllocateRaw(size, space);
+  }
+  if (result->IsFailure()) return result;
+
+  // Initialize the object.
+  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
+  FixedArray* array = FixedArray::cast(result);
+  array->set_length(length);
+  for (int index = 0; index < length; index++) array->set_undefined(index);
+  return array;
+}
+
+
+// Allocate a fixed array of the given length with all elements set to
+// the hole value.  Length zero returns the shared empty fixed array.
+Object* Heap::AllocateFixedArrayWithHoles(int length) {
+  if (length == 0) return empty_fixed_array();
+  int size = FixedArray::SizeFor(length);
+  Object* result = size > MaxHeapObjectSize()
+      ? lo_space_->AllocateRawFixedArray(size)
+      : AllocateRaw(size, NEW_SPACE);
+  if (result->IsFailure()) return result;
+
+  // Initialize the object.
+  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
+  FixedArray* array = FixedArray::cast(result);
+  array->set_length(length);
+  for (int index = 0; index < length; index++) array->set_the_hole(index);
+  return array;
+}
+
+
+// Allocate the backing store for a hash table: a fixed array whose map
+// is the hash table map so it is recognized as a dictionary.
+Object* Heap::AllocateHashTable(int length) {
+  Object* table = Heap::AllocateFixedArray(length);
+  if (table->IsFailure()) return table;
+  reinterpret_cast<Array*>(table)->set_map(hash_table_map());
+  ASSERT(table->IsDictionary());
+  return table;
+}
+
+
+// Allocate an empty global context: a fixed array with the global
+// context map and GLOBAL_CONTEXT_SLOTS entries.
+Object* Heap::AllocateGlobalContext() {
+  Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
+  if (result->IsFailure()) return result;
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(global_context_map());
+  ASSERT(context->IsGlobalContext());
+  ASSERT(result->IsContext());
+  return result;
+}
+
+
+// Allocate a function context with |length| slots, closed over
+// |function|.  A function context is its own fcontext and has no
+// previous context or extension object.
+Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
+  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
+  Object* result = Heap::AllocateFixedArray(length);
+  if (result->IsFailure()) return result;
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(context_map());
+  context->set_closure(function);
+  context->set_fcontext(context);
+  context->set_previous(NULL);
+  context->set_extension(NULL);
+  context->set_global(function->context()->global());
+  ASSERT(!context->IsGlobalContext());
+  ASSERT(context->is_function_context());
+  ASSERT(result->IsContext());
+  return result;
+}
+
+
+// Allocate a 'with' context chained onto |previous| with |extension| as
+// the extension object; closure, fcontext and global are inherited.
+Object* Heap::AllocateWithContext(Context* previous, JSObject* extension) {
+  Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
+  if (result->IsFailure()) return result;
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(context_map());
+  context->set_closure(previous->closure());
+  context->set_fcontext(previous->fcontext());
+  context->set_previous(previous);
+  context->set_extension(extension);
+  context->set_global(previous->global());
+  ASSERT(!context->IsGlobalContext());
+  ASSERT(!context->is_function_context());
+  ASSERT(result->IsContext());
+  return result;
+}
+
+
+// Allocate a Struct instance of the given type, dispatching to the map
+// registered for each entry in STRUCT_LIST.
+Object* Heap::AllocateStruct(InstanceType type) {
+  Map* map;
+  switch (type) {
+#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
+STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+    default:
+      UNREACHABLE();
+      return Failure::InternalError();
+  }
+  int size = map->instance_size();
+  AllocationSpace space =
+      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_SPACE;
+  Object* result = Heap::Allocate(map, space);
+  if (result->IsFailure()) return result;
+  Struct::cast(result)->InitializeBody(size);
+  return result;
+}
+
+
+#ifdef DEBUG
+
+// Debug-only: print the current stack and the contents of every space.
+void Heap::Print() {
+  // Nothing to print before the spaces exist.
+  if (!HasBeenSetup()) return;
+  Top::PrintStack();
+  new_space_->Print();
+  old_space_->Print();
+  code_space_->Print();
+  map_space_->Print();
+  lo_space_->Print();
+}
+
+
+// Debug-only: collect and print statistics about code objects, scanning
+// only the spaces where code is normally allocated.
+void Heap::ReportCodeStatistics(const char* title) {
+  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
+  PagedSpace::ResetCodeStatistics();
+  // We do not look for code in new space, map space, or old space. If code
+  // somehow ends up in those spaces, we would miss it here.
+  code_space_->CollectCodeStatistics();
+  lo_space_->CollectCodeStatistics();
+  PagedSpace::ReportCodeStatistics();
+}
+
+
+// This function expects that NewSpace's allocated objects histogram is
+// populated (via a call to CollectStatistics or else as a side effect of a
+// just-completed scavenge collection).
+void Heap::ReportHeapStatistics(const char* title) {
+ USE(title);
+ PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
+ title, gc_count_);
+ PrintF("mark-compact GC : %d\n", mc_count_);
+ PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);
+
+ PrintF("\n");
+ PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
+ GlobalHandles::PrintStats();
+ PrintF("\n");
+
+ PrintF("Heap statistics : ");
+ MemoryAllocator::ReportStatistics();
+ PrintF("To space : ");
+ new_space_->ReportStatistics();
+ PrintF("Old space : ");
+ old_space_->ReportStatistics();
+ PrintF("Code space : ");
+ code_space_->ReportStatistics();
+ PrintF("Map space : ");
+ map_space_->ReportStatistics();
+ PrintF("Large object space : ");
+ lo_space_->ReportStatistics();
+ PrintF(">>>>>> ========================================= >>>>>>\n");
+}
+
+#endif // DEBUG
+
+// Convenience overload: does any heap space contain this object?
+bool Heap::Contains(HeapObject* value) {
+  Address addr = value->address();
+  return Contains(addr);
+}
+
+
+// Returns true when |addr| lies inside one of the heap's spaces.  For new
+// space only the currently-active to-space counts.
+bool Heap::Contains(Address addr) {
+  // Cheap rejection for addresses outside all memory the allocator owns.
+  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  return HasBeenSetup() &&
+    (new_space_->ToSpaceContains(addr) ||
+     old_space_->Contains(addr) ||
+     code_space_->Contains(addr) ||
+     map_space_->Contains(addr) ||
+     lo_space_->SlowContains(addr));
+}
+
+
+// Convenience overload: is the object's address within the given space?
+bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
+  Address addr = value->address();
+  return InSpace(addr, space);
+}
+
+
+// Returns true when |addr| lies inside the specific space requested.
+bool Heap::InSpace(Address addr, AllocationSpace space) {
+  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  if (!HasBeenSetup()) return false;
+
+  switch (space) {
+    case NEW_SPACE:
+      // Only the active (to) semispace counts as new space.
+      return new_space_->ToSpaceContains(addr);
+    case OLD_SPACE:
+      return old_space_->Contains(addr);
+    case CODE_SPACE:
+      return code_space_->Contains(addr);
+    case MAP_SPACE:
+      return map_space_->Contains(addr);
+    case LO_SPACE:
+      return lo_space_->SlowContains(addr);
+  }
+
+  return false;
+}
+
+
+#ifdef DEBUG
+// Debug-only: verify the roots and every space's internal invariants.
+void Heap::Verify() {
+  ASSERT(HasBeenSetup());
+
+  // Check that all root pointers reference valid heap objects.
+  VerifyPointersVisitor visitor;
+  Heap::IterateRoots(&visitor);
+
+  Heap::new_space_->Verify();
+  Heap::old_space_->Verify();
+  Heap::code_space_->Verify();
+  Heap::map_space_->Verify();
+  Heap::lo_space_->Verify();
+}
+#endif // DEBUG
+
+
+// Look up (or create) the symbol for the given characters.  The symbol
+// table may grow, in which case the new table replaces the old one.
+Object* Heap::LookupSymbol(Vector<const char> string) {
+  Object* symbol = NULL;
+  Object* new_table =
+      SymbolTable::cast(symbol_table_)->LookupSymbol(string, &symbol);
+  if (new_table->IsFailure()) return new_table;
+  symbol_table_ = new_table;
+  ASSERT(symbol != NULL);
+  return symbol;
+}
+
+
+// Look up (or create) the symbol equal to |string|.  Strings that are
+// already symbols are returned unchanged.
+Object* Heap::LookupSymbol(String* string) {
+  if (string->IsSymbol()) return string;
+  Object* symbol = NULL;
+  Object* new_table =
+      SymbolTable::cast(symbol_table_)->LookupString(string, &symbol);
+  if (new_table->IsFailure()) return new_table;
+  symbol_table_ = new_table;
+  ASSERT(symbol != NULL);
+  return symbol;
+}
+
+
+#ifdef DEBUG
+// Debug-only: overwrite all of from-space with a recognizable zap value
+// so stale pointers into from-space fail fast.
+void Heap::ZapFromSpace() {
+  // The zap value must look like a tagged heap object pointer.
+  ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
+  for (Address a = new_space_->FromSpaceLow();
+       a < new_space_->FromSpaceHigh();
+       a += kPointerSize) {
+    Memory::Address_at(a) = kFromSpaceZapValue;
+  }
+}
+#endif // DEBUG
+
+
+// Walk the remembered set covering [object_start, object_end): each bit
+// in the rset words corresponds to one pointer-sized slot.  Slots whose
+// bit is set and that point into from-space are passed to
+// |copy_object_func|; bits for slots that no longer point into to-space
+// are cleared.
+void Heap::IterateRSetRange(Address object_start,
+                            Address object_end,
+                            Address rset_start,
+                            ObjectSlotCallback copy_object_func) {
+  Address object_address = object_start;
+  Address rset_address = rset_start;
+
+  // Loop over all the pointers in [object_start, object_end).
+  while (object_address < object_end) {
+    uint32_t rset_word = Memory::uint32_at(rset_address);
+
+    if (rset_word != 0) {
+      // Bits were set.
+      uint32_t result_rset = rset_word;
+
+      // Loop over all the bits in the remembered set word. Though
+      // remembered sets are sparse, faster (eg, binary) search for
+      // set bits does not seem to help much here.
+      for (int bit_offset = 0; bit_offset < kBitsPerInt; bit_offset++) {
+        uint32_t bitmask = 1 << bit_offset;
+        // Do not dereference pointers at or past object_end.
+        if ((rset_word & bitmask) != 0 && object_address < object_end) {
+          Object** object_p = reinterpret_cast<Object**>(object_address);
+          if (Heap::InFromSpace(*object_p)) {
+            copy_object_func(reinterpret_cast<HeapObject**>(object_p));
+          }
+          // If this pointer does not need to be remembered anymore, clear
+          // the remembered set bit.
+          if (!Heap::InToSpace(*object_p)) result_rset &= ~bitmask;
+        }
+        object_address += kPointerSize;
+      }
+
+      // Update the remembered set if it has changed.
+      if (result_rset != rset_word) {
+        Memory::uint32_at(rset_address) = result_rset;
+      }
+    } else {
+      // No bits in the word were set. This is the common case.
+      // Skip the whole run of slots covered by this word.
+      object_address += kPointerSize * kBitsPerInt;
+    }
+
+    rset_address += kIntSize;
+  }
+}
+
+
+// Iterate the remembered set of every in-use page in |space|, invoking
+// |copy_object_func| on slots that point into from-space.
+void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
+  ASSERT(Page::is_rset_in_use());
+  // Only old space and map space keep remembered sets.
+  ASSERT(space == old_space_ || space == map_space_);
+
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* page = it.next();
+    IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
+                     page->RSetStart(), copy_object_func);
+  }
+}
+
+
+#ifdef DEBUG
+#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
+#else
+#define SYNCHRONIZE_TAG(tag)
+#endif
+
+// Visit all roots: the strong roots plus the (weakly treated elsewhere)
+// symbol table pointer.
+void Heap::IterateRoots(ObjectVisitor* v) {
+  IterateStrongRoots(v);
+  v->VisitPointer(reinterpret_cast<Object**>(&symbol_table_));
+  SYNCHRONIZE_TAG("symbol_table");
+}
+
+
+// Visit every strong root pointer: the generated root list, struct maps,
+// predefined symbols, and the roots held by the various subsystems.  The
+// SYNCHRONIZE_TAG markers let the (de)serializer check it stays in step.
+void Heap::IterateStrongRoots(ObjectVisitor* v) {
+#define ROOT_ITERATE(type, name) \
+  v->VisitPointer(reinterpret_cast<Object**>(&name##_));
+  STRONG_ROOT_LIST(ROOT_ITERATE);
+#undef ROOT_ITERATE
+  SYNCHRONIZE_TAG("strong_root_list");
+
+#define STRUCT_MAP_ITERATE(NAME, Name, name) \
+  v->VisitPointer(reinterpret_cast<Object**>(&name##_map_));
+  STRUCT_LIST(STRUCT_MAP_ITERATE);
+#undef STRUCT_MAP_ITERATE
+  SYNCHRONIZE_TAG("struct_map");
+
+#define SYMBOL_ITERATE(name, string) \
+  v->VisitPointer(reinterpret_cast<Object**>(&name##_));
+  SYMBOL_LIST(SYMBOL_ITERATE)
+#undef SYMBOL_ITERATE
+  SYNCHRONIZE_TAG("symbol");
+
+  Bootstrapper::Iterate(v);
+  SYNCHRONIZE_TAG("bootstrapper");
+  Top::Iterate(v);
+  SYNCHRONIZE_TAG("top");
+  Debug::Iterate(v);
+  SYNCHRONIZE_TAG("debug");
+
+  // Iterate over local handles in handle scopes.
+  HandleScopeImplementer::Iterate(v);
+  SYNCHRONIZE_TAG("handlescope");
+
+  // Iterate over the builtin code objects and code stubs in the heap. Note
+  // that it is not strictly necessary to iterate over code objects on
+  // scavenge collections. We still do it here because this same function
+  // is used by the mark-sweep collector and the deserializer.
+  Builtins::IterateBuiltins(v);
+  SYNCHRONIZE_TAG("builtins");
+
+  // Iterate over global handles.
+  GlobalHandles::IterateRoots(v);
+  SYNCHRONIZE_TAG("globalhandles");
+
+  // Iterate over pointers being held by inactive threads.
+  ThreadManager::Iterate(v);
+  SYNCHRONIZE_TAG("threadmanager");
+}
+#undef SYNCHRONIZE_TAG
+
+
+// Flag is set when the heap has been configured. The heap can be repeatedly
+// configured through the API until it is setup.
+static bool heap_configured = false;
+
+// TODO(1236194): Since the heap size is configurable on the command line
+// and through the API, we should gracefully handle the case that the heap
+// size is not big enough to fit all the initial objects.
+// Record the requested sizes (zero or negative values keep the defaults)
+// and normalize them.  Fails once the heap is already set up.
+bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
+  if (HasBeenSetup()) return false;
+
+  if (semispace_size > 0) semispace_size_ = semispace_size;
+  if (old_gen_size > 0) old_generation_size_ = old_gen_size;
+
+  // The new space size must be a power of two to support single-bit testing
+  // for containment.
+  semispace_size_ = NextPowerOf2(semispace_size_);
+  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
+  young_generation_size_ = 2 * semispace_size_;
+
+  // The old generation is paged.
+  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
+
+  heap_configured = true;
+  return true;
+}
+
+
+// Combined size of the spaces that hold promoted (tenured) objects.
+int Heap::PromotedSpaceSize() {
+  int total = old_space_->Size();
+  total += code_space_->Size();
+  total += map_space_->Size();
+  total += lo_space_->Size();
+  return total;
+}
+
+
+// Create all heap spaces (and, optionally, the initial heap objects).
+// Returns false on any failure; the caller must then call TearDown().
+bool Heap::Setup(bool create_heap_objects) {
+  // Initialize heap spaces and initial maps and objects. Whenever something
+  // goes wrong, just return false. The caller should check the results and
+  // call Heap::TearDown() to release allocated memory.
+  //
+  // If the heap is not yet configured (eg, through the API), configure it.
+  // Configuration is based on the flags new-space-size (really the semispace
+  // size) and old-space-size if set or the initial values of semispace_size_
+  // and old_generation_size_ otherwise.
+  if (!heap_configured) {
+    if (!ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size)) return false;
+  }
+
+  // Setup memory allocator and allocate an initial chunk of memory. The
+  // initial chunk is double the size of the new space to ensure that we can
+  // find a pair of semispaces that are contiguous and aligned to their size.
+  if (!MemoryAllocator::Setup(MaxCapacity())) return false;
+  void* chunk
+      = MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
+  if (chunk == NULL) return false;
+
+  // Put the initial chunk of the old space at the start of the initial
+  // chunk, then the two new space semispaces, then the initial chunk of
+  // code space. Align the pair of semispaces to their size, which must be
+  // a power of 2.
+  ASSERT(IsPowerOf2(young_generation_size_));
+  Address old_space_start = reinterpret_cast<Address>(chunk);
+  Address new_space_start = RoundUp(old_space_start, young_generation_size_);
+  Address code_space_start = new_space_start + young_generation_size_;
+  // Whatever precedes the aligned new space becomes the old space chunk;
+  // the remainder after new space becomes the code space chunk.
+  int old_space_size = new_space_start - old_space_start;
+  int code_space_size = young_generation_size_ - old_space_size;
+
+  // Initialize new space.
+  new_space_ = new NewSpace(initial_semispace_size_, semispace_size_);
+  if (new_space_ == NULL) return false;
+  if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
+
+  // Initialize old space, set the maximum capacity to the old generation
+  // size.
+  old_space_ = new OldSpace(old_generation_size_, OLD_SPACE);
+  if (old_space_ == NULL) return false;
+  if (!old_space_->Setup(old_space_start, old_space_size)) return false;
+
+  // Initialize the code space, set its maximum capacity to the old
+  // generation size.
+  code_space_ = new OldSpace(old_generation_size_, CODE_SPACE);
+  if (code_space_ == NULL) return false;
+  if (!code_space_->Setup(code_space_start, code_space_size)) return false;
+
+  // Initialize map space.
+  map_space_ = new MapSpace(kMaxMapSpaceSize);
+  if (map_space_ == NULL) return false;
+  // Setting up a paged space without giving it a virtual memory range big
+  // enough to hold at least a page will cause it to allocate.
+  if (!map_space_->Setup(NULL, 0)) return false;
+
+  lo_space_ = new LargeObjectSpace();
+  if (lo_space_ == NULL) return false;
+  if (!lo_space_->Setup()) return false;
+
+  if (create_heap_objects) {
+    // Create initial maps.
+    if (!CreateInitialMaps()) return false;
+    if (!CreateApiObjects()) return false;
+
+    // Create initial objects
+    if (!CreateInitialObjects()) return false;
+  }
+
+  LOG(IntEvent("heap-capacity", Capacity()));
+  LOG(IntEvent("heap-available", Available()));
+
+  return true;
+}
+
+
+// Release all heap spaces and the memory allocator.  Safe to call even
+// after a partially failed Setup(): each space is checked for NULL.
+void Heap::TearDown() {
+  GlobalHandles::TearDown();
+
+  if (new_space_ != NULL) {
+    new_space_->TearDown();
+    delete new_space_;
+    new_space_ = NULL;
+  }
+
+  if (old_space_ != NULL) {
+    old_space_->TearDown();
+    delete old_space_;
+    old_space_ = NULL;
+  }
+
+  if (code_space_ != NULL) {
+    code_space_->TearDown();
+    delete code_space_;
+    code_space_ = NULL;
+  }
+
+  if (map_space_ != NULL) {
+    map_space_->TearDown();
+    delete map_space_;
+    map_space_ = NULL;
+  }
+
+  if (lo_space_ != NULL) {
+    lo_space_->TearDown();
+    delete lo_space_;
+    lo_space_ = NULL;
+  }
+
+  // Must come last: the spaces return their pages through the allocator.
+  MemoryAllocator::TearDown();
+}
+
+
+// Return unused committed pages in the shrinkable paged spaces to the OS.
+void Heap::Shrink() {
+  // Try to shrink map, old, and code spaces.
+  map_space_->Shrink();
+  old_space_->Shrink();
+  code_space_->Shrink();
+}
+
+
+#ifdef DEBUG
+
+// Debug-only visitor that prints each handle slot and its target object.
+class PrintHandleVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++)
+      PrintF("  handle %p to %p\n", p, *p);
+  }
+};
+
+// Debug-only: print every local handle currently held in handle scopes.
+void Heap::PrintHandles() {
+  PrintF("Handles:\n");
+  PrintHandleVisitor v;
+  HandleScopeImplementer::Iterate(&v);
+}
+
+#endif
+
+
+// Construct an iterator positioned at the first object in the heap.
+HeapIterator::HeapIterator() {
+  Init();
+}
+
+
+// Release the iteration state.
+HeapIterator::~HeapIterator() {
+  Shutdown();
+}
+
+
+// Allocate the space iterator and position at the first space's objects.
+void HeapIterator::Init() {
+  // Start the iteration.
+  space_iterator_ = new SpaceIterator();
+  object_iterator_ = space_iterator_->next();
+}
+
+
+// Free the space iterator (which owns the object iterators) and clear
+// the cursors so the iterator reads as exhausted.
+void HeapIterator::Shutdown() {
+  // Make sure the last iterator is deallocated.
+  delete space_iterator_;
+  space_iterator_ = NULL;
+  object_iterator_ = NULL;
+}
+
+
+// Returns true while more objects remain, advancing past exhausted
+// spaces to the next space that still has objects.
+bool HeapIterator::has_next() {
+  // A cleared iterator means iteration already finished.
+  if (object_iterator_ == NULL) return false;
+
+  // Current space still has objects: nothing to do.
+  if (object_iterator_->has_next_object()) return true;
+
+  // Scan forward through the remaining spaces for one with objects.
+  while (space_iterator_->has_next()) {
+    object_iterator_ = space_iterator_->next();
+    if (object_iterator_->has_next_object()) return true;
+  }
+
+  // Every space is exhausted; mark the iterator as done.
+  object_iterator_ = NULL;
+  return false;
+}
+
+
+// Returns the next heap object, or NULL once iteration is complete.
+HeapObject* HeapIterator::next() {
+  return has_next() ? object_iterator_->next_object() : NULL;
+}
+
+
+// Restart iteration from the first object in the heap.
+void HeapIterator::reset() {
+  // Restart the iterator.
+  Shutdown();
+  Init();
+}
+
+
+//
+// HeapProfiler class implementation.
+//
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// Accumulate one object into the per-instance-type histogram |info|,
+// which must have at least LAST_TYPE + 1 entries.
+void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
+  InstanceType type = obj->map()->instance_type();
+  ASSERT(0 <= type && type <= LAST_TYPE);
+  info[type].increment_number(1);
+  info[type].increment_bytes(obj->Size());
+}
+#endif
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// Log one heap profile sample: iterate every live object, bucket by
+// instance type (all string types lumped together), and emit log events.
+void HeapProfiler::WriteSample() {
+  LOG(HeapSampleBeginEvent("Heap", "allocated"));
+
+  HistogramInfo info[LAST_TYPE+1];
+#define DEF_TYPE_NAME(name) info[name].set_name(#name);
+  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+  HeapIterator iterator;
+  while (iterator.has_next()) {
+    CollectStats(iterator.next(), info);
+  }
+
+  // Lump all the string types together.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT_SIZE(type, size, name)   \
+    string_number += info[type].number();  \
+    string_bytes += info[type].bytes();
+  STRING_TYPE_LIST(INCREMENT_SIZE)
+#undef INCREMENT_SIZE
+  if (string_bytes > 0) {
+    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+  }
+
+  // Emit one item per non-string type that has any live bytes.
+  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+    if (info[i].bytes() > 0) {
+      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+                              info[i].bytes()));
+    }
+  }
+
+  LOG(HeapSampleEndEvent("Heap", "allocated"));
+}
+
+
+#endif
+
+
+
+#ifdef DEBUG
+
+// State shared by the path-tracing helpers below (debug only).
+static bool search_for_any_global; // true: stop at any JS global object
+static Object* search_target; // otherwise: the exact object searched for
+static bool found_target; // set once the target has been reached
+static List<Object*> object_stack(20); // path from the root to the target
+
+
+// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
+static const int kMarkTag = 2;
+
+static void MarkObjectRecursively(Object** p);
+
+// Visitor that recursively marks every HeapObject reachable through the
+// pointer slots in [start, end).
+class MarkObjectVisitor : public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** slot = start; slot < end; slot++) {
+ if (!(*slot)->IsHeapObject()) continue;
+ MarkObjectRecursively(slot);
+ }
+ }
+};
+
+static MarkObjectVisitor mark_visitor;
+
+// Depth-first marks the object graph rooted at *p while searching for
+// search_target (or, if search_for_any_global, any JS global object).
+// Marking overwrites an object's map word with map address + kMarkTag,
+// which doubles as the "visited" bit; UnmarkObjectRecursively undoes it.
+// The path from the root to the current object is kept on object_stack.
+static void MarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (!map->IsHeapObject()) return; // visited before
+
+ if (found_target) return; // stop if target found
+ object_stack.Add(obj);
+ if ((search_for_any_global && obj->IsJSGlobalObject()) ||
+ (!search_for_any_global && (obj == search_target))) {
+ found_target = true;
+ return;
+ }
+
+ // Convert IC targets in code objects to object pointers so that the
+ // body iteration below can follow them.
+ if (obj->IsCode()) {
+ Code::cast(obj)->ConvertICTargetsFromAddressToObject();
+ }
+
+ // not visited yet
+ Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
+
+ Address map_addr = map_p->address();
+
+ // Tag the map word to mark this object as visited.
+ obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+
+ MarkObjectRecursively(&map);
+
+ obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
+ &mark_visitor);
+
+ if (!found_target) // don't pop if found the target
+ object_stack.RemoveLast();
+}
+
+
+static void UnmarkObjectRecursively(Object** p);
+
+// Visitor that recursively restores the map word of every HeapObject
+// reachable through the pointer slots in [start, end).
+class UnmarkObjectVisitor : public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** slot = start; slot < end; slot++) {
+ if (!(*slot)->IsHeapObject()) continue;
+ UnmarkObjectRecursively(slot);
+ }
+ }
+};
+
+static UnmarkObjectVisitor unmark_visitor;
+
+// Reverses MarkObjectRecursively: strips kMarkTag from the tagged map
+// word, restores the real map pointer, and recurses through the map and
+// the object's body.
+static void UnmarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (map->IsHeapObject()) return; // unmarked already
+
+ Address map_addr = reinterpret_cast<Address>(map);
+
+ map_addr -= kMarkTag;
+
+ ASSERT_TAG_ALIGNED(map_addr);
+
+ HeapObject* map_p = HeapObject::FromAddress(map_addr);
+
+ obj->set_map(reinterpret_cast<Map*>(map_p));
+
+ UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
+
+ obj->IterateBody(Map::cast(map_p)->instance_type(),
+ obj->SizeFromMap(Map::cast(map_p)),
+ &unmark_visitor);
+
+ // Restore the raw-address form of IC targets that marking converted.
+ if (obj->IsCode()) {
+ Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
+ }
+}
+
+
+// Runs a full mark pass from a single root looking for the target, then
+// an unmark pass to restore all map words, and finally prints the chain
+// of objects left on object_stack if the target was found.
+static void MarkRootObjectRecursively(Object** root) {
+ if (search_for_any_global) {
+ ASSERT(search_target == NULL);
+ } else {
+ ASSERT(search_target->IsHeapObject());
+ }
+ found_target = false;
+ object_stack.Clear();
+
+ MarkObjectRecursively(root);
+ UnmarkObjectRecursively(root);
+
+ if (found_target) {
+ PrintF("=====================================\n");
+ PrintF("==== Path to object ====\n");
+ PrintF("=====================================\n\n");
+
+ // The target itself is always on the stack when it was found.
+ ASSERT(!object_stack.is_empty());
+ for (int i = 0; i < object_stack.length(); i++) {
+ if (i > 0) PrintF("\n |\n |\n V\n\n");
+ Object* obj = object_stack[i];
+ obj->Print();
+ }
+ PrintF("=====================================\n");
+ }
+}
+
+
+// Root visitor: launches one mark/unmark path search per HeapObject slot.
+class MarkRootVisitor: public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** slot = start; slot < end; slot++) {
+ if (!(*slot)->IsHeapObject()) continue;
+ MarkRootObjectRecursively(slot);
+ }
+ }
+};
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to a specific heap object and prints it.
+// NOTE(review): search_target is left NULL here, yet with
+// search_for_any_global == false MarkRootObjectRecursively ASSERTs
+// search_target->IsHeapObject(). Presumably the target is meant to be
+// patched in by hand (e.g. from a debugger) before this runs — confirm.
+void Heap::TracePathToObject() {
+ search_target = NULL;
+ search_for_any_global = false;
+
+ MarkRootVisitor root_visitor;
+ IterateRoots(&root_visitor);
+}
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to any global object and prints it. Useful for
+// determining the source for leaks of global objects.
+void Heap::TracePathToGlobal() {
+ // search_target must be NULL in any-global mode (asserted in
+ // MarkRootObjectRecursively).
+ search_target = NULL;
+ search_for_any_global = true;
+
+ MarkRootVisitor root_visitor;
+ IterateRoots(&root_visitor);
+}
+#endif
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_H_
+#define V8_HEAP_H_
+
+namespace v8 { namespace internal {
+
+// Defines all the strong roots in Heap. Each entry is V(type, name);
+// the Heap class declares a static "name_" cell and a "name()" accessor
+// for every root listed here (see ROOT_ACCESSOR below).
+#define STRONG_ROOT_LIST(V) \
+ V(Map, meta_map) \
+ V(Map, heap_number_map) \
+ V(Map, short_string_map) \
+ V(Map, medium_string_map) \
+ V(Map, long_string_map) \
+ V(Map, short_ascii_string_map) \
+ V(Map, medium_ascii_string_map) \
+ V(Map, long_ascii_string_map) \
+ V(Map, short_symbol_map) \
+ V(Map, medium_symbol_map) \
+ V(Map, long_symbol_map) \
+ V(Map, short_ascii_symbol_map) \
+ V(Map, medium_ascii_symbol_map) \
+ V(Map, long_ascii_symbol_map) \
+ V(Map, short_cons_symbol_map) \
+ V(Map, medium_cons_symbol_map) \
+ V(Map, long_cons_symbol_map) \
+ V(Map, short_cons_ascii_symbol_map) \
+ V(Map, medium_cons_ascii_symbol_map) \
+ V(Map, long_cons_ascii_symbol_map) \
+ V(Map, short_sliced_symbol_map) \
+ V(Map, medium_sliced_symbol_map) \
+ V(Map, long_sliced_symbol_map) \
+ V(Map, short_sliced_ascii_symbol_map) \
+ V(Map, medium_sliced_ascii_symbol_map) \
+ V(Map, long_sliced_ascii_symbol_map) \
+ V(Map, short_external_symbol_map) \
+ V(Map, medium_external_symbol_map) \
+ V(Map, long_external_symbol_map) \
+ V(Map, short_external_ascii_symbol_map) \
+ V(Map, medium_external_ascii_symbol_map) \
+ V(Map, long_external_ascii_symbol_map) \
+ V(Map, short_cons_string_map) \
+ V(Map, medium_cons_string_map) \
+ V(Map, long_cons_string_map) \
+ V(Map, short_cons_ascii_string_map) \
+ V(Map, medium_cons_ascii_string_map) \
+ V(Map, long_cons_ascii_string_map) \
+ V(Map, short_sliced_string_map) \
+ V(Map, medium_sliced_string_map) \
+ V(Map, long_sliced_string_map) \
+ V(Map, short_sliced_ascii_string_map) \
+ V(Map, medium_sliced_ascii_string_map) \
+ V(Map, long_sliced_ascii_string_map) \
+ V(Map, short_external_string_map) \
+ V(Map, medium_external_string_map) \
+ V(Map, long_external_string_map) \
+ V(Map, short_external_ascii_string_map) \
+ V(Map, medium_external_ascii_string_map) \
+ V(Map, long_external_ascii_string_map) \
+ V(Map, undetectable_short_string_map) \
+ V(Map, undetectable_medium_string_map) \
+ V(Map, undetectable_long_string_map) \
+ V(Map, undetectable_short_ascii_string_map) \
+ V(Map, undetectable_medium_ascii_string_map) \
+ V(Map, undetectable_long_ascii_string_map) \
+ V(Map, byte_array_map) \
+ V(Map, fixed_array_map) \
+ V(Map, hash_table_map) \
+ V(Map, context_map) \
+ V(Map, global_context_map) \
+ V(Map, code_map) \
+ V(Map, oddball_map) \
+ V(Map, boilerplate_function_map) \
+ V(Map, shared_function_info_map) \
+ V(Map, proxy_map) \
+ V(Map, one_word_filler_map) \
+ V(Map, two_word_filler_map) \
+ V(Object, nan_value) \
+ V(Object, infinity_value) \
+ V(Object, negative_infinity_value) \
+ V(Object, number_max_value) \
+ V(Object, number_min_value) \
+ V(Object, undefined_value) \
+ V(Object, minus_zero_value) \
+ V(Object, null_value) \
+ V(Object, true_value) \
+ V(Object, false_value) \
+ V(String, empty_string) \
+ V(FixedArray, empty_fixed_array) \
+ V(Object, the_hole_value) \
+ V(Map, neander_map) \
+ V(JSObject, message_listeners) \
+ V(Proxy, prototype_accessors) \
+ V(JSObject, debug_event_listeners) \
+ V(Dictionary, code_stubs) \
+ V(Dictionary, non_monomorphic_cache) \
+ V(Code, js_entry_code) \
+ V(Code, js_construct_entry_code) \
+ V(Code, c_entry_code) \
+ V(Code, c_entry_debug_break_code) \
+ V(FixedArray, number_string_cache) \
+ V(FixedArray, single_character_string_cache) \
+ V(FixedArray, natives_source_cache)
+
+// All roots: every strong root plus the symbol table (kept out of the
+// strong list so it can be handled separately from the strong roots).
+#define ROOT_LIST(V) \
+ STRONG_ROOT_LIST(V) \
+ V(Object, symbol_table)
+
+// Symbols the VM pre-creates. Each entry is V(cc_name, contents); the
+// Heap class declares a "cc_name()" accessor for every symbol listed
+// here (see SYMBOL_ACCESSOR below).
+#define SYMBOL_LIST(V) \
+ V(Array_symbol, "Array") \
+ V(Object_symbol, "Object") \
+ V(Proto_symbol, "__proto__") \
+ V(StringImpl_symbol, "StringImpl") \
+ V(arguments_symbol, "arguments") \
+ V(arguments_shadow_symbol, ".arguments") \
+ V(call_symbol, "call") \
+ V(apply_symbol, "apply") \
+ V(caller_symbol, "caller") \
+ V(boolean_symbol, "boolean") \
+ V(callee_symbol, "callee") \
+ V(constructor_symbol, "constructor") \
+ V(code_symbol, ".code") \
+ V(result_symbol, ".result") \
+ V(catch_var_symbol, ".catch-var") \
+ V(finally_state_symbol, ".finally-state") \
+ V(empty_symbol, "") \
+ V(eval_symbol, "eval") \
+ V(function_symbol, "function") \
+ V(length_symbol, "length") \
+ V(name_symbol, "name") \
+ V(number_symbol, "number") \
+ V(object_symbol, "object") \
+ V(prototype_symbol, "prototype") \
+ V(string_symbol, "string") \
+ V(this_symbol, "this") \
+ V(to_string_symbol, "toString") \
+ V(char_at_symbol, "CharAt") \
+ V(undefined_symbol, "undefined") \
+ V(value_of_symbol, "valueOf") \
+ V(CreateObjectLiteralBoilerplate_symbol, "CreateObjectLiteralBoilerplate") \
+ V(CreateArrayLiteral_symbol, "CreateArrayLiteral") \
+ V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
+ V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
+ V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
+ V(illegal_access_symbol, "illegal access") \
+ V(out_of_memory_symbol, "out-of-memory") \
+ V(illegal_execution_state_symbol, "illegal execution state") \
+ V(get_symbol, "get") \
+ V(set_symbol, "set") \
+ V(function_class_symbol, "Function") \
+ V(illegal_argument_symbol, "illegal argument") \
+ V(MakeReferenceError_symbol, "MakeReferenceError") \
+ V(MakeSyntaxError_symbol, "MakeSyntaxError") \
+ V(MakeTypeError_symbol, "MakeTypeError") \
+ V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment") \
+ V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in") \
+ V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op") \
+ V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op") \
+ V(illegal_return_symbol, "illegal_return") \
+ V(illegal_break_symbol, "illegal_break") \
+ V(illegal_continue_symbol, "illegal_continue") \
+ V(unknown_label_symbol, "unknown_label") \
+ V(redeclaration_symbol, "redeclaration") \
+ V(failure_symbol, "<failure>") \
+ V(space_symbol, " ") \
+ V(exec_symbol, "exec") \
+ V(zero_symbol, "0")
+
+
+// The all-static Heap class captures the interface to the global object
+// heap. All JavaScript contexts in this process share the same object heap.
+
+class Heap : public AllStatic {
+ public:
+ // Configure heap size before setup. Return false if the heap has been
+ // setup already.
+ static bool ConfigureHeap(int semispace_size, int old_gen_size);
+
+ // Initializes the global object heap. If create_heap_objects is true,
+ // also creates the basic non-mutable objects.
+ // Returns whether it succeeded.
+ static bool Setup(bool create_heap_objects);
+
+ // Destroys all memory allocated by the heap.
+ static void TearDown();
+
+ // Returns whether Setup has been called.
+ static bool HasBeenSetup();
+
+ // Returns the maximum heap capacity.
+ static int MaxCapacity() {
+ return young_generation_size_ + old_generation_size_;
+ }
+ static int SemiSpaceSize() { return semispace_size_; }
+ static int InitialSemiSpaceSize() { return initial_semispace_size_; }
+ static int YoungGenerationSize() { return young_generation_size_; }
+ static int OldGenerationSize() { return old_generation_size_; }
+
+ // Returns the capacity of the heap in bytes w/o growing. Heap grows when
+ // more spaces are needed until it reaches the limit.
+ static int Capacity();
+
+ // Returns the available bytes in space w/o growing.
+ // Heap doesn't guarantee that it can allocate an object that requires
+ // all available bytes. Check MaxHeapObjectSize() instead.
+ static int Available();
+
+ // Returns the maximum object size that heap supports. Objects larger than
+ // the maximum heap object size are allocated in a large object space.
+ static inline int MaxHeapObjectSize();
+
+ // Returns the size of all objects residing in the heap.
+ static int SizeOfObjects();
+
+ // Return the starting address and a mask for the new space. And-masking an
+ // address with the mask will result in the start address of the new space
+ // for all addresses in either semispace.
+ static Address NewSpaceStart() { return new_space_->start(); }
+ static uint32_t NewSpaceMask() { return new_space_->mask(); }
+ static Address NewSpaceTop() { return new_space_->top(); }
+
+ static NewSpace* new_space() { return new_space_; }
+ static OldSpace* old_space() { return old_space_; }
+ static OldSpace* code_space() { return code_space_; }
+ static MapSpace* map_space() { return map_space_; }
+ static LargeObjectSpace* lo_space() { return lo_space_; }
+
+ static Address* NewSpaceAllocationTopAddress() {
+ return new_space_->allocation_top_address();
+ }
+ static Address* NewSpaceAllocationLimitAddress() {
+ return new_space_->allocation_limit_address();
+ }
+
+ // Allocates and initializes a new JavaScript object based on a
+ // constructor.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateJSObject(JSFunction* constructor,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates the function prototype.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateFunctionPrototype(JSFunction* function);
+
+ // Reinitialize a JSGlobalObject based on a constructor. The JSObject
+ // must have the same size as objects allocated using the
+ // constructor. The JSObject is reinitialized and behaves as an
+ // object that has been freshly allocated using the constructor.
+ static Object* ReinitializeJSGlobalObject(JSFunction* constructor,
+ JSGlobalObject* global);
+
+ // Allocates and initializes a new JavaScript object based on a map.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateJSObjectFromMap(Map* map,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a heap object based on the map.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this function does not perform a garbage collection.
+ static Object* Allocate(Map* map, AllocationSpace space);
+
+ // Allocates a JS Map in the heap.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this function does not perform a garbage collection.
+ static Object* AllocateMap(InstanceType instance_type, int instance_size);
+
+ // Allocates a partial map for bootstrapping.
+ static Object* AllocatePartialMap(InstanceType instance_type,
+ int instance_size);
+
+ // Allocate a map for the specified function
+ static Object* AllocateInitialMap(JSFunction* fun);
+
+ // Allocates and fully initializes a String. There are two String
+ // encodings: ASCII and two byte. One should choose between the three string
+ // allocation functions based on the encoding of the string buffer used to
+ // initialized the string.
+ // - ...FromAscii initializes the string from a buffer that is ASCII
+ // encoded (it does not check that the buffer is ASCII encoded) and the
+ // result will be ASCII encoded.
+ // - ...FromUTF8 initializes the string from a buffer that is UTF-8
+ // encoded. If the characters are all single-byte characters, the
+ // result will be ASCII encoded, otherwise it will converted to two
+ // byte.
+ // - ...FromTwoByte initializes the string from a buffer that is two-byte
+ // encoded. If the characters are all single-byte characters, the
+ // result will be converted to ASCII, otherwise it will be left as
+ // two-byte.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateStringFromAscii(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED);
+ static Object* AllocateStringFromUtf8(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED);
+ static Object* AllocateStringFromTwoByte(
+ Vector<const uc16> str,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a symbol in old space based on the character stream.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this function does not perform a garbage collection.
+ static Object* AllocateSymbol(unibrow::CharacterStream* buffer,
+ int chars,
+ int hash);
+
+ // Allocates and partially initializes a String. There are two String
+ // encodings: ASCII and two byte. These functions allocate a string of the
+ // given length and set its map and length fields. The characters of the
+ // string are uninitialized.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateRawAsciiString(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
+ static Object* AllocateRawTwoByteString(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Computes a single character string where the character has code.
+ // A cache is used for ascii codes.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed. Please note this does not perform a garbage collection.
+ static Object* LookupSingleCharacterStringFromCode(uint16_t code);
+
+ // Allocate a byte array of the specified length
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateByteArray(int length);
+
+ // Allocates a fixed array initialized with undefined values
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateFixedArray(int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+
+ // Allocates a fixed array initialized with the hole values.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateFixedArrayWithHoles(int length);
+
+ // AllocateHashTable is identical to AllocateFixedArray except
+ // that the resulting object has hash_table_map as map.
+ static Object* AllocateHashTable(int length);
+
+ // Allocate a global (but otherwise uninitialized) context.
+ static Object* AllocateGlobalContext();
+
+ // Allocate a function context.
+ static Object* AllocateFunctionContext(int length, JSFunction* closure);
+
+ // Allocate a 'with' context.
+ static Object* AllocateWithContext(Context* previous, JSObject* extension);
+
+ // Allocates a new utility object in the old generation.
+ static Object* AllocateStruct(InstanceType type);
+
+
+ // Initializes a function with a shared part and prototype.
+ // Returns the function.
+ // Note: this code was factored out of AllocateFunction such that
+ // other parts of the VM could use it. Specifically, a function that creates
+ // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
+ // Please note this does not perform a garbage collection.
+ static Object* InitializeFunction(JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype);
+
+ // Allocates a function initialized with a shared part.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateFunction(Map* function_map,
+ SharedFunctionInfo* shared,
+ Object* prototype);
+
+ // Indices for direct access into argument objects.
+ static const int arguments_callee_index = 0;
+ static const int arguments_length_index = 1;
+
+ // Allocates an arguments object - optionally with an elements array.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateArgumentsObject(Object* callee, int length);
+
+ // Converts a double into either a Smi or a HeapNumber object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* NewNumberFromDouble(double value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Same as NewNumberFromDouble, but may return a preallocated/immutable
+ // number object (e.g., minus_zero_value_, nan_value_)
+ static Object* NumberFromDouble(double value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a HeapNumber from value.
+ static Object* AllocateHeapNumber(double value, PretenureFlag pretenure);
+ static Object* AllocateHeapNumber(double value); // pretenure = NOT_TENURED
+
+ // Converts an int into either a Smi or a HeapNumber object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static inline Object* NumberFromInt32(int32_t value);
+
+ // Converts an int into either a Smi or a HeapNumber object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static inline Object* NumberFromUint32(uint32_t value);
+
+ // Allocates a new proxy object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateProxy(Address proxy,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a new SharedFunctionInfo object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateSharedFunctionInfo(Object* name);
+
+ // Allocates a new cons string object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateConsString(String* first, String* second);
+
+ // Allocates a new sliced string object which is a slice of an underlying
+ // string buffer stretching from the index start (inclusive) to the index
+ // end (exclusive).
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateSlicedString(String* buffer, int start, int end);
+
+ // Allocates a new sub string object which is a substring of an underlying
+ // string buffer stretching from the index start (inclusive) to the index
+ // end (exclusive).
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateSubString(String* buffer, int start, int end);
+
+ // Allocate a new external string object, which is backed by a string
+ // resource that resides outside the V8 heap.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateExternalStringFromAscii(
+ ExternalAsciiString::Resource* resource);
+ static Object* AllocateExternalStringFromTwoByte(
+ ExternalTwoByteString::Resource* resource);
+
+ // Allocates an uninitialized object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this function does not perform a garbage collection.
+ static inline Object* AllocateRaw(int size_in_bytes, AllocationSpace space);
+
+ // Makes a new native code object
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this function does not perform a garbage collection.
+ static Object* CreateCode(const CodeDesc& desc,
+ ScopeInfo<>* sinfo,
+ Code::Flags flags);
+
+ static Object* CopyCode(Code* code);
+ // Finds the symbol for string in the symbol table.
+ // If not found, a new symbol is added to the table and returned.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
+ // failed.
+ // Please note this function does not perform a garbage collection.
+ static Object* LookupSymbol(Vector<const char> str);
+ static Object* LookupAsciiSymbol(const char* str) {
+ return LookupSymbol(CStrVector(str));
+ }
+ static Object* LookupSymbol(String* str);
+
+ // Compute the matching symbol map for a string if possible.
+ // NULL is returned if string is in new space or not flattened.
+ static Map* SymbolMapForString(String* str);
+
+ // Converts the given boolean condition to JavaScript boolean value.
+ static Object* ToBoolean(bool condition) {
+ return condition ? true_value() : false_value();
+ }
+
+ // Code that should be run before and after each GC. Includes some
+ // reporting/verification activities when compiled with DEBUG set.
+ static void GarbageCollectionPrologue();
+ static void GarbageCollectionEpilogue();
+
+ // Performs garbage collection operation.
+ // Returns whether required_space bytes are available after the collection.
+ static bool CollectGarbage(int required_space, AllocationSpace space);
+
+ // Utility to invoke the scavenger. This is needed in test code to
+ // ensure correct callback for weak global handles.
+ static void PerformScavenge() {
+ PerformGarbageCollection(NEW_SPACE, SCAVENGER);
+ }
+
+ static void SetGlobalGCPrologueCallback(GCCallback callback) {
+ global_gc_prologue_callback_ = callback;
+ }
+ static void SetGlobalGCEpilogueCallback(GCCallback callback) {
+ global_gc_epilogue_callback_ = callback;
+ }
+
+ // Heap roots
+#define ROOT_ACCESSOR(type, name) static type* name() { return name##_; }
+ ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+// Utility type maps
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ static Map* name##_map() { return name##_map_; }
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, str) static String* name() { return name##_; }
+ SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+ // Iterates over all roots in the heap.
+ static void IterateRoots(ObjectVisitor* v);
+ // Iterates over all strong roots in the heap.
+ static void IterateStrongRoots(ObjectVisitor* v);
+
+ // Iterates remembered set of an old space.
+ static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
+
+ // Iterates a range of remembered set addresses starting with rset_start
+ // corresponding to the range of allocated pointers
+ // [object_start, object_end).
+ static void IterateRSetRange(Address object_start,
+ Address object_end,
+ Address rset_start,
+ ObjectSlotCallback copy_object_func);
+
+ // Returns whether the object resides in new space.
+ static inline bool InNewSpace(Object* object);
+ static inline bool InFromSpace(Object* object);
+ static inline bool InToSpace(Object* object);
+
+ // Checks whether an address/object in the heap (including auxiliary
+ // area and unused area).
+ static bool Contains(Address addr);
+ static bool Contains(HeapObject* value);
+
+ // Checks whether an address/object in a space.
+ // Currently used by tests and heap verification only.
+ static bool InSpace(Address addr, AllocationSpace space);
+ // Returns whether the given heap object currently resides in 'space'.
+ static bool InSpace(HeapObject* value, AllocationSpace space);
+
+ // Sets the stub_cache_ (only used when expanding the dictionary).
+ static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }
+
+ // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
+ static void set_non_monomorphic_cache(Dictionary* value) {
+ non_monomorphic_cache_ = value;
+ }
+
+#ifdef DEBUG
+ // Debug-only printing helpers (declared here, defined in the .cc file).
+ static void Print();
+ static void PrintHandles();
+
+ // Verify the heap is in its normal state before or after a GC.
+ static void Verify();
+
+ // Report heap statistics.
+ static void ReportHeapStatistics(const char* title);
+ static void ReportCodeStatistics(const char* title);
+
+ // Fill in bogus values in from space
+ static void ZapFromSpace();
+#endif
+
+ // Makes a new symbol object
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this function does not perform a garbage collection.
+ static Object* CreateSymbol(const char* str, int length, int hash);
+ static Object* CreateSymbol(String* str);
+
+ // Write barrier support for address[offset] = o.
+ inline static void RecordWrite(Address address, int offset);
+
+ // Given an address in the heap, returns a pointer to the object which
+ // body contains the address. Returns Failure::Exception() if the
+ // operation fails.
+ static Object* FindCodeObject(Address a);
+
+ // Invoke Shrink on shrinkable spaces.
+ static void Shrink();
+
+ // Current collection phase; NOT_IN_GC outside of garbage collection.
+ enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+ static inline HeapState gc_state() { return gc_state_; }
+
+#ifdef DEBUG
+ static bool IsAllocationAllowed() { return allocation_allowed_; }
+ static inline bool allow_allocation(bool enable);
+
+ static bool disallow_allocation_failure() {
+ return disallow_allocation_failure_;
+ }
+
+ static void TracePathToObject();
+ static void TracePathToGlobal();
+#endif
+
+ // Helper for Serialization/Deserialization that restricts memory allocation
+ // to the predictable LINEAR_ONLY policy. Applied to the old, code and map
+ // spaces (the new and large object spaces are not affected).
+ static void SetLinearAllocationOnly(bool linear_only) {
+ old_space_->SetLinearAllocationOnly(linear_only);
+ code_space_->SetLinearAllocationOnly(linear_only);
+ map_space_->SetLinearAllocationOnly(linear_only);
+ }
+
+ // Callback function passed to Heap::Iterate etc. Copies an object if
+ // necessary, the object might be promoted to an old space. The caller must
+ // ensure the precondition that the object is (a) a heap object and (b) in
+ // the heap's from space.
+ static void CopyObject(HeapObject** p);
+
+ // Clear a range of remembered set addresses corresponding to the object
+ // area address 'start' with size 'size_in_bytes', eg, when adding blocks
+ // to the free list.
+ static void ClearRSetRange(Address start, int size_in_bytes);
+
+ // Rebuild remembered set in old and map spaces.
+ static void RebuildRSets();
+
+ //
+ // Support for the API.
+ //
+
+ static bool CreateApiObjects();
+
+ // Attempt to find the number in a small cache. If we find it, return
+ // the string representation of the number. Otherwise return undefined.
+ static Object* GetNumberStringCache(Object* number);
+
+ // Update the cache with a new number-string pair.
+ static void SetNumberStringCache(Object* number, String* str);
+
+ // Entries in the cache. Must be a power of 2.
+ static const int kNumberStringCacheSize = 64;
+
+ private:
+ // Configured sizes of the heap's spaces (set up at initialization).
+ static int semispace_size_;
+ static int initial_semispace_size_;
+ static int young_generation_size_;
+ static int old_generation_size_;
+
+ static int new_space_growth_limit_;
+ static int scavenge_count_;
+
+ static const int kMaxMapSpaceSize = 8*MB;
+
+ // The individual allocation spaces making up the heap.
+ static NewSpace* new_space_;
+ static OldSpace* old_space_;
+ static OldSpace* code_space_;
+ static MapSpace* map_space_;
+ static LargeObjectSpace* lo_space_;
+ static HeapState gc_state_;
+
+ // Returns the size of object residing in non new spaces.
+ static int PromotedSpaceSize();
+
+#ifdef DEBUG
+ static bool allocation_allowed_;
+ static int mc_count_; // how many mark-compact collections happened
+ static int gc_count_; // how many gc happened
+
+ // If the --gc-interval flag is set to a positive value, this
+ // variable holds the value indicating the number of allocations
+ // remaining until the next failure and garbage collection.
+ static int allocation_timeout_;
+
+ // Do we expect to be able to handle allocation failure at this
+ // time?
+ static bool disallow_allocation_failure_;
+#endif // DEBUG
+
+ // Promotion limit that triggers a global GC
+ static int promoted_space_limit_;
+
+ // Indicates that an allocation has failed in the old generation since the
+ // last GC.
+ // NOTE(review): declared int but documented as a flag -- presumably 0/1;
+ // confirm at the use sites in heap.cc.
+ static int old_gen_exhausted_;
+
+ // Declare all the roots
+#define ROOT_DECLARATION(type, name) static type* name##_;
+ ROOT_LIST(ROOT_DECLARATION)
+#undef ROOT_DECLARATION
+
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) static Map* name##_map_;
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+
+#define SYMBOL_DECLARATION(name, str) static String* name##_;
+ SYMBOL_LIST(SYMBOL_DECLARATION)
+#undef SYMBOL_DECLARATION
+
+ // GC callback function, called before and after mark-compact GC.
+ // Allocations in the callback function are disallowed.
+ static GCCallback global_gc_prologue_callback_;
+ static GCCallback global_gc_epilogue_callback_;
+
+ // Checks whether a global GC is necessary
+ static GarbageCollector SelectGarbageCollector(AllocationSpace space);
+
+ // Performs garbage collection
+ static void PerformGarbageCollection(AllocationSpace space,
+ GarbageCollector collector);
+
+ // Returns either a Smi or a Number object from 'value'. If 'new_object'
+ // is false, it may return a preallocated immutable object.
+ static Object* SmiOrNumberFromDouble(double value,
+ bool new_object,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate an uninitialized object in map space. The behavior is
+ // identical to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that
+ // (a) it doesn't have to test the allocation space argument and (b) can
+ // reduce code size (since both AllocateRaw and AllocateRawMap are
+ // inlined).
+ static inline Object* AllocateRawMap(int size_in_bytes);
+
+ // Allocate storage for JSObject properties.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ static inline Object* AllocatePropertyStorageForMap(Map* map);
+
+ // Initializes a JSObject based on its map.
+ static void InitializeJSObjectFromMap(JSObject* obj,
+ FixedArray* properties,
+ Map* map);
+
+ // Bootstrapping helpers run once at heap setup.
+ static bool CreateInitialMaps();
+ static bool CreateInitialObjects();
+ static void CreateFixedStubs();
+ static Object* CreateOddball(Map* map,
+ const char* to_string,
+ Object* to_number);
+
+ // Allocate empty fixed array.
+ static Object* AllocateEmptyFixedArray();
+
+ // Performs a minor collection in new generation.
+ static void Scavenge();
+
+ // Performs a major collection in the whole heap.
+ static void MarkCompact();
+
+ // Code to be run before and after mark-compact.
+ static void MarkCompactPrologue();
+ static void MarkCompactEpilogue();
+
+ // Helper function used by CopyObject to copy a source object to an
+ // allocated target object and update the forwarding pointer in the source
+ // object. Returns the target object.
+ static HeapObject* MigrateObject(HeapObject** source_p,
+ HeapObject* target,
+ int size);
+
+ // Helper function that governs the promotion policy from new space to
+ // old. If the object's old address lies below the new space's age
+ // mark or if we've already filled the bottom 1/16th of the to space,
+ // we try to promote this object.
+ static inline bool ShouldBePromoted(Address old_address, int object_size);
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ // Record the copy of an object in the NewSpace's statistics.
+ static void RecordCopiedObject(HeapObject* obj);
+
+ // Record statistics before and after garbage collection.
+ static void ReportStatisticsBeforeGC();
+ static void ReportStatisticsAfterGC();
+#endif
+
+ // Update an old object's remembered set
+ static int UpdateRSet(HeapObject* obj);
+
+ // Rebuild remembered set in an old space.
+ static void RebuildRSets(PagedSpace* space);
+
+ // Rebuild remembered set in the large object space.
+ static void RebuildRSets(LargeObjectSpace* space);
+
+ static const int kInitialSymbolTableSize = 2048;
+
+ friend class Factory;
+ friend class DisallowAllocationFailure;
+};
+
+
+#ifdef DEBUG
+// Visitor class to verify interior pointers that do not have remembered set
+// bits. All heap object pointers have to point into the heap to a location
+// that has a map pointer at its first word. Caveat: Heap::Contains is an
+// approximation because it can return true for objects in a heap space but
+// above the allocation pointer.
+class VerifyPointersVisitor: public ObjectVisitor {
+ public:
+ // Checks every heap object pointer in [start, end): it must point into
+ // the heap, and the word it points at must be a valid map pointer.
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** slot = start; slot < end; slot++) {
+ if (!(*slot)->IsHeapObject()) continue;
+ HeapObject* heap_object = HeapObject::cast(*slot);
+ ASSERT(Heap::Contains(heap_object));
+ ASSERT(heap_object->map()->IsMap());
+ }
+ }
+};
+
+
+// Visitor class to verify interior pointers that have remembered set bits.
+// As VerifyPointersVisitor but also checks that remembered set bits are
+// always set for pointers into new space.
+class VerifyPointersAndRSetVisitor: public ObjectVisitor {
+ public:
+ // Same checks as VerifyPointersVisitor, plus: a slot holding a pointer
+ // into new space must have its remembered set bit set.
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** slot = start; slot < end; slot++) {
+ if (!(*slot)->IsHeapObject()) continue;
+ HeapObject* heap_object = HeapObject::cast(*slot);
+ ASSERT(Heap::Contains(heap_object));
+ ASSERT(heap_object->map()->IsMap());
+ if (Heap::InNewSpace(heap_object)) {
+ ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(slot), 0));
+ }
+ }
+ }
+};
+#endif
+
+
+// A HeapIterator provides iteration over the whole heap. It aggregates the
+// specific iterators for the different spaces, since each of those can only
+// iterate over a single space.
+
+class HeapIterator BASE_EMBEDDED {
+ public:
+ explicit HeapIterator();
+ virtual ~HeapIterator();
+
+ // Iteration protocol: has_next()/next() walk the heap's objects;
+ // reset() presumably restarts the walk -- confirm in the definition.
+ bool has_next();
+ HeapObject* next();
+ void reset();
+
+ private:
+ // Perform the initialization.
+ void Init();
+
+ // Perform all necessary shutdown (destruction) work.
+ void Shutdown();
+
+ // Space iterator for iterating all the spaces.
+ SpaceIterator* space_iterator_;
+ // Object iterator for the space currently being iterated.
+ ObjectIterator* object_iterator_;
+};
+
+
+// ----------------------------------------------------------------------------
+// Marking stack for tracing live objects.
+
+class MarkingStack {
+ public:
+ // Uses the memory [low, high) as the stack's backing store. The stack
+ // starts out empty and not overflowed.
+ void Initialize(Address low, Address high) {
+ low_ = reinterpret_cast<HeapObject**>(low);
+ top_ = low_;
+ high_ = reinterpret_cast<HeapObject**>(high);
+ overflowed_ = false;
+ }
+
+ bool is_full() { return top_ >= high_; }
+
+ bool is_empty() { return top_ <= low_; }
+
+ bool overflowed() { return overflowed_; }
+
+ void clear_overflowed() { overflowed_ = false; }
+
+ // Pushes p; the caller must ensure there is room. Filling the last
+ // slot raises the overflow flag.
+ void Push(HeapObject* p) {
+ ASSERT(!is_full());
+ *top_ = p;
+ top_++;
+ if (top_ >= high_) overflowed_ = true;
+ }
+
+ // Pops the most recently pushed object; the stack must be non-empty.
+ HeapObject* Pop() {
+ ASSERT(!is_empty());
+ top_--;
+ return *top_;
+ }
+
+ private:
+ HeapObject** low_;
+ HeapObject** top_;
+ HeapObject** high_;
+ bool overflowed_;
+};
+
+
+// ----------------------------------------------------------------------------
+// Functions and constants used for marking live objects.
+//
+
+// Many operations (eg, Object::Size()) are based on an object's map. When
+// objects are marked as live or overflowed, their map pointer is changed.
+// Use clear_mark_bit and/or clear_overflow_bit to recover the original map
+// word.
+static inline intptr_t clear_mark_bit(intptr_t map_word) {
+ // Marked objects have the mark bit *cleared* (see is_marked below), so
+ // restoring the original map word means setting the bit again.
+ const intptr_t restored_word = map_word | kMarkingMask;
+ return restored_word;
+}
+
+
+static inline intptr_t clear_overflow_bit(intptr_t map_word) {
+ // Overflowed objects have the overflow bit set; clearing it restores
+ // the map word.
+ const intptr_t restored_word = map_word & ~kOverflowMask;
+ return restored_word;
+}
+
+
+// True if the object is marked live.
+static inline bool is_marked(HeapObject* obj) {
+ intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
+ return (map_word & kMarkingMask) == 0;
+}
+
+
+// Mutate an object's map pointer to indicate that the object is live.
+static inline void set_mark(HeapObject* obj) {
+ ASSERT(!is_marked(obj));
+ intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
+ obj->set_map(reinterpret_cast<Map*>(map_word & ~kMarkingMask));
+}
+
+
+// Mutate an object's map pointer to remove the indication that the object
+// is live, ie, (partially) restore the map pointer.
+static inline void clear_mark(HeapObject* obj) {
+ ASSERT(is_marked(obj));
+ intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
+ obj->set_map(reinterpret_cast<Map*>(clear_mark_bit(map_word)));
+}
+
+
+// True if the object is marked overflowed.
+static inline bool is_overflowed(HeapObject* obj) {
+ intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
+ return (map_word & kOverflowMask) != 0;
+}
+
+
+// Mutate an object's map pointer to indicate that the object is overflowed.
+// Overflowed objects have been reached during marking of the heap but not
+// pushed on the marking stack (and thus their children have not necessarily
+// been marked).
+static inline void set_overflow(HeapObject* obj) {
+ // Set the overflow bit in the map word (see the comment above).
+ const intptr_t word = reinterpret_cast<intptr_t>(obj->map());
+ obj->set_map(reinterpret_cast<Map*>(word | kOverflowMask));
+}
+
+
+// Mutate an object's map pointer to remove the indication that the object
+// is overflowed, ie, (partially) restore the map pointer.
+static inline void clear_overflow(HeapObject* obj) {
+ ASSERT(is_overflowed(obj));
+ intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
+ obj->set_map(reinterpret_cast<Map*>(clear_overflow_bit(map_word)));
+}
+
+
+// A helper class to document/test C++ scopes where we do not
+// expect a GC. Usage:
+//
+// /* Allocation not allowed: we cannot handle a GC in this scope. */
+// { AssertNoAllocation nogc;
+// ...
+// }
+
+#ifdef DEBUG
+
+// Scope guard: while an instance is live, Heap::disallow_allocation_failure_
+// is raised; the previous value is restored on destruction, so scopes nest.
+class DisallowAllocationFailure {
+ public:
+ DisallowAllocationFailure()
+ : old_state_(Heap::disallow_allocation_failure_) {
+ Heap::disallow_allocation_failure_ = true;
+ }
+ ~DisallowAllocationFailure() {
+ Heap::disallow_allocation_failure_ = old_state_;
+ }
+ private:
+ bool old_state_; // setting in effect before this scope was entered
+};
+
+// Scope guard: turns allocation off for the extent of the scope via
+// Heap::allow_allocation(false) and restores the previous setting on exit.
+class AssertNoAllocation {
+ public:
+ AssertNoAllocation() : old_state_(Heap::allow_allocation(false)) {}
+
+ ~AssertNoAllocation() { Heap::allow_allocation(old_state_); }
+
+ private:
+ bool old_state_; // setting in effect before this scope was entered
+};
+
+#else // ndef DEBUG
+
+// Release-build version: allocation checking is compiled out, so this is a
+// no-op with the same interface as the DEBUG version above.
+class AssertNoAllocation {
+ public:
+ AssertNoAllocation() { }
+ ~AssertNoAllocation() { }
+};
+
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// The HeapProfiler writes data to the log files, which can be postprocessed
+// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
+class HeapProfiler {
+ public:
+ // Write a single heap sample to the log file.
+ static void WriteSample();
+
+ private:
+ // Update the array info with stats from obj.
+ static void CollectStats(HeapObject* obj, HistogramInfo* info);
+};
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ masm->
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+// Helper function used from LoadIC/CallIC GenerateNormal. Looks up the
+// name in r2 in the receiver's property dictionary and leaves the value
+// in t1; jumps to miss_label when the lookup cannot be handled here
+// (interceptor present, properties not a dictionary, name not found, or
+// property not a normal field).
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* done_label,
+ Label* miss_label,
+ Register t0,
+ Register t1) {
+ // Register use:
+ //
+ // t0 - used to hold the property dictionary.
+ //
+ // t1 - initially the receiver
+ // - used for the index into the property dictionary
+ // - holds the result on exit.
+ //
+ // r3 - used as temporary and to hold the capacity of the property
+ // dictionary.
+ //
+ // r2 - holds the name of the property and is unchanged.
+
+ // Check for the absence of an interceptor.
+ // Load the map into t0.
+ __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
+ // Test the has_named_interceptor bit in the map. BUG FIX: the instance
+ // attributes must be read from the map just loaded into t0 -- reading
+ // them off the receiver (t1) used a Map-class offset on a JSObject and
+ // left the load of t0 above dead.
+ __ ldr(t0, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
+ __ tst(t0, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+ // Jump to miss if the interceptor bit is set.
+ __ b(ne, miss_label);
+
+
+ // Check that the properties array is a dictionary.
+ __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Factory::hash_table_map()));
+ __ b(ne, miss_label);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize;
+ __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
+ __ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(r3, r3, Operand(1));
+
+ const int kElementsStartOffset =
+ Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
+ if (i > 0) __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
+ __ and_(t1, t1, Operand(r3));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(Dictionary::kElementSize == 3);
+ __ add(t1, t1, Operand(t1, LSL, 1)); // t1 = t1 * 3
+
+ // Check if the key is identical to the name.
+ __ add(t1, t0, Operand(t1, LSL, 2));
+ __ ldr(ip, FieldMemOperand(t1, kElementsStartOffset));
+ __ cmp(r2, Operand(ip));
+ if (i != kProbes - 1) {
+ __ b(eq, done_label);
+ } else {
+ // Last probe: a mismatch is a definitive miss.
+ __ b(ne, miss_label);
+ }
+ }
+
+ // Check that the value is a normal property.
+ __ bind(done_label); // t1 == t0 + 4*index
+ __ ldr(r3, FieldMemOperand(t1, kElementsStartOffset + 2 * kPointerSize));
+ __ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ b(ne, miss_label);
+
+ // Get the value at the masked, scaled index and return.
+ __ ldr(t1, FieldMemOperand(t1, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Loads the length field of a JS array receiver (in r0) and returns it;
+// any other receiver falls through to the LoadIC miss builtin.
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the object is a JS array.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(JS_ARRAY_TYPE));
+ __ b(ne, &miss);
+
+ // Load length directly from the JS array.
+ __ ldr(r0, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+}
+
+
+// Loads the length of a short string receiver (in r0): extracts the
+// length bits and returns them smi-tagged; other receivers miss.
+void LoadIC::GenerateShortStringLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the object is a short string.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ and_(r1, r1, Operand(kIsNotStringMask | kStringSizeMask));
+ // The cast is to resolve the overload for the argument of 0x0.
+ __ cmp(r1, Operand(static_cast<int32_t>(kStringTag | kShortStringTag)));
+ __ b(ne, &miss);
+
+ // Load length directly from the string.
+ __ ldr(r0, FieldMemOperand(r0, String::kLengthOffset));
+ __ mov(r0, Operand(r0, LSR, String::kShortLengthShift));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // smi-tag the result
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+}
+
+
+// As GenerateShortStringLength, but for medium strings (different length
+// shift in the string's length field).
+void LoadIC::GenerateMediumStringLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the object is a medium string.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ and_(r1, r1, Operand(kIsNotStringMask | kStringSizeMask));
+ __ cmp(r1, Operand(kStringTag | kMediumStringTag));
+ __ b(ne, &miss);
+
+ // Load length directly from the string.
+ __ ldr(r0, FieldMemOperand(r0, String::kLengthOffset));
+ __ mov(r0, Operand(r0, LSR, String::kMediumLengthShift));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // smi-tag the result
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+}
+
+
+// As GenerateShortStringLength, but for long strings (different length
+// shift in the string's length field).
+void LoadIC::GenerateLongStringLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the object is a long string.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ and_(r1, r1, Operand(kIsNotStringMask | kStringSizeMask));
+ __ cmp(r1, Operand(kStringTag | kLongStringTag));
+ __ b(ne, &miss);
+
+ // Load length directly from the string.
+ __ ldr(r0, FieldMemOperand(r0, String::kLengthOffset));
+ __ mov(r0, Operand(r0, LSR, String::kLongLengthShift));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // smi-tag the result
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+}
+
+
+// Loads the 'prototype' property of a function receiver. Not yet
+// implemented as a fast path on ARM: unconditionally jumps to the miss
+// builtin (see NOTE below).
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ // NOTE: Right now, this code always misses on ARM which is
+ // sub-optimal. We should port the fast case code from IA-32.
+
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+// Megamorphic call stub: probes the stub cache for the callee name; for
+// smi/heap-number, string and boolean receivers it substitutes the
+// corresponding value wrapper's prototype and probes again before
+// falling back to the CallIC miss handler.
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- r1: receiver
+ // -- lr: return address
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Get the name of the function from the stack; 1 ~ receiver.
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r2, MemOperand(ip, 1 * kPointerSize));
+
+ // Probe the stub cache.
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+
+ // If the stub cache probing failed, the receiver might be a value.
+ // For value objects, we use the map of the prototype objects for
+ // the corresponding JSValue for the cache and that is what we need
+ // to probe.
+ //
+ // Check for number.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &number);
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &non_number);
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, r1);
+ __ b(&probe);
+
+ // Check for string. (r3 still holds the receiver's instance type.)
+ __ bind(&non_number);
+ __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
+ __ b(hs, &non_string);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, r1);
+ __ b(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ cmp(r1, Operand(Factory::true_value()));
+ __ b(eq, &boolean);
+ __ cmp(r1, Operand(Factory::false_value()));
+ __ b(ne, &miss);
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+// Call stub for receivers whose properties live in a dictionary: looks
+// the callee up via GenerateDictionaryLoad, patches it onto the stack and
+// jumps to its code. Global object receivers get an access check first.
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- r1: receiver
+ // -- lr: return address
+ // -----------------------------------
+
+ Label miss, probe, done, global;
+
+ // Get the name of the function from the stack; 1 ~ receiver.
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r2, MemOperand(ip, 1 * kPointerSize));
+
+ // Push the number of arguments on the stack to free r0.
+ __ push(r0);
+
+ // Check that the receiver isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the receiver is a valid JS object.
+ __ ldr(r0, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &miss);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object (unlikely).
+ __ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, &global);
+
+ // Search the dictionary placing the result in r1.
+ __ bind(&probe);
+ GenerateDictionaryLoad(masm, &done, &miss, r0, r1);
+
+ // Check that the value isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the value is a JSFunction.
+ __ ldr(r0, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ __ cmp(r0, Operand(JS_FUNCTION_TYPE));
+ __ b(ne, &miss);
+
+ // Restore the number of arguments.
+ __ pop(r0);
+
+ // Patch the function on the stack; 1 ~ receiver.
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r1, MemOperand(ip, 1 * kPointerSize));
+
+ // Get the context and call code from the function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+
+ // Jump to the code.
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r1);
+
+ // Global object access: Check access rights.
+ __ bind(&global);
+ __ CheckAccessGlobal(r1, r0, &miss);
+ __ b(&probe);
+
+ // Cache miss: Restore number of arguments and receiver from stack
+ // and jump to runtime.
+ __ bind(&miss);
+ __ pop(r0);
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r1, MemOperand(ip));
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+// Generic CallIC entry: calls the runtime function f (e.g. the CallIC
+// miss handler) with the receiver and callee name, then tail-calls the
+// function object the runtime returned.
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- lr: return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into r1.
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r1, MemOperand(ip, 0 * kPointerSize));
+
+ __ EnterJSFrame(0, 0);
+
+ // Push the receiver and the name of the function.
+ __ ldr(r0, MemOperand(pp, 0));
+ __ mov(r2, Operand(0)); // code slot == 0
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+
+ // Call the entry.
+ __ mov(r0, Operand(2 - 1)); // do not count receiver
+ __ mov(r1, Operand(f));
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+
+ // Move result to r1 and restore number of arguments.
+ __ mov(r1, Operand(r0));
+ __ ldr(r0, MemOperand(v8::internal::fp, // fp is shadowed by IC::fp
+ JavaScriptFrameConstants::kArgsLengthOffset));
+
+ __ ExitJSFrame(DO_NOT_RETURN, 0);
+
+ // Patch the function on the stack; 1 ~ receiver.
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r1, MemOperand(ip, 1 * kPointerSize));
+
+ // Get the context and call code from the function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+
+ // Jump to the code.
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r1);
+}
+
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+// Megamorphic load stub: probes the stub cache for (receiver, name) and
+// falls back to the LoadIC miss handler on failure.
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, r0, r2, r3);
+
+ // Cache miss: Jump to runtime.
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+// Load stub for receivers whose properties live in a dictionary: looks
+// the name up via GenerateDictionaryLoad and returns the value in r0.
+// Global object receivers get an access check first.
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ Label miss, probe, done, global;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the receiver is a valid JS object.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &miss);
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object (unlikely).
+ __ cmp(r1, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, &global);
+
+
+ // Search the receiver's property dictionary, placing the result in r0.
+ __ bind(&probe);
+ GenerateDictionaryLoad(masm, &done, &miss, r1, r0);
+ __ Ret();
+
+ // Global object access: Check access rights.
+ __ bind(&global);
+ __ CheckAccessGlobal(r0, r1, &miss);
+ __ b(&probe);
+
+ // Cache miss: Restore receiver from stack and jump to runtime.
+ __ bind(&miss);
+ __ ldr(r0, MemOperand(sp));
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+// Unconditional miss: delegates straight to the generic entry with the
+// LoadIC miss handler.
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ ExternalReference miss_entry(IC_Utility(kLoadIC_Miss));
+ Generate(masm, miss_entry);
+}
+
+
+// Generic LoadIC entry: pushes the receiver and name as arguments and
+// jumps to the runtime function f.
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ __ push(r0);
+ __ push(r2);
+
+ // Set the number of arguments and jump to the entry.
+ __ mov(r0, Operand(2 - 1)); // not counting receiver.
+ __ JumpToBuiltin(f);
+}
+
+
+// TODO(1224671): ICs for keyed load/store is not implemented on ARM.
+// The stubs below intentionally emit no code.
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+}
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+}
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+}
+
+void KeyedStoreIC::Generate(MacroAssembler* masm,
+ const ExternalReference& f) {
+}
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+}
+
+
+// Defined in ic.cc.
+Object* StoreIC_Miss(Arguments args);
+
+// Megamorphic store stub: probes the stub cache for (receiver, name) and
+// falls back to the StoreIC miss handler on failure.
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ __ ldr(r1, MemOperand(sp));
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+
+ // Cache miss: Jump to runtime.
+ Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+// Generic StoreIC entry: pushes value, name and receiver as arguments
+// and jumps to the runtime function f.
+void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ __ ldr(r3, MemOperand(sp)); // copy receiver
+ __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+ // Set the number of arguments and jump to the entry.
+ __ mov(r0, Operand(3 - 1)); // not counting receiver.
+ __ JumpToBuiltin(f);
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+
+DECLARE_bool(debug_code);
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ masm->
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+// Helper function used from LoadIC/CallIC GenerateNormal.
+// Probes a property dictionary for |name| and, on success, leaves the
+// property value in r1; jumps to |miss_label| on any failure. Note:
+// r0, r1, r2 are caller-chosen ia32 registers (parameters), not the
+// ARM registers of the same names.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register r0, Register r1, Register r2,
+ Register name) {
+ // Register use:
+ //
+ // r0 - used to hold the property dictionary.
+ //
+ // r1 - initially the receiver
+ // - used for the index into the property dictionary
+ // - holds the result on exit.
+ //
+ // r2 - used to hold the capacity of the property dictionary.
+ //
+ // name - holds the name of the property and is unchanged.
+
+ Label done;
+
+ // Check for the absence of an interceptor.
+ // Load the map into r0.
+ __ mov(r0, FieldOperand(r1, JSObject::kMapOffset));
+ // Test the has_named_interceptor bit in the map.
+ __ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
+ Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+ // Jump to miss if the interceptor bit is set.
+ __ j(not_zero, miss_label, not_taken);
+
+ // Check that the properties array is a dictionary.
+ __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(not_equal, miss_label);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize;
+ __ mov(r2, FieldOperand(r0, kCapacityOffset));
+ __ shr(r2, kSmiTagSize); // convert smi to int
+ __ dec(r2);
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize;
+ for (int i = 0; i < kProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ mov(r1, FieldOperand(name, String::kLengthOffset));
+ if (i > 0) __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
+ __ and_(r1, Operand(r2));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(Dictionary::kElementSize == 3);
+ __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+
+ // Check if the key is identical to the name.
+ __ cmp(name,
+ Operand(r0, r1, times_4, kElementsStartOffset - kHeapObjectTag));
+ if (i != kProbes - 1) {
+ __ j(equal, &done, taken);
+ } else {
+ // Last probe: a mismatch here means the property is absent.
+ __ j(not_equal, miss_label, not_taken);
+ }
+ }
+
+ // Check that the value is a normal property.
+ __ bind(&done);
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ test(Operand(r0, r1, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ j(not_zero, miss_label, not_taken);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(r1, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
+}
+
+
+// Specialized load stub: loads the length of a JS array receiver via
+// the stub compiler helper; any failure falls through to the generic
+// LoadIC miss stub.
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Specialized load stub: loads the length of a short string receiver;
+// any failure falls through to the generic LoadIC miss stub.
+void LoadIC::GenerateShortStringLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ StubCompiler::GenerateLoadShortStringLength(masm, eax, edx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Specialized load stub: loads the length of a medium string receiver;
+// any failure falls through to the generic LoadIC miss stub.
+void LoadIC::GenerateMediumStringLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ StubCompiler::GenerateLoadMediumStringLength(masm, eax, edx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Specialized load stub: loads the length of a long string receiver;
+// any failure falls through to the generic LoadIC miss stub.
+void LoadIC::GenerateLongStringLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ StubCompiler::GenerateLoadLongStringLength(masm, eax, edx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Specialized load stub: loads the prototype of a function receiver;
+// any failure falls through to the generic LoadIC miss stub.
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Generic keyed load: handles smi-indexed loads from fast-mode
+// elements and symbol-keyed dictionary loads inline; everything else
+// (dictionaries, out-of-bounds, value wrappers, holes) goes through
+// Runtime::kGetProperty.
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label slow, fast, check_string;
+
+ __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+
+ // Check that the object isn't a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ // Check that the object is some kind of JS object.
+ __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ __ cmp(edx, JS_OBJECT_TYPE);
+ __ j(less, &slow, not_taken);
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &check_string, not_taken);
+ __ sar(eax, kSmiTagSize);
+ // Check if the object is a value-wrapper object. In that case we
+ // enter the runtime system to make sure that indexing into string
+ // objects work as intended.
+ __ cmp(edx, JS_VALUE_TYPE);
+ __ j(equal, &slow, not_taken);
+ // Get the elements array of the object.
+ __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(equal, &slow, not_taken);
+ // Check that the key (index) is within bounds.
+ __ cmp(eax, FieldOperand(ecx, Array::kLengthOffset));
+ __ j(below, &fast, taken);
+ // Slow case: Load name and receiver from stack and jump to runtime.
+ __ bind(&slow);
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // 1 ~ return address.
+ __ mov(ecx, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, name.
+ __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kGetProperty));
+ // Check if the key is a symbol that is not an array index.
+ __ bind(&check_string);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ test(ebx, Immediate(kIsSymbolMask));
+ __ j(zero, &slow, not_taken);
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ __ test(ebx, Immediate(String::kIsArrayIndexMask));
+ __ j(not_zero, &slow, not_taken);
+ // Probe the dictionary leaving result in ecx.
+ GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
+ __ mov(eax, Operand(ecx));
+ __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ ret(0);
+ // Fast case: Do the load.
+ __ bind(&fast);
+ __ mov(eax, Operand(ecx, eax, times_4, Array::kHeaderSize - kHeapObjectTag));
+ __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow, not_taken);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
+}
+
+
+// Generic keyed store: handles smi-indexed stores into fast-mode
+// JSObject and JSArray elements inline (including growing an array by
+// exactly one element at array[array.length]); everything else goes
+// through Runtime::kSetProperty.
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label slow, fast, array, extra;
+ // Get the key and the object from the stack.
+ __ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, key
+ // Check that the key is a smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+ // Check that the object isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ // Get the type of the object from its map.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ // Check if the object is a JS array or not.
+ __ cmp(ecx, JS_ARRAY_TYPE);
+ __ j(equal, &array);
+ // Check that the object is some kind of JS object.
+ __ cmp(ecx, JS_OBJECT_TYPE);
+ __ j(less, &slow, not_taken);
+
+
+ // Object case: Check key against length in the elements array.
+ // eax: value
+ // edx: JSObject
+ // ebx: index (as a smi)
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(equal, &slow, not_taken);
+ // Untag the key (for checking against untagged length in the fixed array).
+ __ mov(edx, Operand(ebx));
+ __ sar(edx, kSmiTagSize); // untag the index and use it for the comparison
+ __ cmp(edx, FieldOperand(ecx, Array::kLengthOffset));
+ // eax: value
+ // ecx: FixedArray
+ // ebx: index (as a smi)
+ __ j(below, &fast, taken);
+
+
+ // Slow case: Push extra copies of the arguments (3).
+ __ bind(&slow);
+ __ pop(ecx);
+ __ push(Operand(esp, 1 * kPointerSize));
+ __ push(Operand(esp, 1 * kPointerSize));
+ __ push(eax);
+ __ push(ecx);
+ // Do tail-call to runtime routine.
+ __ Set(eax, Immediate(2)); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(Runtime::kSetProperty));
+
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // eax: value
+ // edx: JSArray
+ // ecx: FixedArray
+ // ebx: index (as a smi)
+ // flags: compare (ebx, edx.length())
+ __ j(not_equal, &slow, not_taken); // do not leave holes in the array
+ __ sar(ebx, kSmiTagSize); // untag
+ __ cmp(ebx, FieldOperand(ecx, Array::kLengthOffset));
+ __ j(above_equal, &slow, not_taken);
+ // Restore tag and increment.
+ __ lea(ebx, Operand(ebx, times_2, 1 << kSmiTagSize));
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), ebx);
+ __ sub(Operand(ebx), Immediate(1 << kSmiTagSize)); // decrement ebx again
+ __ jmp(&fast);
+
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode; if it is the
+ // length is always a smi.
+ __ bind(&array);
+ // eax: value
+ // edx: JSArray
+ // ebx: index (as a smi)
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(equal, &slow, not_taken);
+
+ // Check the key against the length in the array, compute the
+ // address to store into and fall through to fast case.
+ __ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
+ __ j(above_equal, &extra, not_taken);
+
+
+ // Fast case: Do the store.
+ __ bind(&fast);
+ // eax: value
+ // ecx: FixedArray
+ // ebx: index (as a smi)
+ __ mov(Operand(ecx, ebx, times_2, Array::kHeaderSize - kHeapObjectTag), eax);
+ // Update write barrier for the elements array address.
+ __ mov(edx, Operand(eax));
+ __ RecordWrite(ecx, 0, edx, ebx);
+ __ ret(0);
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+// Emits the megamorphic CallIC: probes the stub cache with the
+// receiver's map; if the receiver is a value (number, string or
+// boolean), re-probes with the map of the corresponding JSValue
+// prototype. Misses go to the CallIC_Miss runtime entry.
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack; 2 ~ return address, receiver
+ __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
+
+ // Probe the stub cache.
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+ StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
+
+ // If the stub cache probing failed, the receiver might be a value.
+ // For value objects, we use the map of the prototype objects for
+ // the corresponding JSValue for the cache and that is what we need
+ // to probe.
+ //
+ // Check for number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &number, not_taken);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ebx, HEAP_NUMBER_TYPE);
+ __ j(not_equal, &non_number, taken);
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, edx);
+ __ jmp(&probe);
+
+ // Check for string.
+ __ bind(&non_number);
+ __ cmp(ebx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &non_string, taken);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, edx);
+ __ jmp(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ cmp(edx, Factory::true_value());
+ __ j(equal, &boolean, not_taken);
+ __ cmp(edx, Factory::false_value());
+ __ j(not_equal, &miss, taken);
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+// Emits the normal (dictionary-mode) CallIC: looks the function up in
+// the receiver's property dictionary and invokes it; global receivers
+// get an access check first. Misses go to the CallIC_Miss runtime
+// entry.
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -----------------------------------
+
+ Label miss, probe, global;
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack; 2 ~ return address, receiver.
+ __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Check that the receiver is a valid JS object.
+ __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ __ cmp(eax, FIRST_JS_OBJECT_TYPE);
+ __ j(less, &miss, not_taken);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object.
+ __ cmp(eax, JS_GLOBAL_OBJECT_TYPE);
+ __ j(equal, &global, not_taken);
+
+ // Search the dictionary placing the result in edx.
+ __ bind(&probe);
+ GenerateDictionaryLoad(masm, &miss, eax, edx, ebx, ecx);
+
+ // Move the result to register edi and check that it isn't a smi.
+ __ mov(edi, Operand(edx));
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Check that the value is a JavaScript function.
+ __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ __ cmp(edx, JS_FUNCTION_TYPE);
+ __ j(not_equal, &miss, not_taken);
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ // Global object access: Check access rights.
+ __ bind(&global);
+ __ CheckAccessGlobal(edx, eax, &miss);
+ __ jmp(&probe);
+
+ // Cache miss: Restore receiver from stack and jump to runtime.
+ __ bind(&miss);
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // 1 ~ return address
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+// Emits a call to the runtime entry |f| inside an internal frame to
+// resolve the callee, then invokes the resolved function (left in edi).
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ // Get the name of the function to call from the stack.
+ // 2 ~ receiver, return address.
+ __ mov(ebx, Operand(esp, (argc + 2) * kPointerSize));
+
+ // Enter an internal frame.
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Push the receiver and the name of the function.
+ __ push(Operand(edx));
+ __ push(Operand(ebx));
+
+ // Call the entry.
+ CEntryStub stub;
+ __ mov(Operand(eax), Immediate(2 - 1)); // do not count receiver
+ __ mov(Operand(ebx), Immediate(f));
+ __ CallStub(&stub);
+
+ // Move result to edi and exit the internal frame.
+ __ mov(Operand(edi), eax);
+ __ ExitFrame(StackFrame::INTERNAL);
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+}
+
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+// Emits the megamorphic LoadIC: probes the stub cache for a
+// monomorphic LOAD_IC stub and falls through to the runtime miss
+// handler if the probe fails.
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, eax, ecx, ebx);
+
+ // Cache miss: Jump to runtime.
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+// Emits the normal (dictionary-mode) LoadIC: loads the property from
+// the receiver's property dictionary; global receivers get an access
+// check first. Misses go to the LoadIC_Miss runtime entry.
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ Label miss, probe, global;
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Check that the receiver is a valid JS object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ __ cmp(edx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, &miss, not_taken);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object (unlikely).
+ __ cmp(edx, JS_GLOBAL_OBJECT_TYPE);
+ __ j(equal, &global, not_taken);
+
+ // Search the dictionary placing the result in eax.
+ __ bind(&probe);
+ GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
+ __ ret(0);
+
+ // Global object access: Check access rights.
+ __ bind(&global);
+ __ CheckAccessGlobal(eax, edx, &miss);
+ __ jmp(&probe);
+
+ // Cache miss: Restore receiver from stack and jump to runtime.
+ __ bind(&miss);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+// Emits the LoadIC miss handler: tail-calls the LoadIC_Miss runtime
+// entry via the shared Generate helper below.
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+// Emits a tail call to the runtime entry |f| for a property load.
+// Rearranges the stack so receiver and name sit below the return
+// address as arguments.
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ __ mov(eax, Operand(esp, kPointerSize));
+
+ // Move the return address below the arguments.
+ __ pop(ebx);
+ __ push(eax);
+ __ push(ecx);
+ __ push(ebx);
+
+ // Set the number of arguments and jump to the entry.
+ __ mov(Operand(eax), Immediate(1)); // not counting receiver.
+ __ JumpToBuiltin(f);
+}
+
+
+// Defined in ic.cc.
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+// Emits the KeyedLoadIC miss handler: tail-calls the
+// KeyedLoadIC_Miss runtime entry via the shared Generate helper below.
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8] : receiver
+ // -----------------------------------
+
+ Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+// Emits a tail call to the runtime entry |f| for a keyed load.
+// Rearranges the stack so receiver and name sit below the return
+// address as arguments.
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8] : receiver
+ // -----------------------------------
+
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
+
+ // Move the return address below the arguments.
+ __ pop(ebx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);
+
+ // Set the number of arguments and jump to the entry.
+ __ mov(Operand(eax), Immediate(1)); // not counting receiver.
+ __ JumpToBuiltin(f);
+}
+
+
+// Defined in ic.cc.
+Object* StoreIC_Miss(Arguments args);
+
+// Emits the megamorphic StoreIC: probes the stub cache for a
+// monomorphic STORE_IC stub and falls through to the runtime miss
+// handler if the probe fails.
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache. Use
+ // kPointerSize rather than a raw 4 for consistency with the other
+ // stubs in this file.
+ __ mov(edx, Operand(esp, kPointerSize));
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
+
+ // Cache miss: Jump to runtime.
+ Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+// Emits a tail call to the runtime entry |f| for a property store.
+// Rearranges the stack so receiver, name and value sit below the
+// return address as arguments.
+void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ // Move the return address below the arguments.
+ __ pop(ebx);
+ __ push(Operand(esp, 0));
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);
+
+ // Set the number of arguments and jump to the entry.
+ __ Set(eax, Immediate(2)); // not counting receiver.
+ __ JumpToBuiltin(f);
+}
+
+
+// Defined in ic.cc.
+Object* KeyedStoreIC_Miss(Arguments args);
+
+// Emits a tail call to the runtime entry |f| for a keyed store.
+// Duplicates receiver and key below the return address so they become
+// the call arguments together with the value.
+void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+
+ // Move the return address below the arguments.
+ __ pop(ecx);
+ __ push(Operand(esp, 1 * kPointerSize));
+ __ push(Operand(esp, 1 * kPointerSize));
+ __ push(eax);
+ __ push(ecx);
+
+ // Do tail-call to runtime routine.
+ __ Set(eax, Immediate(2)); // not counting receiver
+ __ JumpToBuiltin(f);
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IC_INL_H_
+#define V8_IC_INL_H_
+
+#include "ic.h"
+#include "debug.h"
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+
+Address IC::address() {
+ // The IC call instruction is a fixed distance before the stored
+ // return address.
+ Address call_address = pc() - Assembler::kTargetAddrToReturnAddrDist;
+
+ // Without any active break points the call site can be returned
+ // directly.
+ if (!Debug::has_break_points()) return call_address;
+
+ // At least one break point is active; check whether this particular
+ // call site has been patched to a debug break.
+ if (!Debug::IsDebugBreak(Assembler::target_address_at(call_address))) {
+ // No break point at this call site; return the address of the call.
+ return call_address;
+ }
+
+ // The call site currently calls a debug break. Return the address in
+ // the original code instead of the address in the running code so the
+ // original code gets updated and the break point stays active in the
+ // running code.
+ return OriginalCodeAddress();
+}
+
+
+// Returns the IC stub Code object whose instructions the call at
+// |address| targets; the Code object starts kHeaderSize bytes before
+// the target address.
+Code* IC::GetTargetAtAddress(Address address) {
+ Address target = Assembler::target_address_at(address);
+ HeapObject* code = HeapObject::FromAddress(target - Code::kHeaderSize);
+ // GetTargetAtAddress is called from IC::Clear which in turn is
+ // called when marking objects during mark sweep. reinterpret_cast
+ // is therefore used instead of the more appropriate
+ // Code::cast. Code::cast does not work when the object's map is
+ // marked.
+ Code* result = reinterpret_cast<Code*>(code);
+ ASSERT(result->is_inline_cache_stub());
+ return result;
+}
+
+
+// Patches the call at |address| to jump to |target|'s first
+// instruction; |target| must be an inline cache stub.
+void IC::SetTargetAtAddress(Address address, Code* target) {
+ ASSERT(target->is_inline_cache_stub());
+ Assembler::set_target_address_at(address, target->instruction_start());
+}
+
+
+// Returns the map used to index the code cache for |object|: the
+// object's own map for JSObjects, otherwise the prototype's map.
+Map* IC::GetCodeCacheMapForObject(Object* object) {
+ if (object->IsJSObject()) return JSObject::cast(object)->map();
+ // If the object is a value, we use the prototype map for the cache.
+ ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
+ return JSObject::cast(object->GetPrototype())->map();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_IC_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "arguments.h"
+#include "execution.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+DEFINE_bool(trace_ic, false, "trace inline cache state transitions");
+#endif
+DEFINE_bool(use_ic, true, "use inline caching");
+DECLARE_bool(strict);
+
+
+#ifdef DEBUG
+// Maps an IC state to the single-character tag printed in --trace_ic
+// transition lines.
+static char TransitionMarkFromState(IC::State state) {
+ switch (state) {
+ case UNINITIALIZED:
+ case PREMONOMORPHIC:
+ return '0';
+ case MONOMORPHIC:
+ return '1';
+ case MONOMORPHIC_PROTOTYPE_FAILURE:
+ return '^';
+ case MEGAMORPHIC:
+ return 'N';
+ // We never see the debugger states here, because the state is
+ // computed from the original code - not the patched code. Let
+ // these cases fall through to the unreachable code below.
+ case DEBUG_BREAK:
+ case DEBUG_PREPARE_STEP_IN:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+// Prints a one-line trace of an IC state transition (old state ->
+// new state, plus the property name) when --trace_ic is enabled.
+void IC::TraceIC(const char* type,
+ Handle<String> name,
+ State old_state,
+ Code* new_target) {
+ if (FLAG_trace_ic) {
+ State new_state = StateFrom(new_target, Heap::undefined_value());
+ PrintF("[%s (%c->%c) ", type,
+ TransitionMarkFromState(old_state),
+ TransitionMarkFromState(new_state));
+ name->Print();
+ PrintF("]\n");
+ }
+}
+#endif
+
+
+// Computes the frame pointer and return-address slot of the
+// JavaScript frame that triggered this IC by walking down from the
+// C entry frame, unfolding the frame iteration for speed.
+IC::IC(FrameDepth depth) {
+ // To improve the performance of the (much used) IC code, we unfold
+ // a few levels of the stack frame iteration code. This yields a
+ // ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
+ const Address entry = Top::c_entry_fp(Top::GetCurrentThread());
+ Address* pc_address =
+ reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
+ Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+ // If there's another JavaScript frame on the stack, we need to look
+ // one frame further down the stack to find the frame pointer and
+ // the return address stack slot.
+ if (depth == EXTRA_CALL_FRAME) {
+ const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
+ pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
+ fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+ }
+#ifdef DEBUG
+ // Sanity check: the unfolded computation above must agree with the
+ // general-purpose stack frame iterator.
+ StackFrameIterator it;
+ for (int i = 0; i < depth + 1; i++) it.Advance();
+ StackFrame* frame = it.frame();
+ ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
+#endif
+ fp_ = fp;
+ pc_address_ = pc_address;
+}
+
+
+// When the debugger has patched the active code with DebugBreak calls,
+// translate this IC's call-site address in the patched code into the
+// corresponding address in the original (unpatched) code object, which
+// is where the inline cache system should look.
+Address IC::OriginalCodeAddress() {
+ HandleScope scope;
+ // Compute the JavaScript frame for the frame pointer of this IC
+ // structure. We need this to be able to find the function
+ // corresponding to the frame.
+ StackFrameIterator it;
+ while (it.frame()->fp() != this->fp()) it.Advance();
+ JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
+ // Find the function on the stack and both the active code for the
+ // function and the original code.
+ JSFunction* function = JSFunction::cast(frame->function());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Code* code = shared->code();
+ ASSERT(Debug::HasDebugInfo(shared));
+ Code* original_code = Debug::GetDebugInfo(shared)->original_code();
+ ASSERT(original_code->IsCode());
+ // Get the address of the call site in the active code. This is the
+ // place where the call to DebugBreakXXX is and where the IC
+ // normally would be.
+ Address addr = pc() - Assembler::kTargetAddrToReturnAddrDist;
+ // Return the address in the original code. This is the place where
+ // the call which has been overwritten by the DebugBreakXXX resides
+ // and the place where the inline cache system should look.
+ int delta = original_code->instruction_start() - code->instruction_start();
+ return addr + delta;
+}
+
+
+// Compute the effective state of target for the given receiver. For a
+// monomorphic target, distinguish a plain monomorphic hit from a
+// prototype-chain failure by checking whether the target is still in
+// the receiver map's code cache.
+IC::State IC::StateFrom(Code* target, Object* receiver) {
+ IC::State state = target->state();
+
+ if (state != MONOMORPHIC) return state;
+ if (receiver->IsUndefined() || receiver->IsNull()) return state;
+
+ Map* map = GetCodeCacheMapForObject(receiver);
+
+ // Decide whether the inline cache failed because of changes to the
+ // receiver itself or changes to one of its prototypes.
+ //
+ // If there are changes to the receiver itself, the map of the
+ // receiver will have changed and the current target will not be in
+ // the receiver map's code cache. Therefore, if the current target
+ // is in the receiver map's code cache, the inline cache failed due
+ // to prototype check failure.
+ if (map->IncludedInCodeCache(target)) {
+ // For keyed load/store, the most likely cause of cache failure is
+ // that the key has changed. We do not distinguish between
+ // prototype and non-prototype failures for keyed access.
+ Code::Kind kind = target->kind();
+ if (kind == Code::KEYED_LOAD_IC || kind == Code::KEYED_STORE_IC) {
+ return MONOMORPHIC;
+ }
+
+ // Clear the code cache for this map to avoid hitting the same
+ // invalid stub again. It seems likely that most of the code in
+ // the cache is invalid if one of the stubs is so we flush the
+ // entire code cache.
+ map->ClearCodeCache();
+
+ return MONOMORPHIC_PROTOTYPE_FAILURE;
+ }
+ return MONOMORPHIC;
+}
+
+
+// Find the relocation mode of the code-target relocation entry at this
+// IC's call-site address by scanning the enclosing code object's
+// relocation info. The entry must exist, hence the UNREACHABLE fallback.
+RelocMode IC::ComputeMode() {
+ Address addr = address();
+ Code* code = Code::cast(Heap::FindCodeObject(addr));
+ for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
+ !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->pc() == addr) return info->rmode();
+ }
+ UNREACHABLE();
+ return no_reloc;
+}
+
+
+// Construct a TypeError of the given template type with (name, object)
+// as its arguments and make it the pending exception.
+Failure* IC::TypeError(const char* type,
+ Handle<Object> object,
+ Handle<String> name) {
+ HandleScope scope;
+ Handle<Object> arguments[2] = { name, object };
+ return Top::Throw(*Factory::NewTypeError(type, HandleVector(arguments, 2)));
+}
+
+
+// Construct a ReferenceError of the given template type for name and
+// make it the pending exception.
+Failure* IC::ReferenceError(const char* type, Handle<String> name) {
+ HandleScope scope;
+ return Top::Throw(*Factory::NewReferenceError(type, HandleVector(&name, 1)));
+}
+
+
+// Reset the inline cache at address to its initial state, dispatching
+// on the kind of the currently installed target code.
+void IC::Clear(Address address) {
+ Code* target = GetTargetAtAddress(address);
+
+ // Don't clear debug break inline cache as it will remove the break point.
+ if (target->state() == DEBUG_BREAK) return;
+
+ switch (target->kind()) {
+ case Code::LOAD_IC: return LoadIC::Clear(address, target);
+ case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target);
+ case Code::STORE_IC: return StoreIC::Clear(address, target);
+ case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
+ case Code::CALL_IC: return CallIC::Clear(address, target);
+ default: UNREACHABLE();
+ }
+}
+
+
+// Rewrite a call IC back to the initialize stub for its argument count;
+// a no-op if the site was never executed.
+void CallIC::Clear(Address address, Code* target) {
+ if (target->state() == UNINITIALIZED) return;
+ Code* code = StubCache::FindCallInitialize(target->arguments_count());
+ SetTargetAtAddress(address, code);
+}
+
+
+// Rewrite a keyed load IC back to its initialize stub; a no-op if the
+// site was never executed.
+void KeyedLoadIC::Clear(Address address, Code* target) {
+ if (target->state() == UNINITIALIZED) return;
+ SetTargetAtAddress(address, initialize_stub());
+}
+
+
+// Rewrite a load IC back to its initialize stub; a no-op if the site
+// was never executed.
+void LoadIC::Clear(Address address, Code* target) {
+ if (target->state() == UNINITIALIZED) return;
+ SetTargetAtAddress(address, initialize_stub());
+}
+
+
+// Rewrite a store IC back to its initialize stub; a no-op if the site
+// was never executed.
+void StoreIC::Clear(Address address, Code* target) {
+ if (target->state() == UNINITIALIZED) return;
+ SetTargetAtAddress(address, initialize_stub());
+}
+
+
+// Rewrite a keyed store IC back to its initialize stub; a no-op if the
+// site was never executed.
+void KeyedStoreIC::Clear(Address address, Code* target) {
+ if (target->state() == UNINITIALIZED) return;
+ SetTargetAtAddress(address, initialize_stub());
+}
+
+
+// Resolve the callee for a named call on object. Throws TypeError /
+// ReferenceError for missing receivers or properties, updates the call
+// IC caches from the lookup result, floods debug-stepped callees with
+// one-shot break points, and falls back to a function delegate when the
+// looked-up property is callable but not a JSFunction.
+Object* CallIC::LoadFunction(State state,
+ Handle<Object> object,
+ Handle<String> name) {
+ // If the object is undefined or null it's illegal to try to get any
+ // of its properties; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_call", object, name);
+ }
+
+ // Lookup the property in the object.
+ LookupResult lookup;
+ object->Lookup(*name, &lookup);
+
+ Object* result = Heap::the_hole_value();
+
+ if (!lookup.IsValid()) {
+ // If the object does not have the requested property, check which
+ // exception we need to throw.
+ if (is_contextual()) {
+ return ReferenceError("not_defined", name);
+ }
+ return TypeError("undefined_method", object, name);
+ }
+
+ // Lookup is valid: Update inline cache and stub cache.
+ if (FLAG_use_ic && lookup.IsLoaded()) {
+ UpdateCaches(&lookup, state, object, name);
+ }
+
+ if (lookup.type() == INTERCEPTOR) {
+ // Get the property.
+ PropertyAttributes attr;
+ result = object->GetProperty(*name, &attr);
+ if (result->IsFailure()) return result;
+ // If the object does not have the requested property, check which
+ // exception we need to throw.
+ if (attr == ABSENT) {
+ if (is_contextual()) {
+ return ReferenceError("not_defined", name);
+ }
+ return TypeError("undefined_method", object, name);
+ }
+ } else {
+ // Lookup is valid and no interceptors are involved. Get the
+ // property.
+ result = object->GetProperty(*name);
+ if (result->IsFailure()) return result;
+ }
+
+ ASSERT(result != Heap::the_hole_value());
+
+ if (result->IsJSFunction()) {
+ // Check if there is an optimized (builtin) version of the function.
+ // Skipping this would degrade performance for Array.prototype.{push,pop}.
+ // Please note we only return the optimized function iff
+ // the JSObject has FastElements.
+ if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
+ Object* opt = Top::LookupSpecialFunction(JSObject::cast(*object),
+ lookup.holder(),
+ JSFunction::cast(result));
+ if (opt->IsJSFunction()) return opt;
+ }
+
+ // If performing debug step into then flood this function with one-shot
+ // break points if it is called from where step into was requested.
+ if (Debug::StepInActive() && fp() == Debug::step_in_fp()) {
+ // Don't allow step into functions in the native context.
+ if (JSFunction::cast(result)->context()->global() !=
+ Top::context()->builtins()) {
+ HandleScope scope;
+ Handle<SharedFunctionInfo> shared(JSFunction::cast(result)->shared());
+ Debug::FloodWithOneShot(shared);
+ }
+ }
+ return result;
+ }
+
+ // Try to find a suitable function delegate for the object at hand.
+ HandleScope scope;
+ Handle<Object> target(result);
+ Handle<Object> delegate = Execution::GetFunctionDelegate(target);
+
+ if (delegate->IsJSFunction()) {
+ // Patch the receiver and use the delegate as the function to
+ // invoke. This is used for invoking objects as if they were
+ // functions.
+ const int argc = this->target()->arguments_count();
+ StackFrameLocator locator;
+ JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+ int index = frame->ComputeExpressionsCount() - (argc + 1);
+ frame->SetExpression(index, *target);
+ return *delegate;
+ } else {
+ return TypeError("property_not_function", object, name);
+ }
+}
+
+
+// Compute and install the next stub for a call site: premonomorphic on
+// first execution, megamorphic once a monomorphic site misses again,
+// otherwise a monomorphic stub specialized on the lookup result type.
+void CallIC::UpdateCaches(LookupResult* lookup,
+ State state,
+ Handle<Object> object,
+ Handle<String> name) {
+ ASSERT(lookup->IsLoaded());
+ // Bail out if we didn't find a result.
+ if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+ // Compute the number of arguments.
+ int argc = target()->arguments_count();
+ Object* code = NULL;
+
+ if (state == UNINITIALIZED) {
+ // This is the first time we execute this inline cache.
+ // Set the target to the pre monomorphic stub to delay
+ // setting the monomorphic state.
+ code = StubCache::ComputeCallPreMonomorphic(argc);
+ } else if (state == MONOMORPHIC) {
+ code = StubCache::ComputeCallMegamorphic(argc);
+ } else {
+ // Compute monomorphic stub.
+ switch (lookup->type()) {
+ case FIELD: {
+ int index = lookup->GetFieldIndex();
+ code = StubCache::ComputeCallField(argc, *name, *object,
+ lookup->holder(), index);
+ break;
+ }
+ case CONSTANT_FUNCTION: {
+ // Get the constant function and compute the code stub for this
+ // call; used for rewriting to monomorphic state and making sure
+ // that the code stub is in the stub cache.
+ JSFunction* function = lookup->GetConstantFunction();
+ code = StubCache::ComputeCallConstant(argc, *name, *object,
+ lookup->holder(), function);
+ break;
+ }
+ case NORMAL: {
+ // There is only one shared stub for calling normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the receiver for the stub to be
+ // applicable.
+ if (!object->IsJSObject()) return;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (lookup->holder() != *receiver) return;
+ code = StubCache::ComputeCallNormal(argc, *name, *receiver);
+ break;
+ }
+ case INTERCEPTOR: {
+ code = StubCache::ComputeCallInterceptor(argc, *name, *object,
+ lookup->holder());
+ break;
+ }
+ default:
+ // Other property types are not cached for calls.
+ return;
+ }
+ }
+
+ // If we're unable to compute the stub (not enough memory left), we
+ // simply avoid updating the caches.
+ if (code->IsFailure()) return;
+
+ // Patch the call site depending on the state of the cache.
+ if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
+ state == MONOMORPHIC || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
+ set_target(Code::cast(code));
+ }
+
+#ifdef DEBUG
+ TraceIC("CallIC", name, state, target());
+#endif
+}
+
+
+// Load property name from object. Installs specialized built-in stubs
+// for string/array length and function prototype, handles array-index
+// names via GetElement, updates the inline cache from the lookup result,
+// and finally performs the actual property load.
+Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
+ // If the object is undefined or null it's illegal to try to get any
+ // of its properties; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_load", object, name);
+ }
+
+ if (FLAG_use_ic) {
+ // Use specialized code for getting the length of strings.
+ if (object->IsString() && name->Equals(Heap::length_symbol())) {
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
+ // Pick the builtin matching the string's length encoding.
+ Code* target = NULL;
+ if (object->IsShortString()) {
+ target = Builtins::builtin(Builtins::LoadIC_ShortStringLength);
+ } else if (object->IsMediumString()) {
+ target = Builtins::builtin(Builtins::LoadIC_MediumStringLength);
+ } else {
+ ASSERT(object->IsLongString());
+ target = Builtins::builtin(Builtins::LoadIC_LongStringLength);
+ }
+ set_target(target);
+ StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+ return Smi::FromInt(String::cast(*object)->length());
+ }
+
+ // Use specialized code for getting the length of arrays.
+ if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
+#endif
+ Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
+ set_target(target);
+ StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+ return JSArray::cast(*object)->length();
+ }
+
+ // Use specialized code for getting prototype of functions.
+ if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+#endif
+ Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
+ set_target(target);
+ StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+ return Accessors::FunctionGetPrototype(*object, 0);
+ }
+ }
+
+ // Check if the name is trivially convertible to an index and get
+ // the element if so.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) return object->GetElement(index);
+
+ // Named lookup in the object.
+ LookupResult lookup;
+ object->Lookup(*name, &lookup);
+
+ // If lookup is invalid, check if we need to throw an exception.
+ if (!lookup.IsValid()) {
+ if (FLAG_strict || is_contextual()) {
+ return ReferenceError("not_defined", name);
+ }
+ // Log the suspect read for diagnostics before loading undefined.
+ String* class_name = object->IsJSObject()
+ ? Handle<JSObject>::cast(object)->class_name()
+ : Heap::empty_string();
+ LOG(SuspectReadEvent(*name, class_name));
+ USE(class_name);
+ }
+
+ // Update inline cache and stub cache.
+ if (FLAG_use_ic && lookup.IsLoaded()) {
+ UpdateCaches(&lookup, state, object, name);
+ }
+
+ PropertyAttributes attr;
+ if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+ // Get the property.
+ Object* result = object->GetProperty(*object, &lookup, *name, &attr);
+ if (result->IsFailure()) return result;
+ // If the property is not present, check if we need to throw an
+ // exception.
+ if (attr == ABSENT && is_contextual()) {
+ return ReferenceError("not_defined", name);
+ }
+ return result;
+ }
+
+ // Get the property.
+ return object->GetProperty(*object, &lookup, *name, &attr);
+}
+
+
+// Compute and install the next stub for a named load site:
+// premonomorphic on first execution, a monomorphic stub specialized on
+// the lookup result otherwise, and the megamorphic stub when a
+// monomorphic site misses again.
+void LoadIC::UpdateCaches(LookupResult* lookup,
+ State state,
+ Handle<Object> object,
+ Handle<String> name) {
+ ASSERT(lookup->IsLoaded());
+ // Bail out if we didn't find a result.
+ if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+ // Loading properties from values is not common, so don't try to
+ // deal with non-JS objects here.
+ if (!object->IsJSObject()) return;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+ // Compute the code stub for this load.
+ Object* code = NULL;
+ if (state == UNINITIALIZED) {
+ // This is the first time we execute this inline cache.
+ // Set the target to the pre monomorphic stub to delay
+ // setting the monomorphic state.
+ code = pre_monomorphic_stub();
+ } else {
+ // Compute monomorphic stub.
+ switch (lookup->type()) {
+ case FIELD: {
+ code = StubCache::ComputeLoadField(*name, *receiver,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ break;
+ }
+ case CONSTANT_FUNCTION: {
+ Object* constant = lookup->GetConstantFunction();
+ code = StubCache::ComputeLoadConstant(*name, *receiver,
+ lookup->holder(), constant);
+ break;
+ }
+ case NORMAL: {
+ // There is only one shared stub for loading normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the receiver for the stub to be
+ // applicable.
+ if (lookup->holder() != *receiver) return;
+ code = StubCache::ComputeLoadNormal(*name, *receiver);
+ break;
+ }
+ case CALLBACKS: {
+ // Only AccessorInfo callbacks with a non-null getter can be
+ // compiled into a stub.
+ if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+ AccessorInfo* callback =
+ AccessorInfo::cast(lookup->GetCallbackObject());
+ if (v8::ToCData<Address>(callback->getter()) == 0) return;
+ code = StubCache::ComputeLoadCallback(*name, *receiver,
+ lookup->holder(), callback);
+ break;
+ }
+ case INTERCEPTOR: {
+ code = StubCache::ComputeLoadInterceptor(*name, *receiver,
+ lookup->holder());
+ break;
+ }
+ default:
+ // Other property types are not cached for loads.
+ return;
+ }
+ }
+
+ // If we're unable to compute the stub (not enough memory left), we
+ // simply avoid updating the caches.
+ if (code->IsFailure()) return;
+
+ // Patch the call site depending on the state of the cache.
+ if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
+ state == MONOMORPHIC_PROTOTYPE_FAILURE) {
+ set_target(Code::cast(code));
+ } else if (state == MONOMORPHIC) {
+ set_target(megamorphic_stub());
+ }
+
+#ifdef DEBUG
+ TraceIC("LoadIC", name, state, target());
+#endif
+}
+
+
+// Load property key from object. Symbol keys follow the named-load path
+// with keyed variants of the specialized stubs; all other keys rewrite
+// the IC to the generic stub and go through the runtime.
+Object* KeyedLoadIC::Load(State state,
+ Handle<Object> object,
+ Handle<Object> key) {
+ if (key->IsSymbol()) {
+ Handle<String> name = Handle<String>::cast(key);
+
+ // If the object is undefined or null it's illegal to try to get any
+ // of its properties; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_load", object, name);
+ }
+
+ if (FLAG_use_ic) {
+ // Use specialized code for getting the length of strings.
+ if (object->IsString() && name->Equals(Heap::length_symbol())) {
+ Handle<String> string = Handle<String>::cast(object);
+ // Pick the stub matching the string's length encoding.
+ Object* code = NULL;
+ if (string->IsShortString()) {
+ code = StubCache::ComputeKeyedLoadShortStringLength(*name, *string);
+ } else if (string->IsMediumString()) {
+ code =
+ StubCache::ComputeKeyedLoadMediumStringLength(*name, *string);
+ } else {
+ ASSERT(string->IsLongString());
+ code = StubCache::ComputeKeyedLoadLongStringLength(*name, *string);
+ }
+ if (code->IsFailure()) return code;
+ set_target(Code::cast(code));
+#ifdef DEBUG
+ TraceIC("KeyedLoadIC", name, state, target());
+#endif
+ return Smi::FromInt(string->length());
+ }
+
+ // Use specialized code for getting the length of arrays.
+ if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ Object* code = StubCache::ComputeKeyedLoadArrayLength(*name, *array);
+ if (code->IsFailure()) return code;
+ set_target(Code::cast(code));
+#ifdef DEBUG
+ TraceIC("KeyedLoadIC", name, state, target());
+#endif
+ return JSArray::cast(*object)->length();
+ }
+
+ // Use specialized code for getting prototype of functions.
+ if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+ Object* code =
+ StubCache::ComputeKeyedLoadFunctionPrototype(*name, *function);
+ if (code->IsFailure()) return code;
+ set_target(Code::cast(code));
+#ifdef DEBUG
+ TraceIC("KeyedLoadIC", name, state, target());
+#endif
+ return Accessors::FunctionGetPrototype(*object, 0);
+ }
+ }
+
+ // Check if the name is trivially convertible to an index and get
+ // the element or char if so.
+ uint32_t index = 0;
+ if (name->AsArrayIndex(&index)) {
+ HandleScope scope;
+ // Rewrite to the generic keyed load stub.
+ if (FLAG_use_ic) set_target(generic_stub());
+ return Runtime::GetElementOrCharAt(object, index);
+ }
+
+ // Named lookup.
+ LookupResult lookup;
+ object->Lookup(*name, &lookup);
+
+ // If lookup is invalid, check if we need to throw an exception.
+ if (!lookup.IsValid()) {
+ if (FLAG_strict || is_contextual()) {
+ return ReferenceError("not_defined", name);
+ }
+ }
+
+ // Update the inline cache.
+ if (FLAG_use_ic && lookup.IsLoaded()) {
+ UpdateCaches(&lookup, state, object, name);
+ }
+
+ PropertyAttributes attr;
+ if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+ // Get the property.
+ Object* result = object->GetProperty(*object, &lookup, *name, &attr);
+ if (result->IsFailure()) return result;
+ // If the property is not present, check if we need to throw an
+ // exception.
+ if (attr == ABSENT && is_contextual()) {
+ return ReferenceError("not_defined", name);
+ }
+ return result;
+ }
+
+ return object->GetProperty(*object, &lookup, *name, &attr);
+ }
+
+ // Do not use ICs for objects that require access checks (including
+ // the global object).
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+
+ if (use_ic) set_target(generic_stub());
+
+ // Get the property.
+ return Runtime::GetObjectProperty(object, *key);
+}
+
+
+// Compute and install the next stub for a keyed (symbol-named) load
+// site. Unlike LoadIC, uncacheable lookup types rewrite straight to the
+// generic stub so the site is not repeatedly re-patched.
+void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
+ Handle<Object> object, Handle<String> name) {
+ ASSERT(lookup->IsLoaded());
+ // Bail out if we didn't find a result.
+ if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+ if (!object->IsJSObject()) return;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+ // Compute the code stub for this load.
+ Object* code = NULL;
+
+ if (state == UNINITIALIZED) {
+ // This is the first time we execute this inline cache.
+ // Set the target to the pre monomorphic stub to delay
+ // setting the monomorphic state.
+ code = pre_monomorphic_stub();
+ } else {
+ // Compute a monomorphic stub.
+ switch (lookup->type()) {
+ case FIELD: {
+ code = StubCache::ComputeKeyedLoadField(*name, *receiver,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ break;
+ }
+ case CONSTANT_FUNCTION: {
+ Object* constant = lookup->GetConstantFunction();
+ code = StubCache::ComputeKeyedLoadConstant(*name, *receiver,
+ lookup->holder(), constant);
+ break;
+ }
+ case CALLBACKS: {
+ // Only AccessorInfo callbacks with a non-null getter can be
+ // compiled into a stub.
+ if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+ AccessorInfo* callback =
+ AccessorInfo::cast(lookup->GetCallbackObject());
+ if (v8::ToCData<Address>(callback->getter()) == 0) return;
+ code = StubCache::ComputeKeyedLoadCallback(*name, *receiver,
+ lookup->holder(), callback);
+ break;
+ }
+ case INTERCEPTOR: {
+ code = StubCache::ComputeKeyedLoadInterceptor(*name, *receiver,
+ lookup->holder());
+ break;
+ }
+ default: {
+ // Always rewrite to the generic case so that we do not
+ // repeatedly try to rewrite.
+ code = generic_stub();
+ break;
+ }
+ }
+ }
+
+ // If we're unable to compute the stub (not enough memory left), we
+ // simply avoid updating the caches.
+ if (code->IsFailure()) return;
+
+ // Patch the call site depending on the state of the cache. Make
+ // sure to always rewrite from monomorphic to megamorphic.
+ ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
+ if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
+ set_target(Code::cast(code));
+ } else if (state == MONOMORPHIC) {
+ set_target(megamorphic_stub());
+ }
+
+#ifdef DEBUG
+ TraceIC("KeyedLoadIC", name, state, target());
+#endif
+}
+
+
+// Store value into object.name. Non-JSObject receivers are silently
+// ignored (the value is returned unchanged), array-index names go
+// through SetElement, and otherwise the caches are updated from a local
+// lookup before the property is actually set.
+Object* StoreIC::Store(State state,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value) {
+ // If the object is undefined or null it's illegal to try to set any
+ // properties on it; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_store", object, name);
+ }
+
+ // Ignore stores where the receiver is not a JSObject.
+ if (!object->IsJSObject()) return *value;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+ // Check if the given name is an array index.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ SetElement(receiver, index, value);
+ return *value;
+ }
+
+ // Lookup the property locally in the receiver.
+ LookupResult lookup;
+ receiver->LocalLookup(*name, &lookup);
+
+ // Update inline cache and stub cache.
+ if (FLAG_use_ic && lookup.IsLoaded()) {
+ UpdateCaches(&lookup, state, receiver, name, value);
+ }
+
+ // Set the property.
+ return receiver->SetProperty(*name, *value, NONE);
+}
+
+
+// Compute and install the next stub for a named store site based on the
+// type of the locally looked-up property. Read-only properties and
+// uncacheable lookup types leave the IC untouched.
+void StoreIC::UpdateCaches(LookupResult* lookup,
+ State state,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
+ ASSERT(lookup->IsLoaded());
+ // Bail out if we didn't find a result.
+ if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+ // If the property is read-only, we leave the IC in its current
+ // state.
+ if (lookup->IsReadOnly()) return;
+
+ // If the property has a non-field type allowing map transitions
+ // where there is extra room in the object, we leave the IC in its
+ // current state.
+ PropertyType type = lookup->type();
+
+ // Compute the code stub for this store; used for rewriting to
+ // monomorphic state and making sure that the code stub is in the
+ // stub cache.
+ Object* code = NULL;
+ switch (type) {
+ case FIELD: {
+ code = StubCache::ComputeStoreField(*name, *receiver,
+ lookup->GetFieldIndex());
+ break;
+ }
+ case MAP_TRANSITION: {
+ // Only plain (attribute-free) transitions with room left in the
+ // object are compiled into a field store with a map transition.
+ if (lookup->GetAttributes() != NONE) return;
+ if (receiver->map()->unused_property_fields() == 0) return;
+ HandleScope scope;
+ ASSERT(type == MAP_TRANSITION &&
+ (receiver->map()->unused_property_fields() > 0));
+ Handle<Map> transition(lookup->GetTransitionMap());
+ int index = transition->PropertyIndexFor(*name);
+ code = StubCache::ComputeStoreField(*name, *receiver, index, *transition);
+ break;
+ }
+ case CALLBACKS: {
+ // Only AccessorInfo callbacks with a non-null setter can be
+ // compiled into a stub.
+ if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ if (v8::ToCData<Address>(callback->setter()) == 0) return;
+ code = StubCache::ComputeStoreCallback(*name, *receiver, callback);
+ break;
+ }
+ case INTERCEPTOR: {
+ code = StubCache::ComputeStoreInterceptor(*name, *receiver);
+ break;
+ }
+ default:
+ // Other property types are not cached for stores.
+ return;
+ }
+
+ // If we're unable to compute the stub (not enough memory left), we
+ // simply avoid updating the caches.
+ if (code->IsFailure()) return;
+
+ // Patch the call site depending on the state of the cache.
+ if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
+ set_target(Code::cast(code));
+ } else if (state == MONOMORPHIC) {
+ set_target(megamorphic_stub());
+ }
+
+#ifdef DEBUG
+ TraceIC("StoreIC", name, state, target());
+#endif
+}
+
+
+// Store value into object[key]. Symbol keys follow the named-store path
+// with keyed stubs; all other keys rewrite the IC to the generic stub
+// and go through the runtime.
+Object* KeyedStoreIC::Store(State state,
+ Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value) {
+ if (key->IsSymbol()) {
+ Handle<String> name = Handle<String>::cast(key);
+
+ // If the object is undefined or null it's illegal to try to set any
+ // properties on it; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_store", object, name);
+ }
+
+ // Ignore stores where the receiver is not a JSObject.
+ if (!object->IsJSObject()) return *value;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+ // Check if the given name is an array index.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ SetElement(receiver, index, value);
+ return *value;
+ }
+
+ // Lookup the property locally in the receiver.
+ LookupResult lookup;
+ receiver->LocalLookup(*name, &lookup);
+
+ // Update inline cache and stub cache.
+ if (FLAG_use_ic && lookup.IsLoaded()) {
+ UpdateCaches(&lookup, state, receiver, name, value);
+ }
+
+ // Set the property.
+ return receiver->SetProperty(*name, *value, NONE);
+ }
+
+ // Do not use ICs for objects that require access checks (including
+ // the global object).
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+
+ if (use_ic) set_target(generic_stub());
+
+ // Set the property.
+ return Runtime::SetObjectProperty(object, key, value, NONE);
+}
+
+
+// Compute and install the next stub for a keyed (symbol-named) store
+// site. Only field stores and plain map transitions get specialized
+// stubs; everything else rewrites to the generic stub so the site is
+// not repeatedly re-patched.
+void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
+ State state,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
+ ASSERT(lookup->IsLoaded());
+ // Bail out if we didn't find a result.
+ if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+ // If the property is read-only, we leave the IC in its current
+ // state.
+ if (lookup->IsReadOnly()) return;
+
+ // If the property has a non-field type allowing map transitions
+ // where there is extra room in the object, we leave the IC in its
+ // current state.
+ PropertyType type = lookup->type();
+
+ // Compute the code stub for this store; used for rewriting to
+ // monomorphic state and making sure that the code stub is in the
+ // stub cache.
+ Object* code = NULL;
+
+ switch (type) {
+ case FIELD: {
+ code = StubCache::ComputeKeyedStoreField(*name, *receiver,
+ lookup->GetFieldIndex());
+ break;
+ }
+ case MAP_TRANSITION: {
+ if (lookup->GetAttributes() == NONE) {
+ if (receiver->map()->unused_property_fields() == 0) return;
+ HandleScope scope;
+ ASSERT(type == MAP_TRANSITION &&
+ (receiver->map()->unused_property_fields() > 0));
+ Handle<Map> transition(lookup->GetTransitionMap());
+ int index = transition->PropertyIndexFor(*name);
+ code = StubCache::ComputeKeyedStoreField(*name, *receiver,
+ index, *transition);
+ break;
+ }
+ // Transitions with attributes fall through to the generic stub.
+ // fall through.
+ }
+ default: {
+ // Always rewrite to the generic case so that we do not
+ // repeatedly try to rewrite.
+ code = generic_stub();
+ break;
+ }
+ }
+
+ // If we're unable to compute the stub (not enough memory left), we
+ // simply avoid updating the caches.
+ if (code->IsFailure()) return;
+
+ // Patch the call site depending on the state of the cache. Make
+ // sure to always rewrite from monomorphic to megamorphic.
+ ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
+ if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
+ set_target(Code::cast(code));
+ } else if (state == MONOMORPHIC) {
+ set_target(megamorphic_stub());
+ }
+
+#ifdef DEBUG
+ TraceIC("KeyedStoreIC", name, state, target());
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+// Used from ic_<arch>.cc.
+// Runtime entry for a call IC miss: recompute the state from the current
+// target and receiver (args[0]) and resolve the callee for name args[1].
+Object* CallIC_Miss(Arguments args) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 2);
+ CallIC ic;
+ IC::State state = IC::StateFrom(ic.target(), args[0]);
+ return ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
+}
+
+
+// All three call-IC entry stubs funnel into the same miss handler.
+void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) {
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::GeneratePreMonomorphic(MacroAssembler* masm, int argc) {
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc.
+// Runtime entry for a load IC miss: recompute the state from the current
+// target and receiver (args[0]) and perform the load of name args[1].
+Object* LoadIC_Miss(Arguments args) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 2);
+ LoadIC ic;
+ IC::State state = IC::StateFrom(ic.target(), args[0]);
+ return ic.Load(state, args.at<Object>(0), args.at<String>(1));
+}
+
+
+// Both load-IC entry stubs funnel into the same miss handler.
+void LoadIC::GenerateInitialize(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc
+// Runtime entry for a keyed load IC miss: recompute the state from the
+// current target and receiver (args[0]) and load key args[1].
+Object* KeyedLoadIC_Miss(Arguments args) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 2);
+ KeyedLoadIC ic;
+ IC::State state = IC::StateFrom(ic.target(), args[0]);
+ return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
+}
+
+
+// Both keyed-load-IC entry stubs funnel into the same miss handler.
+void KeyedLoadIC::GenerateInitialize(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc.
+// Runtime entry for a store IC miss: recompute the state from the
+// current target and receiver (args[0]) and store args[2] into name
+// args[1].
+Object* StoreIC_Miss(Arguments args) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 3);
+ StoreIC ic;
+ IC::State state = IC::StateFrom(ic.target(), args[0]);
+ return ic.Store(state, args.at<Object>(0), args.at<String>(1),
+ args.at<Object>(2));
+}
+
+
+// Both store-IC entry stubs funnel into the same miss handler.
+void StoreIC::GenerateInitialize(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc.
+Object* KeyedStoreIC_Miss(Arguments args) {
+  // Runtime entry for a keyed-store IC cache miss; the key is an
+  // arbitrary object, unlike the named-store case.
+  NoHandleAllocation no_allocation;
+  ASSERT(args.length() == 3);
+  KeyedStoreIC keyed_store_ic;
+  IC::State old_state = IC::StateFrom(keyed_store_ic.target(), args[0]);
+  return keyed_store_ic.Store(old_state,
+                              args.at<Object>(0),
+                              args.at<Object>(1),
+                              args.at<Object>(2));
+}
+
+
+void KeyedStoreIC::GenerateInitialize(MacroAssembler* masm) {
+  // The initialize stub forwards to the keyed-store miss utility.
+  const ExternalReference miss =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+  Generate(masm, miss);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Emit the stub that transfers control to the keyed-store miss utility.
+  const ExternalReference miss_handler =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+  Generate(masm, miss_handler);
+}
+
+
+// Table mapping IC::UtilityId values (in declaration order) to the
+// addresses of the corresponding utility functions, terminated by a
+// NULL sentinel. Indexed by IC::AddressFromUtilityId below.
+static Address IC_utilities[] = {
+#define ADDR(name) FUNCTION_ADDR(name),
+  IC_UTIL_LIST(ADDR)
+  NULL
+#undef ADDR
+};
+
+
+Address IC::AddressFromUtilityId(IC::UtilityId id) {
+  // Direct index into the statically initialized utility table above.
+  Address entry = IC_utilities[id];
+  return entry;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IC_H_
+#define V8_IC_H_
+
+#include "assembler.h"
+
+namespace v8 { namespace internal {
+
+// IC_UTIL_LIST defines all utility functions called from generated
+// inline caching code. The argument for the macro, ICU, is the function name.
+// The list is also used to generate the IC::UtilityId enum constants and
+// the IC_utilities address table in ic.cc.
+#define IC_UTIL_LIST(ICU) \
+  ICU(LoadIC_Miss) \
+  ICU(KeyedLoadIC_Miss) \
+  ICU(CallIC_Miss) \
+  ICU(StoreIC_Miss) \
+  ICU(KeyedStoreIC_Miss) \
+  /* Utilities for IC stubs. */ \
+  ICU(LoadCallbackProperty) \
+  ICU(StoreCallbackProperty) \
+  ICU(LoadInterceptorProperty) \
+  ICU(StoreInterceptorProperty)
+
+//
+// IC is the base class for LoadIC, StoreIC and CallIC.
+//
+class IC {
+ public:
+
+  // The ids for utility called from the generated code.
+  enum UtilityId {
+  #define CONST_NAME(name) k##name,
+    IC_UTIL_LIST(CONST_NAME)
+  #undef CONST_NAME
+    kUtilityCount
+  };
+
+  // Looks up the address of the named utility.
+  static Address AddressFromUtilityId(UtilityId id);
+
+  // Alias the inline cache state type to make the IC code more readable.
+  typedef InlineCacheState State;
+
+  // The IC code is either invoked with no extra frames on the stack
+  // or with a single extra frame for supporting calls.
+  enum FrameDepth {
+    NO_EXTRA_FRAME = 0,
+    EXTRA_CALL_FRAME = 1
+  };
+
+  // Construct the IC structure with the given number of extra
+  // JavaScript frames on the stack.
+  explicit IC(FrameDepth depth);
+
+  // Get the call-site target; used for determining the state.
+  Code* target() { return GetTargetAtAddress(address()); }
+  // Address of the inline cache call site.
+  inline Address address();
+
+  // Compute the current IC state based on the target stub and the receiver.
+  static State StateFrom(Code* target, Object* receiver);
+
+  // Clear the inline cache to initial state.
+  static void Clear(Address address);
+
+  // Computes the reloc info for this IC. This is a fairly expensive
+  // operation as it has to search through the heap to find the code
+  // object that contains this IC site.
+  RelocMode ComputeMode();
+
+  // Returns if this IC is for contextual (no explicit receiver)
+  // access to properties.
+  bool is_contextual() { return ComputeMode() == code_target_context; }
+
+  // Returns the map to use for caching stubs for a given object.
+  // This method should not be called with undefined or null.
+  static inline Map* GetCodeCacheMapForObject(Object* object);
+
+ protected:
+  // Frame pointer of the frame that uses (calls) this IC.
+  Address fp() const { return fp_; }
+  // Program counter of the IC call site (read indirectly, see pc_address_).
+  Address pc() const { return *pc_address_; }
+
+  // Computes the address in the original code when the code running is
+  // containing break points (calls to DebugBreakXXX builtins).
+  Address OriginalCodeAddress();
+
+  // Set the call-site target.
+  void set_target(Code* code) { SetTargetAtAddress(address(), code); }
+
+#ifdef DEBUG
+  // Logs an IC state transition for debugging.
+  static void TraceIC(const char* type,
+                      Handle<String> name,
+                      State old_state,
+                      Code* new_target);
+#endif
+
+  // Helpers for signalling the corresponding JavaScript errors.
+  static Failure* TypeError(const char* type,
+                            Handle<Object> object,
+                            Handle<String> name);
+  static Failure* ReferenceError(const char* type, Handle<String> name);
+
+  // Access the target code for the given IC address.
+  static inline Code* GetTargetAtAddress(Address address);
+  static inline void SetTargetAtAddress(Address address, Code* target);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
+
+  // Frame pointer for the frame that uses (calls) the IC.
+  Address fp_;
+
+  // All access to the program counter of an IC structure is indirect
+  // to make the code GC safe. This feature is crucial since
+  // GetProperty and SetProperty are called and they in turn might
+  // invoke the garbage collector.
+  Address* pc_address_;
+};
+
+
+// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
+// cannot make forward declarations to an enum.
+class IC_Utility {
+ public:
+  // Resolves the utility's entry-point address eagerly at construction.
+  explicit IC_Utility(IC::UtilityId id)
+    : address_(IC::AddressFromUtilityId(id)), id_(id) {}
+
+  // Entry-point address of the wrapped utility function.
+  Address address() const { return address_; }
+
+  // Identity of the wrapped utility.
+  IC::UtilityId id() const { return id_; }
+ private:
+  Address address_;
+  IC::UtilityId id_;
+};
+
+
+class CallIC: public IC {
+ public:
+  // Call sites carry one extra frame, hence EXTRA_CALL_FRAME.
+  CallIC() : IC(EXTRA_CALL_FRAME) { ASSERT(target()->is_call_stub()); }
+
+  // Resolves the function to call for the given receiver and name,
+  // updating the inline cache along the way.
+  Object* LoadFunction(State state, Handle<Object> object, Handle<String> name);
+
+
+  // Code generator routines.
+  static void GenerateInitialize(MacroAssembler* masm, int argc);
+  static void GeneratePreMonomorphic(MacroAssembler* masm, int argc);
+  static void GenerateMiss(MacroAssembler* masm, int argc);
+  static void GenerateMegamorphic(MacroAssembler* masm, int argc);
+  static void GenerateNormal(MacroAssembler* masm, int argc);
+
+ private:
+  // Emits a stub that calls the runtime function f with argc arguments.
+  static void Generate(MacroAssembler* masm,
+                       int argc,
+                       const ExternalReference& f);
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<Object> object,
+                    Handle<String> name);
+
+  // Resets a call IC site; invoked through IC::Clear.
+  static void Clear(Address address, Code* target);
+  friend class IC;
+};
+
+
+class LoadIC: public IC {
+ public:
+  LoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_load_stub()); }
+
+  // Performs a named property load, updating the inline cache as a
+  // side effect.
+  Object* Load(State state, Handle<Object> object, Handle<String> name);
+
+  // Code generator routines.
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateMegamorphic(MacroAssembler* masm);
+  static void GenerateNormal(MacroAssembler* masm);
+
+  // Specialized code generator routines.
+  static void GenerateArrayLength(MacroAssembler* masm);
+  static void GenerateShortStringLength(MacroAssembler* masm);
+  static void GenerateMediumStringLength(MacroAssembler* masm);
+  static void GenerateLongStringLength(MacroAssembler* masm);
+  static void GenerateFunctionPrototype(MacroAssembler* masm);
+
+ private:
+  // Emits a stub that calls the runtime function f.
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<Object> object,
+                    Handle<String> name);
+
+  // Stub accessors.
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::LoadIC_Megamorphic);
+  }
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::LoadIC_Initialize);
+  }
+  static Code* pre_monomorphic_stub() {
+    return Builtins::builtin(Builtins::LoadIC_PreMonomorphic);
+  }
+
+  // Resets a load IC site; invoked through IC::Clear.
+  static void Clear(Address address, Code* target);
+  friend class IC;
+};
+
+
+class KeyedLoadIC: public IC {
+ public:
+  KeyedLoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_keyed_load_stub()); }
+
+  // Performs a keyed load (object[key]), updating the inline cache.
+  Object* Load(State state, Handle<Object> object, Handle<Object> key);
+
+  // Code generator routines.
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateGeneric(MacroAssembler* masm);
+
+ private:
+  // Emits a stub that calls the runtime function f.
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<Object> object,
+                    Handle<String> name);
+
+  // Stub accessors. Note that megamorphic_stub and generic_stub both
+  // resolve to the same generic builtin for keyed loads.
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_Initialize);
+  }
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+  }
+  static Code* generic_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+  }
+  static Code* pre_monomorphic_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
+  }
+
+  // Resets a keyed-load IC site; invoked through IC::Clear.
+  static void Clear(Address address, Code* target);
+  friend class IC;
+};
+
+
+class StoreIC: public IC {
+ public:
+  StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }
+
+  // Performs a named property store, updating the inline cache.
+  Object* Store(State state,
+                Handle<Object> object,
+                Handle<String> name,
+                Handle<Object> value);
+
+  // Code generators for stub routines. Only called once at startup.
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateMegamorphic(MacroAssembler* masm);
+
+ private:
+  // Emits a stub that calls the runtime function f.
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupResult* lookup,
+                    State state, Handle<JSObject> receiver,
+                    Handle<String> name,
+                    Handle<Object> value);
+
+  // Stub accessors.
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::StoreIC_Megamorphic);
+  }
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::StoreIC_Initialize);
+  }
+
+  // Resets a store IC site; invoked through IC::Clear.
+  static void Clear(Address address, Code* target);
+  friend class IC;
+};
+
+
+class KeyedStoreIC: public IC {
+ public:
+  // NOTE(review): unlike the other IC constructors, this one does not
+  // assert the target stub type -- confirm whether a keyed-store
+  // predicate exists on Code.
+  KeyedStoreIC() : IC(NO_EXTRA_FRAME) { }
+
+  // Performs a keyed store (object[key] = value), updating the
+  // inline cache.
+  Object* Store(State state,
+                Handle<Object> object,
+                Handle<Object> name,
+                Handle<Object> value);
+
+  // Code generators for stub routines. Only called once at startup.
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateGeneric(MacroAssembler* masm);
+
+ private:
+  // Emits a stub that calls the runtime function f.
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<JSObject> receiver,
+                    Handle<String> name,
+                    Handle<Object> value);
+
+  // Stub accessors. Note that megamorphic_stub and generic_stub both
+  // resolve to the same generic builtin for keyed stores.
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::KeyedStoreIC_Initialize);
+  }
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+  }
+  static Code* generic_stub() {
+    return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+  }
+
+  // Resets a keyed-store IC site; invoked through IC::Clear.
+  static void Clear(Address address, Code* target);
+  friend class IC;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_IC_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "execution.h"
+#include "factory.h"
+#include "jsregexp.h"
+#include "third_party/jscre/pcre.h"
+#include "platform.h"
+#include "top.h"
+
+namespace v8 { namespace internal {
+
+
+// Slot indices into the FixedArray stored as a compiled regexp's value
+// (see JsreCompile below).
+#define CAPTURE_INDEX 0
+#define INTERNAL_INDEX 1
+
+// Records the allocation failure observed by JSREMalloc so that the
+// caller of jsRegExpCompile can trigger a GC and retry.
+static Failure* malloc_failure;
+
+static void* JSREMalloc(size_t size) {
+  Object* allocation = Heap::AllocateByteArray(size);
+
+  // On allocation failure, record the failure object for the caller and
+  // hand NULL back to JSRE; jsRegExpCompile will then return NULL and
+  // the caller can perform a GC and retry.
+  if (allocation->IsFailure()) {
+    malloc_failure = Failure::cast(allocation);
+    return NULL;
+  }
+
+  // The byte array is unrooted; the caller of jsRegExpCompile must
+  // create a handle for the return value before allocating again.
+  ByteArray* bytes = ByteArray::cast(allocation);
+  return reinterpret_cast<void*>(bytes->GetDataStartAddress());
+}
+
+
+// Free callback handed to JSRE. The backing storage lives in the V8
+// heap and is reclaimed by the garbage collector, so nothing to do.
+static void JSREFree(void* p) {
+  USE(p);  // Do nothing, memory is garbage collected.
+}
+
+
+// One-element cache mapping the last ASCII subject string to its
+// two-byte conversion (see CachedStringToTwoByte). Invalidated by the
+// GC prologues below.
+String* RegExpImpl::last_ascii_string_ = NULL;
+String* RegExpImpl::two_byte_cached_string_ = NULL;
+
+
+void RegExpImpl::NewSpaceCollectionPrologue() {
+  // The two byte string is always in the old space. The Ascii string may
+  // be in either place. A cached key in old space survives a scavenge,
+  // so only a new-space key forces invalidation.
+  if (!Heap::InNewSpace(last_ascii_string_)) return;
+  // Invalidate the cache.
+  last_ascii_string_ = NULL;
+  two_byte_cached_string_ = NULL;
+}
+
+
+// A full collection may move or reclaim either cache entry, so the
+// subject-conversion cache is dropped unconditionally.
+void RegExpImpl::OldSpaceCollectionPrologue() {
+  last_ascii_string_ = NULL;
+  two_byte_cached_string_ = NULL;
+}
+
+
+// Constructs a regexp object by invoking the JavaScript RegExp
+// constructor with (pattern, flags). On failure *has_pending_exception
+// is set and an exception handle is returned.
+Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<String> pattern,
+                                               Handle<String> flags,
+                                               bool* has_pending_exception) {
+  // Ensure that RegExp has been loaded.
+  if (!Top::regexp_function()->IsLoaded()) {
+    LoadLazy(Top::regexp_function(), has_pending_exception);
+    if (*has_pending_exception) return Handle<Object>(Failure::Exception());
+  }
+  // Call the construct code with 2 arguments.
+  Object** argv[2] = { Handle<Object>::cast(pattern).location(),
+                       Handle<Object>::cast(flags).location() };
+  return Execution::New(Top::regexp_function(), 2, argv, has_pending_exception);
+}
+
+
+// Converts a source string to a 16 bit flat string (or a SlicedString
+// backed by a 16 bit flat string), memoizing the most recent
+// conversion. The cache is cleared by the GC prologues above.
+Handle<String> RegExpImpl::CachedStringToTwoByte(Handle<String> subject) {
+  if (*subject == last_ascii_string_) {
+    ASSERT(two_byte_cached_string_ != NULL);
+    return Handle<String>(String::cast(two_byte_cached_string_));
+  }
+  Handle<String> two_byte_string = StringToTwoByte(subject);
+  last_ascii_string_ = *subject;
+  two_byte_cached_string_ = *two_byte_string;
+  return two_byte_string;
+}
+
+
+// Converts a source string to a 16 bit flat string (or a SlicedString
+// backed by a 16 bit flat string). Inputs that are already two-byte
+// are returned in flattened form without copying.
+Handle<String> RegExpImpl::StringToTwoByte(Handle<String> pattern) {
+  if (!pattern->IsFlat()) {
+    FlattenString(pattern);
+  }
+  Handle<String> flat_string(pattern->IsConsString() ?
+                             String::cast(ConsString::cast(*pattern)->first()) :
+                             *pattern);
+  ASSERT(!flat_string->IsConsString());
+  ASSERT(flat_string->IsSeqString() || flat_string->IsSlicedString() ||
+         flat_string->IsExternalString());
+  if (!flat_string->IsAscii()) {
+    return flat_string;
+  }
+
+  // Widen the ASCII string character by character into a fresh
+  // two-byte string allocated in old space.
+  Handle<String> two_byte_string =
+      Factory::NewRawTwoByteString(flat_string->length(), TENURED);
+  // NOTE(review): the function-local static buffer makes this routine
+  // non-reentrant; presumably acceptable while execution is
+  // single-threaded -- confirm.
+  static StringInputBuffer convert_to_two_byte_buffer;
+  convert_to_two_byte_buffer.Reset(*flat_string);
+  for (int i = 0; convert_to_two_byte_buffer.has_more(); i++) {
+    two_byte_string->Set(i, convert_to_two_byte_buffer.GetNext());
+  }
+  return two_byte_string;
+}
+
+
+// Compiles 'pattern' with the JSRE engine, retrying once after a GC if
+// allocation fails, and stores {capture count, compiled code} on the
+// regexp's value. Throws a SyntaxError on a malformed pattern.
+Handle<Object> RegExpImpl::JsreCompile(Handle<JSValue> re,
+                                       Handle<String> pattern,
+                                       Handle<String> flags) {
+  // Parse the flags string; only 'i' and 'm' are recognized here.
+  JSRegExpIgnoreCaseOption case_option = JSRegExpDoNotIgnoreCase;
+  JSRegExpMultilineOption multiline_option = JSRegExpSingleLine;
+  FlattenString(flags);
+  for (int i = 0; i < flags->length(); i++) {
+    if (flags->Get(i) == 'i') case_option = JSRegExpIgnoreCase;
+    if (flags->Get(i) == 'm') multiline_option = JSRegExpMultiline;
+  }
+
+  Handle<String> two_byte_pattern = StringToTwoByte(pattern);
+
+  unsigned number_of_captures;
+  const char* error_message = NULL;
+
+  // Seed malloc_failure with a plain exception; JSREMalloc overwrites it
+  // with a retry-after-GC failure when allocation fails.
+  malloc_failure = Failure::Exception();
+  JSRegExp* code = jsRegExpCompile(two_byte_pattern->GetTwoByteData(),
+                                   pattern->length(), case_option,
+                                   multiline_option, &number_of_captures,
+                                   &error_message, &JSREMalloc, &JSREFree);
+
+  if (code == NULL && malloc_failure->IsRetryAfterGC()) {
+    // Performs a GC, then retries.
+    if (!Heap::CollectGarbage(malloc_failure->requested(),
+                              malloc_failure->allocation_space())) {
+      // TODO(1181417): Fix this.
+      V8::FatalProcessOutOfMemory("RegExpImpl::JsreCompile");
+    }
+    malloc_failure = Failure::Exception();
+    code = jsRegExpCompile(two_byte_pattern->GetTwoByteData(),
+                           pattern->length(), case_option,
+                           multiline_option, &number_of_captures,
+                           &error_message, &JSREMalloc, &JSREFree);
+    if (code == NULL && malloc_failure->IsRetryAfterGC()) {
+      // Allocation failed even after the collection.
+      // TODO(1181417): Fix this.
+      V8::FatalProcessOutOfMemory("RegExpImpl::JsreCompile");
+    }
+  }
+
+  if (error_message != NULL) {
+    // Throw an exception.
+    SmartPointer<char> char_pattern =
+        two_byte_pattern->ToCString(DISALLOW_NULLS);
+    Handle<JSArray> array = Factory::NewJSArray(2);
+    SetElement(array, 0, Factory::NewStringFromUtf8(CStrVector(*char_pattern)));
+    SetElement(array, 1, Factory::NewStringFromUtf8(CStrVector(error_message)));
+    Handle<Object> regexp_err =
+        Factory::NewSyntaxError("malformed_regexp", array);
+    return Handle<Object>(Top::Throw(*regexp_err));
+  }
+
+  ASSERT(code != NULL);
+
+  // Convert the return address to a ByteArray pointer.
+  Handle<ByteArray> internal(
+      ByteArray::FromDataStartAddress(reinterpret_cast<Address>(code)));
+
+  // Cache the capture count and the compiled code on the JSValue.
+  Handle<FixedArray> value = Factory::NewFixedArray(2);
+  value->set(CAPTURE_INDEX, Smi::FromInt(number_of_captures));
+  value->set(INTERNAL_INDEX, *internal);
+  re->set_value(*value);
+
+  return re;
+}
+
+
+// Runs a single JSRE match attempt starting at previous_index.
+// Returns null for no match (or hit limit), throws on other JSRE
+// errors, and otherwise returns a JSArray of (start, end+1) pairs.
+Handle<Object> RegExpImpl::JsreExecOnce(Handle<JSValue> regexp,
+                                        int num_captures,
+                                        Handle<String> subject,
+                                        int previous_index,
+                                        const uc16* two_byte_subject,
+                                        int* offsets_vector,
+                                        int offsets_vector_length) {
+  int rc;
+  {
+    // No heap allocation may occur while we hold a raw pointer into the
+    // regexp's compiled-code byte array.
+    AssertNoAllocation a;
+    ByteArray* internal = JsreInternal(regexp);
+    const JSRegExp* js_regexp =
+        reinterpret_cast<JSRegExp*>(internal->GetDataStartAddress());
+
+    rc = jsRegExpExecute(js_regexp, two_byte_subject,
+                         subject->length(),
+                         previous_index,
+                         offsets_vector,
+                         offsets_vector_length);
+  }
+
+  // The KJS JavaScript engine returns null (ie, a failed match) when
+  // JSRE's internal match limit is exceeded. We duplicate that behavior here.
+  if (rc == JSRegExpErrorNoMatch
+      || rc == JSRegExpErrorHitLimit) {
+    return Factory::null_value();
+  }
+
+  // Other JSRE errors:
+  if (rc < 0) {
+    // Throw an exception.
+    Handle<Object> code(Smi::FromInt(rc));
+    Handle<Object> args[2] = { Factory::LookupAsciiSymbol("jsre_exec"), code };
+    Handle<Object> regexp_err(
+        Factory::NewTypeError("jsre_error", HandleVector(args, 2)));
+    return Handle<Object>(Top::Throw(*regexp_err));
+  }
+
+  Handle<JSArray> result = Factory::NewJSArray(2 * (num_captures+1));
+
+  // The captures come in (start, end+1) pairs.
+  for (int i = 0; i < 2 * (num_captures+1); i += 2) {
+    SetElement(result, i, Handle<Object>(Smi::FromInt(offsets_vector[i])));
+    SetElement(result, i+1, Handle<Object>(Smi::FromInt(offsets_vector[i+1])));
+  }
+
+  return result;
+}
+
+
+// Scratch space for JSRE match offsets. Small requests reuse a shared
+// static buffer; larger ones fall back to heap allocation that the
+// destructor releases.
+class OffsetsVector {
+ public:
+  inline OffsetsVector(int num_captures) {
+    // JSRE wants three slots per capture group plus the whole match.
+    offsets_vector_length_ = (num_captures + 1) * 3;
+    vector_ = (offsets_vector_length_ > kStaticOffsetsVectorSize)
+        ? NewArray<int>(offsets_vector_length_)
+        : static_offsets_vector_;
+  }
+
+  inline ~OffsetsVector() {
+    // Only heap-allocated vectors are owned by this object.
+    if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+      DeleteArray(vector_);
+      vector_ = NULL;
+    }
+  }
+
+  inline int* vector() { return vector_; }
+
+  inline int length() { return offsets_vector_length_; }
+
+ private:
+  static const int kStaticOffsetsVectorSize = 30;
+  static int static_offsets_vector_[kStaticOffsetsVectorSize];
+  int* vector_;
+  int offsets_vector_length_;
+};
+
+
+// Backing storage for the common (small) OffsetsVector case.
+int OffsetsVector::static_offsets_vector_[
+    OffsetsVector::kStaticOffsetsVectorSize];
+
+
+// Implements a single RegExp.prototype.exec(string) call starting at
+// 'index' (see ECMA-262 15.10.6.2).
+Handle<Object> RegExpImpl::JsreExec(Handle<JSValue> regexp,
+                                    Handle<String> subject,
+                                    Handle<Object> index) {
+  // Prepare space for the return values.
+  int num_captures = JsreCapture(regexp);
+
+  OffsetsVector offsets(num_captures);
+
+  int previous_index = static_cast<int>(DoubleToInteger(index->Number()));
+
+  Handle<String> subject16 = CachedStringToTwoByte(subject);
+
+  // NOTE(review): the raw two-byte pointer is fetched as part of the
+  // call; assumes no allocation moves subject16 before JsreExecOnce
+  // consumes it -- confirm.
+  Handle<Object> result(JsreExecOnce(regexp, num_captures, subject,
+                                     previous_index,
+                                     subject16->GetTwoByteData(),
+                                     offsets.vector(), offsets.length()));
+
+  return result;
+}
+
+
+// Repeatedly matches 'regexp' against 'subject' from the end of the
+// previous match, collecting each match array into a JSArray. Returns
+// the collected array, or the exception object if a match threw.
+Handle<Object> RegExpImpl::JsreExecGlobal(Handle<JSValue> regexp,
+                                          Handle<String> subject) {
+  // Prepare space for the return values.
+  int num_captures = JsreCapture(regexp);
+
+  OffsetsVector offsets(num_captures);
+
+  int previous_index = 0;
+
+  Handle<JSArray> result = Factory::NewJSArray(0);
+  int i = 0;
+  Handle<Object> matches;
+
+  Handle<String> subject16 = CachedStringToTwoByte(subject);
+
+  // Loop until JsreExecOnce yields null (no match) or an exception.
+  do {
+    if (previous_index > subject->length() || previous_index < 0) {
+      // Per ECMA-262 15.10.6.2, if the previous index is greater than the
+      // string length, there is no match.
+      matches = Factory::null_value();
+    } else {
+      matches = JsreExecOnce(regexp, num_captures, subject, previous_index,
+                             subject16->GetTwoByteData(),
+                             offsets.vector(), offsets.length());
+
+      if (matches->IsJSArray()) {
+        SetElement(result, i, matches);
+        i++;
+        previous_index = offsets.vector()[1];
+        // Step past an empty match to guarantee forward progress.
+        if (offsets.vector()[0] == offsets.vector()[1]) {
+          previous_index++;
+        }
+      }
+    }
+  } while (matches->IsJSArray());
+
+  // If we exited the loop with an exception, throw it.
+  if (matches->IsNull()) { // Exited loop normally.
+    return result;
+  } else { // Exited loop with the exception in matches.
+    return matches;
+  }
+}
+
+
+int RegExpImpl::JsreCapture(Handle<JSValue> re) {
+  // The capture count is stored as a Smi in slot CAPTURE_INDEX of the
+  // regexp's backing FixedArray (filled in by JsreCompile).
+  Object* data = re->value();
+  ASSERT(data->IsFixedArray());
+  return Smi::cast(FixedArray::cast(data)->get(CAPTURE_INDEX))->value();
+}
+
+
+ByteArray* RegExpImpl::JsreInternal(Handle<JSValue> re) {
+  // The JSRE-compiled code lives in the ByteArray stored in slot
+  // INTERNAL_INDEX of the regexp's backing FixedArray.
+  Object* data = re->value();
+  ASSERT(data->IsFixedArray());
+  return ByteArray::cast(FixedArray::cast(data)->get(INTERNAL_INDEX));
+}
+
+}} // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JSREGEXP_H_
+#define V8_JSREGEXP_H_
+
+namespace v8 { namespace internal {
+
+// RegExpImpl implements regexp support on top of the third-party JSRE
+// engine (third_party/jscre): compilation caches the compiled pattern
+// and its capture count on the JSValue; execution drives jsRegExpExecute.
+class RegExpImpl {
+ public:
+  // Creates a regular expression literal in the old space.
+  // This function calls the garbage collector if necessary.
+  static Handle<Object> CreateRegExpLiteral(Handle<String> pattern,
+                                            Handle<String> flags,
+                                            bool* has_pending_exception);
+
+  // Returns a string representation of a regular expression.
+  // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
+  // This function calls the garbage collector if necessary.
+  static Handle<String> ToString(Handle<Object> value);
+
+  // Compiles the pattern with JSRE and stores the result on the JSValue.
+  static Handle<Object> JsreCompile(Handle<JSValue> re,
+                                    Handle<String> pattern,
+                                    Handle<String> flags);
+
+  // Implements RegExp.prototype.exec(string) function.
+  // See ECMA-262 section 15.10.6.2.
+  // This function calls the garbage collector if necessary.
+  static Handle<Object> JsreExec(Handle<JSValue> regexp,
+                                 Handle<String> subject,
+                                 Handle<Object> index);
+
+  // Call RegExp.prototype.exec(string) in a loop.
+  // Used by String.prototype.match and String.prototype.replace.
+  // This function calls the garbage collector if necessary.
+  static Handle<Object> JsreExecGlobal(Handle<JSValue> regexp,
+                                       Handle<String> subject);
+
+  // GC prologues that invalidate the string conversion cache.
+  static void NewSpaceCollectionPrologue();
+  static void OldSpaceCollectionPrologue();
+
+ private:
+  // Converts a source string to a 16 bit flat string. The string
+  // will be either sequential or it will be a SlicedString backed
+  // by a flat string.
+  static Handle<String> StringToTwoByte(Handle<String> pattern);
+  static Handle<String> CachedStringToTwoByte(Handle<String> pattern);
+
+  // One-element cache for the last converted subject string.
+  static String* last_ascii_string_;
+  static String* two_byte_cached_string_;
+
+  // Returns the capture count from the re.
+  static int JsreCapture(Handle<JSValue> re);
+  // Returns the byte array holding the JSRE-compiled code.
+  static ByteArray* JsreInternal(Handle<JSValue> re);
+
+  // Call jsRegExpExecute once.
+  static Handle<Object> JsreExecOnce(Handle<JSValue> regexp,
+                                     int num_captures,
+                                     Handle<String> subject,
+                                     int previous_index,
+                                     const uc16* two_byte_subject,
+                                     int* ovector,
+                                     int ovector_length);
+
+  // Set the subject cache. The previous string buffer is not deleted, so the
+  // caller should ensure that it doesn't leak.
+  static void SetSubjectCache(String* subject, char* utf8_subject,
+                              int utf8_length, int character_position,
+                              int utf8_position);
+
+  // A one element cache of the last utf8_subject string and its length. The
+  // subject JS String object is cached in the heap. We also cache a
+  // translation between position and utf8 position.
+  static char* utf8_subject_cache_;
+  static int utf8_length_cache_;
+  static int utf8_position_;
+  static int character_position_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_JSREGEXP_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIST_INL_H_
+#define V8_LIST_INL_H_
+
+#include "list.h"
+
+namespace v8 { namespace internal {
+
+
+// Appends 'element' to the list, growing the backing store by roughly
+// 50% when full, and returns a reference to the stored copy.
+// NOTE(review): growth copies with memcpy, so T is assumed to be
+// trivially copyable -- confirm against List's users.
+template<typename T, class P>
+T& List<T, P>::Add(const T& element) {
+  if (length_ >= capacity_) {
+    // Grow the list capacity by 50%, but make sure to let it grow
+    // even when the capacity is zero (possible initial case).
+    int new_capacity = 1 + capacity_ + (capacity_ >> 1);
+    T* new_data = NewData(new_capacity);
+    memcpy(new_data, data_, capacity_ * sizeof(T));
+    DeleteData(data_);
+    data_ = new_data;
+    capacity_ = new_capacity;
+  }
+  return data_[length_++] = element;
+}
+
+
+// Appends 'count' copies of 'element' and returns a vector spanning
+// the freshly added slots.
+template<typename T, class P>
+Vector<T> List<T, P>::AddBlock(const T& element, int count) {
+  int start = length_;
+  for (int added = 0; added < count; added++) {
+    Add(element);
+  }
+  return Vector<T>(&data_[start], count);
+}
+
+
+// Removes and returns the element at index i, shifting the tail of the
+// list down by one slot.
+template<typename T, class P>
+T List<T, P>::Remove(int i) {
+  T element = at(i);
+  length_--;
+  for (int j = i; j < length_; j++) {
+    data_[j] = data_[j + 1];
+  }
+  return element;
+}
+
+
+// Frees the backing store and resets the list to the empty,
+// zero-capacity state.
+template<typename T, class P>
+void List<T, P>::Clear() {
+  DeleteData(data_);
+  Initialize(0);
+}
+
+
+// Drops all elements at index 'pos' and beyond without shrinking the
+// backing store; capacity is retained for reuse.
+template<typename T, class P>
+void List<T, P>::Rewind(int pos) {
+  length_ = pos;
+}
+
+
+// Invokes 'callback' with a pointer to each element, in list order.
+template<typename T, class P>
+void List<T, P>::Iterate(void (*callback)(T* x)) {
+  for (int i = 0; i < length_; i++) {
+    callback(&data_[i]);
+  }
+}
+
+
+// Sorts the list in place via the C library qsort, verifying the
+// resulting order in debug builds.
+// NOTE(review): casting the typed comparator to qsort's
+// (const void*, const void*) signature assumes compatible calling
+// conventions -- technically unspecified behavior; confirm portability.
+template<typename T, class P>
+void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
+  qsort(data_,
+        length_,
+        sizeof(T),
+        reinterpret_cast<int (*)(const void*, const void*)>(cmp));
+#ifdef DEBUG
+  for (int i = 1; i < length_; i++)
+    ASSERT(cmp(&data_[i - 1], &data_[i]) <= 0);
+#endif
+}
+
+
+// Puts the list into a valid empty state with room for 'capacity'
+// elements; a zero capacity defers allocation until the first Add.
+template<typename T, class P>
+void List<T, P>::Initialize(int capacity) {
+  ASSERT(capacity >= 0);
+  if (capacity > 0) {
+    data_ = NewData(capacity);
+  } else {
+    data_ = NULL;
+  }
+  capacity_ = capacity;
+  length_ = 0;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_LIST_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIST_H_
+#define V8_LIST_H_
+
+namespace v8 { namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// The list is a template for very light-weight lists. We are not
+// using the STL because we want full control over space and speed of
+// the code. This implementation is based on code by Robert Griesemer
+// and Rob Pike.
+//
+// The list is parameterized by the type of its elements (T) and by an
+// allocation policy (P). The policy is used for allocating lists in
+// the C free store or the zone; see zone.h.
+
+// Forward defined as
+// template <typename T, class P = FreeStoreAllocationPolicy> class List;
+template <typename T, class P>
+class List {
+ public:
+ INLINE(explicit List(int capacity)) { Initialize(capacity); }
+ INLINE(~List()) { DeleteData(data_); }
+
+ // Lists themselves are allocated through the policy P as well.
+ INLINE(void* operator new(size_t size)) { return P::New(size); }
+ INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
+
+ inline T& operator[](int i) const {
+ ASSERT(0 <= i && i < length_);
+ return data_[i];
+ }
+ inline T& at(int i) const { return this->operator[](i); }
+ INLINE(const T& last() const) {
+ ASSERT(!is_empty());
+ return this->at(length_ - 1);
+ }
+
+ INLINE(bool is_empty() const) { return length_ == 0; }
+ INLINE(int length() const) { return length_; }
+
+ // Returns a vector over the current contents; valid only until the
+ // list is modified.
+ Vector<T> ToVector() { return Vector<T>(data_, length_); }
+
+ // Adds a copy of the given 'element' to the end of the list,
+ // expanding the list if necessary.
+ // NOTE(review): growth copies elements with memcpy — presumably T
+ // must be trivially copyable; confirm.
+ T& Add(const T& element);
+
+ // Adds 'count' elements with the value 'value' and returns a
+ // vector that allows access to the elements. The vector is valid
+ // until the next change is made to this list.
+ Vector<T> AddBlock(const T& value, int count);
+
+ // Removes the i'th element without deleting it even if T is a
+ // pointer type; moves all elements above i "down". Returns the
+ // removed element.
+ T Remove(int i);
+
+ // Removes the last element without deleting it even if T is a
+ // pointer type. Returns the removed element.
+ INLINE(T RemoveLast()) { return Remove(length_ - 1); }
+
+ // Clears the list by setting the length to zero. Even if T is a
+ // pointer type, clearing the list doesn't delete the entries.
+ INLINE(void Clear());
+
+ // Drops all but the first 'pos' elements from the list.
+ INLINE(void Rewind(int pos));
+
+ // Iterate through all list entries, starting at index 0.
+ void Iterate(void (*callback)(T* x));
+
+ // Sort all list entries (using QuickSort).
+ void Sort(int (*cmp)(const T* x, const T* y));
+
+ INLINE(void Initialize(int capacity));
+
+ private:
+ T* data_; // Backing store, allocated through P; NULL when capacity_ == 0.
+ int capacity_; // Number of slots allocated in data_.
+ int length_; // Number of slots in use; length_ <= capacity_.
+
+ INLINE(T* NewData(int n)) { return static_cast<T*>(P::New(n * sizeof(T))); }
+ INLINE(void DeleteData(T* data)) { P::Delete(data); }
+
+ DISALLOW_EVIL_CONSTRUCTORS(List);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_LIST_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "log.h"
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+//
+// Command line flags used by Logger.
+//
+// Each DEFINE_* below introduces a FLAG_<name> variable; see
+// Logger::Setup for how the flags imply one another.
+DEFINE_bool(log, false,
+ "Minimal logging (no API, code, GC, suspect, or handles samples).");
+DEFINE_bool(log_all, false, "Log all events to the log file.");
+DEFINE_bool(log_api, false, "Log API events to the log file.");
+DEFINE_bool(log_code, false,
+ "Log code events to the log file without profiling.");
+DEFINE_bool(log_gc, false,
+ "Log heap samples on garbage collection for the hp2ps tool.");
+DEFINE_bool(log_suspect, false, "Log suspect operations.");
+DEFINE_bool(log_handles, false, "Log global handle events.");
+DEFINE_bool(log_state_changes, false, "Log state changes.");
+DEFINE_bool(prof, false,
+ "Log statistical profiling information (implies --log-code).");
+DEFINE_bool(sliding_state_window, false,
+ "Update sliding state window counters.");
+
+DEFINE_string(logfile, "v8.log", "Specify the name of the log file.");
+
+
+//
+// Sliding state window. Updates counters to keep track of the last
+// window of kBufferSize states. This is useful to track where we
+// spent our time.
+//
+class SlidingStateWindow {
+ public:
+ SlidingStateWindow();
+ ~SlidingStateWindow();
+ // Records 'state' in the circular buffer and updates the counters.
+ void AddState(StateTag state);
+
+ private:
+ static const int kBufferSize = 256;
+ int current_index_; // Next slot of buffer_ to overwrite.
+ bool is_full_; // Whether buffer_ has wrapped at least once.
+ byte buffer_[kBufferSize]; // The last kBufferSize observed states.
+
+
+ // Adds one occurrence of 'state' to the global state counters.
+ void IncrementStateCounter(StateTag state) {
+ Counters::state_counters[state].Increment();
+ }
+
+
+ // Removes one occurrence of 'state' from the global state counters.
+ void DecrementStateCounter(StateTag state) {
+ Counters::state_counters[state].Decrement();
+ }
+};
+
+
+//
+// The Profiler samples pc and sp values for the main thread.
+// Each sample is appended to a circular buffer.
+// An independent thread removes data and writes it to the log.
+// This design minimizes the time spent in the sampler.
+//
+class Profiler: public Thread {
+ public:
+ Profiler();
+ // Starts the worker thread and registers for tick samples.
+ void Engage();
+ // Unregisters from ticks and shuts down the worker thread.
+ void Disengage();
+
+ // Inserts collected profiling data into buffer.
+ // Drops the sample (and records the overflow) when the buffer is full.
+ void Insert(TickSample* sample) {
+ if (Succ(head_) == tail_) {
+ overflow_ = true;
+ } else {
+ buffer_[head_] = *sample;
+ head_ = Succ(head_);
+ buffer_semaphore_->Signal(); // Tell we have an element.
+ }
+ }
+
+ // Waits for a signal and removes profiling data.
+ // Returns whether an overflow was recorded since the previous call.
+ bool Remove(TickSample* sample) {
+ buffer_semaphore_->Wait(); // Wait for an element.
+ *sample = buffer_[tail_];
+ bool result = overflow_;
+ tail_ = Succ(tail_);
+ overflow_ = false;
+ return result;
+ }
+
+ // Worker thread body; drains samples into the log (see Run in log.cc).
+ void Run();
+
+ private:
+ // Returns the next index in the cyclic buffer.
+ int Succ(int index) { return (index + 1) % kBufferSize; }
+
+ // Cyclic buffer for communicating profiling samples
+ // between the signal handler and the worker thread.
+ // NOTE(review): head_/tail_/overflow_ are shared between producer and
+ // consumer with no synchronization beyond the semaphore — presumably
+ // tolerated by this design; confirm.
+ static const int kBufferSize = 128;
+ TickSample buffer_[kBufferSize]; // Buffer storage.
+ int head_; // Index to the buffer head.
+ int tail_; // Index to the buffer tail.
+ bool overflow_; // Tell whether a buffer overflow has occurred.
+ Semaphore* buffer_semaphore_; // Semaphore used for buffer synchronization.
+
+ // Tells whether worker thread should continue running.
+ bool running_;
+};
+
+
+//
+// Ticker used to provide ticks to the profiler and the sliding state
+// window.
+//
+class Ticker: public ProfileSampler {
+ public:
+ explicit Ticker(int interval):
+ ProfileSampler(interval), window_(NULL), profiler_(NULL) {}
+
+ ~Ticker() { if (IsActive()) Stop(); }
+
+ // Fans one tick out to the profiler and/or the sliding state window.
+ void Tick(TickSample* sample) {
+ if (profiler_) profiler_->Insert(sample);
+ if (window_) window_->AddState(Logger::state());
+ }
+
+ // Registers a state-window consumer; starts sampling if needed.
+ void SetWindow(SlidingStateWindow* window) {
+ window_ = window;
+ if (!IsActive()) Start();
+ }
+
+ // Unregisters the window; stops sampling when no consumer remains.
+ void ClearWindow() {
+ window_ = NULL;
+ if (!profiler_ && IsActive()) Stop();
+ }
+
+ // Registers a profiler consumer; starts sampling if needed.
+ void SetProfiler(Profiler* profiler) {
+ profiler_ = profiler;
+ if (!IsActive()) Start();
+ }
+
+ // Unregisters the profiler; stops sampling when no consumer remains.
+ void ClearProfiler() {
+ profiler_ = NULL;
+ if (!window_ && IsActive()) Stop();
+ }
+
+ private:
+ SlidingStateWindow* window_; // State consumer; may be NULL.
+ Profiler* profiler_; // Sample consumer; may be NULL.
+};
+
+
+//
+// SlidingStateWindow implementation.
+//
+SlidingStateWindow::SlidingStateWindow(): current_index_(0), is_full_(false) {
+ // Start with a window full of OTHER states, then subscribe to ticks.
+ for (int slot = 0; slot < kBufferSize; slot++) {
+ buffer_[slot] = static_cast<byte>(OTHER);
+ }
+ Logger::ticker_->SetWindow(this);
+}
+
+
+SlidingStateWindow::~SlidingStateWindow() {
+ // Stop receiving states from the ticker.
+ Logger::ticker_->ClearWindow();
+}
+
+
+void SlidingStateWindow::AddState(StateTag state) {
+ // Slide the window: once the buffer has wrapped, the state falling
+ // out of the window must be removed from the counters before the new
+ // state overwrites its slot.
+ if (is_full_) {
+ DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
+ } else if (current_index_ == kBufferSize - 1) {
+ is_full_ = true;
+ }
+ buffer_[current_index_] = static_cast<byte>(state);
+ IncrementStateCounter(state);
+ // kBufferSize must be a power of two for the mask below to wrap.
+ ASSERT(IsPowerOf2(kBufferSize));
+ current_index_ = (current_index_ + 1) & (kBufferSize - 1);
+}
+
+
+//
+// Profiler implementation.
+//
+// All bookkeeping starts empty; the semaphore starts with no elements.
+Profiler::Profiler()
+ : head_(0),
+ tail_(0),
+ overflow_(false),
+ buffer_semaphore_(OS::CreateSemaphore(0)),
+ running_(false) {
+}
+
+
+void Profiler::Engage() {
+ // Log the mapped shared libraries so ticks can be attributed later.
+ OS::LogSharedLibraryAddresses();
+
+ // Start thread processing the profiler buffer.
+ running_ = true;
+ Start();
+
+ // Register to get ticks.
+ Logger::ticker_->SetProfiler(this);
+
+ LOG(StringEvent("profiler", "begin"));
+}
+
+
+// Shuts the profiler down; counterpart of Engage.
+void Profiler::Disengage() {
+ // Stop receiving ticks.
+ Logger::ticker_->ClearProfiler();
+
+ // Terminate the worker thread by setting running_ to false,
+ // inserting a fake element in the queue and then wait for
+ // the thread to terminate.
+ running_ = false;
+ TickSample sample;
+ sample.pc = 0;
+ sample.sp = 0;
+ sample.state = OTHER;
+ Insert(&sample);
+ Join();
+
+ LOG(StringEvent("profiler", "end"));
+}
+
+
+void Profiler::Run() {
+ // Worker thread body: block until a sample is available, log it,
+ // repeat. The fake sample inserted by Disengage unblocks the last
+ // Remove; running_ is false by then, ending the loop.
+ TickSample sample;
+ bool overflow = Logger::profiler_->Remove(&sample);
+ while (running_) {
+ LOG(TickEvent(&sample, overflow));
+ overflow = Logger::profiler_->Remove(&sample);
+ }
+}
+
+
+//
+// Synchronize class used for ensuring block structured
+// locking for the Logger::*Event functions.
+//
+
+// Scoped lock guard ensuring block structured locking for the
+// Logger::*Event functions: the mutex is held for the guard's lifetime.
+class Synchronize {
+ public:
+ explicit Synchronize(Mutex* mutex) : mutex_(mutex) {
+ mutex_->Lock();
+ }
+ ~Synchronize() {
+ mutex_->Unlock();
+ }
+ private:
+ // Mutex used for enforcing block structured access.
+ Mutex* mutex_;
+
+ // Copying a guard would unlock the mutex once per copy; forbid it.
+ DISALLOW_EVIL_CONSTRUCTORS(Synchronize);
+};
+
+
+//
+// Logger class implementation.
+//
+// Static Logger state; created by Setup and destroyed by TearDown.
+Ticker* Logger::ticker_ = NULL;
+FILE* Logger::logfile_ = NULL;
+Profiler* Logger::profiler_ = NULL;
+Mutex* Logger::mutex_ = NULL;
+VMState* Logger::current_state_ = NULL;
+SlidingStateWindow* Logger::sliding_state_window_ = NULL;
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+void Logger::Preamble(const char* content) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Writes 'content' verbatim; intended for the start of the log.
+ if (logfile_ == NULL) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "%s", content);
+#endif
+}
+
+
+void Logger::StringEvent(const char* name, const char* value) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: name,"value"
+ if (logfile_ == NULL) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "%s,\"%s\"\n", name, value);
+#endif
+}
+
+
+void Logger::IntEvent(const char* name, int value) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: name,value
+ if (logfile_ == NULL) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "%s,%d\n", name, value);
+#endif
+}
+
+
+void Logger::HandleEvent(const char* name, Object** location) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: name,0x<location>. Only active with --log-handles.
+ if (logfile_ == NULL || !FLAG_log_handles) return;
+ Synchronize s(mutex_);
+ // NOTE(review): casting the pointer to unsigned int assumes a 32-bit
+ // address space — confirm for the targeted platforms.
+ fprintf(logfile_, "%s,0x%x\n", name,
+ reinterpret_cast<unsigned int>(location));
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// ApiEvent is private so all the calls come from the Logger class. It is the
+// caller's responsibility to ensure that logfile_ is not NULL and that
+// FLAG_log_api is true.
+void Logger::ApiEvent(const char* format, ...) {
+ ASSERT(logfile_ != NULL && FLAG_log_api);
+ Synchronize s(mutex_);
+ va_list ap;
+ va_start(ap, format);
+ vfprintf(logfile_, format, ap);
+ // Every va_start must be matched by a va_end; omitting it is
+ // undefined behavior on some ABIs.
+ va_end(ap);
+}
+#endif
+
+
+void Logger::ApiNamedSecurityCheck(Object* key) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Logs a named security check; the key is logged as a quoted string,
+ // as 'undefined', or as a ['no-name'] placeholder. Needs --log-api.
+ if (logfile_ == NULL || !FLAG_log_api) return;
+ if (key->IsString()) {
+ SmartPointer<char> str =
+ String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ ApiEvent("api,check-security,\"%s\"\n", *str);
+ } else if (key->IsUndefined()) {
+ ApiEvent("api,check-security,undefined\n");
+ } else {
+ ApiEvent("api,check-security,['no-name']\n");
+ }
+#endif
+}
+
+
+void Logger::SharedLibraryEvent(const char* library_path,
+ unsigned start,
+ unsigned end) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: shared-library,"path",0x<start>,0x<end>. Needs --prof.
+ if (logfile_ == NULL || !FLAG_prof) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
+ start, end);
+#endif
+}
+
+
+// Wide-character overload of the shared-library event above.
+void Logger::SharedLibraryEvent(const wchar_t* library_path,
+ unsigned start,
+ unsigned end) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: shared-library,"path",0x<start>,0x<end>. Needs --prof.
+ if (logfile_ == NULL || !FLAG_prof) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
+ start, end);
+#endif
+}
+
+void Logger::ApiIndexedSecurityCheck(uint32_t index) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Logs an indexed security check. Needs --log-api.
+ if (logfile_ == NULL || !FLAG_log_api) return;
+ ApiEvent("api,check-security,%u\n", index);
+#endif
+}
+
+
+void Logger::ApiNamedPropertyAccess(const char* tag,
+ JSObject* holder,
+ Object* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: api,<tag>,"<holder class>","<property name>". Needs --log-api.
+ ASSERT(name->IsString());
+ if (logfile_ == NULL || !FLAG_log_api) return;
+ String* class_name_obj = holder->class_name();
+ SmartPointer<char> class_name =
+ class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ SmartPointer<char> property_name =
+ String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ Logger::ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+#endif
+}
+
+void Logger::ApiIndexedPropertyAccess(const char* tag,
+ JSObject* holder,
+ uint32_t index) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: api,<tag>,"<holder class>",<index>. Needs --log-api.
+ if (logfile_ == NULL || !FLAG_log_api) return;
+ String* class_name_obj = holder->class_name();
+ SmartPointer<char> class_name =
+ class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ Logger::ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
+#endif
+}
+
+void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: api,<tag>,"<object class>". Needs --log-api.
+ if (logfile_ == NULL || !FLAG_log_api) return;
+ String* class_name_obj = object->class_name();
+ SmartPointer<char> class_name =
+ class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ Logger::ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
+#endif
+}
+
+
+void Logger::ApiEntryCall(const char* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Logs entry into the V8 API function 'name'. Needs --log-api.
+ if (logfile_ == NULL || !FLAG_log_api) return;
+ Logger::ApiEvent("api,%s\n", name);
+#endif
+}
+
+
+void Logger::NewEvent(const char* name, void* object, size_t size) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: new,<name>,0x<object>,<size> for a C allocation.
+ if (logfile_ == NULL) return;
+ Synchronize s(mutex_);
+ // NOTE(review): pointer cast to unsigned int assumes 32-bit — confirm.
+ fprintf(logfile_, "new,%s,0x%x,%u\n", name,
+ reinterpret_cast<unsigned int>(object),
+ static_cast<unsigned int>(size));
+#endif
+}
+
+
+void Logger::DeleteEvent(const char* name, void* object) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: delete,<name>,0x<object> for a C deallocation.
+ if (logfile_ == NULL) return;
+ Synchronize s(mutex_);
+ // NOTE(review): pointer cast to unsigned int assumes 32-bit — confirm.
+ fprintf(logfile_, "delete,%s,0x%x\n", name,
+ reinterpret_cast<unsigned int>(object));
+#endif
+}
+
+
+void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: code-creation,<tag>,0x<addr>,<size>,"<comment>". Needs --log-code.
+ if (logfile_ == NULL || !FLAG_log_code) return;
+ Synchronize s(mutex_);
+
+ fprintf(logfile_, "code-creation,%s,0x%x,%d,\"", tag,
+ reinterpret_cast<unsigned int>(code->address()),
+ code->instruction_size());
+ // Escape embedded double quotes so the quoted field stays parseable.
+ for (const char* p = comment; *p != '\0'; p++) {
+ if (*p == '\"') fprintf(logfile_, "\\");
+ fprintf(logfile_, "%c", *p);
+ }
+ fprintf(logfile_, "\"\n");
+#endif
+}
+
+
+void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: code-creation,<tag>,0x<addr>,<size>,"<name>". Needs --log-code.
+ if (logfile_ == NULL || !FLAG_log_code) return;
+ Synchronize s(mutex_);
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ fprintf(logfile_, "code-creation,%s,0x%x,%d,\"", tag,
+ reinterpret_cast<unsigned int>(code->address()),
+ code->instruction_size());
+ // Escape embedded double quotes so the quoted field stays parseable,
+ // consistent with the comment-based CodeCreateEvent overload.
+ for (const char* p = *str; *p != '\0'; p++) {
+ if (*p == '\"') fprintf(logfile_, "\\");
+ fprintf(logfile_, "%c", *p);
+ }
+ fprintf(logfile_, "\"\n");
+#endif
+}
+
+
+void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: code-creation,<tag>,0x<addr>,<size>,"args_count: <n>".
+ if (logfile_ == NULL || !FLAG_log_code) return;
+ Synchronize s(mutex_);
+
+ fprintf(logfile_, "code-creation,%s,0x%x,%d,\"args_count: %d\"\n", tag,
+ reinterpret_cast<unsigned int>(code->address()),
+ code->instruction_size(),
+ args_count);
+#endif
+}
+
+
+void Logger::CodeMoveEvent(Address from, Address to) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: code-move,0x<from>,0x<to>. Needs --log-code.
+ if (logfile_ == NULL || !FLAG_log_code) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "code-move,0x%x,0x%x\n",
+ reinterpret_cast<unsigned int>(from),
+ reinterpret_cast<unsigned int>(to));
+#endif
+}
+
+
+void Logger::CodeDeleteEvent(Address from) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: code-delete,0x<from>. Needs --log-code.
+ if (logfile_ == NULL || !FLAG_log_code) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "code-delete,0x%x\n", reinterpret_cast<unsigned int>(from));
+#endif
+}
+
+
+void Logger::ResourceEvent(const char* name, const char* tag) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: <name>,<tag>,[<user sec>,<user usec>,]<wall-clock ms>.
+ if (logfile_ == NULL) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "%s,%s,", name, tag);
+
+ uint32_t sec, usec;
+ if (OS::GetUserTime(&sec, &usec) != -1) {
+ // sec/usec are unsigned 32-bit; %u avoids misprinting large values.
+ fprintf(logfile_, "%u,%u,", sec, usec);
+ }
+ fprintf(logfile_, "%.0f", OS::TimeCurrentMillis());
+
+ fprintf(logfile_, "\n");
+#endif
+}
+
+
+void Logger::SuspectReadEvent(String* name, String* obj) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits: suspect-read,<obj>,"<name>". Needs --log-suspect.
+ if (logfile_ == NULL || !FLAG_log_suspect) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "suspect-read,");
+ obj->PrintOn(logfile_);
+ fprintf(logfile_, ",\"");
+ name->PrintOn(logfile_);
+ fprintf(logfile_, "\"\n");
+#endif
+}
+
+
+void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Marks the start of a heap sample for 'space'. Needs --log-gc.
+ if (logfile_ == NULL || !FLAG_log_gc) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
+#endif
+}
+
+
+void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Marks the end of a heap sample for 'space'. Needs --log-gc.
+ if (logfile_ == NULL || !FLAG_log_gc) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "heap-sample-end,\"%s\",\"%s\"\n", space, kind);
+#endif
+}
+
+
+void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Emits one item line of a heap sample. Needs --log-gc.
+ if (logfile_ == NULL || !FLAG_log_gc) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "heap-sample-item,%s,%d,%d\n", type, number, bytes);
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::TickEvent(TickSample* sample, bool overflow) {
+ // Emits: tick,0x<pc>,0x<sp>,<state>[,overflow]. Called from the
+ // profiler worker thread (see Profiler::Run); 'overflow' reports that
+ // samples were dropped because the sample buffer was full.
+ if (logfile_ == NULL) return;
+ Synchronize s(mutex_);
+ fprintf(logfile_, "tick,0x%x,0x%x,%d", sample->pc, sample->sp,
+ static_cast<int>(sample->state));
+ if (overflow) fprintf(logfile_, ",overflow");
+ fprintf(logfile_, "\n");
+}
+#endif
+
+
+// Normalizes the logging flags, opens the log file and creates the
+// supporting objects (ticker, state window, profiler). Returns whether
+// logging support is compiled in.
+bool Logger::Setup() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // --log-all enables all the log flags.
+ if (FLAG_log_all) {
+ FLAG_log_api = true;
+ FLAG_log_code = true;
+ FLAG_log_gc = true;
+ FLAG_log_suspect = true;
+ FLAG_log_handles = true;
+ }
+
+ // --prof implies --log-code.
+ if (FLAG_prof) FLAG_log_code = true;
+
+ // Each of the individual log flags implies --log. Check after
+ // checking --log-all and --prof in case they set --log-code.
+ if (FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
+ FLAG_log_handles || FLAG_log_suspect) {
+ FLAG_log = true;
+ }
+
+ // If we're logging anything, we need to open the log file.
+ if (FLAG_log) {
+ if (strcmp(FLAG_logfile, "-") == 0) {
+ logfile_ = stdout;
+ } else {
+ // NOTE(review): fopen may fail and leave logfile_ NULL; events are
+ // then dropped by the NULL checks above — confirm this is intended.
+ logfile_ = fopen(FLAG_logfile, "w");
+ }
+ mutex_ = OS::CreateMutex();
+ }
+
+ current_state_ = new VMState(OTHER);
+
+ ticker_ = new Ticker(10);
+
+ if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
+ sliding_state_window_ = new SlidingStateWindow();
+ }
+
+ if (FLAG_prof) {
+ profiler_ = new Profiler();
+ profiler_->Engage();
+ }
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+
+// Shuts logging down in the reverse order of Setup.
+void Logger::TearDown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Stop the profiler before closing the file.
+ if (profiler_ != NULL) {
+ profiler_->Disengage();
+ delete profiler_;
+ profiler_ = NULL;
+ }
+
+ // Deleting the current_state_ has the side effect of assigning to it(!).
+ // The VMState destructor pops the state stack by writing the previous
+ // state back into Logger::current_state_, so this deletes the whole stack.
+ while (current_state_) delete current_state_;
+ delete sliding_state_window_;
+
+ delete ticker_;
+
+ if (logfile_ != NULL) {
+ // NOTE(review): with --logfile=- this closes stdout — confirm intended.
+ fclose(logfile_);
+ logfile_ = NULL;
+ delete mutex_;
+ mutex_ = NULL;
+ }
+#endif
+}
+
+
+// Starts (or schedules) the sliding state window computation.
+void Logger::EnableSlidingStateWindow() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If the ticker is NULL, Logger::Setup has not been called yet. In
+ // that case, we set the sliding_state_window flag so that the
+ // sliding window computation will be started when Logger::Setup is
+ // called.
+ if (ticker_ == NULL) {
+ FLAG_sliding_state_window = true;
+ return;
+ }
+ // Otherwise, if the sliding state window computation has not been
+ // started we do it now.
+ if (sliding_state_window_ == NULL) {
+ sliding_state_window_ = new SlidingStateWindow();
+ }
+#endif
+}
+
+
+//
+// VMState class implementation. A simple stack of VM states held by the
+// logger and partially threaded through the call stack. States are pushed by
+// VMState construction and popped by destruction.
+//
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// Returns a human-readable name for a VM state tag.
+static const char* StateToString(StateTag state) {
+ if (state == GC) return "GC";
+ if (state == COMPILER) return "COMPILER";
+ if (state == OTHER) return "OTHER";
+ UNREACHABLE();
+ return NULL;
+}
+
+// Pushes this state onto the logger's state stack.
+VMState::VMState(StateTag state) {
+ state_ = state;
+ previous_ = Logger::current_state_;
+ Logger::current_state_ = this;
+
+ if (FLAG_log_state_changes) {
+ LOG(StringEvent("Entering", StateToString(state_)));
+ if (previous_) {
+ LOG(StringEvent("From", StateToString(previous_->state_)));
+ }
+ }
+}
+
+
+// Pops this state: restores the previously active state.
+VMState::~VMState() {
+ Logger::current_state_ = previous_;
+
+ if (FLAG_log_state_changes) {
+ LOG(StringEvent("Leaving", StateToString(state_)));
+ if (previous_) {
+ LOG(StringEvent("To", StateToString(previous_->state_)));
+ }
+ }
+}
+#endif
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_H_
+#define V8_LOG_H_
+
+namespace v8 { namespace internal {
+
+// Logger is used for collecting logging information from V8 during
+// execution. The result is dumped to a file.
+//
+// Available command line flags:
+//
+// --log
+// Minimal logging (no API, code, or GC sample events), default is off.
+//
+// --log-all
+// Log all events to the file, default is off. This is the same as combining
+// --log-api, --log-code, --log-gc, --log-suspect, and --log-handles.
+//
+// --log-api
+// Log API events to the logfile, default is off. --log-api implies --log.
+//
+// --log-code
+// Log code (create, move, and delete) events to the logfile, default is off.
+// --log-code implies --log.
+//
+// --log-gc
+// Log GC heap samples after each GC that can be processed by hp2ps, default
+// is off. --log-gc implies --log.
+//
+// --logfile <filename>
+// Specify the name of the logfile, default is "v8.log".
+//
+// --prof
+// Collect statistical profiling information (ticks), default is off. The
+// tick profiler requires code events, so --prof implies --log-code.
+
+// Forward declarations.
+class Ticker;
+class Profiler;
+class Semaphore;
+class SlidingStateWindow;
+
+#undef LOG
+// LOG(Call) expands to a Logger call when logging support is compiled
+// in, and to a no-op expression otherwise, so call sites need no #ifdefs.
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define LOG(Call) v8::internal::Logger::Call
+#else
+#define LOG(Call) ((void) 0)
+#endif
+
+
+class VMState {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ public:
+ // Pushes 'state' onto the logger's state stack; the destructor pops it.
+ explicit VMState(StateTag state);
+ ~VMState();
+
+ StateTag state() { return state_; }
+
+ private:
+ StateTag state_; // Tag of this state.
+ VMState* previous_; // Next-outer state on the stack.
+#else
+ public:
+ // Without logging support a VMState is a no-op placeholder.
+ explicit VMState(StateTag state) {}
+#endif
+};
+
+
+class Logger {
+ public:
+ // Opens the file for logging if the right flags are set.
+ static bool Setup();
+
+ // Closes file opened in Setup.
+ static void TearDown();
+
+ // Enable the computation of a sliding window of states.
+ static void EnableSlidingStateWindow();
+
+ // Write a raw string to the log to be used as a preamble.
+ // No check is made that the 'preamble' is actually at the beginning
+ // of the log.
+ static void Preamble(const char* content);
+
+ // ==== Events that are always logged. ====
+ // Emits an event with a string value -> (name, value).
+ static void StringEvent(const char* name, const char* value);
+
+ // Emits an event with an int value -> (name, value).
+ static void IntEvent(const char* name, int value);
+
+ // Emits an event with a handle value -> (name, location).
+ static void HandleEvent(const char* name, Object** location);
+
+ // Emits memory management events for C allocated structures.
+ static void NewEvent(const char* name, void* object, size_t size);
+ static void DeleteEvent(const char* name, void* object);
+
+ // Emits an event with a tag, and some resource usage information.
+ // -> (name, tag, <rusage information>).
+ // Currently, the resource usage information is a process time stamp
+ // and a real time timestamp.
+ static void ResourceEvent(const char* name, const char* tag);
+
+ // Emits an event that an undefined property was read from an
+ // object.
+ static void SuspectReadEvent(String* name, String* obj);
+
+
+ // ==== Events logged by --log-api. ====
+ static void ApiNamedSecurityCheck(Object* key);
+ static void ApiIndexedSecurityCheck(uint32_t index);
+ static void ApiNamedPropertyAccess(const char* tag,
+ JSObject* holder,
+ Object* name);
+ static void ApiIndexedPropertyAccess(const char* tag,
+ JSObject* holder,
+ uint32_t index);
+ static void ApiObjectAccess(const char* tag, JSObject* obj);
+ static void ApiEntryCall(const char* name);
+
+
+ // ==== Events logged by --log-code. ====
+ // Emits a code create event.
+ static void CodeCreateEvent(const char* tag, Code* code, const char* source);
+ static void CodeCreateEvent(const char* tag, Code* code, String* name);
+ static void CodeCreateEvent(const char* tag, Code* code, int args_count);
+ // Emits a code move event.
+ static void CodeMoveEvent(Address from, Address to);
+ // Emits a code delete event.
+ static void CodeDeleteEvent(Address from);
+
+ // ==== Events logged by --log-gc. ====
+ // Heap sampling events: start, end, and individual types.
+ static void HeapSampleBeginEvent(const char* space, const char* kind);
+ static void HeapSampleEndEvent(const char* space, const char* kind);
+ static void HeapSampleItemEvent(const char* type, int number, int bytes);
+
+ static void SharedLibraryEvent(const char* library_path,
+ unsigned start,
+ unsigned end);
+ static void SharedLibraryEvent(const wchar_t* library_path,
+ unsigned start,
+ unsigned end);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Returns the innermost VM state, or OTHER when no state is active.
+ static StateTag state() {
+ return current_state_ ? current_state_->state() : OTHER;
+ }
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ private:
+ // Emits a profiler tick event. Used by the profiler thread.
+ static void TickEvent(TickSample* sample, bool overflow);
+
+ // printf-style helper for the --log-api events above.
+ static void ApiEvent(const char* name, ...);
+
+ // When logging is active, logfile_ refers the file
+ // events are written to.
+ static FILE* logfile_;
+
+ // The sampler used by the profiler and the sliding state window.
+ static Ticker* ticker_;
+
+ // When the statistical profile is active, profiler_
+ // points to a Profiler, that handles collection
+ // of samples.
+ static Profiler* profiler_;
+
+ // mutex_ is a Mutex used for enforcing exclusive
+ // access to the log file.
+ static Mutex* mutex_;
+
+ // A stack of VM states.
+ static VMState* current_state_;
+
+ // SlidingStateWindow instance keeping a sliding window of the most
+ // recent VM states.
+ static SlidingStateWindow* sliding_state_window_;
+
+ // Internal implementation classes with access to
+ // private members.
+ friend class EventLog;
+ friend class TimeLog;
+ friend class Profiler;
+ friend class SlidingStateWindow;
+ friend class VMState;
+#endif
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_LOG_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 { namespace internal {
+
+DECLARE_bool(debug_code);
+DECLARE_bool(optimize_locals);
+
+
// Give alias names to registers used by the JS calling convention on ARM.
Register cp = { 8 };  // JavaScript context pointer
Register pp = { 10 };  // parameter pointer
+
+
// Construct a macro assembler emitting into the given buffer. The list
// of unresolved builtin call sites starts empty and stub generation is
// initially disabled.
MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      unresolved_(0),
      generating_stub_(false) {
}
+
+
+// We always generate arm code, never thumb code, even if V8 is compiled to
+// thumb, so we require inter-working support
+#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
+#error "flag -mthumb-interwork missing"
+#endif
+
+
+// We do not support thumb inter-working with an arm architecture not supporting
+// the blx instruction (below v5t)
+#if defined(__THUMB_INTERWORK__)
+#if !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__)
+// add tests for other versions above v5t as required
+#error "for thumb inter-working we require architecture v5t or above"
+#endif
+#endif
+
+
+// Using blx may yield better code, so use it when required or when available
+#if defined(__THUMB_INTERWORK__) || defined(__ARM_ARCH_5__)
+#define USE_BLX 1
+#endif
+
+// Using bx does not yield better code, so use it only when required
+#if defined(__THUMB_INTERWORK__)
+#define USE_BX 1
+#endif
+
+
// Jump to the code at the address held in 'target'.
void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  // bx also switches instruction set when inter-working (ARM/Thumb).
  bx(target, cond);
#else
  // Without bx, writing to pc performs the jump.
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}
+
+
// Jump to an absolute address, recording the given relocation mode.
void MacroAssembler::Jump(intptr_t target, RelocMode rmode, Condition cond) {
#if USE_BX
  // Load the target into ip first so bx can be used for inter-working.
  mov(ip, Operand(target, rmode), LeaveCC, cond);
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}
+
+
+void MacroAssembler::Jump(byte* target, RelocMode rmode, Condition cond) {
+ ASSERT(!is_code_target(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocMode rmode, Condition cond) {
+ ASSERT(is_code_target(rmode));
+ // 'code' is always generated ARM code, never THUMB code
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
// Call the code at the address held in 'target', setting lr to the
// return address.
void MacroAssembler::Call(Register target, Condition cond) {
#if USE_BLX
  blx(target, cond);
#else
  // set lr for return at current pc + 8
  // (in ARM state, reading pc yields the current instruction address + 8)
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}
+
+
// Call an absolute address, recording the given relocation mode. When
// not compiling for real ARM hardware, runtime entries are routed
// through a simulator software interrupt instead of a direct jump.
void MacroAssembler::Call(intptr_t target, RelocMode rmode, Condition cond) {
#if !defined(__arm__)
  if (rmode == runtime_entry) {
    mov(r2, Operand(target, rmode), LeaveCC, cond);
    // Set lr for return at current pc + 8.
    mov(lr, Operand(pc), LeaveCC, cond);
    // Notify the simulator of the transition to C code; the target
    // address is passed in r2.
    swi(assembler::arm::call_rt_r2);
  } else {
    // set lr for return at current pc + 8
    mov(lr, Operand(pc), LeaveCC, cond);
    // emit a ldr<cond> pc, [pc + offset of target in constant pool]
    mov(pc, Operand(target, rmode), LeaveCC, cond);
  }
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif  // !defined(__arm__)
  // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
  // 'blx ip'; however, the code would not be shorter than the above sequence
  // and the target address of the call would be referenced by the first
  // instruction rather than the second one, which would make it harder to patch
  // (two instructions before the return address, instead of one).
  ASSERT(kTargetAddrToReturnAddrDist == sizeof(Instr));
}
+
+
+void MacroAssembler::Call(byte* target, RelocMode rmode, Condition cond) {
+ ASSERT(!is_code_target(rmode));
+ Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocMode rmode, Condition cond) {
+ ASSERT(is_code_target(rmode));
+ // 'code' is always generated ARM code, never THUMB code
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
// Return from a call: jump to the address in lr.
void MacroAssembler::Ret() {
#if USE_BX
  // bx handles ARM/Thumb inter-working on return.
  bx(lr);
#else
  mov(pc, Operand(lr));
#endif
}
+
+
// Push the current top-of-stack (cached in r0) onto the real stack and
// make 'src' the new top-of-stack value in r0.
void MacroAssembler::Push(const Operand& src) {
  push(r0);
  mov(r0, src);
}
+
+
// Push the current top-of-stack (cached in r0) onto the real stack and
// load the new top-of-stack value from memory into r0.
void MacroAssembler::Push(const MemOperand& src) {
  push(r0);
  ldr(r0, src);
}
+
+
// Move the cached top-of-stack (r0) into 'dst' and reload r0 with the
// next value from the real stack.
void MacroAssembler::Pop(Register dst) {
  mov(dst, Operand(r0));
  pop(r0);
}
+
+
// Store the cached top-of-stack (r0) to memory and reload r0 with the
// next value from the real stack.
void MacroAssembler::Pop(const MemOperand& dst) {
  str(r0, dst);
  pop(r0);
}
+
+
// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away. Sets the remembered-set bit for the word at
// [object + offset], handling both normal pages and the extra
// remembered set of large objects.
void MacroAssembler::RecordWrite(Register object, Register offset,
                                 Register scratch) {
  // This is how much we shift the remembered set bit offset to get the
  // offset of the word in the remembered set. We divide by kBitsPerInt (32,
  // shift right 5) and then multiply by kIntSize (4, shift left 2).
  const int kRSetWordShift = 3;

  Label fast, done;

  // First, test that the start address is not in the new space. We cannot
  // set remembered set bits in the new space.
  and_(scratch, object, Operand(Heap::NewSpaceMask()));
  cmp(scratch, Operand(ExternalReference::new_space_start()));
  b(eq, &done);

  mov(ip, Operand(Page::kPageAlignmentMask));  // load mask only once
  // Compute the bit offset in the remembered set:
  // (offset of object within its page + offset of field) / pointer size.
  and_(scratch, object, Operand(ip));
  add(offset, scratch, Operand(offset));
  mov(offset, Operand(offset, LSR, kObjectAlignmentBits));

  // Compute the page address from the heap object pointer.
  bic(object, object, Operand(ip));

  // If the bit offset lies beyond the normal remembered set range, it is in
  // the extra remembered set area of a large object.
  cmp(offset, Operand(Page::kPageSize / kPointerSize));
  b(lt, &fast);

  // Adjust the bit offset to be relative to the start of the extra
  // remembered set and the start address to be the address of the extra
  // remembered set.
  sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
  // Load the array length into 'scratch' and multiply by four to get the
  // size in bytes of the elements.
  ldr(scratch, MemOperand(object, Page::kObjectStartOffset
                                  + FixedArray::kLengthOffset));
  mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
  // Add the page header (including remembered set), array header, and array
  // body size to the page address.
  add(object, object, Operand(Page::kObjectStartOffset
                              + Array::kHeaderSize));
  add(object, object, Operand(scratch));

  bind(&fast);
  // Now object is the address of the start of the remembered set and offset
  // is the bit offset from that start.
  // Get address of the rset word.
  add(object, object, Operand(offset, LSR, kRSetWordShift));
  // Get bit offset in the word.
  and_(offset, offset, Operand(kBitsPerInt - 1));

  // Set the bit: *object |= 1 << offset.
  ldr(scratch, MemOperand(object));
  mov(ip, Operand(1));
  orr(scratch, scratch, Operand(ip, LSL, offset));
  str(scratch, MemOperand(object));

  bind(&done);
}
+
+
// Emit the prologue of a JS function called from JS code: pads missing
// arguments with undefined, saves callee-saved registers, builds the
// standard frame record, and sets up fp/pp for the new frame.
void MacroAssembler::EnterJSFrame(int argc, RegList callee_saved) {
  // Generate code entering a JS function called from a JS function
  // stack: receiver, arguments
  // r0: number of arguments (not including function, nor receiver)
  // r1: preserved
  // sp: stack pointer
  // fp: frame pointer
  // cp: callee's context
  // pp: caller's parameter pointer
  // lr: return address

  // compute parameter pointer before making changes
  // ip = sp + kPointerSize*(args_len+1);  // +1 for receiver
  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
  add(ip, ip, Operand(kPointerSize));

  // push extra parameters if we don't have enough
  // (this can only happen if argc > 0 to begin with)
  if (argc > 0) {
    Label loop, done;

    // assume enough arguments to be the most common case
    sub(r2, r0, Operand(argc), SetCC);  // number of missing arguments
    b(ge, &done);  // enough arguments

    // not enough arguments: push undefined until the count reaches zero
    mov(r3, Operand(Factory::undefined_value()));
    bind(&loop);
    push(r3);
    add(r2, r2, Operand(1), SetCC);
    b(lt, &loop);

    bind(&done);
  }

  mov(r3, Operand(r0));  // args_len to be saved
  mov(r2, Operand(cp));  // context to be saved

  // Make sure there are no instructions between both stm instructions, because
  // the callee_saved list is obtained during stack unwinding by decoding the
  // first stmdb instruction, which is found (or not) at a constant offset from
  // the pc saved by the second stmdb instruction.
  if (callee_saved != 0) {
    stm(db_w, sp, callee_saved);
  }

  // push in reverse order: context (r2), args_len (r3), caller_pp, caller_fp,
  // sp_on_exit (ip == pp, may be patched on exit), return address, prolog_pc
  stm(db_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit() |
                ip.bit() | lr.bit() | pc.bit());

  // Setup new frame pointer.
  add(fp, sp, Operand(-StandardFrameConstants::kContextOffset));
  mov(pp, Operand(ip));  // setup new parameter pointer
  mov(r0, Operand(0));  // spare slot to store caller code object during GC
  // r0: TOS (code slot == 0)
  // r1: preserved
}
+
+
// Emit the epilogue of a JS function: restores callee-saved registers
// and the caller's frame, returning to the caller when flag == RETURN.
void MacroAssembler::ExitJSFrame(ExitJSFlag flag, RegList callee_saved) {
  // r0: result
  // sp: stack pointer
  // fp: frame pointer
  // pp: parameter pointer

  if (callee_saved != 0 || flag == DO_NOT_RETURN) {
    // r3 = address of the saved-register area in the frame.
    add(r3, fp, Operand(JavaScriptFrameConstants::kSavedRegistersOffset));
  }

  if (callee_saved != 0) {
    ldm(ia_w, r3, callee_saved);
  }

  if (flag == DO_NOT_RETURN) {
    // restore sp as caller_sp (not as pp)
    str(r3, MemOperand(fp, JavaScriptFrameConstants::kSPOnExitOffset));
  }

  if (flag == DO_NOT_RETURN && generating_stub()) {
    // If we're generating a stub, we need to preserve the link
    // register to be able to return to the place the stub was called
    // from.
    mov(ip, Operand(lr));
  }

  mov(sp, Operand(fp));  // respect ABI stack constraint
  // Restore pp, fp, sp; load pc directly when returning, lr otherwise.
  ldm(ia, sp, pp.bit() | fp.bit() | sp.bit() |
              ((flag == RETURN) ? pc.bit() : lr.bit()));

  if (flag == DO_NOT_RETURN && generating_stub()) {
    // Return to the place where the stub was called without
    // clobbering the value of the link register.
    mov(pc, Operand(ip));
  }

  // r0: result
  // sp: points to function arg (if return) or to last arg (if no return)
  // fp: restored frame pointer
  // pp: restored parameter pointer
}
+
+
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of registers to memory location.
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ Register reg = { r };
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ str(reg, MemOperand(ip));
+ }
+ }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of memory location to registers.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ Register reg = { r };
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ ldr(reg, MemOperand(ip));
+ }
+ }
+}
+
+
+void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
+ RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of the memory location to the stack and adjust base.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ ldr(ip, MemOperand(ip));
+ str(ip, MemOperand(base, 4, NegPreIndex));
+ }
+ }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+ Register scratch,
+ RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of the stack to the memory location and adjust base.
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ ldr(scratch, MemOperand(base, 4, PostIndex));
+ str(scratch, MemOperand(ip));
+ }
+ }
+}
+
+
// Push a new try handler frame onto the stack and link it into the
// handler chain rooted at Top::k_handler_address.
void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
  // The pc (return address) is passed in register lr.
  if (try_location == IN_JAVASCRIPT) {
    mov(r0, Operand(Smi::FromInt(StackHandler::kCodeNotPresent)));  // new TOS
    stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
    if (type == TRY_CATCH_HANDLER) {
      mov(r3, Operand(StackHandler::TRY_CATCH));
    } else {
      mov(r3, Operand(StackHandler::TRY_FINALLY));
    }
    push(r3);  // state
    mov(r3, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r1, MemOperand(r3));
    push(r1);  // next sp
    str(sp, MemOperand(r3));  // chain handler
    // TOS is r0
  } else {
    // Must preserve r0-r3, r5-r7 are available.
    ASSERT(try_location == IN_JS_ENTRY);
    // The parameter pointer is meaningless here and fp does not point to a JS
    // frame. So we save NULL for both pp and fp. We expect the code throwing an
    // exception to check fp before dereferencing it to restore the context.
    mov(r5, Operand(Smi::FromInt(StackHandler::kCodeNotPresent)));  // new TOS
    mov(pp, Operand(0));  // set pp to NULL
    mov(ip, Operand(0));  // to save a NULL fp
    stm(db_w, sp, pp.bit() | ip.bit() | lr.bit());
    mov(r6, Operand(StackHandler::ENTRY));
    push(r6);  // state
    mov(r7, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r6, MemOperand(r7));
    push(r6);  // next sp
    str(sp, MemOperand(r7));  // chain handler
    push(r5);  // flush TOS
  }
}
+
+
// Walk the prototype chain from 'object' to 'holder', verifying that no
// map has changed since code generation and performing security checks
// for global objects along the way. Branches to 'miss' on any failure;
// on success returns the register that holds the holder (either
// object_reg or holder_reg).
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                   JSObject* holder, Register holder_reg,
                                   Register scratch,
                                   Label* miss) {
  // Make sure there's no overlap between scratch and the other
  // registers.
  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 1;

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  while (object != holder) {
    depth++;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());

    // Get the map of the current object.
    ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    cmp(scratch, Operand(Handle<Map>(object->map())));

    // Branch on the result of the map check.
    b(ne, miss);

    // Check access rights to the global object. This has to happen
    // after the map check so that we know that the object is
    // actually a global object.
    if (object->IsJSGlobalObject()) {
      CheckAccessGlobal(reg, scratch, miss);
      // Restore scratch register to be the map of the object. In the
      // new space case below, we load the prototype from the map in
      // the scratch register.
      ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    }

    reg = holder_reg;  // from now the object is in holder_reg
    JSObject* prototype = JSObject::cast(object->GetPrototype());
    if (Heap::InNewSpace(prototype)) {
      // The prototype is in new space; we cannot store a reference
      // to it in the code. Load it from the map.
      ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset));
    } else {
      // The prototype is in old space; load it directly.
      mov(reg, Operand(Handle<JSObject>(prototype)));
    }

    // Go to the next object in the prototype chain.
    object = prototype;
  }

  // Check the holder map.
  ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
  cmp(scratch, Operand(Handle<Map>(object->map())));
  b(ne, miss);

  // Log the check depth.
  LOG(IntEvent("check-maps-depth", depth));

  // Perform security check for access to the global object and return
  // the holder register.
  ASSERT(object == holder);
  ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());
  if (object->IsJSGlobalObject()) {
    CheckAccessGlobal(reg, scratch, miss);
  }
  return reg;
}
+
+
// Check that the calling security context may access the global object
// in 'holder_reg' by comparing security tokens; branches to 'miss' on
// mismatch. Leaves 'holder_reg' untouched; clobbers 'scratch' and ip.
void MacroAssembler::CheckAccessGlobal(Register holder_reg,
                                       Register scratch,
                                       Label* miss) {
  ASSERT(!holder_reg.is(scratch));

  // Load the security context.
  mov(scratch, Operand(Top::security_context_address()));
  ldr(scratch, MemOperand(scratch));
  // In debug mode, make sure the security context is set.
  if (kDebug) {
    cmp(scratch, Operand(0));
    Check(ne, "we should not have an empty security context");
  }

  // Load the global object of the security context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  ldr(scratch, FieldMemOperand(scratch, JSGlobalObject::kSecurityTokenOffset));
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalObject::kSecurityTokenOffset));
  cmp(scratch, Operand(ip));
  b(ne, miss);
}
+
+
// Call a code stub.
void MacroAssembler::CallStub(CodeStub* stub) {
  ASSERT(!generating_stub());  // stub calls are not allowed in stubs
  Call(stub->GetCode(), code_target);
}
+
+
// Call a code stub that exits the current JS frame, using the
// exit_js_frame relocation mode.
void MacroAssembler::CallJSExitStub(CodeStub* stub) {
  ASSERT(!generating_stub());  // stub calls are not allowed in stubs
  Call(stub->GetCode(), exit_js_frame);
}
+
+
+void MacroAssembler::StubReturn(int argc) {
+ ASSERT(argc >= 1 && generating_stub());
+ if (argc > 1)
+ add(sp, sp, Operand((argc - 1) * kPointerSize));
+ Ret();
+}
+
// Call a runtime routine through its entry stub. The receiver counts
// toward num_arguments but is not passed to the routine itself.
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  ASSERT(num_arguments >= 1);  // must have receiver for call

  if (f->nargs < 0) {
    // The number of arguments is not constant for this call, or we don't
    // have an entry stub that pushes the value. Push it before the call.
    push(r0);
    // Receiver does not count as an argument.
    mov(r0, Operand(num_arguments - 1));
  } else {
    ASSERT(f->nargs == num_arguments);
  }

  RuntimeStub stub((Runtime::FunctionId) f->stub_id);
  CallStub(&stub);
}
+
+
// Convenience overload: look up the runtime function by id.
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
+
+
// Tail call (jump) to a runtime routine; control does not return here.
void MacroAssembler::TailCallRuntime(Runtime::Function* f) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  if (f->nargs >= 0) {
    // The number of arguments is fixed for this call.
    // Set r0 correspondingly.
    push(r0);
    mov(r0, Operand(f->nargs - 1));  // receiver does not count as an argument
  }
  JumpToBuiltin(ExternalReference(f));  // tail call to runtime routine
}
+
+
// Jump to a C builtin via the CEntryStub; the builtin's address is
// passed to the stub in r1.
void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
#if defined(__thumb__)
  // A Thumb-mode builtin entry point must have its low address bit set.
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
  mov(r1, Operand(builtin));
  CEntryStub stub;
  Jump(stub.GetCode(), code_target);
}
+
+
+void MacroAssembler::InvokeBuiltin(const char* name,
+ int argc,
+ InvokeJSFlags flags) {
+ Handle<String> symbol = Factory::LookupAsciiSymbol(name);
+ Object* object = Top::security_context_builtins()->GetProperty(*symbol);
+ bool unresolved = true;
+ Code* code = Builtins::builtin(Builtins::Illegal);
+
+ if (object->IsJSFunction()) {
+ Handle<JSFunction> function(JSFunction::cast(object));
+ if (function->is_compiled() || CompileLazy(function, CLEAR_EXCEPTION)) {
+ code = function->code();
+ unresolved = false;
+ }
+ }
+
+ if (flags == CALL_JS) {
+ Call(Handle<Code>(code), code_target);
+ } else {
+ ASSERT(flags == JUMP_JS);
+ Jump(Handle<Code>(code), code_target);
+ }
+
+ if (unresolved) {
+ uint32_t flags =
+ Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(false);
+ Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+ unresolved_.Add(entry);
+ }
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+ if (FLAG_debug_code)
+ Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+ Label L;
+ b(cc, &L);
+ Abort(msg);
+ // will not return here
+ bind(&L);
+}
+
+
// Emit code that prints 'msg' and aborts execution through the kAbort
// runtime function; the generated code does not return.
void MacroAssembler::Abort(const char* msg) {
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  // Push the aligned pointer and the alignment delta as the two
  // arguments of Runtime::kAbort.
  push(r0);
  mov(r0, Operand(p0));
  push(r0);
  mov(r0, Operand(Smi::FromInt(p1 - p0)));
  CallRuntime(Runtime::kAbort, 2);
  // will not return here
}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MACRO_ASSEMBLER_ARM_H_
+#define V8_MACRO_ASSEMBLER_ARM_H_
+
+#include "assembler.h"
+
+namespace v8 { namespace internal {
+
+
+// Give alias names to registers
+extern Register cp; // JavaScript context pointer
+extern Register pp; // parameter pointer
+
+
// Helper types to make boolean flag easier to read at call-site.
enum InvokeJSFlags {
  CALL_JS,  // invoke via a call; control returns to the call site
  JUMP_JS   // invoke via a tail jump; control does not return
};

// Controls whether ExitJSFrame emits a return or leaves control flow
// to the caller.
enum ExitJSFlag {
  RETURN,
  DO_NOT_RETURN
};

// Where a try handler is being pushed from (see PushTryHandler).
enum CodeLocation {
  IN_JAVASCRIPT,
  IN_JS_ENTRY,
  IN_C_ENTRY
};

// The kind of stack handler being installed.
enum HandlerType {
  TRY_CATCH_HANDLER,
  TRY_FINALLY_HANDLER,
  JS_ENTRY_HANDLER
};
+
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  // ---------------------------------------------------------------------------
  // Low-level helpers for compiler

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  // The raw-address overloads are private; callers use the typed
  // overloads below.
 private:
  void Jump(intptr_t target, RelocMode rmode, Condition cond = al);
  void Call(intptr_t target, RelocMode rmode, Condition cond = al);
 public:
  void Jump(Register target, Condition cond = al);
  void Jump(byte* target, RelocMode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocMode rmode, Condition cond = al);
  void Call(Register target, Condition cond = al);
  void Call(byte* target, RelocMode rmode, Condition cond = al);
  void Call(Handle<Code> code, RelocMode rmode, Condition cond = al);
  void Ret();

  // Sets the remembered set bit for [address+offset], where address is the
  // address of the heap object 'object'. The address must be in the first 8K
  // of an allocated page. The 'scratch' register is used in the
  // implementation and all 3 registers are clobbered by the operation, as
  // well as the ip register.
  void RecordWrite(Register object, Register offset, Register scratch);

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterJSFrame(int argc, RegList callee_saved);
  void ExitJSFrame(ExitJSFlag flag, RegList callee_saved);


  // Support functions. The stack's top-of-stack value is cached in r0;
  // these maintain that invariant.
  void Push(const Operand& src);
  void Push(const MemOperand& src);
  void Pop(Register dst);
  void Pop(const MemOperand& dst);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void CopyRegistersFromMemoryToStack(Register base, RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);


  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain.
  // The return address must be passed in register lr.
  // On exit, r0 contains TOS (code slot).
  void PushTryHandler(CodeLocation try_location, HandlerType type);


  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object hasn't changed since the code was
  // generated and branches to the miss label if any map has. If
  // necessary the function also generates code for security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobal(Register holder_reg, Register scratch, Label* miss);


  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);
  void CallJSExitStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToBuiltin, but also takes care of passing the number
  // of parameters, if known.
  void TailCallRuntime(Runtime::Function* f);

  // Jump to the builtin routine.
  void JumpToBuiltin(const ExternalReference& builtin);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(const char* name, int argc, InvokeJSFlags flags);

  // A call site whose builtin target could not be resolved at code
  // generation time; patched later by the bootstrapper.
  struct Unresolved {
    int pc;          // offset of the call site in the generated code
    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }

 private:
  List<Unresolved> unresolved_;  // call sites pending bootstrapper fixup
  bool generating_stub_;         // true while generating a code stub
};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+
+} } // namespace v8::internal
+
+#endif // V8_MACRO_ASSEMBLER_ARM_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+#include "serialize.h"
+
+namespace v8 { namespace internal {
+
+// --debug-code is defined elsewhere; this file only reads it.
+DECLARE_bool(debug_code);
+DEFINE_bool(native_code_counters, false,
+            "generate extra code for manipulating stats counters");
+
+
+// Construct a macro assembler emitting into 'buffer'.  The unresolved
+// list starts empty; it collects builtin references that could not be
+// resolved at assembly time (see InvokeBuiltin/GetBuiltinEntry).
+MacroAssembler::MacroAssembler(void* buffer, int size)
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false) {
+}
+
+
+// Emit code that sets the remembered set bit for the slot at 'addr'
+// within the page containing 'object'.  All three registers are
+// clobbered.  The slow path handles slots that fall in the extra
+// remembered set area of a large object.
+static void RecordWriteHelper(MacroAssembler* masm,
+                              Register object,
+                              Register addr,
+                              Register scratch) {
+  Label fast;
+
+  // Compute the page address from the heap object pointer, leave it
+  // in 'object'.
+  masm->and_(object, ~Page::kPageAlignmentMask);
+
+  // Compute the bit addr in the remembered set, leave it in "addr".
+  masm->sub(addr, Operand(object));
+  masm->shr(addr, kObjectAlignmentBits);
+
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  masm->cmp(addr, Page::kPageSize / kPointerSize);
+  masm->j(less, &fast);
+
+  // Adjust 'addr' to be relative to the start of the extra remembered set
+  // and the page address in 'object' to be the address of the extra
+  // remembered set.
+  masm->sub(Operand(addr), Immediate(Page::kPageSize / kPointerSize));
+  // Load the array length into 'scratch' and multiply by four to get the
+  // size in bytes of the elements.
+  masm->mov(scratch, Operand(object, Page::kObjectStartOffset
+                                     + FixedArray::kLengthOffset));
+  masm->shl(scratch, kObjectAlignmentBits);
+  // Add the page header, array header, and array body size to the page
+  // address.
+  masm->add(Operand(object), Immediate(Page::kObjectStartOffset
+                                       + Array::kHeaderSize));
+  masm->add(object, Operand(scratch));
+
+
+  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+  // to limit code size. We should probably evaluate this decision by
+  // measuring the performance of an equivalent implementation using
+  // "simpler" instructions
+  masm->bind(&fast);
+  masm->bts(Operand(object, 0), addr);
+}
+
+
+// Out-of-line code stub that performs the remembered set update for a
+// (object, slot address) pair; used by RecordWrite when inlining the
+// update would not pay off.
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;   // object being stored into
+  Register addr_;     // address of the slot written
+  Register scratch_;  // clobbered temporary
+
+  const char* GetName() { return "RecordWriteStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // The minor key packs the three register codes into twelve bits:
+  // OOOOAAAASSSS (object, address, scratch).
+  class ScratchBits: public BitField<uint32_t, 0, 4> {};
+  class AddressBits: public BitField<uint32_t, 4, 4> {};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
+
+
+// The stub body: delegate to the shared helper and return.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  RecordWriteHelper(masm, object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object, int offset,
+                                 Register value, Register scratch) {
+  // First, check if a remembered set write is even needed. The tests below
+  // catch stores of Smis and stores into young gen (which does not have
+  // space for the remembered set bits).
+  Label done;
+
+  // This optimization cannot survive serialization and deserialization,
+  // so we disable as long as serialization can take place.
+  int32_t new_space_start =
+      reinterpret_cast<int32_t>(ExternalReference::new_space_start().address());
+  if (Serializer::enabled() || new_space_start < 0) {
+    // Cannot do smart bit-twiddling. Need to do two consecutive checks.
+    // Check for Smi first.
+    test(value, Immediate(kSmiTagMask));
+    j(zero, &done);
+    // Test that the object address is not in the new space. We cannot
+    // set remembered set bits in the new space.
+    mov(value, Operand(object));
+    and_(value, Heap::NewSpaceMask());
+    cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
+    j(equal, &done);
+  } else {
+    // move the value SmiTag into the sign bit
+    shl(value, 31);
+    // combine the object with value SmiTag
+    or_(value, Operand(object));
+    // remove the uninteresting bits inside the page
+    and_(value, Heap::NewSpaceMask() | (1 << 31));
+    // xor has two effects:
+    // - if the value was a smi, then the result will be negative
+    // - if the object is pointing into new space area the page bits will
+    //   all be zero
+    xor_(value, new_space_start | (1 << 31));
+    // Check for both conditions in one branch
+    j(less_equal, &done);
+  }
+
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+    // Compute the bit offset in the remembered set, leave it in 'value'.
+    mov(value, Operand(object));
+    and_(value, Page::kPageAlignmentMask);
+    add(Operand(value), Immediate(offset));
+    shr(value, kObjectAlignmentBits);
+
+    // Compute the page address from the heap object pointer, leave it in
+    // 'object'.
+    and_(object, ~Page::kPageAlignmentMask);
+
+    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+    // to limit code size. We should probably evaluate this decision by
+    // measuring the performance of an equivalent implementation using
+    // "simpler" instructions
+    bts(Operand(object, 0), value);
+  } else {
+    Register dst = scratch;
+    if (offset != 0) {
+      lea(dst, Operand(object, offset));
+    } else {
+      // array access: calculate the destination address in the same manner as
+      // KeyedStoreIC::GenerateGeneric
+      lea(dst,
+          Operand(object, dst, times_2, Array::kHeaderSize - kHeapObjectTag));
+    }
+    // If we are already generating a shared stub, not inlining the
+    // record write code isn't going to save us any memory.
+    if (generating_stub()) {
+      RecordWriteHelper(this, object, dst, value);
+    } else {
+      RecordWriteStub stub(object, dst, value);
+      CallStub(&stub);
+    }
+  }
+
+  bind(&done);
+}
+
+
+// Store each requested JS caller-saved register into its static
+// debugger memory slot, walking in ascending register order.
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  for (int index = 0; index < kNumJSCallerSaved; index++) {
+    int code = JSCallerSavedCode(index);
+    if ((regs & (1 << code)) != 0) {
+      Register source = { code };
+      ExternalReference slot =
+          ExternalReference(Debug_Address::Register(index));
+      mov(Operand::StaticVariable(slot), source);
+    }
+  }
+}
+
+
+// Reload each requested JS caller-saved register from its static
+// debugger memory slot, walking in descending register order.
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  for (int index = kNumJSCallerSaved; --index >= 0;) {
+    int code = JSCallerSavedCode(index);
+    if ((regs & (1 << code)) != 0) {
+      Register destination = { code };
+      ExternalReference slot =
+          ExternalReference(Debug_Address::Register(index));
+      mov(destination, Operand::StaticVariable(slot));
+    }
+  }
+}
+
+
+// Push the memory-saved value of each requested register onto the
+// stack, walking in ascending register order.
+void MacroAssembler::PushRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  for (int index = 0; index < kNumJSCallerSaved; index++) {
+    int code = JSCallerSavedCode(index);
+    if ((regs & (1 << code)) != 0) {
+      ExternalReference slot =
+          ExternalReference(Debug_Address::Register(index));
+      push(Operand::StaticVariable(slot));
+    }
+  }
+}
+
+
+// Pop stack values back into the static debugger memory slots.  The
+// descending register order mirrors PushRegistersFromMemory.
+void MacroAssembler::PopRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  for (int index = kNumJSCallerSaved; --index >= 0;) {
+    int code = JSCallerSavedCode(index);
+    if ((regs & (1 << code)) != 0) {
+      ExternalReference slot =
+          ExternalReference(Debug_Address::Register(index));
+      pop(Operand::StaticVariable(slot));
+    }
+  }
+}
+
+
+// Copy register values that were spilled to the stack at 'base' into
+// their static debugger memory slots, advancing 'base' one pointer per
+// copied slot.  Walks in descending register order.
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+                                                    Register scratch,
+                                                    RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  for (int index = kNumJSCallerSaved; --index >= 0;) {
+    int code = JSCallerSavedCode(index);
+    if ((regs & (1 << code)) != 0) {
+      mov(scratch, Operand(base, 0));
+      ExternalReference slot =
+          ExternalReference(Debug_Address::Register(index));
+      mov(Operand::StaticVariable(slot), scratch);
+      lea(base, Operand(base, kPointerSize));
+    }
+  }
+}
+
+
+// Load the immediate 'x' into register 'dst'.  Clearing a register is
+// done with xor, which has a shorter encoding than a mov of zero.
+void MacroAssembler::Set(Register dst, const Immediate& x) {
+  if (!x.is_zero()) {
+    mov(Operand(dst), x);
+  } else {
+    xor_(dst, Operand(dst));
+  }
+}
+
+
+// Store the immediate 'x' into the memory operand 'dst'.  No shorter
+// encoding is available for memory destinations, so this is a plain mov.
+void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
+  mov(dst, x);
+}
+
+
+// Compare the two values on top of the FPU stack and pop both, then
+// copy the FPU condition bits into the CPU flags.  fnstsw_ax/sahf go
+// through ax, so eax is saved and restored around them.
+void MacroAssembler::FCmp() {
+  fcompp();
+  push(eax);
+  fnstsw_ax();
+  sahf();
+  pop(eax);
+}
+
+
+// Build a type-marked stack frame: saved ebp, esi, and a Smi-encoded
+// frame type marker.  JavaScript frames are built elsewhere, hence the
+// assert.
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  ASSERT(type != StackFrame::JAVA_SCRIPT);
+  push(ebp);
+  mov(ebp, Operand(esp));
+  push(esi);
+  push(Immediate(Smi::FromInt(type)));  // frame type marker
+  if (type == StackFrame::INTERNAL) {
+    // NOTE(review): extra zero slot for INTERNAL frames — purpose not
+    // visible in this file; confirm against the frame constants.
+    push(Immediate(0));
+  }
+}
+
+
+// Tear down a frame built by EnterFrame.  With --debug-code, first
+// verify that the marker slot holds the expected frame type.
+void MacroAssembler::ExitFrame(StackFrame::Type type) {
+  ASSERT(type != StackFrame::JAVA_SCRIPT);
+  if (FLAG_debug_code) {
+    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+        Immediate(Smi::FromInt(type)));
+    Check(equal, "stack frame types must match");
+  }
+  leave();
+}
+
+
+// Push a stack handler record and link it into the handler chain kept
+// at Top::k_handler_address.  On exit eax holds the previous chain
+// top.  The field push order must match StackHandlerConstants.
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
+  // The pc (return address) is already on TOS.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      push(Immediate(StackHandler::TRY_CATCH));
+    } else {
+      push(Immediate(StackHandler::TRY_FINALLY));
+    }
+    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+    push(ebp);
+    push(edi);
+  } else {
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The parameter pointer is meaningless here and ebp does not
+    // point to a JS frame. So we save NULL for both pp and ebp. We
+    // expect the code throwing an exception to check ebp before
+    // dereferencing it to restore the context.
+    push(Immediate(StackHandler::ENTRY));
+    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+    push(Immediate(0));  // NULL frame pointer
+    push(Immediate(0));  // NULL parameter pointer
+  }
+  // Cached TOS: keep the previous top of the handler chain in eax.
+  mov(eax, Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Link this handler.
+  mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
+}
+
+
+// Walk the prototype chain from 'object' to 'holder', emitting a map
+// check per object and jumping to 'miss' on any mismatch.  Global
+// objects additionally get a security check.  Returns the register
+// that ends up holding the holder (object_reg or holder_reg).
+Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
+                                   JSObject* holder, Register holder_reg,
+                                   Register scratch,
+                                   Label* miss) {
+  // Make sure there's no overlap between scratch and the other
+  // registers.
+  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 1;
+
+  // Check the maps in the prototype chain.
+  // Traverse the prototype chain from the object and do map checks.
+  while (object != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());
+
+    JSObject* prototype = JSObject::cast(object->GetPrototype());
+    if (Heap::InNewSpace(prototype)) {
+      // Get the map of the current object.
+      mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      cmp(Operand(scratch), Immediate(Handle<Map>(object->map())));
+      // Branch on the result of the map check.
+      j(not_equal, miss, not_taken);
+      // Check access rights to the global object. This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalObject()) {
+        CheckAccessGlobal(reg, scratch, miss);
+        // Restore scratch register to be the map of the object. We
+        // load the prototype from the map in the scratch register.
+        mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // from now the object is in holder_reg
+      mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      cmp(FieldOperand(reg, HeapObject::kMapOffset),
+          Immediate(Handle<Map>(object->map())));
+      // Branch on the result of the map check.
+      j(not_equal, miss, not_taken);
+      // Check access rights to the global object. This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalObject()) {
+        CheckAccessGlobal(reg, scratch, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now the object is in holder_reg
+      mov(reg, Handle<JSObject>(prototype));
+    }
+
+    // Go to the next object in the prototype chain.
+    object = prototype;
+  }
+
+  // Check the holder map.
+  cmp(FieldOperand(reg, HeapObject::kMapOffset),
+      Immediate(Handle<Map>(holder->map())));
+  j(not_equal, miss, not_taken);
+
+  // Log the check depth.
+  LOG(IntEvent("check-maps-depth", depth));
+
+  // Perform security check for access to the global object and return
+  // the holder register.
+  ASSERT(object == holder);
+  ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());
+  if (object->IsJSGlobalObject()) {
+    CheckAccessGlobal(reg, scratch, miss);
+  }
+  return reg;
+}
+
+
+// Emit a security check for access to a global object: compare the
+// security token of the current security context's global object with
+// the token of the holder, jumping to 'miss' on mismatch.  Clobbers
+// 'scratch'; leaves 'holder_reg' untouched.
+void MacroAssembler::CheckAccessGlobal(Register holder_reg,
+                                       Register scratch,
+                                       Label* miss) {
+  ASSERT(!holder_reg.is(scratch));
+
+  // Load the security context.
+  ExternalReference security_context =
+      ExternalReference(Top::k_security_context_address);
+  mov(scratch, Operand::StaticVariable(security_context));
+  // When generating debug code, make sure the security context is set.
+  if (FLAG_debug_code) {
+    cmp(Operand(scratch), Immediate(0));
+    Check(not_equal, "we should not have an empty security context");
+  }
+  // Load the global object of the security context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  mov(scratch, FieldOperand(scratch, offset));
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  mov(scratch, FieldOperand(scratch, JSGlobalObject::kSecurityTokenOffset));
+  cmp(scratch, FieldOperand(holder_reg, JSGlobalObject::kSecurityTokenOffset));
+  j(not_equal, miss, not_taken);
+}
+
+
+// Jump to 'then_label' when 'result' is zero and 'op' is negative,
+// i.e. when the integer result should have been negative zero.
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op,
+                                      Label* then_label) {
+  Label ok;
+  test(result, Operand(result));
+  j(not_zero, &ok, taken);
+  test(op, Operand(op));
+  j(sign, then_label, not_taken);
+  bind(&ok);
+}
+
+
+// Jump to 'then_label' when 'result' is zero and either 'op1' or 'op2'
+// is negative (their or'ed sign bit is set).  Clobbers 'scratch'.
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op1,
+                                      Register op2,
+                                      Register scratch,
+                                      Label* then_label) {
+  Label ok;
+  test(result, Operand(result));
+  j(not_zero, &ok, taken);
+  mov(scratch, Operand(op1));
+  or_(scratch, Operand(op2));
+  j(sign, then_label, not_taken);
+  bind(&ok);
+}
+
+
+// Call a code stub.  Stub code must not itself call stubs, hence the
+// assert on generating_stub().
+void MacroAssembler::CallStub(CodeStub* stub) {
+  ASSERT(!generating_stub());  // calls are not allowed in stubs
+  call(stub->GetCode(), code_target);
+}
+
+
+// Return from a stub, popping 'argc' arguments (the receiver slot is
+// not popped, hence argc - 1).
+void MacroAssembler::StubReturn(int argc) {
+  ASSERT(argc >= 1 && generating_stub());
+  ret((argc - 1) * kPointerSize);
+}
+
+
+// Report an illegal operation by leaving undefined as the result value
+// on the stack.
+void MacroAssembler::IllegalOperation() {
+  push(Immediate(Factory::undefined_value()));
+}
+
+
+// Convenience overload: resolve the function id and delegate.
+void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
+  CallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
+// Call the runtime routine 'f' with 'num_arguments' arguments (the
+// receiver counts as one).  The argument count, excluding the
+// receiver, is passed in eax.  Mismatched or missing arguments are
+// reported as an illegal operation instead of emitting the call.
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  // Every call needs at least the receiver.
+  if (num_arguments < 1) {
+    IllegalOperation();
+    return;
+  }
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+
+  bool variable_arity = f->nargs < 0;
+  if (!variable_arity && f->nargs != num_arguments) {
+    // Fixed-arity routine called with the wrong argument count.
+    IllegalOperation();
+    return;
+  }
+
+  // Pass the argument count in eax; the receiver does not count.
+  int count = variable_arity ? num_arguments : f->nargs;
+  mov(Operand(eax), Immediate(count - 1));
+
+  RuntimeStub stub((Runtime::FunctionId) f->stub_id);
+  CallStub(&stub);
+}
+
+
+
+// Tail call into the runtime routine 'f' by jumping through the C
+// entry stub.
+void MacroAssembler::TailCallRuntime(Runtime::Function* f) {
+  JumpToBuiltin(ExternalReference(f));  // tail call to runtime routine
+}
+
+
+// Jump to the builtin routine at 'ext': pass the entry point in ebx
+// and jump through the C entry runtime stub.
+void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+  // Set the entry point and jump to the C entry runtime stub.
+  mov(Operand(ebx), Immediate(ext));
+  CEntryStub ces;
+  jmp(ces.GetCode(), code_target);
+}
+
+
+// Shared entry code for the InvokeCode variants.  Compares expected
+// and actual parameter counts.  When they can differ at runtime the
+// arguments adaptor trampoline is called/jumped to with the actual
+// count in eax and the expected count in ebx; on a guaranteed match
+// control falls through so the caller can invoke the code directly.
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    const Operand& code_operand,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      mov(eax, actual.immediate());
+      mov(ebx, expected.immediate());
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmp(expected.reg(), actual.immediate());
+      j(equal, &invoke);
+      ASSERT(expected.reg().is(ebx));
+      mov(eax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmp(expected.reg(), Operand(actual.reg()));
+      j(equal, &invoke);
+      ASSERT(actual.reg().is(eax));
+      ASSERT(expected.reg().is(ebx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    // The adaptor expects the code to invoke in edx.
+    if (!code_constant.is_null()) {
+      mov(Operand(edx), Immediate(code_constant));
+      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_operand.is_reg(edx)) {
+      mov(edx, code_operand);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      call(adaptor, code_target);
+      jmp(done);
+    } else {
+      jmp(adaptor, code_target);
+    }
+    bind(&invoke);
+  }
+}
+
+
+// Invoke code at a memory/register operand, calling or jumping as
+// requested after the shared argument-count prologue.
+void MacroAssembler::InvokeCode(const Operand& code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code);
+  }
+  bind(&done);
+}
+
+
+// Invoke a code object known at assembly time, calling or jumping with
+// the given relocation mode after the shared prologue.
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocMode rmode,
+                                InvokeFlag flag) {
+  Label done;
+  // The code operand is unused when a code constant is supplied.
+  Operand dummy(eax);
+  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code, rmode);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code, rmode);
+  }
+  bind(&done);
+}
+
+
+// Invoke the JS function in edi: load its context into esi, its
+// expected parameter count into ebx, and invoke its code via edx.
+void MacroAssembler::InvokeFunction(Register fun,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(fun.is(edi));
+  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  // Skip the code object header to get the entry point.
+  lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+  ParameterCount expected(ebx);
+  InvokeCode(Operand(edx), expected, actual, flag);
+}
+
+
+// Invoke the JavaScript builtin 'id'.  If the builtin could not be
+// resolved, record a fixup entry so the bootstrapper can patch the
+// emitted code target later.
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  // Calls are not allowed in stubs.
+  ASSERT(flag == JUMP_FUNCTION || !generating_stub());
+
+  // Rely on the assertion to check that the number of provided
+  // arguments match the expected number of arguments. Fake a
+  // parameter count to avoid emitting code to do the check.
+  ParameterCount expected(0);
+  InvokeCode(Handle<Code>(code), expected, expected, code_target, flag);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+
+  if (!resolved) {
+    // Record the position of the emitted code target (the last 32-bit
+    // word) for later fixup by the bootstrapper.
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(true);
+    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
+    unresolved_.Add(entry);
+  }
+}
+
+
+// Load the entry point of builtin 'id' into 'target'.  Unresolved
+// builtins are recorded for later fixup by the bootstrapper.
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+
+  mov(Operand(target), Immediate(code));
+  if (!resolved) {
+    // Record the position of the emitted code immediate (the last
+    // 32-bit word) for later fixup; this reference is not pc-relative.
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(false);
+    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
+    unresolved_.Add(entry);
+  }
+  // Skip the code object header to get the entry point.
+  add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+// Try to resolve builtin 'id' to its compiled code object, setting
+// *resolved accordingly.  Falls back to the Illegal builtin when the
+// function is missing or cannot be compiled.  Also emits code loading
+// the builtin function into edi.
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+                                            bool* resolved) {
+  // Move the builtin function into the temporary function slot by
+  // reading it from the builtins object. NOTE: We should be able to
+  // reduce this to two instructions by putting the function table in
+  // the global object instead of the "builtins" object and by using a
+  // real register for the function.
+  mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  mov(edx, FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+  int builtins_offset =
+      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+  mov(edi, FieldOperand(edx, builtins_offset));
+
+  Code* code = Builtins::builtin(Builtins::Illegal);
+  *resolved = false;
+
+  if (Top::security_context() != NULL) {
+    Object* object = Top::security_context_builtins()->javascript_builtin(id);
+    if (object->IsJSFunction()) {
+      Handle<JSFunction> function(JSFunction::cast(object));
+      // Make sure the number of parameters match the formal parameter count.
+      ASSERT(function->shared()->formal_parameter_count() ==
+             Builtins::GetArgumentsCount(id));
+      if (function->is_compiled() || CompileLazy(function, CLEAR_EXCEPTION)) {
+        code = function->code();
+        *resolved = true;
+      }
+    }
+  }
+
+  return Handle<Code>(code);
+}
+
+
+// Return without popping any arguments.
+void MacroAssembler::Ret() {
+  ret(0);
+}
+
+
+// Emit code storing 'value' into the stats counter, but only when
+// native code counters are enabled and the counter exists.
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
+  }
+}
+
+
+// Emit code adding 'value' (> 0) to the stats counter when native code
+// counters are enabled.
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Operand target = Operand::StaticVariable(ExternalReference(counter));
+    // inc has a shorter encoding than add when bumping by one.
+    if (value != 1) {
+      add(target, Immediate(value));
+    } else {
+      inc(target);
+    }
+  }
+}
+
+
+// Emit code subtracting 'value' (> 0) from the stats counter when
+// native code counters are enabled.
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Operand target = Operand::StaticVariable(ExternalReference(counter));
+    // dec has a shorter encoding than sub when dropping by one.
+    if (value != 1) {
+      sub(target, Immediate(value));
+    } else {
+      dec(target);
+    }
+  }
+}
+
+
+// Emit an abort-if-not check, but only when --debug-code is enabled.
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+  if (FLAG_debug_code) Check(cc, msg);
+}
+
+
+// Like Assert, but always emitted: abort with 'msg' unless the
+// condition cc holds.
+void MacroAssembler::Check(Condition cc, const char* msg) {
+  Label L;
+  j(cc, &L, taken);
+  Abort(msg);
+  // will not return here
+  bind(&L);
+}
+
+
+// Emit code aborting execution with 'msg' via Runtime::kAbort.
+void MacroAssembler::Abort(const char* msg) {
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  push(eax);
+  push(Immediate(p0));
+  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+  CallRuntime(Runtime::kAbort, 2);
+  // will not return here
+}
+
+
+CodePatcher::CodePatcher(byte* address, int size)
+    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected: exactly 'size_' bytes
+  // were emitted and no relocation information was written.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MACRO_ASSEMBLER_IA32_H_
+#define V8_MACRO_ASSEMBLER_IA32_H_
+
+#include "assembler.h"
+
+namespace v8 { namespace internal {
+
+
+// Helper type to make boolean flag easier to read at call-site.
+enum InvokeFlag {
+  CALL_FUNCTION,  // emit a call to the invoked code
+  JUMP_FUNCTION   // emit a (tail) jump to the invoked code
+};
+
+// Where the code pushing a try handler lives; see PushTryHandler.
+enum CodeLocation {
+  IN_JAVASCRIPT,
+  IN_JS_ENTRY,
+  IN_C_ENTRY
+};
+
+// The kind of stack handler being pushed; see PushTryHandler.
+enum HandlerType {
+  TRY_CATCH_HANDLER,
+  TRY_FINALLY_HANDLER,
+  JS_ENTRY_HANDLER
+};
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+ MacroAssembler(void* buffer, int size);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ // Set the remembered set bit for [object+offset].
+ // object is the object being stored into, value is the object being stored.
+ // If offset is zero, then the scratch register contains the array index into
+ // the elements array represented as a Smi.
+ // All registers are clobbered by the operation.
+ void RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void SaveRegistersToMemory(RegList regs);
+ void RestoreRegistersFromMemory(RegList regs);
+ void PushRegistersFromMemory(RegList regs);
+ void PopRegistersToMemory(RegList regs);
+ void CopyRegistersFromStackToMemory(Register base,
+ Register scratch,
+ RegList regs);
+
+
+ // ---------------------------------------------------------------------------
+ // Activation frames
+
+ // Enter or exit a stack frame of the given type. Cannot be used to
+ // construct or leave JavaScript frames.
+ void EnterFrame(StackFrame::Type type);
+ void ExitFrame(StackFrame::Type type);
+
+
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(const Operand& code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
+ void InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocMode rmode,
+ InvokeFlag flag);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+
+ // Store the code object for the given builtin in the target register.
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // Get the code for the given builtin. Returns if able to resolve
+ // the function in the 'resolved' flag.
+ Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+ // Expression support
+  void Set(Register dst, const Immediate& x);
+  void Set(const Operand& dst, const Immediate& x);
+
+  // FCmp is similar to integer cmp, but requires unsigned
+  // jcc instructions (je, ja, jae, jb, jbe, and jz).
+  void FCmp();
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.
+  // The return address must be pushed before calling this helper.
+  // On exit, eax contains TOS (next_sp).
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generates code that verifies that the maps of objects in the
+  // prototype chain of object hasn't changed since the code was
+  // generated and branches to the miss label if any map has. If
+  // necessary the function also generates code for security check
+  // in case of global object holders. The scratch and holder
+  // registers are always clobbered, but the object register is only
+  // clobbered if it is the same as the holder register. The function
+  // returns a register containing the holder - either object_reg or
+  // holder_reg.
+  Register CheckMaps(JSObject* object, Register object_reg,
+                     JSObject* holder, Register holder_reg,
+                     Register scratch, Label* miss);
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, but the scratch register is clobbered.
+  void CheckAccessGlobal(Register holder_reg, Register scratch, Label* miss);
+
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Check if result is zero and op is negative.
+  void NegativeZeroTest(Register result, Register op, Label* then_label);
+
+  // Check if result is zero and any of op1 and op2 are negative.
+  // Register scratch is destroyed, and it must be different from op2.
+  void NegativeZeroTest(Register result, Register op1, Register op2,
+                        Register scratch, Label* then_label);
+
+  // Generates code for reporting that an illegal operation has
+  // occurred
+  void IllegalOperation();
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToBuiltin, but also takes care of passing the number
+  // of parameters, if known.
+  void TailCallRuntime(Runtime::Function* f);
+
+  // Jump to the builtin routine.
+  void JumpToBuiltin(const ExternalReference& ext);
+
+  void Ret();
+
+  // A code reference that could not be resolved at assembly time,
+  // recorded for later fixup (see the flags comment below).
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value);
+  void IncrementCounter(StatsCounter* counter, int value);
+  void DecrementCounter(StatsCounter* counter, int value);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+
+ private:
+  List<Unresolved> unresolved_;  // Unresolved references accumulated so far.
+  bool generating_stub_;  // True while generating code for a stub.
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      const Operand& code_operand,
+                      Label* done,
+                      InvokeFlag flag);
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. Is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion.
+class CodePatcher {
+ public:
+  // Opens a patching region of exactly `size` bytes starting at `address`
+  // (per the class comment above, emitting a different number of bytes,
+  // or any relocation information, triggers an assertion).
+  CodePatcher(byte* address, int size);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate an Operand for loading a field from an object.
+static inline Operand FieldOperand(Register object, int offset) {
+  // Subtract the heap-object tag so the operand addresses the raw field:
+  // field offsets are given relative to the tagged object pointer.
+  return Operand(object, offset - kHeapObjectTag);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_MACRO_ASSEMBLER_IA32_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MACRO_ASSEMBLER_H_
+#define V8_MACRO_ASSEMBLER_H_
+
+#if defined(ARM) || defined (__arm__) || defined(__thumb__)
+
+#include "constants-arm.h"
+#include "assembler.h"
+#include "assembler-arm.h"
+#include "assembler-arm-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "macro-assembler-arm.h"
+
+#else // ia32
+
+#include "assembler.h"
+#include "assembler-ia32.h"
+#include "assembler-ia32-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "macro-assembler-ia32.h"
+
+#endif
+
+#endif // V8_MACRO_ASSEMBLER_H_
--- /dev/null
+# Copyright 2006-2008 Google Inc. All Rights Reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Dictionary that is passed as defines for js2c.py.
+# Used for defines that must be defined for all native js files.
+
+const NONE = 0;
+const READ_ONLY = 1;
+const DONT_ENUM = 2;
+const DONT_DELETE = 4;
+
+# Constants used for getter and setter operations.
+const GETTER = 0;
+const SETTER = 1;
+
+# These definitions must match the constants defined in v8.h
+const kApiTagOffset = 0;
+const kApiPropertyListOffset = 1;
+const kApiSerialNumberOffset = 2;
+const kApiConstructorOffset = 2;
+const kApiPrototypeTemplateOffset = 6;
+const kApiParentTemplateOffset = 7;
+
+const NO_HINT = 0;
+const NUMBER_HINT = 1;
+const STRING_HINT = 2;
+
+const kFunctionTag = 0;
+const kNewObjectTag = 1;
+
+# For date.js
+const HoursPerDay = 24;
+const MinutesPerHour = 60;
+const SecondsPerMinute = 60;
+const msPerSecond = 1000;
+const msPerMinute = 60000;
+const msPerHour = 3600000;
+const msPerDay = 86400000;
+
+# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1)
+const kInvalidDate = 'Invalid Date';
+const kDayZeroInJulianDay = 2440588;
+const kMonthMask = 0x1e0;
+const kDayMask = 0x01f;
+const kYearShift = 9;
+const kMonthShift = 5;
+
+# Type query macros
+macro IS_NULL(arg) = (arg === null);
+macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
+macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
+macro IS_FUNCTION(arg) = (typeof(arg) === 'function');
+macro IS_NUMBER(arg) = (typeof(arg) === 'number');
+macro IS_STRING(arg) = (typeof(arg) === 'string');
+macro IS_OBJECT(arg) = (typeof(arg) === 'object');
+macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
+macro IS_REGEXP(arg) = (%ClassOf(arg) === 'RegExp');
+macro IS_ARRAY(arg) = (%ClassOf(arg) === 'Array');
+macro IS_DATE(arg) = (%ClassOf(arg) === 'Date');
+macro IS_ERROR(arg) = (%ClassOf(arg) === 'Error');
+macro IS_SCRIPT(arg) = (%ClassOf(arg) === 'Script');
+
+# 'Inline' macros
+# (Make sure arg is evaluated only once via %IS_VAR)
+macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
+macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInt32(arg));
+
+python macro CHAR_CODE(str) = ord(str[1]);
+
+# Accessors for original global properties that ensure they have been loaded.
+const ORIGINAL_REGEXP = (global.RegExp, $RegExp);
+const ORIGINAL_DATE = (global.Date, $Date);
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "execution.h"
+#include "global-handles.h"
+#include "ic-inl.h"
+#include "mark-compact.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+// The verification code used between phases of the m-c collector does not
+// currently work.
+//
+// TODO(1240833): Fix the heap verification code and turn this into a real
+// flag.
+static const bool FLAG_verify_global_gc = false;
+
+// Declared here, defined in another compilation unit.
+DECLARE_bool(gc_verbose);
+#endif  // DEBUG
+
+// Command-line flags controlling the mark-compact collector.
+DEFINE_bool(always_compact, false, "Perform compaction on every full GC");
+DEFINE_bool(never_compact, false,
+            "Never perform compaction on full GC - testing only");
+
+DEFINE_bool(cleanup_ics_at_gc, true,
+            "Flush inline caches prior to mark compact collection.");
+DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
+            "Flush code caches in maps during mark compact cycle.");
+
+// Declared here, defined in another compilation unit.
+DECLARE_bool(gc_global);
+
+// ----------------------------------------------------------------------------
+// MarkCompactCollector
+
+// Whether the current collection compacts; decided in Prepare().
+bool MarkCompactCollector::compacting_collection_ = false;
+
+#ifdef DEBUG
+// Collector phase, used only to assert correct phase ordering.
+MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
+
+// Counters used for debugging the marking phase of mark-compact or mark-sweep
+// collection.  Reset in Prepare(), accumulated via UpdateLiveObjectCount().
+int MarkCompactCollector::live_bytes_ = 0;
+int MarkCompactCollector::live_young_objects_ = 0;
+int MarkCompactCollector::live_old_objects_ = 0;
+int MarkCompactCollector::live_immutable_objects_ = 0;
+int MarkCompactCollector::live_map_objects_ = 0;
+int MarkCompactCollector::live_lo_objects_ = 0;
+#endif
+
+void MarkCompactCollector::CollectGarbage() {
+  // Top-level driver for a full collection: mark live objects, sweep the
+  // large object space, then either compact or simply sweep the spaces.
+  Prepare();
+
+  MarkLiveObjects();
+
+  SweepLargeObjectSpace();
+
+  if (compacting_collection_) {
+    // Compacting path: compute forwarding addresses, fix up all pointers,
+    // move the objects, and rebuild the remembered sets.
+    EncodeForwardingAddresses();
+
+    UpdatePointers();
+
+    RelocateObjects();
+
+    RebuildRSets();
+
+  } else {
+    // Non-compacting (mark-sweep) path.
+    SweepSpaces();
+  }
+
+  Finish();
+}
+
+
+void MarkCompactCollector::Prepare() {
+  // Decide whether this collection compacts, and prepare the spaces and
+  // debug counters for the chosen collection mode.
+  static const int kFragmentationLimit = 50;  // Percent.
+#ifdef DEBUG
+  ASSERT(state_ == IDLE);
+  state_ = PREPARE_GC;
+#endif
+  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
+
+  compacting_collection_ = FLAG_always_compact;
+
+  // We compact the old generation if it gets too fragmented (ie, we could
+  // recover an expected amount of space by reclaiming the waste and free
+  // list blocks). We always compact when the flag --gc-global is true
+  // because objects do not get promoted out of new space on non-compacting
+  // GCs.
+  if (!compacting_collection_) {
+    int old_gen_recoverable = Heap::old_space()->Waste()
+        + Heap::old_space()->AvailableFree()
+        + Heap::code_space()->Waste()
+        + Heap::code_space()->AvailableFree();
+    int old_gen_used = old_gen_recoverable
+        + Heap::old_space()->Size()
+        + Heap::code_space()->Size();
+    // NOTE(review): assumes old_gen_used > 0 (spaces presumably never all
+    // empty here) -- confirm to rule out division by zero.
+    int old_gen_fragmentation = (old_gen_recoverable * 100) / old_gen_used;
+    if (old_gen_fragmentation > kFragmentationLimit) {
+      compacting_collection_ = true;
+    }
+  }
+
+  // --never-compact overrides both the flag and the fragmentation heuristic.
+  if (FLAG_never_compact) compacting_collection_ = false;
+
+#ifdef DEBUG
+  if (compacting_collection_) {
+    // We will write bookkeeping information to the remembered set area
+    // starting now.
+    Page::set_rset_state(Page::NOT_IN_USE);
+  }
+#endif
+
+  Heap::map_space()->PrepareForMarkCompact(compacting_collection_);
+  Heap::old_space()->PrepareForMarkCompact(compacting_collection_);
+  Heap::code_space()->PrepareForMarkCompact(compacting_collection_);
+
+  Counters::global_objects.Set(0);
+
+#ifdef DEBUG
+  // Reset the debug-only live-object statistics for this collection.
+  live_bytes_ = 0;
+  live_young_objects_ = 0;
+  live_old_objects_ = 0;
+  live_immutable_objects_ = 0;
+  live_map_objects_ = 0;
+  live_lo_objects_ = 0;
+#endif
+}
+
+
+void MarkCompactCollector::Finish() {
+  // Return the collector to the idle state and drop caches invalidated
+  // by the collection.
+#ifdef DEBUG
+  ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
+  state_ = IDLE;
+#endif
+  // The stub cache is not traversed during GC; clear the cache to
+  // force lazy re-initialization of it. This must be done after the
+  // GC, because it relies on the new address of certain old space
+  // objects (empty string, illegal builtin).
+  StubCache::Clear();
+}
+
+
+// ---------------------------------------------------------------------------
+// Forwarding pointers and map pointer encoding
+// | 11 bits | offset to the live object in the page
+// | 11 bits | offset in a map page
+// | 10 bits | map table index
+
+// Field widths of the encoding (in bits).
+static const int kMapPageIndexBits = 10;
+static const int kMapPageOffsetBits = 11;
+static const int kForwardingOffsetBits = 11;
+static const int kAlignmentBits = 1;
+
+// Bit positions of the three fields, packed from the least significant bit.
+static const int kMapPageIndexShift = 0;
+static const int kMapPageOffsetShift =
+    kMapPageIndexShift + kMapPageIndexBits;
+static const int kForwardingOffsetShift =
+    kMapPageOffsetShift + kMapPageOffsetBits;
+
+// The masks below are derived from the shifts above and must stay in sync.
+
+// 0x000003FF
+static const uint32_t kMapPageIndexMask =
+    (1 << kMapPageOffsetShift) - 1;
+
+// 0x001FFC00
+static const uint32_t kMapPageOffsetMask =
+    ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
+
+// 0xFFE00000
+static const uint32_t kForwardingOffsetMask =
+    ~(kMapPageIndexMask | kMapPageOffsetMask);
+
+
+static uint32_t EncodePointers(Address map_addr, int offset) {
+  // Pack three fields into one 32-bit word: the forwarding offset (the
+  // distance to the first live object on the same page), and the page
+  // index and in-page offset of the object's map.
+  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+
+  int forwarding_field = offset >> kObjectAlignmentBits;
+  ASSERT(forwarding_field < (1 << kForwardingOffsetBits));
+
+  Page* page_of_map = Page::FromAddress(map_addr);
+  int page_index_field = page_of_map->mc_page_index;
+  ASSERT_MAP_PAGE_INDEX(page_index_field);
+
+  int page_offset_field =
+      page_of_map->Offset(map_addr) >> kObjectAlignmentBits;
+
+  uint32_t encoding = forwarding_field << kForwardingOffsetShift;
+  encoding |= page_offset_field << kMapPageOffsetShift;
+  encoding |= page_index_field << kMapPageIndexShift;
+  return encoding;
+}
+
+
+static int DecodeOffset(uint32_t encoded) {
+  // The forwarding offset lives in the most significant bits; shift it
+  // down and restore object alignment.
+  const int offset =
+      (encoded >> kForwardingOffsetShift) << kObjectAlignmentBits;
+  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+  return offset;
+}
+
+
+static Address DecodeMapPointer(uint32_t encoded, MapSpace* map_space) {
+  // Extract the map's page index and its (alignment-compressed) in-page
+  // offset, then rebuild the map's address within the given map space.
+  int page_index = (encoded & kMapPageIndexMask) >> kMapPageIndexShift;
+  ASSERT_MAP_PAGE_INDEX(page_index);
+
+  int page_offset = (encoded & kMapPageOffsetMask) >> kMapPageOffsetShift;
+  page_offset <<= kObjectAlignmentBits;
+
+  return map_space->PageAddress(page_index) + page_offset;
+}
+
+
+// ----------------------------------------------------------------------------
+// Phase 1: tracing and marking live objects.
+// before: all objects are in normal state.
+// after: a live object's map pointer is marked as '00'.
+
+// Marking all live objects in the heap as part of mark-sweep or mark-compact
+// collection. Before marking, all objects are in their normal state. After
+// marking, live objects' map pointers are marked indicating that the object
+// has been found reachable.
+//
+// The marking algorithm is a (mostly) depth-first (because of possible stack
+// overflow) traversal of the graph of objects reachable from the roots. It
+// uses an explicit stack of pointers rather than recursion. The young
+// generation's inactive ('from') space is used as a marking stack. The
+// objects in the marking stack are the ones that have been reached and marked
+// but their children have not yet been visited.
+//
+// The marking stack can overflow during traversal. In that case, we set an
+// overflow flag. When the overflow flag is set, we continue marking objects
+// reachable from the objects on the marking stack, but no longer push them on
+// the marking stack. Instead, we mark them as both marked and overflowed.
+// When the stack is in the overflowed state, objects marked as overflowed
+// have been reached and marked but their children have not been visited yet.
+// After emptying the marking stack, we clear the overflow flag and traverse
+// the heap looking for objects marked as overflowed, push them on the stack,
+// and continue with marking. This process repeats until all reachable
+// objects have been marked.
+
+static MarkingStack marking_stack;
+
+// Helper class for marking pointers in HeapObjects.
+class MarkingVisitor : public ObjectVisitor {
+ public:
+
+ void VisitPointer(Object** p) {
+ MarkObjectByPointer(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Mark all objects pointed to in [start, end).
+ const int kMinRangeForMarkingRecursion = 64;
+ if (end - start >= kMinRangeForMarkingRecursion) {
+ if (VisitUnmarkedObjects(start, end)) return;
+ // We are close to a stack overflow, so just mark the objects.
+ }
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ }
+
+ void BeginCodeIteration(Code* code) {
+ // When iterating over a code object during marking
+ // ic targets are derived pointers.
+ ASSERT(code->ic_flag() == Code::IC_TARGET_IS_ADDRESS);
+ }
+
+ void EndCodeIteration(Code* code) {
+ // If this is a compacting collection, set ic targets
+ // are pointing to object headers.
+ if (IsCompacting()) code->set_ic_flag(Code::IC_TARGET_IS_OBJECT);
+ }
+
+ void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(is_code_target(rinfo->rmode()));
+ Code* code = CodeFromDerivedPointer(rinfo->target_address());
+ if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
+ IC::Clear(rinfo->pc());
+ // Please note targets for cleared inline cached do not have to be
+ // marked since they are contained in Heap::non_monomorphic_cache().
+ } else {
+ MarkCompactCollector::MarkObject(code);
+ }
+ if (IsCompacting()) {
+ // When compacting we convert the target to a real object pointer.
+ code = CodeFromDerivedPointer(rinfo->target_address());
+ rinfo->set_target_object(code);
+ }
+ }
+
+ void VisitDebugTarget(RelocInfo* rinfo) {
+ ASSERT(is_js_return(rinfo->rmode()) && rinfo->is_call_instruction());
+ HeapObject* code = CodeFromDerivedPointer(rinfo->call_address());
+ MarkCompactCollector::MarkObject(code);
+ // When compacting we convert the call to a real object pointer.
+ if (IsCompacting()) rinfo->set_call_object(code);
+ }
+
+ private:
+ // Mark obj if needed.
+ void MarkObject(Object* obj) {
+ if (!obj->IsHeapObject()) return;
+ MarkCompactCollector::MarkObject(HeapObject::cast(obj));
+ }
+
+ // Mark object pointed to by p.
+ void MarkObjectByPointer(Object** p) {
+ Object* obj = *p;
+ if (!obj->IsHeapObject()) return;
+
+ // Optimization: Bypass ConsString object where right size is
+ // Heap::empty_string().
+ // Please note this checks performed equals:
+ // object->IsConsString() &&
+ // (ConsString::cast(object)->second() == Heap::empty_string())
+ // except the map for the object might be marked.
+ intptr_t map_word =
+ reinterpret_cast<intptr_t>(HeapObject::cast(obj)->map());
+ uint32_t tag =
+ (reinterpret_cast<Map*>(clear_mark_bit(map_word)))->instance_type();
+ if ((tag < FIRST_NONSTRING_TYPE) &&
+ (kConsStringTag ==
+ static_cast<StringRepresentationTag>(tag &
+ kStringRepresentationMask)) &&
+ (Heap::empty_string() ==
+ reinterpret_cast<String*>(
+ reinterpret_cast<ConsString*>(obj)->second()))) {
+ // Since we don't have the object start it is impossible to update the
+ // remeber set quickly. Therefore this optimization only is taking
+ // place when we can avoid changing.
+ Object* first = reinterpret_cast<ConsString*>(obj)->first();
+ if (Heap::InNewSpace(obj) || !Heap::InNewSpace(first)) {
+ obj = first;
+ *p = obj;
+ }
+ }
+ MarkCompactCollector::MarkObject(HeapObject::cast(obj));
+ }
+
+ // Tells whether the mark sweep collection will perform compaction.
+ bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
+
+ // Retrieves the Code pointer from derived code entry.
+ Code* CodeFromDerivedPointer(Address addr) {
+ ASSERT(addr != NULL);
+ return reinterpret_cast<Code*>(
+ HeapObject::FromAddress(addr - Code::kHeaderSize));
+ }
+
+ // Visit an unmarked object.
+ void VisitUnmarkedObject(HeapObject* obj) {
+ ASSERT(Heap::Contains(obj));
+#ifdef DEBUG
+ MarkCompactCollector::UpdateLiveObjectCount(obj);
+#endif
+ Map* map = obj->map();
+ set_mark(obj);
+ // Mark the map pointer and the body.
+ MarkCompactCollector::MarkObject(map);
+ obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
+ }
+
+ // Visit all unmarked objects pointed to by [start, end).
+ // Returns false if the operation fails (lack of stack space).
+ inline bool VisitUnmarkedObjects(Object** start, Object** end) {
+ // Return false is we are close to the stack limit.
+ StackLimitCheck check;
+ if (check.HasOverflowed()) return false;
+
+ // Visit the unmarked objects.
+ for (Object** p = start; p < end; p++) {
+ if (!(*p)->IsHeapObject()) continue;
+ HeapObject* obj = HeapObject::cast(*p);
+ if (is_marked(obj)) continue;
+ VisitUnmarkedObject(obj);
+ }
+ return true;
+ }
+};
+
+
+// Helper class for pruning the symbol table.
+class SymbolTableCleaner : public ObjectVisitor {
+ public:
+  SymbolTableCleaner() : pointers_removed_(0) { }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Overwrite every unmarked heap object in [start, end) with the null
+    // value (marking the entry as deleted) and count how many entries
+    // were dropped.
+    for (Object** slot = start; slot < end; slot++) {
+      Object* entry = *slot;
+      if (!entry->IsHeapObject()) continue;
+      if (is_marked(HeapObject::cast(entry))) continue;
+      *slot = Heap::null_value();
+      pointers_removed_++;
+    }
+  }
+
+  // Number of entries cleared since construction.
+  int PointersRemoved() {
+    return pointers_removed_;
+  }
+ private:
+  int pointers_removed_;
+};
+
+
+static void MarkObjectGroups(MarkingVisitor* marker) {
+  // An object group is live if any one of its members is marked; in that
+  // case every member of the group must be marked as well.
+  List<ObjectGroup*>& groups = GlobalHandles::ObjectGroups();
+
+  for (int i = 0; i < groups.length(); i++) {
+    List<Object**>& members = groups[i]->objects_;
+
+    bool any_member_marked = false;
+    for (int j = 0; j < members.length() && !any_member_marked; j++) {
+      Object* member = *members[j];
+      any_member_marked =
+          member->IsHeapObject() && is_marked(HeapObject::cast(member));
+    }
+    if (!any_member_marked) continue;
+
+    // At least one member is live: mark the whole group.
+    for (int j = 0; j < members.length(); j++) {
+      marker->VisitPointer(members[j]);
+    }
+  }
+}
+
+
+void MarkCompactCollector::MarkUnmarkedObject(HeapObject* obj) {
+  // Mark a not-yet-marked object and schedule its body for scanning:
+  // either push it on the marking stack or, if the stack has overflowed,
+  // set the object's overflow bit so a later heap scan rediscovers it.
+#ifdef DEBUG
+  if (!is_marked(obj)) UpdateLiveObjectCount(obj);
+#endif
+  ASSERT(!is_marked(obj));
+  if (obj->IsJSGlobalObject()) Counters::global_objects.Increment();
+
+  // Optionally flush per-map code caches before the map is marked.
+  if (FLAG_cleanup_caches_in_maps_at_gc && obj->IsMap()) {
+    Map::cast(obj)->ClearCodeCache();
+  }
+
+  set_mark(obj);
+  if (!marking_stack.overflowed()) {
+    ASSERT(Heap::Contains(obj));
+    marking_stack.Push(obj);
+  } else {
+    // Set object's stack overflow bit, wait for rescan.
+    set_overflow(obj);
+  }
+}
+
+
+void MarkCompactCollector::MarkObjectsReachableFromTopFrame() {
+  // Drain the marking stack, marking everything transitively reachable.
+  // Marking object groups can push more work, so repeat until the stack
+  // stays empty.
+  MarkingVisitor marking_visitor;
+  do {
+    while (!marking_stack.is_empty()) {
+      HeapObject* obj = marking_stack.Pop();
+      ASSERT(Heap::Contains(obj));
+      ASSERT(is_marked(obj) && !is_overflowed(obj));
+
+      // Because the object is marked, the map pointer is not tagged as a
+      // normal HeapObject pointer; recover the real map pointer first,
+      // then use it to mark and iterate the object body.
+      intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
+      Map* map = reinterpret_cast<Map*>(clear_mark_bit(map_word));
+      MarkObject(map);
+      obj->IterateBody(map->instance_type(), obj->SizeFromMap(map),
+                       &marking_visitor);
+    }  // Fixed: dropped the stray ';' that formed an empty statement here.
+    // Check objects in object groups.
+    MarkObjectGroups(&marking_visitor);
+  } while (!marking_stack.is_empty());
+}
+
+
+static int OverflowObjectSize(HeapObject* obj) {
+  // The object's map word may carry both the mark and the overflow bits;
+  // strip them to recover the real map before computing the size.
+  intptr_t word = reinterpret_cast<intptr_t>(obj->map());
+  Map* map = reinterpret_cast<Map*>(clear_overflow_bit(clear_mark_bit(word)));
+  return obj->SizeFromMap(map);
+}
+
+
+static bool VisitOverflowedObject(HeapObject* obj) {
+  // Move an overflowed (marked but unscanned) object back onto the
+  // marking stack.  Returns false if the stack is overflowed again and
+  // the caller should stop scanning.
+  if (!is_overflowed(obj)) return true;
+  ASSERT(is_marked(obj));
+
+  if (marking_stack.overflowed()) return false;
+
+  clear_overflow(obj);  // clear overflow bit
+  ASSERT(Heap::Contains(obj));
+  marking_stack.Push(obj);
+  return true;
+}
+
+
+template<class T>
+static void ScanOverflowedObjects(T* it) {
+  // Push overflowed objects back on the marking stack until either the
+  // iterator is exhausted or the marking stack overflows again.
+  while (it->has_next()) {
+    if (!VisitOverflowedObject(it->next())) {
+      ASSERT(marking_stack.overflowed());
+      return;  // Stack overflowed again; give up on this space for now.
+    }
+  }
+}
+
+
+bool MarkCompactCollector::MustBeMarked(Object** p) {
+  // Only unmarked heap objects still need marking; smis never do.
+  Object* candidate = *p;
+  return candidate->IsHeapObject() &&
+         !is_marked(HeapObject::cast(candidate));
+}
+
+
+void MarkCompactCollector::MarkLiveObjects() {
+  // Phase 1 driver: mark everything transitively reachable from the
+  // roots, handling marking-stack overflow and weak global handles, then
+  // prune the symbol table of dead symbols.
+#ifdef DEBUG
+  ASSERT(state_ == PREPARE_GC);
+  state_ = MARK_LIVE_OBJECTS;
+#endif
+  // The to space contains live objects, the from space is used as a marking
+  // stack.
+  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
+                           Heap::new_space()->FromSpaceHigh());
+
+  ASSERT(!marking_stack.overflowed());
+
+  // Mark the heap roots, including global variables, stack variables, etc.
+  MarkingVisitor marking_visitor;
+
+  Heap::IterateStrongRoots(&marking_visitor);
+
+  // Take care of the symbol table specially.
+  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
+#ifdef DEBUG
+  UpdateLiveObjectCount(symbol_table);
+#endif
+
+  // 1. mark the prefix of the symbol table and push the objects on
+  // the stack.
+  symbol_table->IteratePrefix(&marking_visitor);
+  // 2. mark the symbol table without pushing it on the stack; its
+  // elements are deliberately not marked so dead ones can be pruned below.
+  set_mark(symbol_table);  // map word is changed.
+
+  bool has_processed_weak_pointers = false;
+
+  // Mark objects reachable from the roots.
+  while (true) {
+    MarkObjectsReachableFromTopFrame();
+
+    if (!marking_stack.overflowed()) {
+      if (has_processed_weak_pointers) break;
+      // First we mark weak pointers not yet reachable.
+      GlobalHandles::MarkWeakRoots(&MustBeMarked);
+      // Then we process weak pointers and process the transitive closure.
+      GlobalHandles::IterateWeakRoots(&marking_visitor);
+      has_processed_weak_pointers = true;
+      continue;
+    }
+
+    // The marking stack overflowed, we need to rebuild it by scanning the
+    // whole heap.
+    marking_stack.clear_overflowed();
+
+    // We have early stops if the stack overflowed again while scanning
+    // overflowed objects in a space.
+    SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
+    ScanOverflowedObjects(&new_it);
+    if (marking_stack.overflowed()) continue;
+
+    HeapObjectIterator old_it(Heap::old_space(), &OverflowObjectSize);
+    ScanOverflowedObjects(&old_it);
+    if (marking_stack.overflowed()) continue;
+
+    HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
+    ScanOverflowedObjects(&code_it);
+    if (marking_stack.overflowed()) continue;
+
+    HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
+    ScanOverflowedObjects(&map_it);
+    if (marking_stack.overflowed()) continue;
+
+    LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
+    ScanOverflowedObjects(&lo_it);
+  }
+
+  // Prune the symbol table removing all symbols only pointed to by
+  // the symbol table.
+  SymbolTableCleaner v;
+  symbol_table->IterateElements(&v);
+  symbol_table->ElementsRemoved(v.PointersRemoved());
+
+#ifdef DEBUG
+  if (FLAG_verify_global_gc) VerifyHeapAfterMarkingPhase();
+#endif
+
+  // Remove object groups after marking phase.
+  GlobalHandles::RemoveObjectGroups();
+
+  // Objects in the active semispace of the young generation will be relocated
+  // to the inactive semispace.  Set the relocation info to the beginning of
+  // the inactive semispace.
+  Heap::new_space()->MCResetRelocationInfo();
+}
+
+
+#ifdef DEBUG
+void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
+  // Debug-only bookkeeping: accumulate the live byte total and the
+  // per-space object counts for an object about to be marked.
+  live_bytes_ += obj->Size();
+  if (Heap::new_space()->Contains(obj)) {
+    live_young_objects_++;
+  } else if (Heap::map_space()->Contains(obj)) {
+    ASSERT(obj->IsMap());
+    live_map_objects_++;
+  } else if (Heap::old_space()->Contains(obj)) {
+    live_old_objects_++;
+  } else if (Heap::code_space()->Contains(obj)) {
+    live_immutable_objects_++;
+  } else if (Heap::lo_space()->Contains(obj)) {
+    live_lo_objects_++;
+  } else {
+    // Every heap object must live in exactly one of the spaces above.
+    UNREACHABLE();
+  }
+}
+
+
+static int CountMarkedCallback(HeapObject* obj) {
+  // Size callback for the verification iterators.  Size() needs a sane
+  // map word, so for marked objects the mark bit is temporarily cleared
+  // while the size is computed and then restored.
+  if (!is_marked(obj)) return obj->Size();
+  clear_mark(obj);
+  int obj_size = obj->Size();
+  set_mark(obj);
+  return obj_size;
+}
+
+
+void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
+  // Verify each space, then check that the number of marked objects found
+  // in each space agrees with the counters accumulated during marking.
+  Heap::new_space()->Verify();
+  Heap::old_space()->Verify();
+  Heap::code_space()->Verify();
+  Heap::map_space()->Verify();
+
+  int live_objects;
+
+// Count the marked objects produced by the given iterator and assert the
+// count matches the expected per-space counter.
+#define CHECK_LIVE_OBJECTS(it, expected) \
+  live_objects = 0; \
+  while (it.has_next()) { \
+    HeapObject* obj = HeapObject::cast(it.next()); \
+    if (is_marked(obj)) live_objects++; \
+  } \
+  ASSERT(live_objects == expected);
+
+  SemiSpaceIterator new_it(Heap::new_space(), &CountMarkedCallback);
+  CHECK_LIVE_OBJECTS(new_it, live_young_objects_);
+
+  HeapObjectIterator old_it(Heap::old_space(), &CountMarkedCallback);
+  CHECK_LIVE_OBJECTS(old_it, live_old_objects_);
+
+  HeapObjectIterator code_it(Heap::code_space(), &CountMarkedCallback);
+  CHECK_LIVE_OBJECTS(code_it, live_immutable_objects_);
+
+  HeapObjectIterator map_it(Heap::map_space(), &CountMarkedCallback);
+  CHECK_LIVE_OBJECTS(map_it, live_map_objects_);
+
+  LargeObjectIterator lo_it(Heap::lo_space(), &CountMarkedCallback);
+  CHECK_LIVE_OBJECTS(lo_it, live_lo_objects_);
+
+#undef CHECK_LIVE_OBJECTS
+}
+#endif // DEBUG
+
+
+// Sweep the large object space: free every unmarked large object.
+void MarkCompactCollector::SweepLargeObjectSpace() {
+#ifdef DEBUG
+  // Large-object sweeping marks the transition out of the marking phase.
+  ASSERT(state_ == MARK_LIVE_OBJECTS);
+  state_ = compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES
+                                  : SWEEP_SPACES;
+#endif
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  Heap::lo_space()->FreeUnmarkedObjects();
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 2: Encode forwarding addresses.
+// When compacting, forwarding addresses for objects in old space and map
+// space are encoded in their map pointer word (along with an encoding of
+// their map pointers).
+//
+//  31            21 20              10 9                0
+// +-----------------+------------------+-----------------+
+// |forwarding offset|page offset of map|page index of map|
+// +-----------------+------------------+-----------------+
+//  11 bits           11 bits            10 bits
+//
+// An address range [start, end) can have both live and non-live objects.
+// Maximal non-live regions are marked so they can be skipped on subsequent
+// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
+// free regions of one-word size (in which case the next word is the start
+// of a live object).  A second distinguished map-pointer encoding is used
+// to mark free regions larger than one word, and the size of the free
+// region (including the first word) is written to the second word of the
+// region.
+//
+// Any valid map page offset must lie in the object area of the page, so map
+// page offsets less than Page::kObjectStartOffset are invalid.  We use a
+// pair of distinguished invalid map encodings (for single word and multiple
+// words) to indicate free regions in the page found during computation of
+// forwarding addresses and skipped over in subsequent sweeps.
+
+// Marks a one-word free region; the next word starts a live object.
+static const uint32_t kSingleFreeEncoding = 0;
+// Marks a larger free region; its total size is stored in the second word.
+static const uint32_t kMultiFreeEncoding = 1;
+
+
+// Encode a free region, defined by the given start address and size, in the
+// first word or two of the region.
+void EncodeFreeRegion(Address free_start, int free_size) {
+  ASSERT(free_size >= kIntSize);
+  if (free_size == kIntSize) {
+    Memory::uint32_at(free_start) = kSingleFreeEncoding;
+  } else {
+    // Multi-word regions need a second word to hold the region size.
+    ASSERT(free_size >= 2 * kIntSize);
+    Memory::uint32_at(free_start) = kMultiFreeEncoding;
+    Memory::int_at(free_start + kIntSize) = free_size;
+  }
+
+#ifdef DEBUG
+  // Zap the body of the free region so stale pointers are caught early.
+  if (FLAG_enable_slow_asserts) {
+    for (int offset = 2 * kIntSize;
+         offset < free_size;
+         offset += kPointerSize) {
+      Memory::Address_at(free_start + offset) = kZapValue;
+    }
+  }
+#endif
+}
+
+
+// Try to promote all objects in new space.  Heap numbers and sequential
+// strings are promoted to the code space, all others to the old space.
+inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
+  // Pointer-free objects (heap numbers, sequential strings) are promoted
+  // to the code space; everything else goes to the old space.
+  bool is_pointer_free = object->IsHeapNumber() || object->IsSeqString();
+  Object* forwarded = is_pointer_free
+      ? Heap::code_space()->MCAllocateRaw(object_size)
+      : Heap::old_space()->MCAllocateRaw(object_size);
+
+  // If promotion failed, the object stays in (compacted) new space.
+  if (forwarded->IsFailure()) {
+    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
+  }
+  return forwarded;
+}
+
+
+// Allocation functions for the paged spaces call the space's MCAllocateRaw.
+// The HeapObject argument is ignored; only the size is used.
+inline Object* MCAllocateFromOldSpace(HeapObject* object, int object_size) {
+  return Heap::old_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromCodeSpace(HeapObject* object, int object_size) {
+  return Heap::code_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromMapSpace(HeapObject* object, int object_size) {
+  return Heap::map_space()->MCAllocateRaw(object_size);
+}
+
+
+// The forwarding address is encoded at the same offset as the current
+// to-space object, but in from space.
+inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
+                                              int object_size,
+                                              Object* new_object,
+                                              int* ignored) {
+  // Mirror the object's to-space offset into from space and store the
+  // forwarding address there.  object_size and the offset argument are
+  // unused for new-space encoding.
+  int offset =
+      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
+  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
+      HeapObject::cast(new_object)->address();
+}
+
+
+// The forwarding address is encoded in the map pointer of the object as an
+// offset (in terms of live bytes) from the address of the first live object
+// in the page.
+inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
+                                                int object_size,
+                                                Object* new_object,
+                                                int* offset) {
+  // Record the forwarding address of the first live object if necessary.
+  if (*offset == 0) {
+    Page::FromAddress(old_object->address())->mc_first_forwarded =
+        HeapObject::cast(new_object)->address();
+  }
+
+  // Pack the (still needed) map pointer and the live-byte offset into the
+  // object's map word.
+  uint32_t encoded = EncodePointers(old_object->map()->address(), *offset);
+  old_object->set_map(reinterpret_cast<Map*>(encoded));
+  *offset += object_size;
+  ASSERT(*offset <= Page::kObjectAreaSize);
+}
+
+
+// Callback for non-live objects in spaces that need no processing of them.
+inline void IgnoreNonLiveObject(HeapObject* object) {}
+
+
+// Callback for non-live objects in the code space: log a code deletion
+// event for each dead code object.
+inline void LogNonLiveCodeObject(HeapObject* object) {
+  if (!object->IsCode()) return;
+  LOG(CodeDeleteEvent(object->address()));
+}
+
+
+// Function template that, given a range of addresses (eg, a semispace or a
+// paged space page), iterates through the objects in the range to clear
+// mark bits and compute and encode forwarding addresses.  As a side effect,
+// maximal free chunks are marked so that they can be skipped on subsequent
+// sweeps.
+//
+// The template parameters are an allocation function, a forwarding address
+// encoding function, and a function to process non-live objects.
+template<MarkCompactCollector::AllocationFunction Alloc,
+         MarkCompactCollector::EncodingFunction Encode,
+         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+inline void EncodeForwardingAddressesInRange(Address start,
+                                             Address end,
+                                             int* offset) {
+  // The start address of the current free region while sweeping the space.
+  // This address is set when a transition from live to non-live objects is
+  // encountered.  A value (an encoding of the 'next free region' pointer)
+  // is written to memory at this address when a transition from non-live to
+  // live objects is encountered.
+  Address free_start = NULL;
+
+  // A flag giving the state of the previously swept object.  Initially true
+  // to ensure that free_start is initialized to a proper address before
+  // trying to write to it.
+  bool is_prev_alive = true;
+
+  int object_size;  // Will be set on each iteration of the loop.
+  for (Address current = start; current < end; current += object_size) {
+    HeapObject* object = HeapObject::FromAddress(current);
+    if (is_marked(object)) {
+      // Live object: clear its mark and assign it a forwarding address.
+      clear_mark(object);
+      object_size = object->Size();
+
+      Object* forwarded = Alloc(object, object_size);
+      // Allocation cannot fail, because we are compacting the space.
+      ASSERT(!forwarded->IsFailure());
+      Encode(object, object_size, forwarded, offset);
+
+#ifdef DEBUG
+      if (FLAG_gc_verbose) {
+        PrintF("forward %p -> %p.\n", object->address(),
+               HeapObject::cast(forwarded)->address());
+      }
+#endif
+      if (!is_prev_alive) {  // Transition from non-live to live.
+        EncodeFreeRegion(free_start, current - free_start);
+        is_prev_alive = true;
+      }
+    } else {  // Non-live object.
+      object_size = object->Size();
+      ProcessNonLive(object);
+      if (is_prev_alive) {  // Transition from live to non-live.
+        free_start = current;
+        is_prev_alive = false;
+      }
+    }
+  }
+
+  // If we ended on a free region, mark it.
+  if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start);
+}
+
+
+// Compute and encode forwarding addresses for the live objects in new
+// space (promoting them when possible).
+void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
+  NewSpace* space = Heap::new_space();
+  // The offset parameter is not used by the new-space encoding function.
+  int ignored;
+  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
+                                   EncodeForwardingAddressInNewSpace,
+                                   IgnoreNonLiveObject>(
+      space->bottom(), space->top(), &ignored);
+}
+
+
+// Compute and encode forwarding addresses for the live objects on every
+// in-use page of a paged space.  The allocation function and the handler
+// for non-live objects are supplied as template parameters.
+template<MarkCompactCollector::AllocationFunction Alloc,
+         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
+    PagedSpace* space) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* page = it.next();
+    // Offset (in live bytes) of each live object from the first live object
+    // in its page; reset at the start of every page.
+    int offset = 0;
+    EncodeForwardingAddressesInRange<Alloc,
+                                     EncodeForwardingAddressInPagedSpace,
+                                     ProcessNonLive>(
+        page->ObjectAreaStart(), page->AllocationTop(), &offset);
+  }
+}
+
+
+// Sweep the new space: clear the marks on live objects and overwrite dead
+// objects with filler maps so the space remains iterable.
+static void SweepSpace(NewSpace* space) {
+  HeapObject* object;
+  for (Address current = space->bottom();
+       current < space->top();
+       current += object->Size()) {
+    object = HeapObject::FromAddress(current);
+    if (is_marked(object)) {
+      clear_mark(object);
+    } else {
+      // We give non-live objects a map that will correctly give their size,
+      // since their existing map might not be live after the collection.
+      int size = object->Size();
+      if (size >= Array::kHeaderSize) {
+        object->set_map(Heap::byte_array_map());
+        ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
+      } else {
+        ASSERT(size == kPointerSize);
+        object->set_map(Heap::one_word_filler_map());
+      }
+      ASSERT(object->Size() == size);
+    }
+    // The object is now unmarked for the call to Size() at the top of the
+    // loop.
+  }
+}
+
+
+// Sweep a paged space: clear the marks on live objects and hand each
+// maximal dead region to the space-specific deallocation function.
+static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+
+    // Tracks live/non-live transitions exactly as in phase 2's
+    // EncodeForwardingAddressesInRange.
+    bool is_previous_alive = true;
+    Address free_start = NULL;
+    HeapObject* object;
+
+    for (Address current = p->ObjectAreaStart();
+         current < p->AllocationTop();
+         current += object->Size()) {
+      object = HeapObject::FromAddress(current);
+      if (is_marked(object)) {
+        clear_mark(object);
+        if (MarkCompactCollector::IsCompacting() && object->IsCode()) {
+          // If this is compacting collection marked code objects have had
+          // their IC targets converted to objects.
+          // They need to be converted back to addresses.
+          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+        }
+        if (!is_previous_alive) {  // Transition from free to live.
+          dealloc(free_start, current - free_start);
+          is_previous_alive = true;
+        }
+      } else {
+        if (object->IsCode()) {
+          // Log the deletion of dead code objects.
+          LOG(CodeDeleteEvent(Code::cast(object)->address()));
+        }
+        if (is_previous_alive) {  // Transition from live to free.
+          free_start = current;
+          is_previous_alive = false;
+        }
+      }
+      // The object is now unmarked for the call to Size() at the top of the
+      // loop.
+    }
+
+    // If the last region was not live, deallocate the area from free_start
+    // to the allocation top of the page.
+    if (!is_previous_alive) {
+      int free_size = p->AllocationTop() - free_start;
+      if (free_size > 0) {
+        dealloc(free_start, free_size);
+      }
+    }
+  }
+}
+
+
+// Free a dead block of old space: clear its remembered-set bits and return
+// the block to the space's free list.
+void MarkCompactCollector::DeallocateOldBlock(Address start,
+                                              int size_in_bytes) {
+  Heap::ClearRSetRange(start, size_in_bytes);
+  Heap::old_space()->Free(start, size_in_bytes);
+}
+
+
+// Free a dead block of code space.  Unlike old space, no remembered-set
+// bits are cleared here -- presumably code space keeps none; TODO confirm.
+void MarkCompactCollector::DeallocateCodeBlock(Address start,
+                                               int size_in_bytes) {
+  Heap::code_space()->Free(start, size_in_bytes);
+}
+
+
+// Free a dead block of map space, one map-sized chunk at a time.
+void MarkCompactCollector::DeallocateMapBlock(Address start,
+                                              int size_in_bytes) {
+  // Objects in map space are frequently assumed to have size Map::kSize and
+  // a valid map in their first word.  Thus, we break the free block up into
+  // chunks and free them separately.
+  ASSERT(size_in_bytes % Map::kSize == 0);
+  Heap::ClearRSetRange(start, size_in_bytes);
+  Address end = start + size_in_bytes;
+  Address current = start;
+  while (current < end) {
+    Heap::map_space()->Free(current);
+    current += Map::kSize;
+  }
+}
+
+
+// Phase 2 driver: compute and encode forwarding addresses in every
+// compactable space.  The order of the spaces below is significant.
+void MarkCompactCollector::EncodeForwardingAddresses() {
+  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+  // Compute the forwarding pointers in each space.
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::old_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
+                                        LogNonLiveCodeObject>(
+      Heap::code_space());
+
+  // Compute new space next to last after the old and code spaces have been
+  // compacted.  Objects in new space can be promoted to old or code space.
+  EncodeForwardingAddressesInNewSpace();
+
+  // Compute map space last because computing forwarding addresses
+  // overwrites non-live objects.  Objects in the other spaces rely on
+  // non-live map pointers to get the sizes of non-live objects.
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::map_space());
+
+  // Write relocation info to the top page, so we can use it later.  This is
+  // done after promoting objects from the new space so we get the correct
+  // allocation top.
+  Heap::old_space()->MCWriteRelocationInfoToPage();
+  Heap::code_space()->MCWriteRelocationInfoToPage();
+  Heap::map_space()->MCWriteRelocationInfoToPage();
+}
+
+
+// Sweep all spaces of a noncompacting collection.  The order of the spaces
+// below is significant.
+void MarkCompactCollector::SweepSpaces() {
+  ASSERT(state_ == SWEEP_SPACES);
+  ASSERT(!IsCompacting());
+  // Noncompacting collections simply sweep the spaces to clear the mark
+  // bits and free the nonlive blocks (for old and map spaces).  We sweep
+  // the map space last because freeing non-live maps overwrites them and
+  // the other spaces rely on possibly non-live maps to get the sizes for
+  // non-live objects.
+  SweepSpace(Heap::old_space(), &DeallocateOldBlock);
+  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
+  // New space takes no deallocation function: dead objects are overwritten
+  // with fillers in place.
+  SweepSpace(Heap::new_space());
+  SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+}
+
+
+// Iterate the live objects in a range of addresses (eg, a page or a
+// semispace) in which the maximal free regions have been marked with the
+// distinguished free encodings (see phase 2 above).  Free regions are
+// skipped; size_func is used to get the size of each live object.
+// Returns the number of live objects in the range.
+int MarkCompactCollector::IterateLiveObjectsInRange(
+    Address start,
+    Address end,
+    HeapObjectCallback size_func) {
+  int live_objects = 0;
+  Address current = start;
+  while (current < end) {
+    uint32_t encoded_map = Memory::uint32_at(current);
+    if (encoded_map == kSingleFreeEncoding) {
+      // One-word free region; the next word starts a live object.
+      current += kPointerSize;
+    } else if (encoded_map == kMultiFreeEncoding) {
+      // Larger free region; its size is stored in the second word.
+      current += Memory::int_at(current + kIntSize);
+    } else {
+      live_objects++;
+      current += size_func(HeapObject::FromAddress(current));
+    }
+  }
+  return live_objects;
+}
+
+
+// Iterate the live objects in the new space.
+int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
+                                             HeapObjectCallback size_f) {
+  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
+}
+
+
+// Iterate the live objects on every in-use page of a paged space and
+// return the total number of live objects found.
+int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
+                                             HeapObjectCallback size_f) {
+  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+  int total = 0;
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
+                                       p->AllocationTop(),
+                                       size_f);
+  }
+  return total;
+}
+
+
+#ifdef DEBUG
+// Size callback that additionally checks the object looks like a map.
+static int VerifyMapObject(HeapObject* obj) {
+  InstanceType type = reinterpret_cast<Map*>(obj)->instance_type();
+  ASSERT(FIRST_TYPE <= type && type <= LAST_TYPE);
+  return Map::kSize;
+}
+
+
+// Debug-only check run after encoding forwarding addresses.
+void MarkCompactCollector::VerifyHeapAfterEncodingForwardingAddresses() {
+  Heap::new_space()->Verify();
+  Heap::old_space()->Verify();
+  Heap::code_space()->Verify();
+  Heap::map_space()->Verify();
+
+  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+  int live_maps = IterateLiveObjects(Heap::map_space(), &VerifyMapObject);
+  ASSERT(live_maps == live_map_objects_);
+
+  // Verify page headers in paged spaces.
+  VerifyPageHeaders(Heap::old_space());
+  VerifyPageHeaders(Heap::code_space());
+  VerifyPageHeaders(Heap::map_space());
+}
+
+
+// Check the mark-compact bookkeeping fields of every page in a space.
+void MarkCompactCollector::VerifyPageHeaders(PagedSpace* space) {
+  PageIterator mc_it(space, PageIterator::PAGES_USED_BY_MC);
+  while (mc_it.has_next()) {
+    Page* p = mc_it.next();
+    Address mc_alloc_top = p->mc_relocation_top;
+    ASSERT(p->ObjectAreaStart() <= mc_alloc_top &&
+           mc_alloc_top <= p->ObjectAreaEnd());
+  }
+
+  int page_count = 0;
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+    ASSERT(p->mc_page_index == page_count);
+    page_count++;
+
+    // first_forwarded is left as kZapValue if no live objects in this page.
+    Address first_forwarded = p->mc_first_forwarded;
+    ASSERT(first_forwarded == kZapValue ||
+           space->Contains(first_forwarded));
+  }
+}
+#endif
+
+
+// ----------------------------------------------------------------------------
+// Phase 3: Update pointers
+
+// Helper class for updating pointers in HeapObjects.
+class UpdatingVisitor: public ObjectVisitor {
+ public:
+  // Rewrite a single slot to the forwarding address of its target.
+  void VisitPointer(Object** p) {
+    MarkCompactCollector::UpdatePointer(p);
+  }
+
+  // Rewrite every slot in [start, end).
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** slot = start; slot < end; slot++) {
+      MarkCompactCollector::UpdatePointer(slot);
+    }
+  }
+};
+
+// Phase 3 driver: rewrite every pointer in the roots, weak handles and all
+// live objects to refer to the target's forwarding address.
+void MarkCompactCollector::UpdatePointers() {
+#ifdef DEBUG
+  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+  state_ = UPDATE_POINTERS;
+#endif
+  UpdatingVisitor updating_visitor;
+  Heap::IterateRoots(&updating_visitor);
+  GlobalHandles::IterateWeakRoots(&updating_visitor);
+
+  int live_maps = IterateLiveObjects(Heap::map_space(),
+                                     &UpdatePointersInOldObject);
+  int live_olds = IterateLiveObjects(Heap::old_space(),
+                                     &UpdatePointersInOldObject);
+  int live_immutables = IterateLiveObjects(Heap::code_space(),
+                                           &UpdatePointersInOldObject);
+  int live_news = IterateLiveObjects(Heap::new_space(),
+                                     &UpdatePointersInNewObject);
+
+  // Large objects do not move, the map word can be updated directly.
+  LargeObjectIterator it(Heap::lo_space());
+  while (it.has_next()) UpdatePointersInNewObject(it.next());
+
+  // The counts are only consumed by the ASSERTs below; USE silences
+  // unused-variable warnings in release builds.
+  USE(live_maps);
+  USE(live_olds);
+  USE(live_immutables);
+  USE(live_news);
+
+#ifdef DEBUG
+  ASSERT(live_maps == live_map_objects_);
+  ASSERT(live_olds == live_old_objects_);
+  ASSERT(live_immutables == live_immutable_objects_);
+  ASSERT(live_news == live_young_objects_);
+
+  if (FLAG_verify_global_gc) VerifyHeapAfterUpdatingPointers();
+#endif
+}
+
+
+// Update all pointers in an object whose map word is still intact (used
+// for new-space and large objects).  Returns the object's size.
+int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
+  // Keep old map pointers
+  Map* old_map = obj->map();
+  ASSERT(old_map->IsHeapObject());
+
+  Address forwarded = GetForwardingAddressInOldSpace(old_map);
+
+  // Maps live in (and are forwarded within) the map space.
+  ASSERT(Heap::map_space()->Contains(old_map));
+  ASSERT(Heap::map_space()->Contains(forwarded));
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
+           forwarded);
+  }
+#endif
+  // Update the map pointer.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
+
+  // We have to compute the object size relying on the old map because
+  // map objects are not relocated yet.
+  int obj_size = obj->SizeFromMap(old_map);
+
+  // Update pointers in the object body.
+  UpdatingVisitor updating_visitor;
+  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
+  return obj_size;
+}
+
+
+// Update all pointers in an object whose map word holds an encoded
+// (forwarding offset, map pointer) pair.  Returns the object's size.
+int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
+  // Decode the map pointer.
+  uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
+  Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // At this point, the first word of map_addr is also encoded, cannot
+  // cast it to Map* using Map::cast.
+  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
+  int obj_size = obj->SizeFromMap(map);
+  InstanceType type = map->instance_type();
+
+  // Update map pointer: re-encode the map's forwarding address together
+  // with this object's unchanged forwarding offset.
+  Address new_map_addr = GetForwardingAddressInOldSpace(map);
+  int offset = DecodeOffset(encoded);
+  encoded = EncodePointers(new_map_addr, offset);
+  obj->set_map(reinterpret_cast<Map*>(encoded));
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("update %p : %p -> %p\n", obj->address(),
+           map_addr, new_map_addr);
+  }
+#endif
+
+  // Update pointers in the object body.
+  UpdatingVisitor updating_visitor;
+  obj->IterateBody(type, obj_size, &updating_visitor);
+  return obj_size;
+}
+
+
+// Compute an object's forwarding address from the live-byte offset encoded
+// in its map word and its page's first-forwarded address.
+Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
+  // The object should be in either the old or the map space.
+  uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
+
+  // Offset to the first live object's forwarding address.
+  int offset = DecodeOffset(encoded);
+  Address obj_addr = obj->address();
+
+  // Find the first live object's forwarding address.
+  Page* p = Page::FromAddress(obj_addr);
+  Address first_forwarded = p->mc_first_forwarded;
+
+  // Page start address of forwarded address.
+  Page* forwarded_page = Page::FromAddress(first_forwarded);
+  int forwarded_offset = forwarded_page->Offset(first_forwarded);
+
+  // Find end of allocation in the page of first_forwarded.
+  Address mc_top = forwarded_page->mc_relocation_top;
+  int mc_top_offset = forwarded_page->Offset(mc_top);
+
+  // Check if current object's forward pointer is in the same page
+  // as the first live object's forwarding pointer
+  if (forwarded_offset + offset < mc_top_offset) {
+    // In the same page.
+    return first_forwarded + offset;
+  }
+
+  // Must be in the next page, NOTE: this may cross chunks.
+  Page* next_page = forwarded_page->next_page();
+  ASSERT(next_page->is_valid());
+
+  // Carry the remaining offset over into the next page's object area.
+  offset -= (mc_top_offset - forwarded_offset);
+  offset += Page::kObjectStartOffset;
+
+  ASSERT_PAGE_OFFSET(offset);
+  ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
+
+  return next_page->OffsetToAddress(offset);
+}
+
+// Rewrite a single slot: if it holds a heap object, replace it with the
+// object's forwarding address, dispatching on the object's space.
+void MarkCompactCollector::UpdatePointer(Object** p) {
+  // Smis and other non-heap-object values never move.
+  if (!(*p)->IsHeapObject()) return;
+
+  HeapObject* obj = HeapObject::cast(*p);
+  Address old_addr = obj->address();
+  Address new_addr;
+
+  ASSERT(!Heap::InFromSpace(obj));
+
+  if (Heap::new_space()->Contains(obj)) {
+    // New-space forwarding addresses are stored in from space at the
+    // object's to-space offset (see EncodeForwardingAddressInNewSpace).
+    Address f_addr = Heap::new_space()->FromSpaceLow() +
+                     Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+    new_addr = Memory::Address_at(f_addr);
+
+#ifdef DEBUG
+    ASSERT(Heap::old_space()->Contains(new_addr) ||
+           Heap::code_space()->Contains(new_addr) ||
+           Heap::new_space()->FromSpaceContains(new_addr));
+
+    if (Heap::new_space()->FromSpaceContains(new_addr)) {
+      ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
+             Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+    }
+#endif
+
+  } else if (Heap::lo_space()->Contains(obj)) {
+    // Don't move objects in the large object space.
+    new_addr = obj->address();
+
+  } else {
+    ASSERT(Heap::old_space()->Contains(obj) ||
+           Heap::code_space()->Contains(obj) ||
+           Heap::map_space()->Contains(obj));
+
+    new_addr = GetForwardingAddressInOldSpace(obj);
+    ASSERT(Heap::old_space()->Contains(new_addr) ||
+           Heap::code_space()->Contains(new_addr) ||
+           Heap::map_space()->Contains(new_addr));
+
+#ifdef DEBUG
+    // Compaction only slides objects towards lower addresses.
+    if (Heap::old_space()->Contains(obj)) {
+      ASSERT(Heap::old_space()->MCSpaceOffsetForAddress(new_addr) <=
+             Heap::old_space()->MCSpaceOffsetForAddress(old_addr));
+    } else if (Heap::code_space()->Contains(obj)) {
+      ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
+             Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
+    } else {
+      ASSERT(Heap::map_space()->MCSpaceOffsetForAddress(new_addr) <=
+             Heap::map_space()->MCSpaceOffsetForAddress(old_addr));
+    }
+#endif
+  }
+
+  *p = HeapObject::FromAddress(new_addr);
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("update %p : %p -> %p\n",
+           reinterpret_cast<Address>(p), old_addr, new_addr);
+  }
+#endif
+}
+
+
+#ifdef DEBUG
+// Debug-only check run after the pointer-updating phase.
+void MarkCompactCollector::VerifyHeapAfterUpdatingPointers() {
+  ASSERT(state_ == UPDATE_POINTERS);
+
+  Heap::new_space()->Verify();
+  Heap::old_space()->Verify();
+  Heap::code_space()->Verify();
+  Heap::map_space()->Verify();
+
+  // We don't have object size info after updating pointers, not much we can
+  // do here.
+  VerifyPageHeaders(Heap::old_space());
+  VerifyPageHeaders(Heap::code_space());
+  VerifyPageHeaders(Heap::map_space());
+}
+#endif
+
+
+// ----------------------------------------------------------------------------
+// Phase 4: Relocate objects
+
+// Phase 4 driver: copy every live object to its forwarding address and
+// commit the relocation information.  The order of the spaces matters.
+void MarkCompactCollector::RelocateObjects() {
+#ifdef DEBUG
+  ASSERT(state_ == UPDATE_POINTERS);
+  state_ = RELOCATE_OBJECTS;
+#endif
+  // Relocates objects, always relocate map objects first.  Relocating
+  // objects in other space relies on map objects to get object size.
+  int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
+  int live_olds = IterateLiveObjects(Heap::old_space(), &RelocateOldObject);
+  int live_immutables =
+      IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
+  int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
+
+  // Counts are only consumed by the ASSERTs below.
+  USE(live_maps);
+  USE(live_olds);
+  USE(live_immutables);
+  USE(live_news);
+#ifdef DEBUG
+  ASSERT(live_maps == live_map_objects_);
+  ASSERT(live_olds == live_old_objects_);
+  ASSERT(live_immutables == live_immutable_objects_);
+  ASSERT(live_news == live_young_objects_);
+#endif
+
+  // Notify code object in LO to convert IC target to address
+  // This must happen after lo_space_->Compact
+  LargeObjectIterator it(Heap::lo_space());
+  while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); }
+
+  // Flips from and to spaces
+  Heap::new_space()->Flip();
+
+  // Sets age_mark to bottom in to space
+  Address mark = Heap::new_space()->bottom();
+  Heap::new_space()->set_age_mark(mark);
+
+  Heap::new_space()->MCCommitRelocationInfo();
+#ifdef DEBUG
+  // It is safe to write to the remembered sets as remembered sets on a
+  // page-by-page basis after committing the m-c forwarding pointer.
+  Page::set_rset_state(Page::IN_USE);
+#endif
+  Heap::map_space()->MCCommitRelocationInfo();
+  Heap::old_space()->MCCommitRelocationInfo();
+  Heap::code_space()->MCCommitRelocationInfo();
+
+#ifdef DEBUG
+  if (FLAG_verify_global_gc) VerifyHeapAfterRelocatingObjects();
+#endif
+}
+
+
+// Iteration callback: convert a code object's IC targets back from objects
+// to addresses.  Returns the object's size so it can double as a
+// HeapObjectCallback.
+int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) {
+  if (obj->IsCode()) {
+    Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
+  }
+  return obj->Size();
+}
+
+
+// Relocate a single map object to its forwarding address.
+int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
+  // Decode map pointer (forwarded address).
+  uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
+  Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // Get forwarding address before resetting map pointer.
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Recover map pointer.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
+
+  // The meta map object may not be copied yet.
+  Address old_addr = obj->address();
+
+  if (new_addr != old_addr) {
+    // memmove because source and destination may overlap.
+    memmove(new_addr, old_addr, Map::kSize);  // copy contents
+  }
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+#endif
+
+  return Map::kSize;
+}
+
+
+// Relocate a single old-space object to its forwarding address.
+int MarkCompactCollector::RelocateOldObject(HeapObject* obj) {
+  // Decode map pointer (forwarded address).
+  uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
+  Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // Get forwarding address before resetting map pointer.
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Recover map pointer.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
+
+  // This is a non-map object, it relies on the assumption that the Map space
+  // is compacted before the Old space (see RelocateObjects).
+  int obj_size = obj->Size();
+  ASSERT_OBJECT_SIZE(obj_size);
+
+  Address old_addr = obj->address();
+
+  // Compaction only slides objects towards lower addresses.
+  ASSERT(Heap::old_space()->MCSpaceOffsetForAddress(new_addr) <=
+         Heap::old_space()->MCSpaceOffsetForAddress(old_addr));
+
+  Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);
+
+  if (new_addr != old_addr) {
+    // memmove because source and destination may overlap.
+    memmove(new_addr, old_addr, obj_size);  // copy contents
+  }
+
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsCode()) {
+    // May also update inline cache target.
+    Code::cast(copied_to)->Relocate(new_addr - old_addr);
+    // Notify the logger that compiled code has moved.
+    LOG(CodeMoveEvent(old_addr, new_addr));
+  }
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+#endif
+
+  return obj_size;
+}
+
+
+// Relocate a single code-space object to its forwarding address.
+int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
+  // Decode map pointer (forwarded address).
+  uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
+  Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // Get forwarding address before resetting map pointer.
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Recover map pointer.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
+
+  // This is a non-map object, it relies on the assumption that the Map space
+  // is compacted before the other spaces (see RelocateObjects).
+  int obj_size = obj->Size();
+  ASSERT_OBJECT_SIZE(obj_size);
+
+  Address old_addr = obj->address();
+
+  // Compaction only slides objects towards lower addresses.
+  ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
+         Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
+
+  Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
+
+  // Convert inline cache target to address using old address.
+  if (obj->IsCode()) {
+    // Convert target to address first, relative to old_addr.
+    Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
+  }
+
+  if (new_addr != old_addr) {
+    // memmove because source and destination may overlap.
+    memmove(new_addr, old_addr, obj_size);  // copy contents
+  }
+
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsCode()) {
+    // May also update inline cache target.
+    Code::cast(copied_to)->Relocate(new_addr - old_addr);
+    // Notify the logger that compiled code has moved.
+    LOG(CodeMoveEvent(old_addr, new_addr));
+  }
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+#endif
+
+  return obj_size;
+}
+
+
+#ifdef DEBUG
+// Visitor that checks a relocated object holds no pointers into to-space.
+class VerifyCopyingVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** slot = start; slot < end; slot++) {
+      MarkCompactCollector::VerifyCopyingObjects(slot);
+    }
+  }
+};
+
+#endif
+
+// Relocate a single new-space object to its forwarding address, which may
+// be in from space (survivor) or in the old/code space (promoted).
+int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
+  int obj_size = obj->Size();
+
+  // Get forwarding address: it was stored in from space at the object's
+  // to-space offset (see EncodeForwardingAddressInNewSpace).
+  Address old_addr = obj->address();
+  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+
+  Address new_addr =
+      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
+
+  if (Heap::new_space()->FromSpaceContains(new_addr)) {
+    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
+           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+  } else {
+    // Promoted: heap numbers and sequential strings went to code space,
+    // everything else to old space (see MCAllocateFromNewSpace).
+    bool has_pointers = !obj->IsHeapNumber() && !obj->IsSeqString();
+    if (has_pointers) {
+      Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);
+    } else {
+      Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
+    }
+  }
+
+  // New and old addresses cannot overlap.
+  memcpy(reinterpret_cast<void*>(new_addr),
+         reinterpret_cast<void*>(old_addr),
+         obj_size);
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+  if (FLAG_verify_global_gc) {
+    VerifyCopyingVisitor v;
+    HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+    copied_to->Iterate(&v);
+  }
+#endif
+
+  return obj_size;
+}
+
+
+#ifdef DEBUG
+// Debug-only check run after relocation: verify every space and check that
+// each in-use page of the paged spaces has a valid allocation top offset.
+void MarkCompactCollector::VerifyHeapAfterRelocatingObjects() {
+  ASSERT(state_ == RELOCATE_OBJECTS);
+
+  Heap::new_space()->Verify();
+  Heap::old_space()->Verify();
+  Heap::code_space()->Verify();
+  Heap::map_space()->Verify();
+
+  PageIterator old_pages(Heap::old_space(), PageIterator::PAGES_IN_USE);
+  while (old_pages.has_next()) {
+    Page* page = old_pages.next();
+    ASSERT_PAGE_OFFSET(page->Offset(page->AllocationTop()));
+  }
+
+  PageIterator code_pages(Heap::code_space(), PageIterator::PAGES_IN_USE);
+  while (code_pages.has_next()) {
+    Page* page = code_pages.next();
+    ASSERT_PAGE_OFFSET(page->Offset(page->AllocationTop()));
+  }
+
+  PageIterator map_pages(Heap::map_space(), PageIterator::PAGES_IN_USE);
+  while (map_pages.has_next()) {
+    Page* page = map_pages.next();
+    ASSERT_PAGE_OFFSET(page->Offset(page->AllocationTop()));
+  }
+}
+#endif
+
+
#ifdef DEBUG
void MarkCompactCollector::VerifyCopyingObjects(Object** p) {
  // Smis need no checking; only heap object pointers are interesting.
  Object* value = *p;
  if (value->IsHeapObject()) {
    // After relocation no live pointer may refer into to-space.
    ASSERT(!Heap::InToSpace(value));
  }
}
#endif  // DEBUG
+
+
+// -----------------------------------------------------------------------------
+// Phase 5: rebuild remembered sets
+
// Phase 5 entry point: delegates to the heap to rebuild the remembered
// sets that were invalidated by moving objects around.
void MarkCompactCollector::RebuildRSets() {
#ifdef DEBUG
  // Phase-ordering check: rebuilding rsets must follow relocation.
  ASSERT(state_ == RELOCATE_OBJECTS);
  state_ = REBUILD_RSETS;
#endif
  Heap::RebuildRSets();
}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MARK_COMPACT_H_
+#define V8_MARK_COMPACT_H_
+
+namespace v8 { namespace internal {
+
// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map
// objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function for deallocating non-live blocks in the old
// generation.
typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
+
+
+// ----------------------------------------------------------------------------
+// Mark-Compact collector
+//
+// All methods are static.
+
class MarkCompactCollector : public AllStatic {
 public:
  // Type of functions to compute forwarding addresses of objects in
  // compacted spaces. Given an object and its size, return a (non-failure)
  // Object* that will be the object after forwarding. There is a separate
  // allocation function for each (compactable) space based on the location
  // of the object before compaction.
  typedef Object* (*AllocationFunction)(HeapObject* object, int object_size);

  // Type of functions to encode the forwarding address for an object.
  // Given the object, its size, and the new (non-failure) object it will be
  // forwarded to, encode the forwarding address. For paged spaces, the
  // 'offset' input/output parameter contains the offset of the forwarded
  // object from the forwarding address of the previous live object in the
  // page as input, and is updated to contain the offset to be used for the
  // next live object in the same page. For spaces using a different
  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
  typedef void (*EncodingFunction)(HeapObject* old_object,
                                   int object_size,
                                   Object* new_object,
                                   int* offset);

  // Type of functions to process non-live objects.
  typedef void (*ProcessNonLiveFunction)(HeapObject* object);

  // Performs a global garbage collection.
  static void CollectGarbage();

  // True if the last full GC performed heap compaction.
  static bool HasCompacted() { return compacting_collection_; }

  // True after the Prepare phase if the compaction is taking place.
  static bool IsCompacting() { return compacting_collection_; }

#ifdef DEBUG
  // Checks whether we are currently performing a mark-compact collection.
  static bool in_use() { return state_ > PREPARE_GC; }
#endif

 private:
#ifdef DEBUG
  // The collector's phases in order; used only for ASSERT-based
  // checking that the phases run in the right sequence.
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS,
    REBUILD_RSETS
  };

  // The current stage of the collector.
  static CollectorState state_;
#endif
  // Global flag indicating whether spaces were compacted on the last GC.
  static bool compacting_collection_;

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  static void Prepare();

  // Finishes GC, performs heap verification.
  static void Finish();

  // --------------------------------------------------------------------------
  // Phase 1: functions related to marking phase.
  // before: Heap is in normal state, collector is 'IDLE'.
  //
  // The first word of a page in old spaces has the end of
  // allocation address of the page.
  //
  // The word at Chunk::high_ address has the address of the
  // first page in the next chunk. (The address is tagged to
  // distinguish it from end-of-allocation address).
  //
  // after: live objects are marked.

  friend class MarkingVisitor;

  // Marking operations for objects reachable from roots.
  static void MarkLiveObjects();
  static void UnmarkLiveObjects();

  // Visit overflowed object, push overflowed object on the marking stack and
  // clear the overflow bit. If the marking stack is overflowed during this
  // process, return false.
  static bool VisitOverflowedObject(HeapObject* obj);

  static void MarkUnmarkedObject(HeapObject* obj);

  static inline void MarkObject(HeapObject* obj) {
    if (!is_marked(obj)) MarkUnmarkedObject(obj);
  }

  static void MarkObjectsReachableFromTopFrame();

  // Callback function for telling whether the object *p must be marked.
  static bool MustBeMarked(Object** p);

#ifdef DEBUG
  static void UpdateLiveObjectCount(HeapObject* obj);
  static void VerifyHeapAfterMarkingPhase();
#endif

  // We sweep the large object space in the same way whether we are
  // compacting or not, because the large object space is never compacted.
  static void SweepLargeObjectSpace();

  // --------------------------------------------------------------------------
  // Phase 2: functions related to computing and encoding forwarding pointers
  // before: live objects' map pointers are marked as '00'
  // after: Map pointers of live old and map objects have encoded
  //        forwarding pointers and map pointers
  //
  // The 3rd word of a page has the page top offset after compaction.
  //
  // The 4th word of a page in the map space has the map index
  // of this page in the map table. This word is not used in
  // the old space.
  //
  // The 5th and 6th words of a page have the start and end
  // addresses of the first free region in the page.
  //
  // The 7th word of a page in old spaces has the forwarding address
  // of the first live object in the page.
  //
  // Live young objects have their forwarding pointers in
  // the from space at the same offset to the beginning of the space.

  // Encodes forwarding addresses of objects in compactable parts of the
  // heap.
  static void EncodeForwardingAddresses();

  // Encodes the forwarding addresses of objects in new space.
  static void EncodeForwardingAddressesInNewSpace();

  // Function template to encode the forwarding addresses of objects in
  // paged spaces, parameterized by allocation and non-live processing
  // functions.
  template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
  static void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);

  // Iterates live objects in a space, passes live objects
  // to a callback function which returns the heap size of the object.
  // Returns the number of live objects iterated.
  static int IterateLiveObjects(NewSpace* space, HeapObjectCallback size_f);
  static int IterateLiveObjects(PagedSpace* space, HeapObjectCallback size_f);

  // Iterates the live objects between a range of addresses, returning the
  // number of live objects.
  static int IterateLiveObjectsInRange(Address start, Address end,
                                       HeapObjectCallback size_func);

  // Callback functions for deallocating non-live blocks in the old
  // generation.
  static void DeallocateOldBlock(Address start, int size_in_bytes);
  static void DeallocateCodeBlock(Address start, int size_in_bytes);
  static void DeallocateMapBlock(Address start, int size_in_bytes);

  // Phase 2: If we are not compacting the heap, we simply sweep the spaces
  // except for the large object space, clearing mark bits and adding
  // unmarked regions to each space's free list.
  static void SweepSpaces();

#ifdef DEBUG
  static void VerifyHeapAfterEncodingForwardingAddresses();
#endif

  // --------------------------------------------------------------------------
  // Phase 3: function related to updating pointers and decode map pointers
  // before: see after phase 2
  // after: all pointers are updated to forwarding addresses.

  friend class UpdatingVisitor;  // helper for updating visited objects

  // Updates pointers in all spaces.
  static void UpdatePointers();

  // Updates pointers in an object in new space.
  // Returns the heap size of the object.
  static int UpdatePointersInNewObject(HeapObject* obj);

  // Updates pointers in an object in old spaces.
  // Returns the heap size of the object.
  static int UpdatePointersInOldObject(HeapObject* obj);

  // Updates the pointer in a slot.
  static void UpdatePointer(Object** p);

  // Calculates the forwarding address of an object in an old space.
  static Address GetForwardingAddressInOldSpace(HeapObject* obj);

#ifdef DEBUG
  static void VerifyHeapAfterUpdatingPointers();
#endif

  // --------------------------------------------------------------------------
  // Phase 4: functions related to relocating objects
  // before: see after phase 3
  // after: heap is in a normal state, except remembered set is not built

  // Relocates objects in all spaces.
  static void RelocateObjects();

  // Converts a code object's inline target to addresses; the conversion
  // from address to target happens in the marking phase.
  static int ConvertCodeICTargetToAddress(HeapObject* obj);

  // Relocate a map object.
  static int RelocateMapObject(HeapObject* obj);

  // Relocates an old object.
  static int RelocateOldObject(HeapObject* obj);

  // Relocates an immutable object in the code space.
  static int RelocateCodeObject(HeapObject* obj);

  // Copy a new object.
  static int RelocateNewObject(HeapObject* obj);

#ifdef DEBUG
  static void VerifyHeapAfterRelocatingObjects();
#endif

  // ---------------------------------------------------------------------------
  // Phase 5: functions related to rebuilding remembered sets

  // Rebuild remembered set in old and map spaces.
  static void RebuildRSets();

#ifdef DEBUG
  // ---------------------------------------------------------------------------
  // Debugging variables, functions and classes
  // Counters used for debugging the marking phase of mark-compact or
  // mark-sweep collection.

  // Number of live objects in Heap::to_space_.
  static int live_young_objects_;

  // Number of live objects in Heap::old_space_.
  static int live_old_objects_;

  // Number of live objects in Heap::code_space_.
  static int live_immutable_objects_;

  // Number of live objects in Heap::map_space_.
  static int live_map_objects_;

  // Number of live objects in Heap::lo_space_.
  static int live_lo_objects_;

  // Number of live bytes in this collection.
  static int live_bytes_;

  static void VerifyPageHeaders(PagedSpace* space);

  // Verification functions when relocating objects.
  friend class VerifyCopyingVisitor;
  static void VerifyCopyingObjects(Object** p);

  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif
};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MARK_COMPACT_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
// changes to these properties.
const $Infinity = global.Infinity;


// Instance class name can only be set on functions. That is the only
// purpose for MathConstructor.
function MathConstructor() {};
%FunctionSetInstanceClassName(MathConstructor, 'Math');
const $Math = new MathConstructor();
// Math is an ordinary object, not a constructor: reparent it onto
// Object.prototype and install it as a non-enumerable global.
$Math.__proto__ = global.Object.prototype;
%AddProperty(global, "Math", $Math, DONT_ENUM);
+
+
// TODO(1240985): Make this work without arguments to the runtime
// call.
function $Math_random() { return %Math_random(0); }
%AddProperty($Math, "random", $Math_random, DONT_ENUM);

// One- and two-argument Math functions (ECMA-262 section 15.8.2): each
// coerces its argument(s) with ToNumber and defers to a runtime call.

function $Math_abs(x) { return %Math_abs(ToNumber(x)); }
%AddProperty($Math, "abs", $Math_abs, DONT_ENUM);

function $Math_acos(x) { return %Math_acos(ToNumber(x)); }
%AddProperty($Math, "acos", $Math_acos, DONT_ENUM);

function $Math_asin(x) { return %Math_asin(ToNumber(x)); }
%AddProperty($Math, "asin", $Math_asin, DONT_ENUM);

function $Math_atan(x) { return %Math_atan(ToNumber(x)); }
%AddProperty($Math, "atan", $Math_atan, DONT_ENUM);

function $Math_ceil(x) { return %Math_ceil(ToNumber(x)); }
%AddProperty($Math, "ceil", $Math_ceil, DONT_ENUM);

function $Math_cos(x) { return %Math_cos(ToNumber(x)); }
%AddProperty($Math, "cos", $Math_cos, DONT_ENUM);

function $Math_exp(x) { return %Math_exp(ToNumber(x)); }
%AddProperty($Math, "exp", $Math_exp, DONT_ENUM);

function $Math_floor(x) { return %Math_floor(ToNumber(x)); }
%AddProperty($Math, "floor", $Math_floor, DONT_ENUM);

function $Math_log(x) { return %Math_log(ToNumber(x)); }
%AddProperty($Math, "log", $Math_log, DONT_ENUM);

function $Math_round(x) { return %Math_round(ToNumber(x)); }
%AddProperty($Math, "round", $Math_round, DONT_ENUM);

function $Math_sin(x) { return %Math_sin(ToNumber(x)); }
%AddProperty($Math, "sin", $Math_sin, DONT_ENUM);

function $Math_sqrt(x) { return %Math_sqrt(ToNumber(x)); }
%AddProperty($Math, "sqrt", $Math_sqrt, DONT_ENUM);

function $Math_tan(x) { return %Math_tan(ToNumber(x)); }
%AddProperty($Math, "tan", $Math_tan, DONT_ENUM);

function $Math_atan2(x, y) { return %Math_atan2(ToNumber(x), ToNumber(y)); }
%AddProperty($Math, "atan2", $Math_atan2, DONT_ENUM);

function $Math_pow(x, y) { return %Math_pow(ToNumber(x), ToNumber(y)); }
%AddProperty($Math, "pow", $Math_pow, DONT_ENUM);
+
// ECMA-262, section 15.8.2.11: returns the largest argument. The two
// named parameters exist only to give the function the specified
// length of 2; the actual arguments are read via the runtime.
function $Math_max(arg1, arg2) {  // length == 2
  var r = -$Infinity;
  for (var i = %_ArgumentsLength() - 1; i >= 0; --i) {
    var n = ToNumber(%_Arguments(i));
    // Any NaN argument makes the result NaN.
    if (%NumberIsNaN(n)) return n;
    // Make sure +0 is considered greater than -0.
    if (n > r || (n === 0 && r === 0 && (1 / n) > (1 / r))) r = n;
  }
  return r;
}
%AddProperty($Math, "max", $Math_max, DONT_ENUM);
+
// ECMA-262, section 15.8.2.12: returns the smallest argument. The two
// named parameters exist only to give the function the specified
// length of 2; the actual arguments are read via the runtime.
function $Math_min(arg1, arg2) {  // length == 2
  var r = $Infinity;
  for (var i = %_ArgumentsLength() - 1; i >= 0; --i) {
    var n = ToNumber(%_Arguments(i));
    // Any NaN argument makes the result NaN.
    if (%NumberIsNaN(n)) return n;
    // Make sure -0 is considered less than +0.
    if (n < r || (n === 0 && r === 0 && (1 / n) < (1 / r))) r = n;
  }
  return r;
}
%AddProperty($Math, "min", $Math_min, DONT_ENUM);
+
+
// ECMA-262, section 15.8.1.1.
%AddProperty($Math, "E", 2.7182818284590452354, DONT_ENUM | DONT_DELETE | READ_ONLY);

// ECMA-262, section 15.8.1.2.
%AddProperty($Math, "LN10", 2.302585092994046, DONT_ENUM | DONT_DELETE | READ_ONLY);

// ECMA-262, section 15.8.1.3.
%AddProperty($Math, "LN2", 0.6931471805599453, DONT_ENUM | DONT_DELETE | READ_ONLY);

// ECMA-262, section 15.8.1.4.
%AddProperty($Math, "LOG2E", 1.4426950408889634, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262, section 15.8.1.5.
%AddProperty($Math, "LOG10E", 0.43429448190325176, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262, section 15.8.1.6.
%AddProperty($Math, "PI", 3.1415926535897932, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262, section 15.8.1.7.
%AddProperty($Math, "SQRT1_2", 0.7071067811865476, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262, section 15.8.1.8.
%AddProperty($Math, "SQRT2", 1.4142135623730951, DONT_ENUM | DONT_DELETE | READ_ONLY);
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MEMORY_H_
+#define V8_MEMORY_H_
+
+namespace v8 { namespace internal {
+
// Memory provides an interface to 'raw' memory. It encapsulates the casts
// that typically are needed when incompatible pointer types are used.
// Each accessor returns a reference, so these can be used both to read
// and to write the value at the given address.

class Memory {
 public:
  // Reads/writes an unsigned 32-bit value at addr.
  static uint32_t& uint32_at(Address addr) {
    return *reinterpret_cast<uint32_t*>(addr);
  }

  // Reads/writes a signed 32-bit value at addr.
  static int32_t& int32_at(Address addr) {
    return *reinterpret_cast<int32_t*>(addr);
  }

  // Reads/writes a plain int at addr.
  static int& int_at(Address addr) {
    return *reinterpret_cast<int*>(addr);
  }

  // Reads/writes an Address (byte pointer) stored at addr.
  static Address& Address_at(Address addr) {
    return *reinterpret_cast<Address*>(addr);
  }

  // Reads/writes an Object pointer stored at addr.
  static Object*& Object_at(Address addr) {
    return *reinterpret_cast<Object**>(addr);
  }
};
+
+} } // namespace v8::internal
+
+#endif // V8_MEMORY_H_
--- /dev/null
+
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "execution.h"
+#include "spaces-inl.h"
+#include "top.h"
+
+namespace v8 { namespace internal {
+
+
// If no message listeners have been registered this one is called
// by default: it prints the message to stdout, prefixed with
// "<script name>:<position>: " when a source location is available.
void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
                                          Handle<Object> message_obj) {
  SmartPointer<char> str = GetLocalizedMessage(message_obj);
  if (loc == NULL) {
    // No source location: print the bare message.
    PrintF("%s\n", *str);
  } else {
    HandleScope scope;
    Handle<Object> data(loc->script()->name());
    SmartPointer<char> data_str = NULL;
    if (data->IsString())
      data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
    // Fall back to "<unknown>" when the script has no string name.
    PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
           loc->start_pos(), *str);
  }
}
+
+
// Reports a message that needs no JS heap allocation: simply prints it.
void MessageHandler::ReportMessage(const char* msg) {
  PrintF("%s\n", msg);
}
+
+
// Builds a message object from the given type, location, and arguments
// (by calling into JS), then delivers it to all registered message
// listeners, or to DefaultMessageReport when there are none.
void MessageHandler::ReportMessage(const char* type, MessageLocation* loc,
                                   Vector< Handle<Object> > args) {
  // Build error message object
  HandleScope scope;
  Handle<Object> type_str = Factory::LookupAsciiSymbol(type);
  Handle<Object> array = Factory::NewJSArray(args.length());
  for (int i = 0; i < args.length(); i++)
    SetElement(Handle<JSArray>::cast(array), i, args[i]);

  Handle<JSFunction> fun(Top::global_context()->make_message_fun());
  int start, end;
  Handle<Object> script;
  if (loc) {
    start = loc->start_pos();
    end = loc->end_pos();
    script = GetScriptWrapper(loc->script());
  } else {
    // No location available: use a zero range and an undefined script.
    start = end = 0;
    script = Factory::undefined_value();
  }
  Handle<Object> start_handle(Smi::FromInt(start));
  Handle<Object> end_handle(Smi::FromInt(end));
  const int argc = 5;
  Object** argv[argc] = { type_str.location(),
                          array.location(),
                          start_handle.location(),
                          end_handle.location(),
                          script.location() };

  bool caught_exception = false;
  Handle<Object> message =
      Execution::TryCall(fun, Factory::undefined_value(), argc, argv,
                         &caught_exception);
  // If creating the message (in JS code) resulted in an exception, we
  // skip doing the callback. This usually only happens in case of
  // stack overflow exceptions being thrown by the parser when the
  // stack is almost full.
  if (caught_exception) return;

  v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);

  v8::NeanderArray global_listeners(Factory::message_listeners());
  int global_length = global_listeners.length();
  if (global_length == 0) {
    // Nobody is listening: fall back to printing the message directly.
    DefaultMessageReport(loc, message);
  } else {
    for (int i = 0; i < global_length; i++) {
      HandleScope scope;
      // NOTE(review): undefined entries appear to be removed listeners
      // -- they are skipped; confirm against the listener registration
      // code.
      if (global_listeners.get(i)->IsUndefined()) continue;
      v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
      Handle<Proxy> callback_obj(Proxy::cast(listener.get(0)));
      v8::MessageCallback callback =
          FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
      Handle<Object> callback_data(listener.get(1));
      callback(api_message_obj, v8::Utils::ToLocal(callback_data));
    }
  }
}
+
+
// Formats the message data into a human-readable string by calling the
// JS builtin "FormatMessage". Returns the symbol "<error>" if
// formatting throws or does not produce a string.
Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
  Handle<String> fmt_str = Factory::LookupAsciiSymbol("FormatMessage");
  Handle<JSFunction> fun =
      Handle<JSFunction>(
          JSFunction::cast(
              Top::security_context_builtins()->GetProperty(*fmt_str)));
  Object** argv[1] = { data.location() };

  bool caught_exception;
  Handle<Object> result =
      Execution::TryCall(fun, Top::security_context_builtins(), 1, argv,
                         &caught_exception);

  if (caught_exception || !result->IsString()) {
    return Factory::LookupAsciiSymbol("<error>");
  }
  Handle<String> result_string = Handle<String>::cast(result);
  // A string that has been obtained from JS code in this way is
  // likely to be a complicated ConsString of some sort. We flatten it
  // here to improve the efficiency of converting it to a C string and
  // other operations that are likely to take place (see GetLocalizedMessage
  // for example).
  FlattenString(result_string);
  return result_string;
}
+
+
+SmartPointer<char> MessageHandler::GetLocalizedMessage(Handle<Object> data) {
+ HandleScope scope;
+ return GetMessage(data)->ToCString(DISALLOW_NULLS);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The infrastructure used for (localized) message reporting in V8.
+//
+// Note: there's a big unresolved issue about ownership of the data
+// structures used by this framework.
+
+#ifndef V8_MESSAGES_H_
+#define V8_MESSAGES_H_
+
+#include "handles-inl.h"
+
+// Forward declaration of MessageLocation.
+namespace v8 { namespace internal {
+class MessageLocation;
+} } // namespace v8::internal
+
+
// Bundles the raw ingredients of a message -- its type string, the
// argument array, and an optional source location -- for use outside
// the v8::internal namespace.
// NOTE(review): ownership of type_ and loc_ is unresolved (see the
// comment at the top of this file).
class V8Message {
 public:
  V8Message(char* type,
            v8::internal::Handle<v8::internal::JSArray> args,
            const v8::internal::MessageLocation* loc) :
      type_(type), args_(args), loc_(loc) { }
  char* type() const { return type_; }
  v8::internal::Handle<v8::internal::JSArray> args() const { return args_; }
  const v8::internal::MessageLocation* loc() const { return loc_; }
 private:
  char* type_;
  v8::internal::Handle<v8::internal::JSArray> const args_;
  const v8::internal::MessageLocation* loc_;
};
+
+
+namespace v8 { namespace internal {
+
+struct Language;
+class SourceInfo;
+
// Identifies a source range (start and end position) within a script;
// used to point a message at the code that caused it.
class MessageLocation {
 public:
  MessageLocation(Handle<Script> script,
                  int start_pos,
                  int end_pos)
      : script_(script),
        start_pos_(start_pos),
        end_pos_(end_pos) { }

  Handle<Script> script() const { return script_; }
  int start_pos() const { return start_pos_; }
  int end_pos() const { return end_pos_; }

 private:
  Handle<Script> script_;
  int start_pos_;  // Start of the range; presumably a character offset.
  int end_pos_;    // End of the range.
};
+
+
// A message handler is a convenience interface for accessing the list
// of message listeners registered in an environment.
class MessageHandler {
 public:
  // Report a message (w/o JS heap allocation).
  static void ReportMessage(const char* msg);

  // Report a formatted message (needs JS allocation).
  static void ReportMessage(const char* type,
                            MessageLocation* loc,
                            Vector< Handle<Object> > args);

  // Prints the message to stdout; used when no listener is registered.
  static void DefaultMessageReport(const MessageLocation* loc,
                                   Handle<Object> message_obj);
  // Formats the message data into a (flattened) string.
  static Handle<String> GetMessage(Handle<Object> data);
  // Formats the message data and converts it to a C string.
  static SmartPointer<char> GetLocalizedMessage(Handle<Object> data);
};
+
+} } // namespace v8::internal
+
+#endif // V8_MESSAGES_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// -------------------------------------------------------------------
+
// Letters that take "an" when they begin a word pronounced normally...
const kVowelSounds = {a: true, e: true, i: true, o: true, u: true, y: true};
// ...and letters that take "an" when spoken as a letter name (e.g. "an
// F", "an X"), used for all-caps prefixes such as "XMLHttpRequest".
const kCapitalVowelSounds = {a: true, e: true, i: true, o: true, u: true,
    h: true, f: true, l: true, m: true, n: true, r: true, s: true, x: true,
    y: true};

/**
 * Prefix a constructor name with the grammatically correct indefinite
 * article, e.g. "an Object" or "a Function".
 * @param {string} cons The constructor name
 * @return {string} The name with "a " or "an " prepended ("" for "")
 */
function GetInstanceName(cons) {
  if (cons.length == 0) return "";
  var lowered = cons.charAt(0).toLowerCase();
  var sounds = kVowelSounds;
  // If both of the first two characters are upper case the name is
  // treated as an initialism and pronounced letter by letter.
  if (cons.length > 1 &&
      cons.charAt(0) != lowered &&
      cons.charAt(1) != cons.charAt(1).toLowerCase()) {
    sounds = kCapitalVowelSounds;
  }
  return (sounds[lowered] ? "an " : "a ") + cons;
}
+
+
// Templates for the messages raised by the runtime system.  The "%0",
// "%1", ... markers are positional placeholders that FormatString
// substitutes with the corresponding entry of the message's argument
// list.  Keys must match the message type names used by the C++ side.
const kMessages = {
  // Error
  cyclic_proto: "Cyclic __proto__ value",
  // TypeError
  unexpected_token: "Unexpected token %0",
  unexpected_token_number: "Unexpected number",
  unexpected_token_string: "Unexpected string",
  unexpected_token_identifier: "Unexpected identifier",
  unexpected_eos: "Unexpected end of input",
  expected_label: "Expected label",
  malformed_regexp: "Invalid regular expression: /%0/: %1",
  unterminated_regexp: "Invalid regular expression: missing /",
  pcre_error: "PCRE function %0, error code %1",
  regexp_flags: "Cannot supply flags when constructing one RegExp from another",
  invalid_lhs_in_assignment: "Invalid left-hand side in assignment",
  invalid_lhs_in_for_in: "Invalid left-hand side in for-in",
  invalid_lhs_in_postfix_op: "Invalid left-hand side expression in postfix operation",
  invalid_lhs_in_prefix_op: "Invalid left-hand side expression in prefix operation",
  multiple_defaults_in_switch: "More than one default clause in switch statement",
  newline_after_throw: "Illegal newline after throw",
  redeclaration: "%0 '%1' has already been declared",
  no_catch_or_finally: "Missing catch or finally after try",
  unknown_label: "Undefined label '%0'",
  invalid_break: "Invalid break statement",
  invalid_continue: "Invalid continue statement",
  uncaught_exception: "Uncaught %0",
  stack_trace: "Stack Trace:\n%0",
  called_non_callable: "%0 is not a function",
  undefined_method: "Object %1 has no method '%0'",
  property_not_function: "Property '%0' of object %1 is not a function",
  null_or_undefined: "Cannot access property of null or undefined",
  cannot_convert_to_primitive: "Cannot convert object to primitive value",
  not_constructor: "%0 is not a constructor",
  not_defined: "%0 is not defined",
  non_object_property_load: "Cannot read property '%0' of %1",
  non_object_property_store: "Cannot set property '%0' of %1",
  non_object_property_call: "Cannot call method '%0' of %1",
  illegal_eval: "Unsupported indirect eval() call",
  with_expression: "%0 has no properties",
  illegal_invocation: "Illegal invocation",
  no_setter_in_callback: "Cannot set property %0 of %1 which has only a getter",
  apply_non_function: "Function.prototype.apply was called on %0, which is a %1 and not a function",
  apply_wrong_args: "Function.prototype.apply: Arguments list has wrong type",
  invalid_in_operator_use: "Cannot use 'in' operator to search for '%0' in %1",
  instanceof_function_expected: "Expecting a function in instanceof check, but got %0",
  instanceof_nonobject_proto: "Function has non-object prototype '%0' in instanceof check",
  null_to_object: "Cannot convert null to object",
  // RangeError
  invalid_array_length: "Invalid array length",
  invalid_array_apply_length: "Function.prototype.apply supports only up to 1024 arguments",
  stack_overflow: "Maximum call stack size exceeded",
  apply_overflow: "Function.prototype.apply cannot support %0 arguments",
  // SyntaxError
  unable_to_parse: "Parse error",
  duplicate_regexp_flag: "Duplicate RegExp flag %0",
  unrecognized_regexp_flag: "Unrecognized RegExp flag %0",
  invalid_regexp: "Invalid RegExp pattern /%0/",
  illegal_break: "Illegal break statement",
  illegal_continue: "Illegal continue statement",
  illegal_return: "Illegal return statement",
  error_loading_debugger: "Error loading debugger %0",
};
+
+
/**
 * Expand the positional placeholders "%0", "%1", ... of a message
 * template with the detail strings of the given arguments.
 * @param {string} format The message template
 * @param {Array} args Values substituted for the placeholders
 * @return {string} The expanded message
 */
function FormatString(format, args) {
  var text = format;
  for (var index = 0; index < args.length; index++) {
    var replacement;
    try {
      replacement = ToDetailString(args[index]);
    } catch (e) {
      // Formatting an argument must never throw; fall back to a marker.
      replacement = "#<error>";
    }
    // Replace every occurrence of the placeholder "%<index>".
    text = text.split("%" + index).join(replacement);
  }
  return text;
}
+
+
/**
 * Convert a value to a string for use in an error message.  Objects that
 * still use the default Object.prototype.toString are rendered as
 * "#<an InstanceName>"; everything else uses the regular ToString.
 * @param {*} obj The value to describe
 * @return {string} A short description of the value
 */
function ToDetailString(obj) {
  if (obj == null || !IS_OBJECT(obj) ||
      obj.toString !== $Object.prototype.toString) {
    return ToString(obj);
  }
  var ctor = obj.constructor;
  // Without a named constructor there is no instance name to report.
  if (!ctor || !ctor.name) return ToString(obj);
  return "#<" + GetInstanceName(ctor.name) + ">";
}
+
+
/**
 * Create an error object tagged with a message type and its arguments.
 * @param {function} constructor The error constructor to instantiate
 * @param {string} type The message type (a key of kMessages)
 * @param {Array} args The message arguments; truncated in place if huge
 * @return {Object} The new error object
 */
function MakeGenericError(constructor, type, args) {
  if (args instanceof $Array) {
    for (var i = 0; i < args.length; i++) {
      var element = args[i];
      // Arbitrary limit: oversized array arguments are replaced by a
      // reasonable slice so the formatted message stays readable.
      if (element instanceof $Array && element.length > 100) {
        args[i] = element.slice(0, 20).concat("...");
      }
    }
  }

  var error = new constructor();
  error.type = type;
  error.arguments = args;
  return error;
}
+
+
/**
 * Setup the Script function and constructor.
 */
%FunctionSetInstanceClassName(Script, 'Script');
%AddProperty(Script.prototype, 'constructor', Script, DONT_ENUM);
%SetCode(Script, function(x) {
  // Script objects can only be created by the VM.
  throw new $Error("Not supported");
});
+
+
// Helper functions; called from the runtime system.

/**
 * Format an error message by expanding its template with its arguments.
 * @param {Object} message Object with 'type' and 'args' properties
 * @return {string} The formatted message text
 */
function FormatMessage(message) {
  var template = kMessages[message.type];
  if (!template) {
    return "<unknown message " + message.type + ">";
  }
  return FormatString(template, message.args);
}
+
+
/**
 * Compute the one-based line number of a message's start position.
 * @param {Object} message The message holding startPos and script
 * @return {number} One-based line number, or -1 if unknown
 */
function GetLineNumber(message) {
  // A start position of -1 means the position is unknown.
  if (message.startPos == -1) return -1;
  var loc = message.script.locationFromPosition(message.startPos);
  return (loc == null) ? -1 : loc.line + 1;
}
+
+
// Returns the source code line containing the given source
// position, or the empty string if the position is invalid.
function GetSourceLine(message) {
  var loc = message.script.locationFromPosition(message.startPos);
  if (loc == null) return "";
  // Clip the location to a reasonable width before extracting the text.
  loc.restrict();
  return loc.sourceText();
}
+
+
// Factory functions for the builtin error types.  Each creates a new
// instance of the corresponding error constructor tagged with the
// message type and argument list (see MakeGenericError).
function MakeTypeError(type, args) {
  return MakeGenericError($TypeError, type, args);
};


function MakeRangeError(type, args) {
  return MakeGenericError($RangeError, type, args);
};


function MakeSyntaxError(type, args) {
  return MakeGenericError($SyntaxError, type, args);
};


function MakeReferenceError(type, args) {
  return MakeGenericError($ReferenceError, type, args);
};


function MakeEvalError(type, args) {
  return MakeGenericError($EvalError, type, args);
};


function MakeError(type, args) {
  return MakeGenericError($Error, type, args);
};
+
+
/**
 * Initialize the cached source information in a script. Currently all line
 * end positions are cached.
 */
Script.prototype.initSourceInfo_ = function () {
  // Already computed?
  if (this.lineEnds_) return;

  // Record the position of every newline character in the source.
  var ends = [];
  var pos = this.source.indexOf('\n');
  while (pos != -1) {
    ends.push(pos);
    pos = this.source.indexOf('\n', pos + 1);
  }

  // If the script does not end with a line terminator, add a sentinel
  // end position just past the last character.
  if (ends[ends.length - 1] != this.source.length - 1) {
    ends.push(this.source.length);
  }
  this.lineEnds_ = ends;
};
+
+
/**
 * Get information on a specific source position.
 * @param {number} position The source position
 * @return {SourceLocation}
 *     If the position is not within the source null is returned.
 */
Script.prototype.locationFromPosition = function (position) {
  // Make sure source info has been initialized.
  this.initSourceInfo_();

  // Find the line whose end position is the first one at or beyond the
  // requested position.  Note that the first line has no lower bound.
  var lineCount = this.lineCount();
  var line = -1;
  if (position <= this.lineEnds_[0]) {
    line = 0;
  } else {
    for (var i = 1; i < lineCount; i++) {
      if (this.lineEnds_[i - 1] < position && position <= this.lineEnds_[i]) {
        line = i;
        break;
      }
    }
  }
  if (line == -1) return null;

  // Determine the line's start and end positions and the column.
  var start = line == 0 ? 0 : this.lineEnds_[line - 1] + 1;
  var end = this.lineEnds_[line];
  // Strip a trailing '\r' so CRLF sources report clean line text.
  if (end > 0 && this.source.charAt(end - 1) == '\r') end--;
  var column = position - start;

  // Adjust to resource-relative coordinates using the script offsets;
  // the column offset only applies on the script's first line.
  line += this.line_offset;
  if (line == this.line_offset) {
    column += this.column_offset;
  }

  return new SourceLocation(this, position, line, column, start, end);
};
+
+
+/**
+ * Get information on a specific source line and column possibly offset by a
+ * fixed source position. This function is used to find a source position from
+ * a line and column position. The fixed source position offset is typically
+ * used to find a source position in a function based on a line and column in
+ * the source for the function alone. The offset passed will then be the
+ * start position of the source for the function within the full script source.
+ * @param {number} opt_line The line within the source. Default value is 0
 * @param {number} opt_column The column within the line. Default value is 0
 * @param {number} opt_offset_position The offset from the beginning of the
+ * source from where the line and column calculation starts. Default value is 0
+ * @return {SourceLocation}
+ * If line is negative or not in the source null is returned.
+ */
Script.prototype.locationFromLine = function (opt_line, opt_column, opt_offset_position) {
  // Make sure source info has been initialized.
  this.initSourceInfo_();

  // Default is the first line in the script. Lines in the script are
  // relative to the offset within the resource.
  var line = 0;
  if (!IS_UNDEFINED(opt_line)) {
    line = opt_line - this.line_offset;
  }

  // Default is first column. If on the first line add the offset within the
  // resource.
  var column = opt_column || 0;
  if (line == 0) {
    column -= this.column_offset
  }

  var offset_position = opt_offset_position || 0;
  if (line < 0 || column < 0 || offset_position < 0) return null;
  if (line == 0) {
    return this.locationFromPosition(offset_position + column);
  } else {
    // Find the line where the offset position is located.
    var lineCount = this.lineCount();
    var offset_line;
    for (var i = 0; i < lineCount; i++) {
      if (offset_position <= this.lineEnds_[i]) {
        offset_line = i;
        break;
      }
    }
    // NOTE(review): if offset_position lies beyond the last line end,
    // offset_line stays undefined, the arithmetic below yields NaN and
    // locationFromPosition(NaN) returns null — confirm this implicit
    // out-of-range handling is intended.
    if (offset_line + line >= lineCount) return null;
    return this.locationFromPosition(this.lineEnds_[offset_line + line - 1] + 1 + column); // line > 0 here.
  }
}
+
+
/**
 * Get a slice of source code from the script. The boundaries for the slice
 * are specified in lines.
 * @param {number} opt_from_line The first line (zero bound) in the slice.
 *     Default is 0
 * @param {number} opt_to_line The last line (zero bound) in the slice (non
 *     inclusive). Default is the number of lines in the script
 * @return {SourceSlice} The source slice or null if the parameters were
 *     invalid
 */
Script.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
  // Make sure source info has been initialized.
  this.initSourceInfo_();

  // Default to the whole script; line numbers are resource-relative.
  var from_line = IS_UNDEFINED(opt_from_line)
      ? this.line_offset : opt_from_line;
  var to_line = IS_UNDEFINED(opt_to_line)
      ? this.line_offset + this.lineCount() : opt_to_line;

  // Convert to script-relative line numbers and clamp to the script.
  from_line -= this.line_offset;
  to_line -= this.line_offset;
  if (from_line < 0) from_line = 0;
  var count = this.lineCount();
  if (to_line > count) to_line = count;

  // Reject requests that are out of range or inverted.
  if (from_line >= count || to_line < 0 || from_line > to_line) {
    return null;
  }

  // A line starts one character past the previous line's terminator.
  var from_position = from_line == 0 ? 0 : this.lineEnds_[from_line - 1] + 1;
  var to_position = to_line == 0 ? 0 : this.lineEnds_[to_line - 1] + 1;

  // Return a source slice with line numbers re-adjusted to the resource.
  return new SourceSlice(this,
                         from_line + this.line_offset,
                         to_line + this.line_offset,
                         from_position, to_position);
}
+
+
/**
 * Get the text of a single source line.
 * @param {number} opt_line Resource-relative line number; defaults to the
 *     script's first line
 * @return {String} The line's text without its terminator, or null if the
 *     line is out of range
 */
Script.prototype.sourceLine = function (opt_line) {
  // Lines in the script are relative to the resource line offset.
  var line = IS_UNDEFINED(opt_line) ? 0 : opt_line - this.line_offset;

  // Reject out-of-range lines.
  if (line < 0 || this.lineCount() <= line) {
    return null;
  }

  // A line starts one character past the previous line's terminator and
  // ends at its own terminator (exclusive).
  var begin = line == 0 ? 0 : this.lineEnds_[line - 1] + 1;
  return this.source.substring(begin, this.lineEnds_[line]);
}
+
+
/**
 * Returns the number of source lines.
 * @return {number}
 *     Number of source lines.
 */
Script.prototype.lineCount = function() {
  // Make sure source info has been initialized.
  this.initSourceInfo_();

  // Return number of source lines (one entry per cached line end).
  return this.lineEnds_.length;
};
+
+
/**
 * Class for source location. A source location is a position within some
 * source with the following properties:
 *   script   : script object for the source
 *   line     : source line number
 *   column   : source column within the line
 *   position : position within the source
 *   start    : position of start of source context (inclusive)
 *   end      : position of end of source context (not inclusive)
 * Source text for the source context is the character interval
 * [start, end[. In most cases end will point to a newline character. It
 * might point just past the final position of the source if the last
 * source line does not end with a newline character.
 * @param {Script} script The Script object for which this is a location
 * @param {number} position Source position for the location
 * @param {number} line The line number for the location
 * @param {number} column The column within the line for the location
 * @param {number} start Source position for start of source context
 * @param {number} end Source position for end of source context
 * @constructor
 */
function SourceLocation(script, position, line, column, start, end) {
  this.script = script;
  this.line = line;
  this.column = column;
  this.position = position;
  this.start = start;
  this.end = end;
}
+
+
// Default width limit for restricted source locations.
const kLineLengthLimit = 78;

/**
 * Restrict source location start and end positions to make the source slice
 * no more than a certain number of characters wide.
 * @param {number} opt_limit The width limit of the source text with a
 *     default of 78
 * @param {number} opt_before The number of characters to prefer before the
 *     position with a default value of 10 less than the limit
 */
SourceLocation.prototype.restrict = function (opt_limit, opt_before) {
  var limit = IS_UNDEFINED(opt_limit) ? kLineLengthLimit : opt_limit;
  var before;
  if (IS_UNDEFINED(opt_before)) {
    // If no before is specified, center for small limits and prefer more
    // source before the position than after it for longer limits.
    before = (limit <= 20) ? $Math_floor(limit / 2) : limit - 10;
  } else {
    before = opt_before;
  }
  // Keep at least one character after the position.
  if (before >= limit) {
    before = limit - 1;
  }

  // If the [start, end[ interval is too big we restrict it in one or both
  // ends, always producing restricted intervals of maximum allowed size.
  if (this.end - this.start > limit) {
    var start_limit = this.position - before;
    var end_limit = this.position + limit - before;
    if (this.start < start_limit && end_limit < this.end) {
      this.start = start_limit;
      this.end = end_limit;
    } else if (this.start < start_limit) {
      this.start = this.end - limit;
    } else {
      this.end = this.start + limit;
    }
  }
};
+
+
/**
 * Get the source text for a SourceLocation.
 * @return {String}
 *     Source text for this location, the half-open interval
 *     [start, end[ of the script source.
 */
SourceLocation.prototype.sourceText = function () {
  return this.script.source.substring(this.start, this.end);
};
+
+
/**
 * Class for a source slice. A source slice is a part of a script source with
 * the following properties:
 *   script        : script object for the source
 *   from_line     : line number for the first line in the slice
 *   to_line       : source line number for the last line in the slice
 *   from_position : position of the first character in the slice
 *   to_position   : position of the last character in the slice
 * The to_line and to_position are not included in the slice, that is the
 * lines in the slice are [from_line, to_line[ and the characters are
 * [from_position, to_position[.
 * @param {Script} script The Script object for the source slice
 * @param {number} from_line
 * @param {number} to_line
 * @param {number} from_position
 * @param {number} to_position
 * @constructor
 */
function SourceSlice(script, from_line, to_line, from_position, to_position) {
  this.script = script;
  this.from_line = from_line;
  this.from_position = from_position;
  this.to_line = to_line;
  this.to_position = to_position;
}
+
+
/**
 * Get the source text for a SourceSlice.
 * @return {String} Source text for this slice, the half-open interval
 *     [from_position, to_position[. The last line will include the line
 *     terminating characters (if any).
 */
SourceSlice.prototype.sourceText = function () {
  return this.script.source.substring(this.from_position, this.to_position);
};
+
+
// Returns the offset of the given position within the containing
// line, or -1 if the position is invalid.
function GetPositionInLine(message) {
  var loc = message.script.locationFromPosition(message.startPos);
  if (loc == null) return -1;
  // Restrict first so the offset is relative to the clipped context.
  loc.restrict();
  return message.startPos - loc.start;
}
+
+
/**
 * Container for a runtime error message and its source location.
 * @param {string} type The message type (a key of kMessages)
 * @param {Array} args The message arguments
 * @param {number} startPos Start position in the script source
 * @param {number} endPos End position in the script source
 * @param {Script} script The script in which the error occurred
 * @constructor
 */
function ErrorMessage(type, args, startPos, endPos, script) {
  this.type = type;
  this.args = args;
  this.startPos = startPos;
  this.endPos = endPos;
  this.script = script;
}


// Factory used by the runtime system to create error messages.
function MakeMessage(type, args, startPos, endPos, script) {
  return new ErrorMessage(type, args, startPos, endPos, script);
}
+
+
// Format one line of a stack trace. Delegates to UnsafeGetStackTraceLine
// and converts any exception raised while formatting into an inline
// error marker, so building a stack trace can never itself throw.
function GetStackTraceLine(recv, fun, pos, isGlobal) {
  try {
    return UnsafeGetStackTraceLine(recv, fun, pos, isGlobal);
  } catch (e) {
    return "<error: " + e + ">";
  }
};
+
+
+function GetFunctionName(fun, recv) {
+ var name = %FunctionGetName(fun);
+ if (name) return name;
+ for (var prop in recv) {
+ if (recv[prop] === fun)
+ return prop;
+ }
+ return "[anonymous]";
+};
+
+
+function UnsafeGetStackTraceLine(recv, fun, pos, isTopLevel) {
+ var result = "";
+ // The global frame has no meaningful function or receiver
+ if (!isTopLevel) {
+ // If the receiver is not the global object then prefix the
+ // message send
+ if (recv !== global)
+ result += ToDetailString(recv) + ".";
+ result += GetFunctionName(fun, recv);
+ }
+ if (pos != -1) {
+ var script = %FunctionGetScript(fun);
+ var file;
+ if (script) {
+ file = %FunctionGetScript(fun).data;
+ }
+ if (file) {
+ var location = %FunctionGetScript(fun).locationFromPosition(pos);
+ if (!isTopLevel) result += "(";
+ result += file;
+ if (location != null) {
+ result += ":" + (location.line + 1) + ":" + (location.column + 1);
+ }
+ if (!isTopLevel) result += ")";
+ }
+ }
+ return (result) ? " at " + result : result;
+};
+
+
// ----------------------------------------------------------------------------
// Error implementation

/**
 * Define one of the builtin error constructors and install it on the
 * global object under the given name.
 * @param {string} name The error name, e.g. 'Error' or 'TypeError'
 */
function DefineError(name) {
  // Placeholder function; its code is replaced by %SetCode below.
  var f = function(msg) {};
  // Store the error function in both the global object
  // and the runtime object. The function is fetched
  // from the runtime object when throwing errors from
  // within the runtime system to avoid strange side
  // effects when overwriting the error functions from
  // user code.
  %AddProperty(global, name, f, DONT_ENUM);
  this['$' + name] = f;
  // Configure the error function.
  // prototype of 'Error' must be as default: new Object().
  if (name != 'Error') %FunctionSetPrototype(f, new $Error());
  %FunctionSetInstanceClassName(f, 'Error');
  f.prototype.name = name;
  f.prototype.constructor = f;
  %SetCode(f, function(m) {
    if (%IsConstructCall(this)) {
      // Called as a constructor: record the message if one was given.
      if (!IS_UNDEFINED(m)) this.message = ToString(m);
    } else {
      // Called as a plain function: behave like 'new <Error>(m)'.
      return new f(m);
    }
  });
};
+
// NOTE(review): presumably hooks $Math back onto the real
// Object.prototype during bootstrapping — confirm why this is needed
// at this point in the bootstrap sequence.
$Math.__proto__ = global.Object.prototype;

// Install the builtin error constructors.
DefineError('Error');
DefineError('TypeError');
DefineError('RangeError');
DefineError('SyntaxError');
DefineError('ReferenceError');
DefineError('EvalError');
DefineError('URIError');

// Setup extra properties of the Error.prototype object.
$Error.prototype.message = '';

%AddProperty($Error.prototype, 'toString', function() {
  // Errors created by the runtime carry a message type; format the text
  // lazily unless user code has set an own 'message' property.
  var type = this.type;
  if (type && !this.hasOwnProperty("message")) {
    return this.name + ": " + FormatMessage({ type: type, args: this.arguments });
  }
  var message = this.message;
  return this.name + (message ? (": " + message) : "");
}, DONT_ENUM);


// Boilerplate for exceptions for stack overflows. Used from
// Top::StackOverflow().
const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Touch the RegExp and Date functions to make sure that date-delay.js and
+// regexp-delay.js has been loaded. This is required as the mirrors use
+// functions within these files through the builtins object. See the
+// function DateToISO8601_ as an example.
+RegExp;
+Date;
+
+
/**
 * Construct a mirror reflecting the given value.
 * @param {*} value The value to reflect
 * @return {Mirror} The most specific mirror for the value.
 *     NOTE: the order of the checks matters — arrays, dates, functions,
 *     regexps and errors are all objects, so their specialized checks
 *     must run before the generic ObjectMirror fallback.
 */
function MakeMirror(value) {
  if (IS_UNDEFINED(value)) return new UndefinedMirror();
  if (IS_NULL(value)) return new NullMirror();
  if (IS_BOOLEAN(value)) return new BooleanMirror(value);
  if (IS_NUMBER(value)) return new NumberMirror(value);
  if (IS_STRING(value)) return new StringMirror(value);
  if (IS_ARRAY(value)) return new ArrayMirror(value);
  if (IS_DATE(value)) return new DateMirror(value);
  if (IS_FUNCTION(value)) return new FunctionMirror(value);
  if (IS_REGEXP(value)) return new RegExpMirror(value);
  if (IS_ERROR(value)) return new ErrorMirror(value);
  return new ObjectMirror(value);
}
+
+
/**
 * Inherit the prototype methods from one constructor into another.
 *
 * The Function.prototype.inherits from lang.js rewritten as a standalone
 * function (not on Function.prototype). NOTE: If this file is to be loaded
 * during bootstrapping this function needs to be rewritten using some native
 * functions as prototype setup using normal JavaScript does not work as
 * expected during bootstrapping (see mirror.js in r114903).
 *
 * @param {function} ctor Constructor function which needs to inherit the
 *     prototype
 * @param {function} superCtor Constructor function to inherit prototype from
 */
function inherits(ctor, superCtor) {
  // Use a throwaway constructor so the subclass prototype chains to the
  // superclass prototype without running the superclass constructor.
  function Bridge() {}
  Bridge.prototype = superCtor.prototype;
  ctor.super_ = superCtor.prototype;
  ctor.prototype = new Bridge();
  ctor.prototype.constructor = ctor;
}
+
+
// Type names of the different mirrors. These strings are what the 'type'
// field of the JSON protocol serialization contains (see
// Mirror.prototype.fillJSONType_).
const UNDEFINED_TYPE = 'undefined';
const NULL_TYPE = 'null';
const BOOLEAN_TYPE = 'boolean';
const NUMBER_TYPE = 'number';
const STRING_TYPE = 'string';
const OBJECT_TYPE = 'object';
const FUNCTION_TYPE = 'function';
const REGEXP_TYPE = 'regexp';
const ERROR_TYPE = 'error';
const PROPERTY_TYPE = 'property';
const ACCESSOR_TYPE = 'accessor';
const FRAME_TYPE = 'frame';
const SCRIPT_TYPE = 'script';

// Maximum length when sending strings through the JSON protocol.
const kMaxProtocolStringLength = 80;
+
// Different kinds of properties.
PropertyKind = {
  Named: 1,
  Indexed: 2
};


// Different types of properties. NOTE: value 1 is missing as it is used
// internally for map transitions.
PropertyType = {
  Normal: 0,
  ConstantFunction: 2,
  Field: 3,
  Callbacks: 4,
  Interceptor: 5
};


// Different attributes for a property.
PropertyAttribute = {
  None: NONE,
  ReadOnly: READ_ONLY,
  DontEnum: DONT_ENUM,
  DontDelete: DONT_DELETE
};
+
+
+// Mirror hierarchy:
+// - Mirror
+// - ValueMirror
+// - UndefinedMirror
+// - NullMirror
+// - NumberMirror
+// - StringMirror
+// - ObjectMirror
+// - FunctionMirror
+// - UnresolvedFunctionMirror
+// - ArrayMirror
+// - DateMirror
+// - RegExpMirror
+// - ErrorMirror
+// - PropertyMirror
+// - InterceptorPropertyMirror
+// - AccessorMirror
+// - FrameMirror
+// - ScriptMirror
+
+
/**
 * Base class for all mirror objects.
 * @param {string} type The type of the mirror
 * @constructor
 */
function Mirror(type) {
  this.type_ = type;
}


/**
 * Get the type name of this mirror.
 * @returns {string} The type the mirror was constructed with
 */
Mirror.prototype.type = function() {
  return this.type_;
};
+
+
// The is<Type> predicates report whether this mirror is an instance of
// the corresponding mirror class (subclasses included).

/** @returns {boolean} True if the mirror reflects the undefined value. */
Mirror.prototype.isUndefined = function() {
  return this instanceof UndefinedMirror;
};


/** @returns {boolean} True if the mirror reflects the null value. */
Mirror.prototype.isNull = function() {
  return this instanceof NullMirror;
};


/** @returns {boolean} True if the mirror reflects a boolean value. */
Mirror.prototype.isBoolean = function() {
  return this instanceof BooleanMirror;
};


/** @returns {boolean} True if the mirror reflects a number value. */
Mirror.prototype.isNumber = function() {
  return this instanceof NumberMirror;
};


/** @returns {boolean} True if the mirror reflects a string value. */
Mirror.prototype.isString = function() {
  return this instanceof StringMirror;
};


/** @returns {boolean} True if the mirror reflects an object. */
Mirror.prototype.isObject = function() {
  return this instanceof ObjectMirror;
};


/** @returns {boolean} True if the mirror reflects a function. */
Mirror.prototype.isFunction = function() {
  return this instanceof FunctionMirror;
};


/** @returns {boolean} True if the mirror reflects an unresolved function. */
Mirror.prototype.isUnresolvedFunction = function() {
  return this instanceof UnresolvedFunctionMirror;
};


/** @returns {boolean} True if the mirror reflects an array. */
Mirror.prototype.isArray = function() {
  return this instanceof ArrayMirror;
};


/** @returns {boolean} True if the mirror reflects a date. */
Mirror.prototype.isDate = function() {
  return this instanceof DateMirror;
};


/** @returns {boolean} True if the mirror reflects a regular expression. */
Mirror.prototype.isRegExp = function() {
  return this instanceof RegExpMirror;
};


/** @returns {boolean} True if the mirror reflects an error. */
Mirror.prototype.isError = function() {
  return this instanceof ErrorMirror;
};


/** @returns {boolean} True if the mirror reflects a property. */
Mirror.prototype.isProperty = function() {
  return this instanceof PropertyMirror;
};


/** @returns {boolean} True if the mirror reflects a property from an
    interceptor. */
Mirror.prototype.isInterceptorProperty = function() {
  return this instanceof InterceptorPropertyMirror;
};


/** @returns {boolean} True if the mirror reflects an accessor. */
Mirror.prototype.isAccessor = function() {
  return this instanceof AccessorMirror;
};


/** @returns {boolean} True if the mirror reflects a stack frame. */
Mirror.prototype.isFrame = function() {
  return this instanceof FrameMirror;
};
+
+
// Append this mirror's type as a JSON pair to the content array.
Mirror.prototype.fillJSONType_ = function(content) {
  content.push(MakeJSONPair_('type', StringToJSON_(this.type())));
};


// Fill the JSON pairs for this mirror; subclasses override this to append
// their own pairs after the type.
Mirror.prototype.fillJSON_ = function(content) {
  this.fillJSONType_(content);
};


/**
 * Serialize object in JSON format. For the basic mirrors this includes only
 * the type in the following format.
 *   {"type":"<type name>"}
 * For specialized mirrors inheriting from the base Mirror the serialization
 * is extended by the subclass' fillJSON_ implementation.
 * @param {boolean} details Indicate level of details to include
 * @return {string} JSON serialization
 */
Mirror.prototype.toJSONProtocol = function(details, propertiesKind, interceptorPropertiesKind) {
  var content = new Array();
  this.fillJSON_(content, details, propertiesKind, interceptorPropertiesKind);
  content.push(MakeJSONPair_('text', StringToJSON_(this.toText())));
  return ArrayToJSONObject_(content);
}


Mirror.prototype.toText = function() {
  // Simple fallback text representation, used when no specialization
  // exists in a subclass.
  return "#<" + builtins.GetInstanceName(this.constructor.name) + ">";
}
+
+
/**
 * Base class for all value mirror objects.
 * @param {string} type The type of the mirror
 * @param {value} value The value reflected by this mirror
 * @constructor
 * @extends Mirror
 */
function ValueMirror(type, value) {
  Mirror.call(this, type);
  this.value_ = value;
}
inherits(ValueMirror, Mirror);


/**
 * Check whether this is a primitive value.
 * @return {boolean} True if the mirror reflects a primitive value
 */
ValueMirror.prototype.isPrimitive = function() {
  switch (this.type()) {
    case 'undefined':
    case 'null':
    case 'boolean':
    case 'number':
    case 'string':
      return true;
  }
  return false;
};


/**
 * Get the actual value reflected by this mirror.
 * @return {value} The value reflected by this mirror
 */
ValueMirror.prototype.value = function() {
  return this.value_;
};
+
+
/**
 * Mirror object for Undefined.
 * @constructor
 * @extends ValueMirror
 */
function UndefinedMirror() {
  ValueMirror.call(this, UNDEFINED_TYPE, void 0);
}
inherits(UndefinedMirror, ValueMirror);


// The text form is the literal name of the value.
UndefinedMirror.prototype.toText = function() {
  return 'undefined';
};


/**
 * Mirror object for null.
 * @constructor
 * @extends ValueMirror
 */
function NullMirror() {
  ValueMirror.call(this, NULL_TYPE, null);
}
inherits(NullMirror, ValueMirror);


// The text form is the literal name of the value.
NullMirror.prototype.toText = function() {
  return 'null';
};
+
+
+/**
+ * Mirror object for boolean values.
+ * @param {boolean} value The boolean value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function BooleanMirror(value) {
+  ValueMirror.call(this, BOOLEAN_TYPE, value);
+};
+inherits(BooleanMirror, ValueMirror);
+
+
+BooleanMirror.prototype.fillJSON_ = function(content, details) {
+  // Emit the type (from the Mirror base) followed by the boolean value.
+  BooleanMirror.super_.fillJSONType_.call(this, content);
+  content.push(MakeJSONPair_('value', BooleanToJSON_(this.value_)));
+}
+
+
+// Text representation of the boolean value.
+BooleanMirror.prototype.toText = function() {
+  return this.value_ ? 'true' : 'false';
+}
+
+
+/**
+ * Mirror object for number values.
+ * @param {number} value The number value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function NumberMirror(value) {
+  ValueMirror.call(this, NUMBER_TYPE, value);
+};
+inherits(NumberMirror, ValueMirror);
+
+
+NumberMirror.prototype.fillJSON_ = function(content, details) {
+  // Emit the type (from the Mirror base) followed by the number value.
+  NumberMirror.super_.fillJSONType_.call(this, content);
+  content.push(MakeJSONPair_('value', NumberToJSON_(this.value_)));
+}
+
+
+// Text representation uses the runtime's number-to-string conversion.
+NumberMirror.prototype.toText = function() {
+  return %NumberToString(this.value_);
+}
+
+
+/**
+ * Mirror object for string values.
+ * @param {string} value The string value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function StringMirror(value) {
+ ValueMirror.call(this, STRING_TYPE, value);
+};
+inherits(StringMirror, ValueMirror);
+
+
+StringMirror.prototype.length = function() {
+ return this.value_.length;
+};
+
+
+StringMirror.prototype.fillJSON_ = function(content, details) {
+ StringMirror.super_.fillJSONType_.call(this, content);
+ content.push(MakeJSONPair_('length', NumberToJSON_(this.length())));
+ if (this.length() > kMaxProtocolStringLength) {
+ content.push(MakeJSONPair_('fromIndex', NumberToJSON_(0)));
+ content.push(MakeJSONPair_('toIndex',
+ NumberToJSON_(kMaxProtocolStringLength)));
+ var str = this.value_.substring(0, kMaxProtocolStringLength);
+ content.push(MakeJSONPair_('value', StringToJSON_(str)));
+ } else {
+ content.push(MakeJSONPair_('value', StringToJSON_(this.value_)));
+ }
+}
+
+
+StringMirror.prototype.toText = function() {
+ if (this.length() > kMaxProtocolStringLength) {
+ return this.value_.substring(0, kMaxProtocolStringLength) +
+ '... (length: ' + this.length() + ')';
+ } else {
+ return this.value_;
+ }
+}
+
+
+/**
+ * Mirror object for objects.
+ * @param {object} value The object reflected by this mirror
+ * @param {string} type Optional mirror type for subclasses; defaults to
+ *     OBJECT_TYPE
+ * @constructor
+ * @extends ValueMirror
+ */
+function ObjectMirror(value, type) {
+  ValueMirror.call(this, type || OBJECT_TYPE, value);
+};
+inherits(ObjectMirror, ValueMirror);
+
+
+// Class name of the reflected object as reported by the runtime.
+ObjectMirror.prototype.className = function() {
+  return %ClassOf(this.value_);
+};
+
+
+// Mirror on the object's 'constructor' property.
+ObjectMirror.prototype.constructorFunction = function() {
+  return MakeMirror(%DebugGetProperty(this.value_, 'constructor'));
+};
+
+
+// Mirror on the object's 'prototype' property.
+ObjectMirror.prototype.prototypeObject = function() {
+  return MakeMirror(%DebugGetProperty(this.value_, 'prototype'));
+};
+
+
+// Mirror on the object's internal prototype.
+ObjectMirror.prototype.protoObject = function() {
+  return MakeMirror(%GetPrototype(this.value_));
+};
+
+
+ObjectMirror.prototype.hasNamedInterceptor = function() {
+  // Get information on interceptors for this object. Bit 2 of the result
+  // signals a named interceptor.
+  var x = %DebugInterceptorInfo(this.value_);
+  return (x & 2) != 0;
+};
+
+
+ObjectMirror.prototype.hasIndexedInterceptor = function() {
+  // Get information on interceptors for this object. Bit 1 of the result
+  // signals an indexed interceptor.
+  var x = %DebugInterceptorInfo(this.value_);
+  return (x & 1) != 0;
+};
+
+
+/**
+ * Return the property names for this object.
+ * @param {number} kind Indicate whether named, indexed or both kinds of
+ * properties are requested
+ * @param {number} limit Limit the number of names returend to the specified
+ value
+ * @return {Array} Property names for this object
+ */
+ObjectMirror.prototype.propertyNames = function(kind, limit) {
+ // Find kind and limit and allocate array for the result
+ kind = kind || PropertyKind.Named | PropertyKind.Indexed;
+
+ var propertyNames;
+ var elementNames;
+ var total = 0;
+ if (kind & PropertyKind.Named) {
+ propertyNames = %DebugLocalPropertyNames(this.value_);
+ total += propertyNames.length;
+ }
+ if (kind & PropertyKind.Indexed) {
+ elementNames = %DebugLocalElementNames(this.value_)
+ total += elementNames.length;
+ }
+ limit = Math.min(limit || total, total);
+
+ var names = new Array(limit);
+ var index = 0;
+
+ // Copy names for named properties.
+ if (kind & PropertyKind.Named) {
+ for (var i = 0; index < limit && i < propertyNames.length; i++) {
+ names[index++] = propertyNames[i];
+ }
+ }
+
+ // Copy names for indexed properties.
+ if (kind & PropertyKind.Indexed) {
+ for (var i = 0; index < limit && i < elementNames.length; i++) {
+ names[index++] = elementNames[i];
+ }
+ }
+
+ return names;
+};
+
+
+/**
+ * Return the properties for this object as an array of PropertyMirror objects.
+ * @param {number} kind Indicate whether named, indexed or both kinds of
+ * properties are requested
+ * @param {number} limit Limit the number of properties returend to the
+ specified value
+ * @return {Array} Property mirrors for this object
+ */
+ObjectMirror.prototype.properties = function(kind, limit) {
+ var names = this.propertyNames(kind, limit);
+ var properties = new Array(names.length);
+ for (var i = 0; i < names.length; i++) {
+ properties[i] = this.property(names[i]);
+ }
+
+ return properties;
+};
+
+
+/**
+ * Return the interceptor property names for this object.
+ * @param {number} kind Indicate whether named, indexed or both kinds of
+ *     interceptor properties are requested
+ * @param {number} limit Limit the number of names returned to the specified
+ *     value. NOTE(review): the limit parameter is accepted but never applied
+ *     in this implementation.
+ * @return {Array} interceptor property names for this object
+ */
+ObjectMirror.prototype.interceptorPropertyNames = function(kind, limit) {
+  // Find kind.
+  kind = kind || PropertyKind.Named | PropertyKind.Indexed;
+  var namedInterceptorNames;
+  var indexedInterceptorNames;
+
+  // Get names for named interceptor properties.
+  if (this.hasNamedInterceptor() && kind & PropertyKind.Named) {
+    namedInterceptorNames = %DebugNamedInterceptorPropertyNames(this.value_);
+  }
+
+  // Get names for indexed interceptor properties.
+  if (this.hasIndexedInterceptor() && kind & PropertyKind.Indexed) {
+    indexedInterceptorNames = %DebugIndexedInterceptorElementNames(this.value_);
+  }
+
+  // Return either result or both concatenated.
+  if (namedInterceptorNames && indexedInterceptorNames) {
+    return namedInterceptorNames.concat(indexedInterceptorNames);
+  } else if (namedInterceptorNames) {
+    return namedInterceptorNames;
+  } else if (indexedInterceptorNames) {
+    return indexedInterceptorNames;
+  } else {
+    return new Array(0);
+  }
+};
+
+
+/**
+ * Return interceptor properties this object.
+ * @param {number} opt_kind Indicate whether named, indexed or both kinds of
+ * interceptor properties are requested
+ * @param {Array} opt_names Limit the number of properties returned to the
+ specified value
+ * @return {Array} properties this object as an array of PropertyMirror objects
+ */
+ObjectMirror.prototype.interceptorProperties = function(opt_kind, opt_names) {
+ // Find kind.
+ var kind = opt_kind || PropertyKind.Named | PropertyKind.Indexed;
+ var namedInterceptorProperties;
+ var indexedInterceptorProperties;
+
+ // Get values for named interceptor properties.
+ if (kind & PropertyKind.Named) {
+ var names = opt_names || this.interceptorPropertyNames(PropertyKind.Named);
+ namedInterceptorProperties = new Array(names.length);
+ for (i = 0; i < names.length; i++) {
+ var value = %DebugNamedInterceptorPropertyValue(this.value_, names[i]);
+ namedInterceptorProperties[i] = new InterceptorPropertyMirror(this, names[i], value);
+ }
+ }
+
+ // Get values for indexed interceptor properties.
+ if (kind & PropertyKind.Indexed) {
+ var names = opt_names || this.interceptorPropertyNames(PropertyKind.Indexed);
+ indexedInterceptorProperties = new Array(names.length);
+ for (i = 0; i < names.length; i++) {
+ // Don't try to get the value if the name is not a number.
+ if (IS_NUMBER(names[i])) {
+ var value = %DebugIndexedInterceptorElementValue(this.value_, names[i]);
+ indexedInterceptorProperties[i] = new InterceptorPropertyMirror(this, names[i], value);
+ }
+ }
+ }
+
+ // Return either result or both concattenated.
+ if (namedInterceptorProperties && indexedInterceptorProperties) {
+ return namedInterceptorProperties.concat(indexedInterceptorProperties);
+ } else if (namedInterceptorProperties) {
+ return namedInterceptorProperties;
+ } else {
+ return indexedInterceptorProperties;
+ }
+};
+
+
+/**
+ * Return the property mirror for a named local property of this object.
+ * @param {string} name The name of the property to look up
+ * @return {Mirror} PropertyMirror for the property, or an UndefinedMirror
+ *     when no local property with that name exists
+ */
+ObjectMirror.prototype.property = function(name) {
+  var details = %DebugGetLocalPropertyDetails(this.value_, %ToString(name));
+  if (details) {
+    // details[0] holds the property value and details[1] the details word.
+    return new PropertyMirror(this, name, details[0], details[1]);
+  }
+
+  // Nothing found.
+  return new UndefinedMirror();
+};
+
+
+
+/**
+ * Try to find a property from its value.
+ * @param {Mirror} value The property value to look for
+ * @return {PropertyMirror} The property with the specified value. If no
+ *     property was found with the specified value UndefinedMirror is returned
+ */
+ObjectMirror.prototype.lookupProperty = function(value) {
+  var properties = this.properties();
+
+  // Look for property value in properties.
+  for (var i = 0; i < properties.length; i++) {
+
+    // Skip properties which are defined through accessors.
+    var property = properties[i];
+    if (property.propertyType() != PropertyType.Callbacks) {
+      // A zero result from %ObjectEquals is treated as equality here.
+      // NOTE(review): confirm the %ObjectEquals return convention.
+      if (%ObjectEquals(property.value_, value.value_) == 0) {
+        return property;
+      }
+    }
+  }
+
+  // Nothing found.
+  return new UndefinedMirror();
+};
+
+
+/**
+ * Returns objects which has direct references to this object
+ * @param {number} opt_max_instances Optional parameter specifying the maximum
+ *     number of instances to return.
+ * @return {Array} The objects which has direct references to this object.
+ */
+ObjectMirror.prototype.referencedBy = function(opt_max_instances) {
+  // Find all objects with direct references to this object. Mirror.prototype
+  // is passed as the second argument - presumably a filter to exclude
+  // references held by mirror objects themselves (NOTE(review): confirm
+  // against %DebugReferencedBy in runtime.cc).
+  var result = %DebugReferencedBy(this.value_, Mirror.prototype, opt_max_instances || 0);
+
+  // Make mirrors for all the instances found.
+  for (var i = 0; i < result.length; i++) {
+    result[i] = MakeMirror(result[i]);
+  }
+
+  return result;
+};
+
+
+ObjectMirror.prototype.fillJSONProperties_ = function(content, kind, name, details) {
+ var propertyNames = this.propertyNames(kind);
+ var x = new Array(propertyNames.length);
+ for (var i = 0; i < propertyNames.length; i++) {
+ x[i] = this.property(propertyNames[i]).toJSONProtocol(details);
+ }
+ content.push(MakeJSONPair_(name || 'properties', ArrayToJSONArray_(x)));
+};
+
+
+ObjectMirror.prototype.fillJSONInterceptorProperties_ = function(content, kind, name, details) {
+ var propertyNames = this.interceptorPropertyNames(kind);
+ var x = new Array(propertyNames.length);
+ for (var i = 0; i < propertyNames.length; i++) {
+ x[i] = properties[i].toJSONProtocol(details);
+ }
+ content.push(MakeJSONPair_(name || 'interceptorProperties', ArrayToJSONArray_(x)));
+};
+
+
+ObjectMirror.prototype.fillJSON_ = function(content, details, propertiesKind, interceptorPropertiesKind) {
+ ObjectMirror.super_.fillJSONType_.call(this, content);
+ content.push(MakeJSONPair_('className', StringToJSON_(this.className())));
+ if (details) {
+ content.push(MakeJSONPair_('constructorFunction', this.constructorFunction().toJSONProtocol(false)));
+ content.push(MakeJSONPair_('protoObject', this.protoObject().toJSONProtocol(false)));
+ content.push(MakeJSONPair_('prototypeObject', this.prototypeObject().toJSONProtocol(false)));
+ }
+ if (details) {
+ this.fillJSONProperties_(content, propertiesKind)
+ if (interceptorPropertiesKind) {
+ this.fillJSONInterceptorProperties_(content, interceptorPropertiesKind)
+ }
+ }
+ if (this.hasNamedInterceptor()) {
+ content.push(MakeJSONPair_('namedInterceptor', BooleanToJSON_(true)));
+ }
+ if (this.hasIndexedInterceptor()) {
+ content.push(MakeJSONPair_('indexedInterceptor', BooleanToJSON_(true)));
+ }
+};
+
+
+ObjectMirror.prototype.toText = function() {
+ var name;
+ var ctor = this.constructorFunction();
+ if (ctor.isUndefined()) {
+ name = this.className();
+ } else {
+ name = ctor.name();
+ if (!name) {
+ name = this.className();
+ }
+ }
+ return '#<' + builtins.GetInstanceName(name) + '>';
+};
+
+
+/**
+ * Mirror object for functions.
+ * @param {function} value The function object reflected by this mirror.
+ * @constructor
+ * @extends ObjectMirror
+ */
+function FunctionMirror(value) {
+  ObjectMirror.call(this, value, FUNCTION_TYPE);
+  // Mirrors built from an actual function object are always resolved; see
+  // UnresolvedFunctionMirror for the unresolved case.
+  this.resolved_ = true;
+};
+inherits(FunctionMirror, ObjectMirror);
+
+
+/**
+ * Returns whether the function is resolved.
+ * @return {boolean} True if the function is resolved. Unresolved functions can
+ *     only originate as functions from stack frames
+ */
+FunctionMirror.prototype.resolved = function() {
+  return this.resolved_;
+};
+
+
+/**
+ * Returns the name of the function.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.name = function() {
+  return %FunctionGetName(this.value_);
+};
+
+
+/**
+ * Returns the source code for the function.
+ * @return {string or undefined} The source code for the function. If the
+ *     function is not resolved undefined will be returned.
+ */
+FunctionMirror.prototype.source = function() {
+  // Return source if function is resolved. Otherwise just fall through to
+  // return undefined.
+  if (this.resolved()) {
+    // This builtins function is context independent (only uses runtime
+    // calls and typeof).
+    return builtins.FunctionSourceString(this.value_);
+  }
+};
+
+
+/**
+ * Returns the script object for the function.
+ * @return {ScriptMirror or undefined} Script object for the function or
+ *     undefined if the function has no script
+ */
+FunctionMirror.prototype.script = function() {
+  // Return script if function is resolved. Otherwise just fall through
+  // to return undefined.
+  if (this.resolved()) {
+    var script = %FunctionGetScript(this.value_);
+    if (script) {
+      return new ScriptMirror(script);
+    }
+  }
+};
+
+
+/**
+ * Returns objects constructed by this function.
+ * @param {number} opt_max_instances Optional parameter specifying the maximum
+ * number of instances to return.
+ * @return {Array or undefined} The objects constructed by this function.
+ */
+FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
+ if (this.resolved()) {
+ // Find all objects constructed from this function.
+ var result = %DebugConstructedBy(this.value_, opt_max_instances || 0);
+
+ // Make mirrors for all the instances found.
+ for (var i = 0; i < result.length; i++) {
+ result[i] = MakeMirror(result[i]);
+ }
+
+ return result;
+ } else {
+ return [];
+ }
+};
+
+
+FunctionMirror.prototype.fillJSON_ = function(content, details) {
+ // Fill JSON properties from parent (ObjectMirror).
+ FunctionMirror.super_.fillJSON_.call(this, content, details);
+ // Add function specific properties.
+ content.push(MakeJSONPair_('name', StringToJSON_(this.name())));
+ content.push(MakeJSONPair_('resolved', BooleanToJSON_(this.resolved())));
+ if (details && this.resolved()) {
+ content.push(MakeJSONPair_('source', StringToJSON_(this.source())));
+ }
+ if (this.script()) {
+ content.push(MakeJSONPair_('script', this.script().toJSONProtocol()));
+ }
+}
+
+
+FunctionMirror.prototype.toText = function() {
+ return this.source();
+}
+
+
+/**
+ * Mirror object for unresolved functions.
+ * @param {string} value The name for the unresolved function reflected by this
+ *     mirror.
+ * @constructor
+ * @extends ObjectMirror
+ */
+function UnresolvedFunctionMirror(value) {
+  // Construct this using the ValueMirror as an unresolved function is not a
+  // real object but just a string.
+  ValueMirror.call(this, FUNCTION_TYPE, value);
+  this.propertyCount_ = 0;
+  this.elementCount_ = 0;
+  this.resolved_ = false;
+};
+inherits(UnresolvedFunctionMirror, FunctionMirror);
+
+
+// Unresolved functions are always reported with class name 'Function'.
+UnresolvedFunctionMirror.prototype.className = function() {
+  return 'Function';
+};
+
+
+// No real object is available, so constructor, prototype and proto are all
+// reported as undefined.
+UnresolvedFunctionMirror.prototype.constructorFunction = function() {
+  return new UndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.prototypeObject = function() {
+  return new UndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.protoObject = function() {
+  return new UndefinedMirror();
+};
+
+
+// For an unresolved function the reflected value is the function name.
+UnresolvedFunctionMirror.prototype.name = function() {
+  return this.value_;
+};
+
+
+// An unresolved function has no properties.
+UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
+  return [];
+}
+
+
+/**
+ * Mirror object for arrays.
+ * @param {Array} value The Array object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function ArrayMirror(value) {
+  ObjectMirror.call(this, value);
+};
+inherits(ArrayMirror, ObjectMirror);
+
+
+// Length of the reflected array.
+ArrayMirror.prototype.length = function() {
+  return this.value_.length;
+};
+
+
+ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
+ var from_index = opt_from_index || 0;
+ var to_index = opt_to_index || this.length() - 1;
+ if (from_index > to_index) return new Array();
+ var values = new Array(to_index - from_index + 1);
+ for (var i = from_index; i <= to_index; i++) {
+ var details = %DebugGetLocalPropertyDetails(this.value_, %ToString(i));
+ var value;
+ if (details) {
+ value = new PropertyMirror(this, i, details[0], details[1]);
+ } else {
+ value = new UndefinedMirror();
+ }
+ values[i - from_index] = value;
+ }
+ return values;
+}
+
+
+ArrayMirror.prototype.fillJSON_ = function(content, details) {
+  // Fill JSON as for parent (ObjectMirror) but just with named properties.
+  ArrayMirror.super_.fillJSON_.call(this, content, details, PropertyKind.Named);
+  // Fill indexed properties separately.
+  if (details) {
+    this.fillJSONProperties_(content, PropertyKind.Indexed, 'indexedProperties')
+  }
+  // Add the array length.
+  content.push(MakeJSONPair_('length', NumberToJSON_(this.length())));
+}
+
+
+/**
+ * Mirror object for dates.
+ * @param {Date} value The Date object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function DateMirror(value) {
+  ObjectMirror.call(this, value);
+};
+inherits(DateMirror, ObjectMirror);
+
+
+DateMirror.prototype.fillJSON_ = function(content, details) {
+  // Fill JSON properties from parent (ObjectMirror).
+  DateMirror.super_.fillJSON_.call(this, content, details);
+  // Add date specific properties.
+  content.push(MakeJSONPair_('value', DateToJSON_(this.value_)));
+}
+
+
+// Dates are rendered as ISO 8601 strings.
+DateMirror.prototype.toText = function() {
+  return DateToISO8601_(this.value_);
+}
+
+
+/**
+ * Mirror object for regular expressions.
+ * @param {RegExp} value The RegExp object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function RegExpMirror(value) {
+  ObjectMirror.call(this, value, REGEXP_TYPE);
+};
+inherits(RegExpMirror, ObjectMirror);
+
+
+/**
+ * Returns the source to the regular expression.
+ * @return {string or undefined} The source to the regular expression
+ */
+RegExpMirror.prototype.source = function() {
+  return this.value_.source;
+};
+
+
+/**
+ * Returns whether this regular expression has the global (g) flag set.
+ * @return {boolean} Value of the global flag
+ */
+RegExpMirror.prototype.global = function() {
+  return this.value_.global;
+};
+
+
+/**
+ * Returns whether this regular expression has the ignore case (i) flag set.
+ * @return {boolean} Value of the ignore case flag
+ */
+RegExpMirror.prototype.ignoreCase = function() {
+  return this.value_.ignoreCase;
+};
+
+
+/**
+ * Returns whether this regular expression has the multiline (m) flag set.
+ * @return {boolean} Value of the multiline flag
+ */
+RegExpMirror.prototype.multiline = function() {
+  return this.value_.multiline;
+};
+
+
+RegExpMirror.prototype.fillJSON_ = function(content, details) {
+  // Fill JSON properties from parent (ObjectMirror).
+  RegExpMirror.super_.fillJSON_.call(this, content, details);
+  // Add regexp specific properties.
+  content.push(MakeJSONPair_('source', StringToJSON_(this.source())));
+  content.push(MakeJSONPair_('global', BooleanToJSON_(this.global())));
+  content.push(MakeJSONPair_('ignoreCase', BooleanToJSON_(this.ignoreCase())));
+  content.push(MakeJSONPair_('multiline', BooleanToJSON_(this.multiline())));
+}
+
+
+RegExpMirror.prototype.toText = function() {
+  // Format like a regexp literal. Note that flags are not included.
+  return "/" + this.source() + "/";
+}
+
+
+/**
+ * Mirror object for error objects.
+ * @param {Error} value The error object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function ErrorMirror(value) {
+  ObjectMirror.call(this, value, ERROR_TYPE);
+};
+inherits(ErrorMirror, ObjectMirror);
+
+
+/**
+ * Returns the message for this error object.
+ * @return {string or undefined} The message for this error object
+ */
+ErrorMirror.prototype.message = function() {
+  return this.value_.message;
+};
+
+
+ErrorMirror.prototype.fillJSON_ = function(content, details) {
+  // Fill JSON properties from parent (ObjectMirror).
+  ErrorMirror.super_.fillJSON_.call(this, content, details);
+  // Add error specific properties.
+  content.push(MakeJSONPair_('message', StringToJSON_(this.message())));
+}
+
+
+ErrorMirror.prototype.toText = function() {
+ // Use the same text representation as in messages.js.
+ var text;
+ try {
+ str = builtins.ToDetailString(this.value_);
+ } catch (e) {
+ str = '#<an Error>';
+ }
+ return str;
+}
+
+
+/**
+ * Base mirror object for properties.
+ * @param {ObjectMirror} mirror The mirror object having this property
+ * @param {string} name The name of the property
+ * @param {Object} value The value of the property
+ * @param {number} details The property details word from the runtime
+ * @constructor
+ * @extends Mirror
+ */
+function PropertyMirror(mirror, name, value, details) {
+  Mirror.call(this, PROPERTY_TYPE);
+  this.mirror_ = mirror;
+  this.name_ = name;
+  this.value_ = value;
+  this.details_ = details;
+};
+inherits(PropertyMirror, Mirror);
+
+
+PropertyMirror.prototype.isReadOnly = function() {
+ return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
+}
+
+
+PropertyMirror.prototype.isEnum = function() {
+ return (this.attributes() & PropertyAttribute.DontEnum) == 0;
+}
+
+
+PropertyMirror.prototype.canDelete = function() {
+ return (this.attributes() & PropertyAttribute.DontDelete) == 0;
+}
+
+
+PropertyMirror.prototype.name = function() {
+ return this.name_;
+}
+
+
+PropertyMirror.prototype.isIndexed = function() {
+  // A property counts as indexed when its name consists only of decimal
+  // digits. NOTE(review): an empty name (or a name without character
+  // indexing, e.g. a numeric name_) also yields true here - confirm intent.
+  for (var i = 0; i < this.name_.length; i++) {
+    if (this.name_[i] < '0' || '9' < this.name_[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+PropertyMirror.prototype.value = function() {
+  // Mirror on the property value, with special handling for accessor and
+  // interceptor properties.
+  if (this.propertyType() == PropertyType.Callbacks) {
+    // TODO(1242933): AccessorMirror should have getter/setter values.
+    return new AccessorMirror();
+  } else if (this.type() == PropertyType.Interceptor) {
+    // NOTE(review): this.type() returns the mirror type string while
+    // PropertyType.Interceptor is a numeric constant, so this comparison
+    // appears to never be true - was this.propertyType() intended?
+    return new UndefinedMirror();
+  } else {
+    return MakeMirror(this.value_);
+  }
+}
+
+
+// Property attribute bits decoded from the details word by the runtime.
+PropertyMirror.prototype.attributes = function() {
+  return %DebugPropertyAttributesFromDetails(this.details_);
+}
+
+
+// Property type decoded from the details word by the runtime.
+PropertyMirror.prototype.propertyType = function() {
+  return %DebugPropertyTypeFromDetails(this.details_);
+}
+
+
+// Insertion index of the property decoded from the details word.
+PropertyMirror.prototype.insertionIndex = function() {
+  return %DebugPropertyIndexFromDetails(this.details_);
+}
+
+
+PropertyMirror.prototype.fillJSON_ = function(content, details) {
+  content.push(MakeJSONPair_('name', StringToJSON_(this.name())));
+  content.push(MakeJSONPair_('value', this.value().toJSONProtocol(details)));
+  // Attributes and property type are only emitted when they differ from the
+  // defaults (None / Normal) to keep the serialization compact.
+  if (this.attributes() != PropertyAttribute.None) {
+    content.push(MakeJSONPair_('attributes', NumberToJSON_(this.attributes())));
+  }
+  if (this.propertyType() != PropertyType.Normal) {
+    content.push(MakeJSONPair_('propertyType', NumberToJSON_(this.propertyType())));
+  }
+}
+
+
+/**
+ * Mirror object for interceptor named properties.
+ * @param {ObjectMirror} mirror The mirror object having this property
+ * @param {String} name The name of the property
+ * @param {value} value The value of the property
+ * @constructor
+ * @extends PropertyMirror
+ */
+function InterceptorPropertyMirror(mirror, name, value) {
+  // NOTE(review): PropertyType.Interceptor is passed where PropertyMirror
+  // expects a details word - confirm the details-decoding runtime calls
+  // handle this value.
+  PropertyMirror.call(this, mirror, name, value, PropertyType.Interceptor);
+};
+inherits(InterceptorPropertyMirror, PropertyMirror);
+
+
+/**
+ * Mirror object for property accessors.
+ * @param {Function} getter The getter function for this accessor
+ * @param {Function} setter The setter function for this accessor
+ * @constructor
+ * @extends Mirror
+ */
+function AccessorMirror(getter, setter) {
+  Mirror.call(this, ACCESSOR_TYPE);
+  this.getter_ = getter;
+  this.setter_ = setter;
+};
+inherits(AccessorMirror, Mirror);
+
+
+/**
+ * Returns whether this accessor is native or not. A native accessor is either
+ * a VM builtin or provided through the API. A non native accessor is defined
+ * in JavaScript using the __defineGetter__ and/or __defineSetter__ functions.
+ * @return {boolean} True if the accessor is native
+ */
+AccessorMirror.prototype.isNative = function() {
+  return IS_UNDEFINED(this.getter_) && IS_UNDEFINED(this.setter_);
+}
+
+
+/**
+ * Returns a mirror for the function of a non native getter.
+ * @return {FunctionMirror} Function mirror for the getter set using
+ *     __defineGetter__.
+ */
+AccessorMirror.prototype.getter = function(details) {
+  return MakeMirror(this.getter_);
+}
+
+
+/**
+ * Returns a mirror for the function of a non native setter.
+ * @return {FunctionMirror} Function mirror for the setter set using
+ *     __defineSetter__.
+ */
+AccessorMirror.prototype.setter = function(details) {
+  return MakeMirror(this.setter_);
+}
+
+
+/**
+ * Serialize the accessor mirror into JSON format. For accessors it has the
+ * following format.
+ *   {"type":"accessor",
+ *    "native":<boolean>,
+ *    "getter":<function mirror JSON serialization>,
+ *    "setter":<function mirror JSON serialization>}
+ * Getter and setter are only included for non native accessors.
+ * @param {Array} content Array collecting the JSON name/value pairs
+ * @param {boolean} details Indicate level of details to include
+ */
+AccessorMirror.prototype.fillJSON_ = function(content, details) {
+  AccessorMirror.super_.fillJSONType_.call(this, content);
+  if (this.isNative()) {
+    content.push(MakeJSONPair_('native', BooleanToJSON_(true)));
+  } else {
+    content.push(MakeJSONPair_('getter', this.getter().toJSONProtocol(false)));
+    content.push(MakeJSONPair_('setter', this.setter().toJSONProtocol(false)));
+  }
+}
+
+
+// Indices into the frame details array returned from %GetFrameDetails (see
+// Runtime_GetFrameDetails in runtime.cc). The first eight entries are fixed.
+const kFrameDetailsFrameIdIndex = 0;
+const kFrameDetailsReceiverIndex = 1;
+const kFrameDetailsFunctionIndex = 2;
+const kFrameDetailsArgumentCountIndex = 3;
+const kFrameDetailsLocalCountIndex = 4;
+const kFrameDetailsSourcePositionIndex = 5;
+const kFrameDetailsConstructCallIndex = 6;
+const kFrameDetailsDebuggerFrameIndex = 7;
+const kFrameDetailsFirstDynamicIndex = 8;
+
+// Layout of each dynamic (argument or local) name/value entry.
+const kFrameDetailsNameIndex = 0;
+const kFrameDetailsValueIndex = 1;
+const kFrameDetailsNameValueSize = 2;
+
+/**
+ * Wrapper for the frame details information retrieved from the VM. The frame
+ * details from the VM is an array with the following content. See runtime.cc
+ * Runtime_GetFrameDetails.
+ *   0: Id
+ *   1: Receiver
+ *   2: Function
+ *   3: Argument count
+ *   4: Local count
+ *   5: Source position
+ *   6: Construct call
+ *   7: Debugger frame
+ *   Arguments name, value
+ *   Locals name, value
+ * @param {number} break_id Current break id
+ * @param {number} index Frame number
+ * @constructor
+ */
+function FrameDetails(break_id, index) {
+  this.break_id_ = break_id;
+  this.details_ = %GetFrameDetails(break_id, index);
+};
+
+
+// Each accessor first calls %CheckExecutionState with the break id this
+// object was created for - presumably to reject access once the VM has left
+// that break state (NOTE(review): confirm %CheckExecutionState semantics).
+FrameDetails.prototype.frameId = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsFrameIdIndex];
+}
+
+
+FrameDetails.prototype.receiver = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsReceiverIndex];
+}
+
+
+FrameDetails.prototype.func = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsFunctionIndex];
+}
+
+
+FrameDetails.prototype.isConstructCall = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsConstructCallIndex];
+}
+
+
+FrameDetails.prototype.isDebuggerFrame = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsDebuggerFrameIndex];
+}
+
+
+FrameDetails.prototype.argumentCount = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsArgumentCountIndex];
+}
+
+
+FrameDetails.prototype.argumentName = function(index) {
+ %CheckExecutionState(this.break_id_);
+ if (index >= 0 && index < this.argumentCount()) {
+ return this.details_[kFrameDetailsFirstDynamicIndex +
+ index * kFrameDetailsNameValueSize +
+ kFrameDetailsNameIndex]
+ }
+}
+
+
+FrameDetails.prototype.argumentValue = function(index) {
+ %CheckExecutionState(this.break_id_);
+ if (index >= 0 && index < this.argumentCount()) {
+ return this.details_[kFrameDetailsFirstDynamicIndex +
+ index * kFrameDetailsNameValueSize +
+ kFrameDetailsValueIndex]
+ }
+}
+
+
+// Number of locals in the frame.
+FrameDetails.prototype.localCount = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsLocalCountIndex];
+}
+
+
+// Source position for the frame.
+FrameDetails.prototype.sourcePosition = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsSourcePositionIndex];
+}
+
+
+FrameDetails.prototype.localName = function(index) {
+ %CheckExecutionState(this.break_id_);
+ if (index >= 0 && index < this.localCount()) {
+ var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+ return this.details_[locals_offset +
+ index * kFrameDetailsNameValueSize +
+ kFrameDetailsNameIndex]
+ }
+}
+
+
+FrameDetails.prototype.localValue = function(index) {
+ %CheckExecutionState(this.break_id_);
+ if (index >= 0 && index < this.localCount()) {
+ var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+ return this.details_[locals_offset +
+ index * kFrameDetailsNameValueSize +
+ kFrameDetailsValueIndex]
+ }
+}
+
+
+/**
+ * Mirror object for stack frames.
+ * @param {number} break_id The break id in the VM for which this frame is
+ *     valid
+ * @param {number} index The frame index (top frame is index 0)
+ * @constructor
+ * @extends Mirror
+ */
+function FrameMirror(break_id, index) {
+  Mirror.call(this, FRAME_TYPE);
+  this.break_id_ = break_id;
+  this.index_ = index;
+  // The raw frame information is fetched from the VM once at construction.
+  this.details_ = new FrameDetails(break_id, index);
+};
+inherits(FrameMirror, Mirror);
+
+
+// Frame index within the stack (0 is the top frame).
+FrameMirror.prototype.index = function() {
+  return this.index_;
+};
+
+
+FrameMirror.prototype.func = function() {
+  // Get the function for this frame from the VM.
+  var f = this.details_.func();
+
+  // Create a function mirror. NOTE: MakeMirror cannot be used here as the
+  // value returned from the VM might be a string if the function for the
+  // frame is unresolved.
+  if (IS_FUNCTION(f)) {
+    return new FunctionMirror(f);
+  } else {
+    return new UnresolvedFunctionMirror(f);
+  }
+};
+
+
+// Mirror on the frame receiver.
+FrameMirror.prototype.receiver = function() {
+  return MakeMirror(this.details_.receiver());
+};
+
+
+// Whether the frame is a construct call.
+FrameMirror.prototype.isConstructCall = function() {
+  return this.details_.isConstructCall();
+};
+
+
+// Whether the frame belongs to the debugger itself.
+FrameMirror.prototype.isDebuggerFrame = function() {
+  return this.details_.isDebuggerFrame();
+};
+
+
+FrameMirror.prototype.argumentCount = function() {
+  return this.details_.argumentCount();
+};
+
+
+FrameMirror.prototype.argumentName = function(index) {
+  return this.details_.argumentName(index);
+};
+
+
+// Mirror on the value of the argument at the given index.
+FrameMirror.prototype.argumentValue = function(index) {
+  return MakeMirror(this.details_.argumentValue(index));
+};
+
+
+FrameMirror.prototype.localCount = function() {
+  return this.details_.localCount();
+};
+
+
+FrameMirror.prototype.localName = function(index) {
+  return this.details_.localName(index);
+};
+
+
+// Mirror on the value of the local at the given index.
+FrameMirror.prototype.localValue = function(index) {
+  return MakeMirror(this.details_.localValue(index));
+};
+
+
+FrameMirror.prototype.sourcePosition = function() {
+  return this.details_.sourcePosition();
+};
+
+
// Source location object for the current position, or undefined when the
// function or its script cannot be resolved.
FrameMirror.prototype.sourceLocation = function() {
  var func = this.func();
  if (func.resolved() && func.script()) {
    return func.script().locationFromPosition(this.sourcePosition());
  }
};


// Current source line, or undefined when the location is unknown.
FrameMirror.prototype.sourceLine = function() {
  if (!this.func().resolved()) return;
  var location = this.sourceLocation();
  if (location) {
    return location.line;
  }
};


// Current source column, or undefined when the location is unknown.
FrameMirror.prototype.sourceColumn = function() {
  if (!this.func().resolved()) return;
  var location = this.sourceLocation();
  if (location) {
    return location.column;
  }
};


// Text of the current source line, or undefined when the location is unknown.
FrameMirror.prototype.sourceLineText = function() {
  if (!this.func().resolved()) return;
  var location = this.sourceLocation();
  if (location) {
    return location.sourceText();
  }
};
+
+
// Evaluate source in the context of this frame and return a mirror for the
// result. Delegates to the %DebugEvaluate VM native.
FrameMirror.prototype.evaluate = function(source) {
  var result = %DebugEvaluate(this.break_id_, this.details_.frameId(), source);
  return MakeMirror(result);
};
+
+
// Fill the JSON protocol representation of this frame into content.
// Fixes over the previous version: 'arg' was assigned without 'var'
// (leaking a global), and 'x' was declared twice and reused for both the
// arguments and the locals arrays. Locals now use ArrayToJSONObject_ for
// consistency with how arguments are encoded (identical output).
FrameMirror.prototype.fillJSON_ = function(content, details) {
  FrameMirror.super_.fillJSONType_.call(this, content);
  content.push(MakeJSONPair_('index', NumberToJSON_(this.index())));
  content.push(MakeJSONPair_('receiver', this.receiver().toJSONProtocol(false)));
  content.push(MakeJSONPair_('func', this.func().toJSONProtocol(false)));
  content.push(MakeJSONPair_('constructCall', BooleanToJSON_(this.isConstructCall())));
  content.push(MakeJSONPair_('debuggerFrame', BooleanToJSON_(this.isDebuggerFrame())));

  // Encode the arguments as objects with an optional name and a value.
  var argument_count = this.argumentCount();
  var args = new Array(argument_count);
  for (var i = 0; i < argument_count; i++) {
    var arg = new Array();
    var argument_name = this.argumentName(i);
    if (argument_name) {
      arg.push(MakeJSONPair_('name', StringToJSON_(argument_name)));
    }
    arg.push(MakeJSONPair_('value', this.argumentValue(i).toJSONProtocol(false)));
    args[i] = ArrayToJSONObject_(arg);
  }
  content.push(MakeJSONPair_('arguments', ArrayToJSONArray_(args)));

  // Encode the locals as objects with a name and a value.
  var local_count = this.localCount();
  var locals = new Array(local_count);
  for (var i = 0; i < local_count; i++) {
    var local = new Array();
    local.push(MakeJSONPair_('name', StringToJSON_(this.localName(i))));
    local.push(MakeJSONPair_('value', this.localValue(i).toJSONProtocol(false)));
    locals[i] = ArrayToJSONObject_(local);
  }
  content.push(MakeJSONPair_('locals', ArrayToJSONArray_(locals)));
  content.push(MakeJSONPair_('position', NumberToJSON_(this.sourcePosition())));

  // Line, column and source line text are only included when the source
  // location can be resolved.
  var line = this.sourceLine();
  if (!IS_UNDEFINED(line)) {
    content.push(MakeJSONPair_('line', NumberToJSON_(line)));
  }
  var column = this.sourceColumn();
  if (!IS_UNDEFINED(column)) {
    content.push(MakeJSONPair_('column', NumberToJSON_(column)));
  }
  var source_line_text = this.sourceLineText();
  if (!IS_UNDEFINED(source_line_text)) {
    content.push(MakeJSONPair_('sourceLineText', StringToJSON_(source_line_text)));
  }
}
+
+
FrameMirror.prototype.invocationText = function() {
  // Format frame invocation (receiver, function and arguments).
  var result = '';
  var func = this.func();
  var receiver = this.receiver();
  if (this.isConstructCall()) {
    // For constructor frames display new followed by the function name.
    result += 'new ';
    result += func.name() ? func.name() : '[anonymous]';
  } else if (this.isDebuggerFrame()) {
    result += '[debugger]';
  } else {
    // If the receiver has a className which is 'global' don't display it.
    var display_receiver = !receiver.className || receiver.className() != 'global';
    if (display_receiver) {
      result += receiver.toText();
    }
    // Try to find the function as a property in the receiver. Include the
    // prototype chain in the lookup.
    var property = new UndefinedMirror();
    if (!receiver.isUndefined()) {
      for (var r = receiver; !r.isNull() && property.isUndefined(); r = r.protoObject()) {
        property = r.lookupProperty(func);
      }
    }
    if (!property.isUndefined()) {
      // The function invoked was found on the receiver. Use the property name
      // for the backtrace.
      if (!property.isIndexed()) {
        if (display_receiver) {
          result += '.';
        }
        result += property.name();
      } else {
        // Indexed properties are shown with bracket notation.
        result += '[';
        result += property.name();
        result += ']';
      }
      // Also known as - if the name in the function doesn't match the name
      // under which it was looked up.
      if (func.name() && func.name() != property.name()) {
        result += '(aka ' + func.name() + ')';
      }
    } else {
      // The function invoked was not found on the receiver. Use the function
      // name if available for the backtrace.
      if (display_receiver) {
        result += '.';
      }
      result += func.name() ? func.name() : '[anonymous]';
    }
  }

  // Render arguments for normal frames.
  if (!this.isDebuggerFrame()) {
    result += '(';
    for (var i = 0; i < this.argumentCount(); i++) {
      if (i != 0) result += ', ';
      // Show 'name=' before the value when the argument name is known.
      if (this.argumentName(i)) {
        result += this.argumentName(i);
        result += '=';
      }
      result += this.argumentValue(i).toText();
    }
    result += ')';
  }

  return result;
}
+
+
FrameMirror.prototype.sourceAndPositionText = function() {
  // Format source and position, e.g. "script.js line 4 column 2 (position 42)".
  var result = '';
  var func = this.func();
  if (func.resolved()) {
    if (func.script()) {
      if (func.script().name()) {
        result += func.script().name();
      } else {
        result += '[unnamed]';
      }
      // Debugger frames have no meaningful user-source position.
      if (!this.isDebuggerFrame()) {
        var location = this.sourceLocation();
        // Line/column/position are shown 1-based; '?' when unknown.
        result += ' line ';
        result += !IS_UNDEFINED(location) ? (location.line + 1) : '?';
        result += ' column ';
        result += !IS_UNDEFINED(location) ? (location.column + 1) : '?';
        if (!IS_UNDEFINED(this.sourcePosition())) {
          result += ' (position ' + (this.sourcePosition() + 1) + ')';
        }
      }
    } else {
      result += '[no source]';
    }
  } else {
    result += '[unresolved]';
  }

  return result;
}
+
+
// Format the frame's local variables, one " var name = value" entry per
// line, separated (not terminated) by newlines. Empty string when there
// are no locals.
FrameMirror.prototype.localsText = function() {
  var entries = [];
  var locals_count = this.localCount();
  for (var i = 0; i < locals_count; i++) {
    entries.push(' var ' + this.localName(i) + ' = ' +
                 this.localValue(i).toText());
  }
  return entries.join('\n');
}
+
+
// One-line text representation of the frame: a two-digit zero-padded frame
// index, the invocation text and the source position. When opt_locals is
// true the locals are appended on the following lines.
FrameMirror.prototype.toText = function(opt_locals) {
  var pad = this.index() <= 9 ? '0' : '';
  var result = '#' + pad + this.index() + ' ' +
               this.invocationText() + ' ' +
               this.sourceAndPositionText();
  if (opt_locals) {
    result += '\n' + this.localsText();
  }
  return result;
}
+
+
+/**
+ * Mirror object for script source.
+ * @param {Script} script The script object
+ * @constructor
+ * @extends Mirror
+ */
+function ScriptMirror(script) {
+ Mirror.call(this, SCRIPT_TYPE);
+ this.script_ = script;
+};
+inherits(ScriptMirror, Mirror);
+
+
+ScriptMirror.prototype.name = function() {
+ return this.script_.name;
+};
+
+
+ScriptMirror.prototype.lineOffset = function() {
+ return this.script_.line_offset;
+};
+
+
+ScriptMirror.prototype.columnOffset = function() {
+ return this.script_.column_offset;
+};
+
+
+ScriptMirror.prototype.scriptType = function() {
+ return this.script_.type;
+};
+
+
+ScriptMirror.prototype.lineCount = function() {
+ return this.script_.lineCount();
+};
+
+
+ScriptMirror.prototype.locationFromPosition = function(position) {
+ return this.script_.locationFromPosition(position);
+}
+
+
+ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
+ return this.script_.sourceSlice(opt_from_line, opt_to_line);
+}
+
+
+ScriptMirror.prototype.fillJSON_ = function(content, details) {
+ ScriptMirror.super_.fillJSONType_.call(this, content);
+ if (this.name()) {
+ content.push(MakeJSONPair_('name', StringToJSON_(this.name())));
+ }
+ content.push(MakeJSONPair_('lineOffset', NumberToJSON_(this.lineOffset())));
+ content.push(MakeJSONPair_('columnOffset', NumberToJSON_(this.columnOffset())));
+ content.push(MakeJSONPair_('lineCount', NumberToJSON_(this.lineCount())));
+ content.push(MakeJSONPair_('scriptType', NumberToJSON_(this.scriptType())));
+}
+
+
// Text representation of the script: its name followed by line information.
// When the script does not start at line 0 a line range is shown, otherwise
// just the line count.
ScriptMirror.prototype.toText = function() {
  var lines;
  if (this.lineOffset() > 0) {
    lines = this.lineOffset() + '-' +
            (this.lineOffset() + this.lineCount() - 1);
  } else {
    lines = String(this.lineCount());
  }
  return this.name() + ' (lines: ' + lines + ')';
}
+
+
// Format a JSON object member '"name":value'. The value is expected to be
// already JSON encoded.
function MakeJSONPair_(name, value) {
  return ['"', name, '":', value].join('');
};


// Wrap already-encoded JSON members in an object literal.
function ArrayToJSONObject_(content) {
  return ['{', content.join(','), '}'].join('');
};


// Wrap already-encoded JSON values in an array literal.
function ArrayToJSONArray_(content) {
  return ['[', content.join(','), ']'].join('');
};


// Encode a boolean value as JSON ('true' or 'false').
function BooleanToJSON_(value) {
  return String(value);
};


// Encode a number as JSON.
function NumberToJSON_(value) {
  return String(value);
};
+
+
// Mapping of some control characters to avoid the \uXXXX syntax for most
// commonly used control characters.
const ctrlCharMap_ = {
  '\b': '\\b',
  '\t': '\\t',
  '\n': '\\n',
  '\f': '\\f',
  '\r': '\\r',
  '"' : '\\"',
  '\\': '\\\\'
};


// Regular expression testing for ", \ and control characters (0x00 - 0x1F).
const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');


// Regular expression matching ", \ and control characters (0x00 - 0x1F)
// globally.
const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
+
+
+/**
+ * Convert a String to its JSON representation (see http://www.json.org/). To
+ * avoid depending on the String object this method calls the functions in
+ * string.js directly and not through the value.
+ * @param {String} value The String value to format as JSON
+ * @return {string} JSON formatted String value
+ */
+function StringToJSON_(value) {
+ // Check for" , \ and control characters (0x00 - 0x1F). No need to call
+ // RegExpTest as ctrlchar is constructed using RegExp.
+ if (ctrlCharTest_.test(value)) {
+ // Replace ", \ and control characters (0x00 - 0x1F).
+ return '"' +
+ value.replace(ctrlCharMatch_, function (char) {
+ // Use charmap if possible.
+ var mapped = ctrlCharMap_[char];
+ if (mapped) return mapped;
+ mapped = char.charCodeAt();
+ // Convert control character to unicode escape sequence.
+ return '\\u00' +
+ %NumberToRadixString(Math.floor(mapped / 16), 16) +
+ %NumberToRadixString(mapped % 16, 16);
+ })
+ + '"';
+ }
+
+ // Simple string with no special characters.
+ return '"' + value + '"';
+};
+
+
+/**
+ * Convert a Date to ISO 8601 format. To avoid depending on the Date object
+ * this method calls the functions in date.js directly and not through the
+ * value.
+ * @param {Date} value The Date value to format as JSON
+ * @return {string} JSON formatted Date value
+ */
+function DateToISO8601_(value) {
+ function f(n) {
+ return n < 10 ? '0' + n : n;
+ }
+ function g(n) {
+ return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
+ }
+ return builtins.GetUTCFullYearFrom(value) + '-' +
+ f(builtins.GetUTCMonthFrom(value) + 1) + '-' +
+ f(builtins.GetUTCDateFrom(value)) + 'T' +
+ f(builtins.GetUTCHoursFrom(value)) + ':' +
+ f(builtins.GetUTCMinutesFrom(value)) + ':' +
+ f(builtins.GetUTCSecondsFrom(value)) + '.' +
+ g(builtins.GetUTCMillisecondsFrom(value)) + 'Z';
+};
+
+/**
+ * Convert a Date to ISO 8601 format. To avoid depending on the Date object
+ * this method calls the functions in date.js directly and not through the
+ * value.
+ * @param {Date} value The Date value to format as JSON
+ * @return {string} JSON formatted Date value
+ */
+function DateToJSON_(value) {
+ return '"' + DateToISO8601_(value) + '"';
+};
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <signal.h>
+#include <string>
+#include <map>
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "natives.h"
+#include "platform.h"
+#include "serialize.h"
+
// Command line flag: -h prints the usage message.
DEFINE_bool(h, false, "print this message");

namespace v8 { namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
  DECLARE_bool(log_code);
#endif
} }

// use explicit namespace to avoid clashing with types in namespace v8
namespace i = v8::internal;
using namespace v8;

// Capacity of a CounterCollection; counter registrations beyond this fail.
static const unsigned int kMaxCounters = 256;
+
+// A single counter in a counter collection.
+class Counter {
+ public:
+ static const int kMaxNameSize = 64;
+ int32_t* Bind(const wchar_t* name) {
+ int i;
+ for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
+ name_[i] = static_cast<char>(name[i]);
+ }
+ name_[i] = '\0';
+ return &counter_;
+ }
+ private:
+ int32_t counter_;
+ uint8_t name_[kMaxNameSize];
+};
+
+
// A set of counters and associated information. An instance of this
// class is stored directly in the memory-mapped counters file if
// the --save-counters option is used.
// NOTE(review): the field layout (including the magic number first) is part
// of the file format; do not reorder.
class CounterCollection {
 public:
  CounterCollection() {
    magic_number_ = 0xDEADFACE;
    max_counters_ = kMaxCounters;
    max_name_size_ = Counter::kMaxNameSize;
    counters_in_use_ = 0;
  }
  // Hand out the next unused counter slot, or NULL when all kMaxCounters
  // slots are taken.
  Counter* GetNextCounter() {
    if (counters_in_use_ == kMaxCounters) return NULL;
    return &counters_[counters_in_use_++];
  }
 private:
  uint32_t magic_number_;   // Identifies a valid counters file.
  uint32_t max_counters_;
  uint32_t max_name_size_;
  uint32_t counters_in_use_;
  Counter counters_[kMaxCounters];
};
+
+
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters;
static CounterCollection* counters = &local_counters;


// Maps counter names to the int32 storage handed out by Counter::Bind.
typedef std::map<std::wstring, int*> CounterMap;
typedef std::map<std::wstring, int*>::iterator CounterMapIterator;
static CounterMap counter_table_;
+
+// Callback receiver when v8 has a counter to track.
+static int* counter_callback(const wchar_t* name) {
+ std::wstring counter = name;
+ // See if this counter name is already known.
+ if (counter_table_.find(counter) != counter_table_.end())
+ return counter_table_[counter];
+
+ Counter* ctr = counters->GetNextCounter();
+ if (ctr == NULL) return NULL;
+ int* ptr = ctr->Bind(name);
+ counter_table_[counter] = ptr;
+ return ptr;
+}
+
+
+// Write C++ code that defines Snapshot::snapshot_ to contain the snapshot
+// to the file given by filename. Only the first size chars are written.
+static int WriteInternalSnapshotToFile(const char* filename,
+ const char* str,
+ int size) {
+ FILE* f = fopen(filename, "wb");
+ if (f == NULL) {
+ i::OS::PrintError("Cannot open file %s for reading.\n", filename);
+ return 0;
+ }
+ fprintf(f, "// Autogenerated snapshot file. Do not edit.\n\n");
+ fprintf(f, "#include \"v8.h\"\n");
+ fprintf(f, "#include \"platform.h\"\n\n");
+ fprintf(f, "#include \"snapshot.h\"\n\n");
+ fprintf(f, "namespace v8 {\nnamespace internal {\n\n");
+ fprintf(f, "char Snapshot::data_[] = {");
+ int written = 0;
+ written += fprintf(f, "%i", str[0]);
+ for (int i = 1; i < size; ++i) {
+ written += fprintf(f, ",%i", str[i]);
+ // The following is needed to keep the line length low on Visual C++:
+ if (i % 512 == 0) fprintf(f, "\n");
+ }
+ fprintf(f, "};\n\n");
+ fprintf(f, "int Snapshot::size_ = %d;\n\n", size);
+ fprintf(f, "} } // namespace v8::internal\n");
+ fclose(f);
+ return written;
+}
+
+
// Entry point of mksnapshot: boots a V8 context with the shell extensions,
// serializes the heap and writes the result as a C++ source file (the path
// is the single positional argument).
int main(int argc, char** argv) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // By default, log code create information in the snapshot.
  i::FLAG_log_code = true;
#endif
  // Print the usage if an error occurs when parsing the command line
  // flags or if the help flag is set.
  int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
  if (result > 0 || argc != 2 || FLAG_h) {
    ::printf("Usage: %s [flag] ... outfile\n", argv[0]);
    i::FlagList::Print(NULL, false);
    // Exit status 0 only when help was explicitly requested.
    return !FLAG_h;
  }

  v8::V8::SetCounterFunction(counter_callback);
  v8::HandleScope scope;

  // Bootstrap a context with the standard shell extensions so their code
  // ends up in the snapshot.
  const int kExtensionCount = 5;
  const char* extension_list[kExtensionCount] = { "v8/print",
                                                  "v8/load",
                                                  "v8/quit",
                                                  "v8/version",
                                                  "v8/gc" };
  v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
  v8::Context::New(&extensions);

  // TODO(1247464): Cache delayed scripts.
  // Get rid of unreferenced scripts.
  i::Heap::CollectGarbage(0, i::OLD_SPACE);
  i::Serializer ser;
  ser.Serialize();
  char* str;
  int len;
  ser.Finalize(&str, &len);

  WriteInternalSnapshotToFile(argv[1], str, len);

  i::DeleteArray(str);

  return 0;
}
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_NATIVES_H_
+#define V8_NATIVES_H_
+
namespace v8 { namespace internal {

// Callback type used when enumerating the built-in script sources.
typedef bool (*NativeSourceCallback)(Vector<const char> name,
                                     Vector<const char> source,
                                     int index);

// Static accessors for the JavaScript sources compiled into the binary.
class Natives {
 public:
  // Number of built-in scripts.
  static int GetBuiltinsCount();
  // Number of delayed/lazy loading scripts.
  static int GetDelayCount();

  // These are used to access built-in scripts.
  // The delayed script has an index in the interval [0, GetDelayCount()).
  // The non-delayed script has an index in the interval
  // [GetDelayCount(), GetNativesCount()).
  // NOTE(review): GetNativesCount() is not declared in this class -
  // presumably GetBuiltinsCount() is meant; confirm against the generated
  // natives source.
  static int GetIndex(const char* name);
  static Vector<const char> GetScriptSource(int index);
  static Vector<const char> GetScriptName(int index);
};

} } // namespace v8::internal
+
+#endif // V8_NATIVES_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "disasm.h"
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+
+static const char* TypeToString(InstanceType type);
+
+
// Print this object to stdout, dispatching on its tagged kind (Smi,
// Failure or heap object).
void Object::Print() {
  if (IsSmi()) {
    Smi::cast(this)->SmiPrint();
  } else if (IsFailure()) {
    Failure::cast(this)->FailurePrint();
  } else {
    HeapObject::cast(this)->HeapObjectPrint();
  }
  Flush();
}


// Print this object followed by a newline.
void Object::PrintLn() {
  Print();
  PrintF("\n");
}


// Verify this object's invariants, dispatching like Print().
void Object::Verify() {
  if (IsSmi()) {
    Smi::cast(this)->SmiVerify();
  } else if (IsFailure()) {
    Failure::cast(this)->FailureVerify();
  } else {
    HeapObject::cast(this)->HeapObjectVerify();
  }
}


// Verify that p is either a Smi or a valid heap pointer.
void Object::VerifyPointer(Object* p) {
  if (p->IsHeapObject()) {
    HeapObject::VerifyHeapPointer(p);
  } else {
    ASSERT(p->IsSmi());
  }
}
+
+
// A Smi has no internal structure; just check the tag.
void Smi::SmiVerify() {
  ASSERT(IsSmi());
}


// A Failure has no internal structure; just check the tag.
void Failure::FailureVerify() {
  ASSERT(IsFailure());
}


// Print a one-line header of the form "<address>: [<id>]".
void HeapObject::PrintHeader(const char* id) {
  PrintF("%p: [%s]\n", this, id);
}
+
+
// Print a heap object by dispatching on its instance type. Strings are
// handled up front since all string types precede FIRST_NONSTRING_TYPE.
void HeapObject::HeapObjectPrint() {
  InstanceType instance_type = map()->instance_type();

  // Some of the *Print() methods below allocate handles.
  HandleScope scope;
  if (instance_type < FIRST_NONSTRING_TYPE) {
    String::cast(this)->StringPrint();
    return;
  }

  switch (instance_type) {
    case MAP_TYPE:
      Map::cast(this)->MapPrint();
      break;
    case HEAP_NUMBER_TYPE:
      HeapNumber::cast(this)->HeapNumberPrint();
      break;
    case FIXED_ARRAY_TYPE:
      FixedArray::cast(this)->FixedArrayPrint();
      break;
    case BYTE_ARRAY_TYPE:
      ByteArray::cast(this)->ByteArrayPrint();
      break;
    case FILLER_TYPE:
      PrintF("filler");
      break;
    case JS_OBJECT_TYPE:  // fall through
    case JS_ARRAY_TYPE:
      JSObject::cast(this)->JSObjectPrint();
      break;
    case ODDBALL_TYPE:
      // Oddballs (undefined, null, true, ...) print via their string form.
      Oddball::cast(this)->to_string()->Print();
      break;
    case JS_FUNCTION_TYPE:
      JSFunction::cast(this)->JSFunctionPrint();
      break;
    case JS_GLOBAL_OBJECT_TYPE:
      JSGlobalObject::cast(this)->JSGlobalObjectPrint();
      break;
    case JS_BUILTINS_OBJECT_TYPE:
      JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint();
      break;
    case JS_VALUE_TYPE:
      // Wrapper objects print their wrapped value.
      JSValue::cast(this)->value()->Print();
      break;
    case CODE_TYPE:
      Code::cast(this)->CodePrint();
      break;
    case PROXY_TYPE:
      Proxy::cast(this)->ProxyPrint();
      break;
    case SHARED_FUNCTION_INFO_TYPE:
      SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint();
      break;

// Generate a case per struct type in STRUCT_LIST.
#define MAKE_STRUCT_CASE(NAME, Name, name) \
  case NAME##_TYPE:                        \
    Name::cast(this)->Name##Print();       \
    break;
  STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE

    default:
      PrintF("UNKNOWN TYPE %d", map()->instance_type());
      UNREACHABLE();
      break;
  }
}
+
+
// Verify a heap object by dispatching on its instance type. Strings are
// handled up front since all string types precede FIRST_NONSTRING_TYPE.
void HeapObject::HeapObjectVerify() {
  InstanceType instance_type = map()->instance_type();

  if (instance_type < FIRST_NONSTRING_TYPE) {
    String::cast(this)->StringVerify();
    return;
  }

  switch (instance_type) {
    case MAP_TYPE:
      Map::cast(this)->MapVerify();
      break;
    case HEAP_NUMBER_TYPE:
      HeapNumber::cast(this)->HeapNumberVerify();
      break;
    case FIXED_ARRAY_TYPE:
      FixedArray::cast(this)->FixedArrayVerify();
      break;
    case BYTE_ARRAY_TYPE:
      ByteArray::cast(this)->ByteArrayVerify();
      break;
    case CODE_TYPE:
      Code::cast(this)->CodeVerify();
      break;
    case ODDBALL_TYPE:
      Oddball::cast(this)->OddballVerify();
      break;
    case JS_OBJECT_TYPE:
      JSObject::cast(this)->JSObjectVerify();
      break;
    case JS_VALUE_TYPE:
      JSValue::cast(this)->JSValueVerify();
      break;
    case JS_FUNCTION_TYPE:
      JSFunction::cast(this)->JSFunctionVerify();
      break;
    case JS_GLOBAL_OBJECT_TYPE:
      JSGlobalObject::cast(this)->JSGlobalObjectVerify();
      break;
    case JS_BUILTINS_OBJECT_TYPE:
      JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
      break;
    case JS_ARRAY_TYPE:
      JSArray::cast(this)->JSArrayVerify();
      break;
    case FILLER_TYPE:
      // Fillers carry no invariants of their own.
      break;
    case PROXY_TYPE:
      Proxy::cast(this)->ProxyVerify();
      break;
    case SHARED_FUNCTION_INFO_TYPE:
      SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
      break;

// Generate a case per struct type in STRUCT_LIST.
#define MAKE_STRUCT_CASE(NAME, Name, name) \
  case NAME##_TYPE:                        \
    Name::cast(this)->Name##Verify();      \
    break;
  STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE

    default:
      UNREACHABLE();
      break;
  }
}
+
+
// Verify that p is a heap object lying inside the managed heap.
void HeapObject::VerifyHeapPointer(Object* p) {
  ASSERT(p->IsHeapObject());
  ASSERT(Heap::Contains(HeapObject::cast(p)));
}


// A heap number has no internal structure beyond its tag.
void HeapNumber::HeapNumberVerify() {
  ASSERT(IsHeapNumber());
}


// Byte arrays print only the address of their payload.
void ByteArray::ByteArrayPrint() {
  PrintF("byte array, data starts at %p", GetDataStartAddress());
}


// A byte array has no internal structure beyond its tag.
void ByteArray::ByteArrayVerify() {
  ASSERT(IsByteArray());
}
+
+
// Print the object's named properties. Fast-mode objects walk the map's
// descriptor array; dictionary-mode objects delegate to the dictionary.
void JSObject::PrintProperties() {
  if (HasFastProperties()) {
    for (DescriptorReader r(map()->instance_descriptors());
         !r.eos();
         r.advance()) {
      PrintF("   ");
      r.GetKey()->StringPrint();
      PrintF(": ");
      if (r.type() == FIELD) {
        properties()->get(r.GetFieldIndex())->ShortPrint();
        PrintF(" (field at offset %d)\n", r.GetFieldIndex());
      } else if (r.type() == CONSTANT_FUNCTION) {
        r.GetConstantFunction()->ShortPrint();
        PrintF(" (constant function)\n");
      } else if (r.type() == CALLBACKS) {
        r.GetCallbacksObject()->ShortPrint();
        PrintF(" (callback)\n");
      } else if (r.type() == MAP_TRANSITION) {
        PrintF(" (map transition)\n");
      } else {
        // No other descriptor types are expected here.
        UNREACHABLE();
      }
    }
  } else {
    property_dictionary()->Print();
  }
}
+
+
// Print the object's indexed elements. Fast-mode elements are a FixedArray
// printed index by index; otherwise the backing store prints itself.
void JSObject::PrintElements() {
  if (HasFastElements()) {
    FixedArray* p = FixedArray::cast(elements());
    for (int i = 0; i < p->length(); i++) {
      PrintF("   %d: ", i);
      p->get(i)->ShortPrint();
      PrintF("\n");
    }
  } else {
    elements()->Print();
  }
}
+
+
// Print a JSObject: its map, prototype, then properties and elements.
void JSObject::JSObjectPrint() {
  PrintF("%p: [JSObject]\n", this);
  PrintF(" - map = %p\n", map());
  PrintF(" - prototype = %p\n", GetPrototype());
  PrintF(" {\n");
  PrintProperties();
  PrintElements();
  PrintF(" }\n");
}


// Verify a JSObject: backing stores are valid heap pointers and, for
// fast-mode objects, the map's unused-field count matches the property
// array's spare capacity.
void JSObject::JSObjectVerify() {
  VerifyHeapPointer(properties());
  VerifyHeapPointer(elements());
  if (HasFastProperties()) {
    CHECK(map()->unused_property_fields() ==
          (properties()->length() - map()->NextFreePropertyIndex()));
  }
}
+
+
// Map an InstanceType to a human-readable name for debug printing. The
// SHORT/MEDIUM/LONG size variants of each string representation collapse
// onto a single name.
static const char* TypeToString(InstanceType type) {
  switch (type) {
    case MAP_TYPE: return "MAP";
    case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
    case SHORT_SYMBOL_TYPE:
    case MEDIUM_SYMBOL_TYPE:
    case LONG_SYMBOL_TYPE: return "SYMBOL";
    case SHORT_ASCII_SYMBOL_TYPE:
    case MEDIUM_ASCII_SYMBOL_TYPE:
    case LONG_ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
    case SHORT_SLICED_SYMBOL_TYPE:
    case MEDIUM_SLICED_SYMBOL_TYPE:
    case LONG_SLICED_SYMBOL_TYPE: return "SLICED_SYMBOL";
    case SHORT_SLICED_ASCII_SYMBOL_TYPE:
    case MEDIUM_SLICED_ASCII_SYMBOL_TYPE:
    case LONG_SLICED_ASCII_SYMBOL_TYPE: return "SLICED_ASCII_SYMBOL";
    case SHORT_CONS_SYMBOL_TYPE:
    case MEDIUM_CONS_SYMBOL_TYPE:
    case LONG_CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
    case SHORT_CONS_ASCII_SYMBOL_TYPE:
    case MEDIUM_CONS_ASCII_SYMBOL_TYPE:
    case LONG_CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
    case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
    case MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE:
    case LONG_EXTERNAL_ASCII_SYMBOL_TYPE:
    case SHORT_EXTERNAL_SYMBOL_TYPE:
    case MEDIUM_EXTERNAL_SYMBOL_TYPE:
    case LONG_EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
    case SHORT_ASCII_STRING_TYPE:
    case MEDIUM_ASCII_STRING_TYPE:
    case LONG_ASCII_STRING_TYPE: return "ASCII_STRING";
    case SHORT_STRING_TYPE:
    case MEDIUM_STRING_TYPE:
    case LONG_STRING_TYPE: return "TWO_BYTE_STRING";
    case SHORT_CONS_STRING_TYPE:
    case MEDIUM_CONS_STRING_TYPE:
    case LONG_CONS_STRING_TYPE:
    case SHORT_CONS_ASCII_STRING_TYPE:
    case MEDIUM_CONS_ASCII_STRING_TYPE:
    case LONG_CONS_ASCII_STRING_TYPE: return "CONS_STRING";
    case SHORT_SLICED_STRING_TYPE:
    case MEDIUM_SLICED_STRING_TYPE:
    case LONG_SLICED_STRING_TYPE:
    case SHORT_SLICED_ASCII_STRING_TYPE:
    case MEDIUM_SLICED_ASCII_STRING_TYPE:
    case LONG_SLICED_ASCII_STRING_TYPE: return "SLICED_STRING";
    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
    case MEDIUM_EXTERNAL_ASCII_STRING_TYPE:
    case LONG_EXTERNAL_ASCII_STRING_TYPE:
    case SHORT_EXTERNAL_STRING_TYPE:
    case MEDIUM_EXTERNAL_STRING_TYPE:
    case LONG_EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
    case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
    case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
    case FILLER_TYPE: return "FILLER";
    case JS_OBJECT_TYPE: return "JS_OBJECT";
    case ODDBALL_TYPE: return "ODDBALL";
    case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
    case JS_FUNCTION_TYPE: return "JS_FUNCTION";
    case CODE_TYPE: return "CODE";
    case JS_ARRAY_TYPE: return "JS_ARRAY";
    case JS_VALUE_TYPE: return "JS_VALUE";
    case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
    case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
    case PROXY_TYPE: return "PROXY";
    case SMI_TYPE: return "SMI";
// Struct types map directly to their list name.
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
  STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
  }
  return "UNKNOWN";
}
+
+
// Print a Map: instance type/size, unused fields, descriptors, prototype
// and constructor.
void Map::MapPrint() {
  HeapObject::PrintHeader("Map");
  PrintF(" - type: %s\n", TypeToString(instance_type()));
  PrintF(" - instance size: %d\n", instance_size());
  PrintF(" - unused property fields: %d\n", unused_property_fields());
  PrintF(" - instance descriptors: ");
  instance_descriptors()->ShortPrint();
  PrintF("\n - prototype: ");
  prototype()->ShortPrint();
  PrintF("\n - constructor: ");
  constructor()->ShortPrint();
  PrintF("\n");
}


// Verify a Map: maps live outside new space, have a valid instance type,
// a plausible instance size, and valid prototype/descriptor pointers.
void Map::MapVerify() {
  ASSERT(!Heap::InNewSpace(this));
  ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
  ASSERT(kPointerSize <= instance_size()
         && instance_size() < Heap::Capacity());
  VerifyHeapPointer(prototype());
  VerifyHeapPointer(instance_descriptors());
}
+
+
// Print a FixedArray: its length followed by each element.
void FixedArray::FixedArrayPrint() {
  HeapObject::PrintHeader("FixedArray");
  PrintF(" - length: %d", length());
  for (int i = 0; i < length(); i++) {
    PrintF("\n  [%d]: ", i);
    get(i)->ShortPrint();
  }
  PrintF("\n");
}


// Verify a FixedArray: each element is either a valid heap pointer or a
// verifiable immediate.
void FixedArray::FixedArrayVerify() {
  for (int i = 0; i < length(); i++) {
    Object* e = get(i);
    if (e->IsHeapObject()) {
      VerifyHeapPointer(e);
    } else {
      e->Verify();
    }
  }
}
+
+
// Print a JSValue wrapper and its wrapped primitive value.
// NOTE(review): the header says "ValueObject" while sibling printers use
// the class name - presumably historical; confirm before changing output.
void JSValue::JSValuePrint() {
  HeapObject::PrintHeader("ValueObject");
  value()->Print();
}


// Verify a JSValue: the wrapped value, if a heap object, is a valid pointer.
void JSValue::JSValueVerify() {
  Object* v = value();
  if (v->IsHeapObject()) {
    VerifyHeapPointer(v);
  }
}
+
+
+// Debug dump of a String. Symbols are prefixed with '#' and printed
+// without a closing quote; cons strings are marked with a leading c";
+// ordinary strings are wrapped in plain double quotes. Characters are
+// emitted one at a time via Get(i).
+void String::StringPrint() {
+ if (IsSymbol()) {
+ PrintF("#");
+ } else if (IsConsString()) {
+ PrintF("c\"");
+ } else {
+ PrintF("\"");
+ }
+
+ for (int i = 0; i < length(); i++) {
+ PrintF("%c", Get(i));
+ }
+
+ if (!IsSymbol()) PrintF("\"");
+}
+
+
+// Heap-verifier hook for String: length must be a non-negative smi, and
+// symbols must not reside in new space.
+void String::StringVerify() {
+ CHECK(IsString());
+ CHECK(length() >= 0 && length() <= Smi::kMaxValue);
+ if (IsSymbol()) {
+ CHECK(!Heap::InNewSpace(this));
+ }
+}
+
+
+// Debug dump of a JSFunction: map, boilerplate flag, initial map (only
+// when present), shared function info, name, context and code, followed
+// by the object's named properties and elements.
+void JSFunction::JSFunctionPrint() {
+ HeapObject::PrintHeader("Function");
+ PrintF(" - map = 0x%p\n", map());
+ PrintF(" - is boilerplate: %s\n", IsBoilerplate() ? "yes" : "no");
+ PrintF(" - initial_map = ");
+ if (has_initial_map()) {
+ initial_map()->ShortPrint();
+ }
+ PrintF("\n - shared_info = ");
+ shared()->ShortPrint();
+ PrintF("\n - name = ");
+ shared()->name()->Print();
+ PrintF("\n - context = ");
+ // unchecked_context(): presumably skips a type check that could fail
+ // on partially initialized functions -- TODO confirm.
+ unchecked_context()->ShortPrint();
+ PrintF("\n - code = ");
+ code()->ShortPrint();
+ PrintF("\n");
+
+ PrintProperties();
+ PrintElements();
+
+ PrintF("\n");
+}
+
+
+void JSFunction::JSFunctionVerify() {
+ CHECK(IsJSFunction());
+ VerifyObjectField(kPrototypeOrInitialMapOffset);
+}
+
+
+void SharedFunctionInfo::SharedFunctionInfoPrint() {
+ HeapObject::PrintHeader("SharedFunctionInfo");
+ PrintF(" - name: ");
+ name()->ShortPrint();
+ PrintF("\n - expected_nof_properties: %d", expected_nof_properties());
+ PrintF("\n - instance class name =");
+ instance_class_name()->Print();
+ PrintF("\n - code =");
+ code()->ShortPrint();
+ PrintF("\n - source code =");
+ GetSourceCode()->ShortPrint();
+ PrintF("\n - lazy load: %s",
+ lazy_load_data() == Heap::undefined_value() ? "no" : "yes");
+ // Script files are often large, hard to read.
+ // PrintF("\n - script =");
+ // script()->Print();
+ PrintF("\n - function token position = %d", function_token_position());
+ PrintF("\n - start position = %d", start_position());
+ PrintF("\n - end position = %d", end_position());
+ PrintF("\n - is expression = %d", is_expression());
+ PrintF("\n - debug info = ");
+ debug_info()->Print();
+ PrintF("\n - length = %d", length());
+ PrintF("\n");
+}
+
+void SharedFunctionInfo::SharedFunctionInfoVerify() {
+ CHECK(IsSharedFunctionInfo());
+ VerifyObjectField(kNameOffset);
+ VerifyObjectField(kCodeOffset);
+ VerifyObjectField(kInstanceClassNameOffset);
+ VerifyObjectField(kExternalReferenceDataOffset);
+ VerifyObjectField(kLazyLoadDataOffset);
+ VerifyObjectField(kScriptOffset);
+ VerifyObjectField(kDebugInfoOffset);
+}
+
+
+void JSGlobalObject::JSGlobalObjectPrint() {
+ PrintF("global ");
+ JSObjectPrint();
+}
+
+
+void JSGlobalObject::JSGlobalObjectVerify() {
+ CHECK(IsJSGlobalObject());
+ JSObjectVerify();
+ for (int i = GlobalObject::kBuiltinsOffset;
+ i < JSGlobalObject::kSize;
+ i += kPointerSize) {
+ VerifyObjectField(i);
+ }
+}
+
+
+void JSBuiltinsObject::JSBuiltinsObjectPrint() {
+ PrintF("builtins ");
+ JSObjectPrint();
+}
+
+
+void JSBuiltinsObject::JSBuiltinsObjectVerify() {
+ CHECK(IsJSBuiltinsObject());
+ JSObjectVerify();
+ for (int i = GlobalObject::kBuiltinsOffset;
+ i < JSBuiltinsObject::kSize;
+ i += kPointerSize) {
+ VerifyObjectField(i);
+ }
+}
+
+
+// Heap-verifier hook for Oddball. The to_number payload must be either
+// the canonical NaN heap number or a smi in {-1, 0, 1}.
+void Oddball::OddballVerify() {
+ CHECK(IsOddball());
+ VerifyHeapPointer(to_string());
+ Object* number = to_number();
+ if (number->IsHeapObject()) {
+ ASSERT(number == Heap::nan_value());
+ } else {
+ ASSERT(number->IsSmi());
+ int value = Smi::cast(number)->value();
+ ASSERT(value == 0 || value == 1 || value == -1);
+ }
+}
+
+
+// Maps a Code::Kind enumerator to its printable name. Falls through to
+// UNREACHABLE() so that adding a Kind without extending this switch is
+// caught in debug builds.
+const char* Code::Kind2String(Kind kind) {
+ switch (kind) {
+ case FUNCTION: return "FUNCTION";
+ case STUB: return "STUB";
+ case BUILTIN: return "BUILTIN";
+ case LOAD_IC: return "LOAD_IC";
+ case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
+ case STORE_IC: return "STORE_IC";
+ case KEYED_STORE_IC: return "KEYED_STORE_IC";
+ case CALL_IC: return "CALL_IC";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Code::CodePrint() {
+ HeapObject::PrintHeader("Code");
+ PrintF("kind = %s", Kind2String(kind()));
+
+ PrintF("\nInstructions (size = %d)\n", instruction_size());
+ Disassembler::Decode(NULL /*use PrintF*/, this);
+ PrintF("\n");
+
+ PrintF("RelocInfo (size = %d)\n", relocation_size());
+ for (RelocIterator it(this); !it.done(); it.next())
+ it.rinfo()->Print();
+ PrintF("\n");
+}
+
+
+void Code::CodeVerify() {
+ ASSERT(ic_flag() == IC_TARGET_IS_ADDRESS);
+ for (RelocIterator it(this); !it.done(); it.next()) {
+ it.rinfo()->Verify();
+ }
+}
+
+
+void JSArray::JSArrayVerify() {
+ JSObjectVerify();
+ ASSERT(length()->IsNumber() || length()->IsUndefined());
+ ASSERT(elements()->IsUndefined() || elements()->IsFixedArray());
+}
+
+
+void Proxy::ProxyPrint() {
+ PrintF("proxy to %p", proxy());
+}
+
+
+void Proxy::ProxyVerify() {
+ ASSERT(IsProxy());
+}
+
+
+// Debug dump of a Dictionary: one "key: value" line per occupied slot.
+// Slots whose key fails IsKey() (empty/deleted sentinels) are skipped.
+void Dictionary::Print() {
+ int capacity = Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = KeyAt(i);
+ if (IsKey(k)) {
+ PrintF(" ");
+ if (k->IsString()) {
+ String::cast(k)->StringPrint();
+ } else {
+ k->ShortPrint();
+ }
+ PrintF(": ");
+ ValueAt(i)->ShortPrint();
+ PrintF("\n");
+ }
+ }
+}
+
+
+void AccessorInfo::AccessorInfoVerify() {
+ CHECK(IsAccessorInfo());
+ VerifyPointer(getter());
+ VerifyPointer(setter());
+ VerifyPointer(name());
+ VerifyPointer(data());
+ VerifyPointer(flag());
+}
+
+void AccessorInfo::AccessorInfoPrint() {
+ PrintF("AccessorInfo");
+ PrintF("\n - getter: ");
+ getter()->ShortPrint();
+ PrintF("\n - setter: ");
+ setter()->ShortPrint();
+ PrintF("\n - name: ");
+ name()->ShortPrint();
+ PrintF("\n - data: ");
+ data()->ShortPrint();
+ PrintF("\n - flag: ");
+ flag()->ShortPrint();
+}
+
+void AccessCheckInfo::AccessCheckInfoVerify() {
+ CHECK(IsAccessCheckInfo());
+ VerifyPointer(named_callback());
+ VerifyPointer(indexed_callback());
+ VerifyPointer(data());
+}
+
+void AccessCheckInfo::AccessCheckInfoPrint() {
+ PrintF("AccessCheckInfo");
+ PrintF("\n - named_callback: ");
+ named_callback()->ShortPrint();
+ PrintF("\n - indexed_callback: ");
+ indexed_callback()->ShortPrint();
+ PrintF("\n - data: ");
+ data()->ShortPrint();
+}
+
+void InterceptorInfo::InterceptorInfoVerify() {
+ CHECK(IsInterceptorInfo());
+ VerifyPointer(getter());
+ VerifyPointer(setter());
+ VerifyPointer(query());
+ VerifyPointer(deleter());
+ VerifyPointer(enumerator());
+ VerifyPointer(data());
+}
+
+void InterceptorInfo::InterceptorInfoPrint() {
+ PrintF("InterceptorInfo");
+ PrintF("\n - getter: ");
+ getter()->ShortPrint();
+ PrintF("\n - setter: ");
+ setter()->ShortPrint();
+ PrintF("\n - query: ");
+ query()->ShortPrint();
+ PrintF("\n - deleter: ");
+ deleter()->ShortPrint();
+ PrintF("\n - enumerator: ");
+ enumerator()->ShortPrint();
+ PrintF("\n - data: ");
+ data()->ShortPrint();
+}
+
+void CallHandlerInfo::CallHandlerInfoVerify() {
+ CHECK(IsCallHandlerInfo());
+ VerifyPointer(callback());
+ VerifyPointer(data());
+}
+
+void CallHandlerInfo::CallHandlerInfoPrint() {
+ PrintF("CallHandlerInfo");
+ PrintF("\n - callback: ");
+ callback()->ShortPrint();
+ PrintF("\n - data: ");
+ data()->ShortPrint();
+}
+
+void TemplateInfo::TemplateInfoVerify() {
+ VerifyPointer(tag());
+ VerifyPointer(property_list());
+}
+
+void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
+ CHECK(IsFunctionTemplateInfo());
+ TemplateInfoVerify();
+ VerifyPointer(serial_number());
+ VerifyPointer(call_code());
+ VerifyPointer(internal_field_count());
+ VerifyPointer(property_accessors());
+ VerifyPointer(prototype_template());
+ VerifyPointer(parent_template());
+ VerifyPointer(named_property_handler());
+ VerifyPointer(indexed_property_handler());
+ VerifyPointer(instance_template());
+ VerifyPointer(signature());
+ VerifyPointer(access_check_info());
+}
+
+void FunctionTemplateInfo::FunctionTemplateInfoPrint() {
+ PrintF("FunctionTemplateInfo");
+ PrintF("\n - tag: ");
+ tag()->ShortPrint();
+ PrintF("\n - property_list: ");
+ property_list()->ShortPrint();
+ PrintF("\n - serial_number: ");
+ serial_number()->ShortPrint();
+ PrintF("\n - call_code: ");
+ call_code()->ShortPrint();
+ PrintF("\n - internal_field_count: ");
+ internal_field_count()->ShortPrint();
+ PrintF("\n - property_accessors: ");
+ property_accessors()->ShortPrint();
+ PrintF("\n - prototype_template: ");
+ prototype_template()->ShortPrint();
+ PrintF("\n - parent_template: ");
+ parent_template()->ShortPrint();
+ PrintF("\n - named_property_handler: ");
+ named_property_handler()->ShortPrint();
+ PrintF("\n - indexed_property_handler: ");
+ indexed_property_handler()->ShortPrint();
+ PrintF("\n - instance_template: ");
+ instance_template()->ShortPrint();
+ PrintF("\n - signature: ");
+ signature()->ShortPrint();
+ PrintF("\n - access_check_info: ");
+ access_check_info()->ShortPrint();
+ PrintF("\n - hidden_prototype: %s", hidden_prototype() ? "true" : "false");
+ PrintF("\n - undetectable: %s", undetectable() ? "true" : "false");
+ PrintF("\n - need_access_check: %s", needs_access_check() ? "true" : "false");
+}
+
+void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
+ CHECK(IsObjectTemplateInfo());
+ TemplateInfoVerify();
+ VerifyPointer(constructor());
+}
+
+void ObjectTemplateInfo::ObjectTemplateInfoPrint() {
+ PrintF("ObjectTemplateInfo");
+ PrintF("\n - constructor");
+ constructor()->ShortPrint();
+}
+
+void SignatureInfo::SignatureInfoVerify() {
+ CHECK(IsSignatureInfo());
+ VerifyPointer(receiver());
+ VerifyPointer(args());
+}
+
+void SignatureInfo::SignatureInfoPrint() {
+ PrintF("SignatureInfo");
+ PrintF("\n - receiver");
+ receiver()->ShortPrint();
+ PrintF("\n - args");
+ args()->ShortPrint();
+}
+
+void TypeSwitchInfo::TypeSwitchInfoVerify() {
+ CHECK(IsTypeSwitchInfo());
+ VerifyPointer(types());
+}
+
+void TypeSwitchInfo::TypeSwitchInfoPrint() {
+ PrintF("TypeSwitchInfo");
+ PrintF("\n - types");
+ types()->ShortPrint();
+}
+
+
+void Script::ScriptVerify() {
+ CHECK(IsScript());
+ VerifyPointer(source());
+ VerifyPointer(name());
+ line_offset()->SmiVerify();
+ column_offset()->SmiVerify();
+ type()->SmiVerify();
+}
+
+
+void Script::ScriptPrint() {
+ HeapObject::PrintHeader("Script");
+ PrintF("\n - source: ");
+ source()->ShortPrint();
+ PrintF("\n - name: ");
+ name()->ShortPrint();
+ PrintF("\n - line_offset: ");
+ line_offset()->ShortPrint();
+ PrintF("\n - column_offset: ");
+ column_offset()->ShortPrint();
+ PrintF("\n - type: ");
+ type()->ShortPrint();
+ PrintF("\n");
+}
+
+
+void DebugInfo::DebugInfoVerify() {
+ CHECK(IsDebugInfo());
+ VerifyPointer(shared());
+ VerifyPointer(original_code());
+ VerifyPointer(code());
+ VerifyPointer(break_points());
+}
+
+
+void DebugInfo::DebugInfoPrint() {
+ PrintF("DebugInfo");
+ PrintF("\n - shared");
+ shared()->ShortPrint();
+ PrintF("\n - original_code");
+ original_code()->ShortPrint();
+ PrintF("\n - code");
+ code()->ShortPrint();
+ PrintF("\n - break_points");
+ break_points()->ShortPrint();
+}
+
+
+void BreakPointInfo::BreakPointInfoVerify() {
+ CHECK(IsBreakPointInfo());
+ code_position()->SmiVerify();
+ source_position()->SmiVerify();
+ statement_position()->SmiVerify();
+ VerifyPointer(break_point_objects());
+}
+
+
+void BreakPointInfo::BreakPointInfoPrint() {
+ PrintF("BreakPointInfo");
+ PrintF("\n - code_position %d", code_position());
+ PrintF("\n - source_position %d", source_position());
+ PrintF("\n - statement_position %d", statement_position());
+ PrintF("\n - break_point_objects ");
+ break_point_objects()->ShortPrint();
+}
+
+
+// Accumulates fast/slow property and element usage for this object into
+// *info. For fast representations, used vs. unused is derived from the
+// map (properties) or from counting holes (elements); for dictionary
+// representations it is element count vs. remaining capacity.
+void JSObject::IncrementSpillStatistics(SpillInformation* info) {
+ info->number_of_objects_++;
+ // Named properties
+ if (HasFastProperties()) {
+ info->number_of_objects_with_fast_properties_++;
+ info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
+ info->number_of_fast_unused_fields_ += map()->unused_property_fields();
+ } else {
+ Dictionary* dict = property_dictionary();
+ info->number_of_slow_used_properties_ += dict->NumberOfElements();
+ info->number_of_slow_unused_properties_ +=
+ dict->Capacity() - dict->NumberOfElements();
+ }
+ // Indexed properties
+ if (HasFastElements()) {
+ info->number_of_objects_with_fast_elements_++;
+ int holes = 0;
+ FixedArray* e = FixedArray::cast(elements());
+ int len = e->length();
+ for (int i = 0; i < len; i++) {
+ if (e->get(i) == Heap::the_hole_value()) holes++;
+ }
+ info->number_of_fast_used_elements_ += len - holes;
+ info->number_of_fast_unused_elements_ += holes;
+ } else {
+ Dictionary* dict = element_dictionary();
+ info->number_of_slow_used_elements_ += dict->NumberOfElements();
+ info->number_of_slow_unused_elements_ +=
+ dict->Capacity() - dict->NumberOfElements();
+ }
+}
+
+
+void JSObject::SpillInformation::Clear() {
+ number_of_objects_ = 0;
+ number_of_objects_with_fast_properties_ = 0;
+ number_of_objects_with_fast_elements_ = 0;
+ number_of_fast_used_fields_ = 0;
+ number_of_fast_unused_fields_ = 0;
+ number_of_slow_used_properties_ = 0;
+ number_of_slow_unused_properties_ = 0;
+ number_of_fast_used_elements_ = 0;
+ number_of_fast_unused_elements_ = 0;
+ number_of_slow_used_elements_ = 0;
+ number_of_slow_unused_elements_ = 0;
+}
+
+// Prints the accumulated spill statistics. Slow-representation object
+// counts are derived by subtraction: every counted object is either
+// fast or slow for each of properties and elements.
+void JSObject::SpillInformation::Print() {
+ PrintF("\n JSObject Spill Statistics (#%d):\n", number_of_objects_);
+
+ PrintF(" - fast properties (#%d): %d (used) %d (unused)\n",
+ number_of_objects_with_fast_properties_,
+ number_of_fast_used_fields_, number_of_fast_unused_fields_);
+
+ PrintF(" - slow properties (#%d): %d (used) %d (unused)\n",
+ number_of_objects_ - number_of_objects_with_fast_properties_,
+ number_of_slow_used_properties_, number_of_slow_unused_properties_);
+
+ PrintF(" - fast elements (#%d): %d (used) %d (unused)\n",
+ number_of_objects_with_fast_elements_,
+ number_of_fast_used_elements_, number_of_fast_unused_elements_);
+
+ PrintF(" - slow elements (#%d): %d (used) %d (unused)\n",
+ number_of_objects_ - number_of_objects_with_fast_elements_,
+ number_of_slow_used_elements_, number_of_slow_unused_elements_);
+
+ PrintF("\n");
+}
+
+
+// Debug dump of a DescriptorArray: walks the array with a
+// DescriptorReader and prints each descriptor with a running index.
+void DescriptorArray::PrintDescriptors() {
+ PrintF("Descriptor array %d\n", number_of_descriptors());
+ int number = 0;
+ for (DescriptorReader r(this); !r.eos(); r.advance()) {
+ Descriptor desc;
+ r.Get(&desc);
+ PrintF(" %d: ", number++);
+ desc.Print();
+ }
+ PrintF("\n");
+}
+
+
+#endif // DEBUG
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Review notes:
+//
+// - The use of macros in these inline functions may seem superfluous
+// but it is absolutely needed to make sure gcc generates optimal
+// code. gcc is not happy when attempting to inline too deep.
+//
+
+#ifndef V8_OBJECTS_INL_H_
+#define V8_OBJECTS_INL_H_
+
+#include "objects.h"
+#include "contexts.h"
+#include "conversions-inl.h"
+#include "property.h"
+
+namespace v8 { namespace internal {
+
+PropertyDetails::PropertyDetails(Smi* smi) {
+ value_ = smi->value();
+}
+
+
+Smi* PropertyDetails::AsSmi() {
+ return Smi::FromInt(value_);
+}
+
+
+#define CAST_ACCESSOR(type) \
+ type* type::cast(Object* object) { \
+ ASSERT(object->Is##type()); \
+ return reinterpret_cast<type*>(object); \
+ }
+
+
+#define INT_ACCESSORS(holder, name, offset) \
+ int holder::name() { return READ_INT_FIELD(this, offset); } \
+ void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
+
+
+#define ACCESSORS(holder, name, type, offset) \
+ type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(type* value) { \
+ WRITE_FIELD(this, offset, value); \
+ WRITE_BARRIER(this, offset); \
+ }
+
+
+#define SMI_ACCESSORS(holder, name, offset) \
+ int holder::name() { \
+ Object* value = READ_FIELD(this, offset); \
+ return Smi::cast(value)->value(); \
+ } \
+ void holder::set_##name(int value) { \
+ WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+ }
+
+
+#define BOOL_ACCESSORS(holder, field, name, offset) \
+ bool holder::name() { \
+ return BooleanBit::get(field(), offset); \
+ } \
+ void holder::set_##name(bool value) { \
+ set_##field(BooleanBit::set(field(), offset, value)); \
+ }
+
+
+bool Object::IsSmi() {
+ return HAS_SMI_TAG(this);
+}
+
+
+bool Object::IsHeapObject() {
+ return HAS_HEAP_OBJECT_TAG(this);
+}
+
+
+bool Object::IsHeapNumber() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
+}
+
+
+bool Object::IsString() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
+}
+
+
+bool Object::IsSeqString() {
+ return IsString()
+ && (String::cast(this)->representation_tag() == kSeqStringTag);
+}
+
+
+bool Object::IsAsciiString() {
+ return IsString() && (String::cast(this)->is_ascii());
+}
+
+
+bool Object::IsTwoByteString() {
+ return IsString() && (!String::cast(this)->is_ascii());
+}
+
+
+bool Object::IsConsString() {
+ return IsString()
+ && (String::cast(this)->representation_tag() == kConsStringTag);
+}
+
+
+bool Object::IsSlicedString() {
+ return IsString()
+ && (String::cast(this)->representation_tag() == kSlicedStringTag);
+}
+
+
+bool Object::IsExternalString() {
+ return IsString()
+ && (String::cast(this)->representation_tag() == kExternalStringTag);
+}
+
+
+bool Object::IsExternalAsciiString() {
+ return IsExternalString() && (String::cast(this)->is_ascii());
+}
+
+
+bool Object::IsExternalTwoByteString() {
+ return IsExternalString() && (!String::cast(this)->is_ascii());
+}
+
+
+bool Object::IsShortString() {
+ return IsString() && (String::cast(this)->size_tag() == kShortStringTag);
+}
+
+
+bool Object::IsMediumString() {
+ return IsString() && (String::cast(this)->size_tag() == kMediumStringTag);
+}
+
+
+bool Object::IsLongString() {
+ return IsString() && (String::cast(this)->size_tag() == kLongStringTag);
+}
+
+
+bool Object::IsSymbol() {
+ return IsString() && (String::cast(this)->is_symbol());
+}
+
+
+bool Object::IsNumber() {
+ return IsSmi() || IsHeapNumber();
+}
+
+
+bool Object::IsByteArray() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsFailure() {
+ return HAS_FAILURE_TAG(this);
+}
+
+
+bool Object::IsRetryAfterGC() {
+ return HAS_FAILURE_TAG(this)
+ && Failure::cast(this)->type() == Failure::RETRY_AFTER_GC;
+}
+
+
+bool Object::IsException() {
+ return this == Failure::Exception();
+}
+
+
+bool Object::IsJSObject() {
+ return IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() >= JS_OBJECT_TYPE;
+}
+
+
+bool Object::IsMap() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
+}
+
+
+bool Object::IsFixedArray() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
+}
+
+
+bool Object::IsDescriptorArray() {
+ return IsFixedArray();
+}
+
+
+bool Object::IsContext() {
+ return Object::IsHeapObject()
+ && (HeapObject::cast(this)->map() == Heap::context_map() ||
+ HeapObject::cast(this)->map() == Heap::global_context_map());
+}
+
+
+bool Object::IsGlobalContext() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map() == Heap::global_context_map();
+}
+
+
+bool Object::IsJSFunction() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
+}
+
+
+template <> static inline bool Is<JSFunction>(Object* obj) {
+ return obj->IsJSFunction();
+}
+
+
+bool Object::IsCode() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
+}
+
+
+bool Object::IsOddball() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
+}
+
+
+bool Object::IsSharedFunctionInfo() {
+ return Object::IsHeapObject() &&
+ (HeapObject::cast(this)->map()->instance_type() ==
+ SHARED_FUNCTION_INFO_TYPE);
+}
+
+
+bool Object::IsJSValue() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
+}
+
+
+bool Object::IsProxy() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
+}
+
+
+bool Object::IsBoolean() {
+ return IsTrue() || IsFalse();
+}
+
+
+bool Object::IsJSArray() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
+}
+
+
+template <> static inline bool Is<JSArray>(Object* obj) {
+ return obj->IsJSArray();
+}
+
+
+bool Object::IsHashTable() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map() == Heap::hash_table_map();
+}
+
+
+bool Object::IsDictionary() {
+ return IsHashTable() && this != Heap::symbol_table();
+}
+
+
+bool Object::IsSymbolTable() {
+ return IsHashTable() && this == Heap::symbol_table();
+}
+
+
+bool Object::IsPrimitive() {
+ return IsOddball() || IsNumber() || IsString();
+}
+
+
+bool Object::IsGlobalObject() {
+ return IsHeapObject() &&
+ ((HeapObject::cast(this)->map()->instance_type() ==
+ JS_GLOBAL_OBJECT_TYPE) ||
+ (HeapObject::cast(this)->map()->instance_type() ==
+ JS_BUILTINS_OBJECT_TYPE));
+}
+
+
+// True if this is a JSGlobalObject proper (not a builtins object). The
+// DEBUG-only block additionally asserts the invariant that every global
+// object's map has the needs-access-check bit set.
+bool Object::IsJSGlobalObject() {
+#ifdef DEBUG
+ if (IsHeapObject() &&
+ (HeapObject::cast(this)->map()->instance_type() ==
+ JS_GLOBAL_OBJECT_TYPE)) {
+ ASSERT(IsAccessCheckNeeded());
+ }
+#endif
+ return IsHeapObject() &&
+ (HeapObject::cast(this)->map()->instance_type() ==
+ JS_GLOBAL_OBJECT_TYPE);
+}
+
+
+bool Object::IsJSBuiltinsObject() {
+ return IsHeapObject() &&
+ (HeapObject::cast(this)->map()->instance_type() ==
+ JS_BUILTINS_OBJECT_TYPE);
+}
+
+
+bool Object::IsUndetectableObject() {
+ return IsHeapObject()
+ && HeapObject::cast(this)->map()->is_undetectable();
+}
+
+
+bool Object::IsAccessCheckNeeded() {
+ return IsHeapObject()
+ && HeapObject::cast(this)->map()->needs_access_check();
+}
+
+
+// True if this object's instance type is one of the struct types
+// enumerated by STRUCT_LIST; the macro expands to one case per type.
+bool Object::IsStruct() {
+ if (!IsHeapObject()) return false;
+ switch (HeapObject::cast(this)->map()->instance_type()) {
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ default: return false;
+ }
+}
+
+
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() { \
+ return Object::IsHeapObject() \
+ && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
+ }
+ STRUCT_LIST(MAKE_STRUCT_PREDICATE)
+#undef MAKE_STRUCT_PREDICATE
+
+
+bool Object::IsUndefined() {
+ return this == Heap::undefined_value();
+}
+
+
+bool Object::IsTheHole() {
+ return this == Heap::the_hole_value();
+}
+
+
+bool Object::IsNull() {
+ return this == Heap::null_value();
+}
+
+
+bool Object::IsTrue() {
+ return this == Heap::true_value();
+}
+
+
+bool Object::IsFalse() {
+ return this == Heap::false_value();
+}
+
+
+double Object::Number() {
+ ASSERT(IsNumber());
+ return IsSmi()
+ ? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
+ : reinterpret_cast<HeapNumber*>(this)->value();
+}
+
+
+
+// Converts this object to a smi if it represents a smi-representable
+// integer: smis pass through, heap numbers convert when the double
+// round-trips through int and fits the smi range. Anything else yields
+// Failure::Exception().
+Object* Object::ToSmi() {
+ if (IsSmi()) return this;
+ if (IsHeapNumber()) {
+ double value = HeapNumber::cast(this)->value();
+ int int_value = FastD2I(value);
+ if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+ return Smi::FromInt(int_value);
+ }
+ }
+ return Failure::Exception();
+}
+
+
+Object* Object::GetElement(uint32_t index) {
+ return GetElementWithReceiver(this, index);
+}
+
+
+Object* Object::GetProperty(String* key) {
+ PropertyAttributes attributes;
+ return GetPropertyWithReceiver(this, key, &attributes);
+}
+
+
+Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
+ return GetPropertyWithReceiver(this, key, attributes);
+}
+
+
+#define FIELD_ADDR(p, offset) \
+ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+#define READ_FIELD(p, offset) \
+ (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FIELD(p, offset, value) \
+ (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+
+#define WRITE_BARRIER(object, offset) \
+ Heap::RecordWrite(object->address(), offset);
+
+#define READ_DOUBLE_FIELD(p, offset) \
+ (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_DOUBLE_FIELD(p, offset, value) \
+ (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT_FIELD(p, offset) \
+ (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT_FIELD(p, offset, value) \
+ (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_SHORT_FIELD(p, offset) \
+ (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_SHORT_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_BYTE_FIELD(p, offset) \
+ (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_BYTE_FIELD(p, offset, value) \
+ (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
+
+
+Object* HeapObject::GetHeapObjectField(HeapObject* obj, int index) {
+ return READ_FIELD(obj, HeapObject::kSize + kPointerSize * index);
+}
+
+
+// Recovers the integer payload by shifting out the smi tag bits from
+// the pointer representation (smis are immediates, not heap objects).
+int Smi::value() {
+ return reinterpret_cast<int>(this) >> kSmiTagSize;
+}
+
+
+// Encodes an int as a tagged smi pointer. The caller must pass a value
+// in smi range; this is asserted, not checked, in release builds.
+Smi* Smi::FromInt(int value) {
+ ASSERT(Smi::IsValid(value));
+ return reinterpret_cast<Smi*>((value << kSmiTagSize) | kSmiTag);
+}
+
+
+Failure::Type Failure::type() const {
+ return static_cast<Type>(value() & kFailureTypeTagMask);
+}
+
+
+bool Failure::IsInternalError() const {
+ return type() == INTERNAL_ERROR;
+}
+
+
+bool Failure::IsOutOfMemoryException() const {
+ return type() == OUT_OF_MEMORY_EXCEPTION;
+}
+
+
+int Failure::requested() const {
+ const int kShiftBits =
+ kFailureTypeTagSize + kSpaceTagSize - kObjectAlignmentBits;
+ STATIC_ASSERT(kShiftBits >= 0);
+ ASSERT(type() == RETRY_AFTER_GC);
+ return value() >> kShiftBits;
+}
+
+
+AllocationSpace Failure::allocation_space() const {
+ ASSERT_EQ(RETRY_AFTER_GC, type());
+ return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
+ & kSpaceTagMask);
+}
+
+
+Failure* Failure::InternalError() {
+ return Construct(INTERNAL_ERROR);
+}
+
+
+Failure* Failure::Exception() {
+ return Construct(EXCEPTION);
+}
+
+Failure* Failure::OutOfMemoryException() {
+ return Construct(OUT_OF_MEMORY_EXCEPTION);
+}
+
+
+int Failure::value() const {
+ return reinterpret_cast<int>(this) >> kFailureTagSize;
+}
+
+
+// Builds a tagged Failure immediate: the payload and type tag are
+// packed into an int, which is then shifted and tagged the same way a
+// smi would be, so Smi::IsValid bounds the representable info.
+Failure* Failure::Construct(Type type, int value) {
+ int info = (value << kFailureTypeTagSize) | type;
+ ASSERT(Smi::IsValid(info)); // Same validation check as in Smi
+ return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
+}
+
+
+// Range check for smi encoding. The overflow-safe unsigned computation
+// is the real test; in_range exists only under DEBUG to cross-check it
+// against the naive comparison via the ASSERT below.
+bool Smi::IsValid(int value) {
+#ifdef DEBUG
+ bool in_range = (value >= kMinValue) && (value <= kMaxValue);
+#endif
+ // To be representable as a tagged small integer, the two
+ // most-significant bits of 'value' must be either 00 or 11 due to
+ // sign-extension. To check this we add 01 to the two
+ // most-significant bits, and check if the most-significant bit is 0
+ //
+ // CAUTION: The original code below:
+ // bool result = ((value + 0x40000000) & 0x80000000) == 0;
+ // may lead to incorrect results according to the C language spec, and
+ // in fact doesn't work correctly with gcc4.1.1 in some cases: The
+ // compiler may produce undefined results in case of signed integer
+ // overflow. The computation must be done w/ unsigned ints.
+ bool result =
+ ((static_cast<unsigned int>(value) + 0x40000000U) & 0x80000000U) == 0;
+ ASSERT(result == in_range);
+ return result;
+}
+
+
+#ifdef DEBUG
+void HeapObject::VerifyObjectField(int offset) {
+ VerifyPointer(READ_FIELD(this, offset));
+}
+#endif
+
+
+Map* HeapObject::map() {
+ return reinterpret_cast<Map*> READ_FIELD(this, kMapOffset);
+}
+
+
+void HeapObject::set_map(Map* value) {
+ WRITE_FIELD(this, kMapOffset, value);
+}
+
+
+
+
+HeapObject* HeapObject::FromAddress(Address address) {
+ ASSERT_TAG_ALIGNED(address);
+ return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
+}
+
+
+Address HeapObject::address() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag;
+}
+
+
+int HeapObject::Size() {
+ return SizeFromMap(map());
+}
+
+
+void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
+ v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
+ reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
+}
+
+
+void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
+ v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
+}
+
+
+// Copies every field after the HeapObject header from 'from' into this
+// object, which must share the same map (and hence the same size).
+// Each store is followed by an explicit write barrier.
+void HeapObject::CopyBody(JSObject* from) {
+ ASSERT(map() == from->map());
+ ASSERT(Size() == from->Size());
+ int object_size = Size();
+ for (int offset = kSize; offset < object_size; offset += kPointerSize) {
+ Object* value = READ_FIELD(from, offset);
+ // Note: WRITE_FIELD does not update the write barrier.
+ WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(this, offset);
+ }
+}
+
+
+double HeapNumber::value() {
+ return READ_DOUBLE_FIELD(this, kValueOffset);
+}
+
+
+void HeapNumber::set_value(double value) {
+ WRITE_DOUBLE_FIELD(this, kValueOffset, value);
+}
+
+
+ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
+ACCESSORS(JSObject, elements, HeapObject, kElementsOffset)
+
+
+void JSObject::initialize_properties() {
+ ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+ WRITE_FIELD(this, kPropertiesOffset, Heap::empty_fixed_array());
+}
+
+
+void JSObject::initialize_elements() {
+ ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+ WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array());
+}
+
+
+ACCESSORS(Oddball, to_string, String, kToStringOffset)
+ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+
+
+// Returns the size of this object's fixed header, which depends on the
+// concrete instance type; internal fields (if any) start right after
+// it. Unknown types trip UNREACHABLE() in debug builds.
+int JSObject::GetHeaderSize() {
+ switch (map()->instance_type()) {
+ case JS_GLOBAL_OBJECT_TYPE:
+ return JSGlobalObject::kSize;
+ case JS_BUILTINS_OBJECT_TYPE:
+ return JSBuiltinsObject::kSize;
+ case JS_FUNCTION_TYPE:
+ return JSFunction::kSize;
+ case JS_VALUE_TYPE:
+ return JSValue::kSize;
+ case JS_ARRAY_TYPE:
+ // Previously returned JSValue::kSize (copy/paste slip). Use the
+ // array's own layout constant so this stays correct even if the
+ // two layouts ever diverge.
+ return JSArray::kSize;
+ case JS_OBJECT_TYPE:
+ return JSObject::kHeaderSize;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+
+// Number of pointer-sized internal (embedder) fields after the header.
+int JSObject::GetInternalFieldCount() {
+ ASSERT(1 << kPointerSizeLog2 == kPointerSize);
+ return (Size() - GetHeaderSize()) >> kPointerSizeLog2;
+}
+
+
+// Reads internal field |index| (bounds are checked in debug mode only).
+Object* JSObject::GetInternalField(int index) {
+ ASSERT(index < GetInternalFieldCount() && index >= 0);
+ return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
+}
+
+
+// Writes internal field |index|, recording the store for the GC.
+void JSObject::SetInternalField(int index, Object* value) {
+ ASSERT(index < GetInternalFieldCount() && index >= 0);
+ int offset = GetHeaderSize() + (kPointerSize * index);
+ WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(this, offset);
+}
+
+
+// Fills all in-object fields (after the header) with undefined.
+void JSObject::InitializeBody(int object_size) {
+ for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
+ WRITE_FIELD(this, offset, Heap::undefined_value());
+ }
+}
+
+
+// Fills all fields of a Struct subclass instance with undefined.
+void Struct::InitializeBody(int object_size) {
+ for (int offset = kSize; offset < object_size; offset += kPointerSize) {
+ WRITE_FIELD(this, offset, Heap::undefined_value());
+ }
+}
+
+
+// Fast-mode properties are stored in a plain array; slow mode uses a
+// Dictionary.
+bool JSObject::HasFastProperties() {
+ return !properties()->IsDictionary();
+}
+
+
+// Attempts to convert |object| into an array index.  Succeeds for
+// non-negative Smis and for heap numbers whose value round-trips through
+// uint32_t exactly; stores the result in *index and returns true.
+bool Array::IndexFromObject(Object* object, uint32_t* index) {
+ if (object->IsSmi()) {
+ int value = Smi::cast(object)->value();
+ if (value < 0) return false;
+ *index = value;
+ return true;
+ }
+ if (object->IsHeapNumber()) {
+ double value = HeapNumber::cast(object)->value();
+ uint32_t uint_value = static_cast<uint32_t>(value);
+ // Equality after the round trip rejects fractions, negatives and
+ // out-of-range values in one test.
+ if (value == static_cast<double>(uint_value)) {
+ *index = uint_value;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+// True if this is a String wrapper object (JSValue) whose wrapped string
+// has a character at position |index|.
+bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
+ if (!this->IsJSValue()) return false;
+
+ JSValue* js_value = JSValue::cast(this);
+ if (!js_value->value()->IsString()) return false;
+
+ String* str = String::cast(js_value->value());
+ // static_cast for consistency with the rest of this file; lengths are
+ // non-negative, so the conversion is value-preserving.
+ if (index >= static_cast<uint32_t>(str->length())) return false;
+
+ return true;
+}
+
+
+// Reads element |index| (bounds checked in debug mode only).
+Object* FixedArray::get(int index) {
+ ASSERT(index >= 0 && index < this->length());
+ return READ_FIELD(this, kHeaderSize + index * kPointerSize);
+}
+
+
+// Writes element |index| with an unconditional write barrier.
+void FixedArray::set(int index, Object* value) {
+ ASSERT(index >= 0 && index < this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(this, offset);
+}
+
+
+// Arrays in new space never need write barriers, so callers can compute
+// the mode once and reuse it for batched stores.
+FixedArray::WriteBarrierMode FixedArray::GetWriteBarrierMode() {
+ if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
+ return UPDATE_WRITE_BARRIER;
+}
+
+
+// Writes element |index|, applying the write barrier only when |mode|
+// requests it; debug mode verifies that skipping is actually safe.
+void FixedArray::set(int index,
+ Object* value,
+ FixedArray::WriteBarrierMode mode) {
+ ASSERT(index >= 0 && index < this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ WRITE_FIELD(this, offset, value);
+ if (mode == UPDATE_WRITE_BARRIER) {
+ WRITE_BARRIER(this, offset);
+ } else {
+ ASSERT(mode == SKIP_WRITE_BARRIER);
+ ASSERT(Heap::InNewSpace(this) || !Heap::InNewSpace(value));
+ }
+}
+
+
+// Raw store with no write barrier; only valid when the caller knows the
+// store cannot create an old-to-new pointer.
+void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
+ ASSERT(index >= 0 && index < array->length());
+ WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+}
+
+
+// Stores undefined; barrier-free because undefined is never in new space.
+void FixedArray::set_undefined(int index) {
+ ASSERT(index >= 0 && index < this->length());
+ ASSERT(!Heap::InNewSpace(Heap::undefined_value()));
+ WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
+ Heap::undefined_value());
+}
+
+
+// Stores the hole; barrier-free because the hole is never in new space.
+void FixedArray::set_the_hole(int index) {
+ ASSERT(index >= 0 && index < this->length());
+ ASSERT(!Heap::InNewSpace(Heap::the_hole_value()));
+ WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::the_hole_value());
+}
+
+
+// Swaps two elements without write barriers (see fast_set's contract).
+void DescriptorArray::fast_swap(FixedArray* array, int first, int second) {
+ Object* tmp = array->get(first);
+ fast_set(array, first, array->get(second));
+ fast_set(array, second, tmp);
+}
+
+
+// Finds the descriptor index for |name|, or kNotFound.
+int DescriptorArray::Search(String* name) {
+ SLOW_ASSERT(IsSortedNoDuplicates());
+
+ // Check for empty descriptor array.
+ int nof = number_of_descriptors();
+ if (nof == 0) return kNotFound;
+
+ // Fast case: do linear search for small arrays.  Symbols are unique, so
+ // pointer comparison is sufficient.
+ const int kMaxElementsForLinearSearch = 8;
+ if (name->IsSymbol() && nof < kMaxElementsForLinearSearch) {
+ for (int number = 0; number < nof; number++) {
+ if (name == GetKey(number)) return number;
+ }
+ return kNotFound;
+ }
+
+ // Slow case: perform binary search.
+ return BinarySearch(name, 0, nof - 1);
+}
+
+
+
+// Returns the key (property name) of descriptor |descriptor_number|.
+String* DescriptorArray::GetKey(int descriptor_number) {
+ ASSERT(descriptor_number < number_of_descriptors());
+ return String::cast(get(ToKeyIndex(descriptor_number)));
+}
+
+
+// Returns the value of descriptor |descriptor_number| from the content
+// array.
+Object* DescriptorArray::GetValue(int descriptor_number) {
+ ASSERT(descriptor_number < number_of_descriptors());
+ return GetContentArray()->get(ToValueIndex(descriptor_number));
+}
+
+
+// Returns the Smi-encoded PropertyDetails of descriptor
+// |descriptor_number|.
+Smi* DescriptorArray::GetDetails(int descriptor_number) {
+ ASSERT(descriptor_number < number_of_descriptors());
+ return Smi::cast(GetContentArray()->get(ToDetailsIndex(descriptor_number)));
+}
+
+
+// Copies descriptor |descriptor_number| into |desc|.
+void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
+ desc->Init(GetKey(descriptor_number),
+ GetValue(descriptor_number),
+ GetDetails(descriptor_number));
+}
+
+
+// Stores |desc| as descriptor |descriptor_number|.  Uses barrier-free
+// stores, which is only safe because of the new-space asserts below.
+void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+ // Range check.
+ ASSERT(descriptor_number < number_of_descriptors());
+
+ // Make sure none of the elements in desc are in new space.
+ ASSERT(!Heap::InNewSpace(desc->GetKey()));
+ ASSERT(!Heap::InNewSpace(desc->GetValue()));
+
+ fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
+ FixedArray* content_array = GetContentArray();
+ fast_set(content_array, ToValueIndex(descriptor_number), desc->GetValue());
+ fast_set(content_array, ToDetailsIndex(descriptor_number),
+ desc->GetDetails().AsSmi());
+}
+
+
+// Swaps descriptors |first| and |second| (key, value and details).
+void DescriptorArray::Swap(int first, int second) {
+ fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
+ FixedArray* content_array = GetContentArray();
+ fast_swap(content_array, ToValueIndex(first), ToValueIndex(second));
+ fast_swap(content_array, ToDetailsIndex(first), ToDetailsIndex(second));
+}
+
+
+// The prefix slot packs a flag bit plus the maximum numeric key seen.
+// True when the flag bit forces slow (dictionary) element handling.
+bool Dictionary::requires_slow_elements() {
+ Object* max_index_object = get(kPrefixStartIndex);
+ if (!max_index_object->IsSmi()) return false;
+ return 0 !=
+ (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
+}
+
+
+// Returns the largest numeric key stored, shifted past the flag bits.
+// Only meaningful when requires_slow_elements() is false.
+uint32_t Dictionary::max_number_key() {
+ ASSERT(!requires_slow_elements());
+ Object* max_index_object = get(kPrefixStartIndex);
+ if (!max_index_object->IsSmi()) return 0;
+ uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
+ return value >> kRequiresSlowElementsTagSize;
+}
+
+
+// ------------------------------------
+// Cast operations
+
+
+// Debug-checked downcasts (Type::cast) for each heap object type.
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(Dictionary)
+CAST_ACCESSOR(SymbolTable)
+CAST_ACCESSOR(String)
+CAST_ACCESSOR(SeqString)
+CAST_ACCESSOR(AsciiString)
+CAST_ACCESSOR(TwoByteString)
+CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(ExternalString)
+CAST_ACCESSOR(ExternalAsciiString)
+CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(Smi)
+CAST_ACCESSOR(Failure)
+CAST_ACCESSOR(HeapObject)
+CAST_ACCESSOR(HeapNumber)
+CAST_ACCESSOR(Oddball)
+CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(Map)
+CAST_ACCESSOR(JSFunction)
+CAST_ACCESSOR(JSGlobalObject)
+CAST_ACCESSOR(JSBuiltinsObject)
+CAST_ACCESSOR(Code)
+CAST_ACCESSOR(JSArray)
+CAST_ACCESSOR(Proxy)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(Struct)
+
+
+// Generate a cast accessor for every Struct subclass in STRUCT_LIST.
+#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
+ STRUCT_LIST(MAKE_STRUCT_CAST)
+#undef MAKE_STRUCT_CAST
+
+// Templated by prefix/element size, so the cast must be written out by
+// hand rather than generated via CAST_ACCESSOR.
+template <int prefix_size, int elem_size>
+HashTable<prefix_size, elem_size>* HashTable<prefix_size, elem_size>::cast(
+ Object* obj) {
+ ASSERT(obj->IsHashTable());
+ return reinterpret_cast<HashTable*>(obj);
+}
+
+
+// Array length is stored as a raw (non-Smi) int field.
+INT_ACCESSORS(Array, length, kLengthOffset)
+
+
+// Content equality.  Symbols are unique, so two distinct symbols can
+// never be equal; everything else falls back to the slow comparison.
+bool String::Equals(String* other) {
+ if (other == this) return true;
+ if (IsSymbol() && other->IsSymbol()) return false;
+ return SlowEquals(other);
+}
+
+
+// The length field packs the character count above per-size-class flag
+// bits; the shift amount depends on the string's size tag.
+int String::length() {
+ uint32_t len = READ_INT_FIELD(this, kLengthOffset);
+
+ switch (size_tag()) {
+ case kShortStringTag:
+ return len >> kShortLengthShift;
+ case kMediumStringTag:
+ return len >> kMediumLengthShift;
+ case kLongStringTag:
+ return len >> kLongLengthShift;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+// Writes the packed length field; note this clears the flag bits stored
+// below the shifted length (e.g. a cached hash).
+void String::set_length(int value) {
+ switch (size_tag()) {
+ case kShortStringTag:
+ WRITE_INT_FIELD(this, kLengthOffset, value << kShortLengthShift);
+ break;
+ case kMediumStringTag:
+ WRITE_INT_FIELD(this, kLengthOffset, value << kMediumLengthShift);
+ break;
+ case kLongStringTag:
+ WRITE_INT_FIELD(this, kLengthOffset, value << kLongLengthShift);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+// Raw access to the packed length/flags word.
+int String::length_field() {
+ return READ_INT_FIELD(this, kLengthOffset);
+}
+
+
+void String::set_length_field(int value) {
+ WRITE_INT_FIELD(this, kLengthOffset, value);
+}
+
+
+// Best-effort flattening; simply delegates to Flatten().
+void String::TryFlatten() {
+ Flatten();
+}
+
+
+// Returns the character at |index|, dispatching on the string's
+// representation (sequential, cons, sliced or external).
+uint16_t String::Get(int index) {
+ ASSERT(index >= 0 && index < length());
+ switch (representation_tag()) {
+ case kSeqStringTag:
+ return is_ascii()
+ ? AsciiString::cast(this)->AsciiStringGet(index)
+ : TwoByteString::cast(this)->TwoByteStringGet(index);
+ case kConsStringTag:
+ return ConsString::cast(this)->ConsStringGet(index);
+ case kSlicedStringTag:
+ return SlicedString::cast(this)->SlicedStringGet(index);
+ case kExternalStringTag:
+ return is_ascii()
+ ? ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index)
+ : ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return 0;
+}
+
+
+// Writes the character at |index|; only valid for sequential strings.
+void String::Set(int index, uint16_t value) {
+ ASSERT(index >= 0 && index < length());
+ ASSERT(IsSeqString());
+
+ return is_ascii()
+ ? AsciiString::cast(this)->AsciiStringSet(index, value)
+ : TwoByteString::cast(this)->TwoByteStringSet(index, value);
+}
+
+
+bool String::IsAscii() {
+ return is_ascii();
+}
+
+
+bool String::StringIsConsString() {
+ return representation_tag() == kConsStringTag;
+}
+
+
+bool String::StringIsSlicedString() {
+ return representation_tag() == kSlicedStringTag;
+}
+
+
+// The size, symbol, encoding and representation bits are all encoded in
+// the map's instance type; the static *_map helpers exist so GC code can
+// query a map directly while object headers are encoded.
+uint32_t String::size_tag() {
+ return map_size_tag(map());
+}
+
+
+uint32_t String::map_size_tag(Map* map) {
+ return map->instance_type() & kStringSizeMask;
+}
+
+
+bool String::is_symbol() {
+ return is_symbol_map(map());
+}
+
+
+bool String::is_symbol_map(Map* map) {
+ return (map->instance_type() & kIsSymbolMask) != 0;
+}
+
+
+bool String::is_ascii() {
+ return is_ascii_map(map());
+}
+
+
+bool String::is_ascii_map(Map* map) {
+ return (map->instance_type() & kStringEncodingMask) != 0;
+}
+
+
+StringRepresentationTag String::representation_tag() {
+ return map_representation_tag(map());
+}
+
+
+StringRepresentationTag String::map_representation_tag(Map* map) {
+ uint32_t tag = map->instance_type() & kStringRepresentationMask;
+ return static_cast<StringRepresentationTag>(tag);
+}
+
+
+// Returns true if this string is "flat": reading its characters requires
+// no traversal through cons or sliced indirections (a cons string counts
+// as flat when its second half is empty).
+bool String::IsFlat() {
+ String* current = this;
+ while (true) {
+ switch (current->representation_tag()) {
+ case kConsStringTag:
+ return String::cast(ConsString::cast(current)->second())->length() == 0;
+ case kSlicedStringTag:
+ // Fixed: advance through |current|, not |this|.  Casting |this|
+ // re-read the same buffer each iteration, which never terminates
+ // when that buffer is itself a sliced string.
+ current = String::cast(SlicedString::cast(current)->buffer());
+ break;
+ default:
+ return true;
+ }
+ }
+}
+
+
+// One-byte character read from a sequential ASCII string.
+uint16_t AsciiString::AsciiStringGet(int index) {
+ ASSERT(index >= 0 && index < length());
+ return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+
+// One-byte character write; the value must fit in the ASCII range.
+void AsciiString::AsciiStringSet(int index, uint16_t value) {
+ ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
+ WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
+ static_cast<byte>(value));
+}
+
+
+// Address of the first character, for bulk operations.
+Address AsciiString::GetCharsAddress() {
+ return FIELD_ADDR(this, kHeaderSize);
+}
+
+
+// Two-byte character read from a sequential two-byte string.
+uint16_t TwoByteString::TwoByteStringGet(int index) {
+ ASSERT(index >= 0 && index < length());
+ return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
+}
+
+
+void TwoByteString::TwoByteStringSet(int index, uint16_t value) {
+ ASSERT(index >= 0 && index < length());
+ WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
+}
+
+
+// Object size in bytes for a two-byte string with the given map.
+int TwoByteString::TwoByteStringSize(Map* map) {
+ uint32_t length = READ_INT_FIELD(this, kLengthOffset);
+
+ // Use the map (and not 'this') to compute the size tag, since
+ // TwoByteStringSize is called during GC when maps are encoded.
+ switch (map_size_tag(map)) {
+ case kShortStringTag:
+ length = length >> kShortLengthShift;
+ break;
+ case kMediumStringTag:
+ length = length >> kMediumLengthShift;
+ break;
+ case kLongStringTag:
+ length = length >> kLongLengthShift;
+ break;
+ default:
+ // NOTE(review): the default case silently leaves the raw field value
+ // unshifted — presumably unreachable for valid maps; confirm.
+ break;
+ }
+ return SizeFor(length);
+}
+
+
+// Object size in bytes for an ASCII string with the given map.
+int AsciiString::AsciiStringSize(Map* map) {
+ uint32_t length = READ_INT_FIELD(this, kLengthOffset);
+
+ // Use the map (and not 'this') to compute the size tag, since
+ // AsciiStringSize is called during GC when maps are encoded.
+ switch (map_size_tag(map)) {
+ case kShortStringTag:
+ length = length >> kShortLengthShift;
+ break;
+ case kMediumStringTag:
+ length = length >> kMediumLengthShift;
+ break;
+ case kLongStringTag:
+ length = length >> kLongLengthShift;
+ break;
+ default:
+ break;
+ }
+
+ return SizeFor(length);
+}
+
+
+// First (left) half of a cons string.
+Object* ConsString::first() {
+ return READ_FIELD(this, kFirstOffset);
+}
+
+
+void ConsString::set_first(Object* value) {
+ WRITE_FIELD(this, kFirstOffset, value);
+ WRITE_BARRIER(this, kFirstOffset);
+}
+
+
+// Second (right) half of a cons string; empty when the string is flat.
+Object* ConsString::second() {
+ return READ_FIELD(this, kSecondOffset);
+}
+
+
+void ConsString::set_second(Object* value) {
+ WRITE_FIELD(this, kSecondOffset, value);
+ WRITE_BARRIER(this, kSecondOffset);
+}
+
+
+// Backing string that a sliced string is a view into.
+Object* SlicedString::buffer() {
+ return READ_FIELD(this, kBufferOffset);
+}
+
+
+void SlicedString::set_buffer(Object* buffer) {
+ WRITE_FIELD(this, kBufferOffset, buffer);
+ WRITE_BARRIER(this, kBufferOffset);
+}
+
+
+// Start offset of the slice within its buffer.
+int SlicedString::start() {
+ return READ_INT_FIELD(this, kStartOffset);
+}
+
+
+void SlicedString::set_start(int start) {
+ WRITE_INT_FIELD(this, kStartOffset, start);
+}
+
+
+// The resource pointer is a raw C++ pointer (not a heap object), so it is
+// read and written directly without write barriers.
+ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+ return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+}
+
+
+void ExternalAsciiString::set_resource(
+ ExternalAsciiString::Resource* resource) {
+ *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+}
+
+
+ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+ return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+}
+
+
+void ExternalTwoByteString::set_resource(
+ ExternalTwoByteString::Resource* resource) {
+ *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+}
+
+
+// Byte-granularity element access (debug-mode bounds checks).
+byte ByteArray::get(int index) {
+ ASSERT(index >= 0 && index < this->length());
+ return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+
+void ByteArray::set(int index, byte value) {
+ ASSERT(index >= 0 && index < this->length());
+ WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
+}
+
+
+// Reads an int at int-granularity index |index| (not a byte offset).
+int ByteArray::get_int(int index) {
+ ASSERT(index >= 0 && (index * kIntSize) < this->length());
+ return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
+}
+
+
+// Recovers the tagged ByteArray pointer from the address of its data.
+ByteArray* ByteArray::FromDataStartAddress(Address address) {
+ ASSERT_TAG_ALIGNED(address);
+ return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
+}
+
+
+// Inverse of FromDataStartAddress: the untagged address of the data.
+Address ByteArray::GetDataStartAddress() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
+}
+
+
+// Instance size in bytes, stored as a single byte in the map.
+int Map::instance_size() {
+ return READ_BYTE_FIELD(this, kInstanceSizeOffset);
+}
+
+
+// Computes the object's size from its map, inlining only the two most
+// frequent cases; everything else goes through SlowSizeFromMap.
+int HeapObject::SizeFromMap(Map* map) {
+ InstanceType instance_type = map->instance_type();
+ // Only inline the two most frequent cases.
+ if (instance_type == JS_OBJECT_TYPE) return map->instance_size();
+ if (instance_type == FIXED_ARRAY_TYPE) {
+ return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
+ }
+ // Otherwise do the general size computation.
+ return SlowSizeFromMap(map);
+}
+
+
+// The size must fit in the single byte reserved for it.
+void Map::set_instance_size(int value) {
+ ASSERT(0 <= value && value < 256);
+ WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
+}
+
+
+InstanceType Map::instance_type() {
+ return static_cast<InstanceType>(READ_BYTE_FIELD(this, kInstanceTypeOffset));
+}
+
+
+void Map::set_instance_type(InstanceType value) {
+ ASSERT(0 <= value && value < 256);
+ WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
+}
+
+
+int Map::unused_property_fields() {
+ return READ_BYTE_FIELD(this, kUnusedPropertyFieldsOffset);
+}
+
+
+// Clamped to 255 so the count fits in its byte-sized field.
+void Map::set_unused_property_fields(int value) {
+ WRITE_BYTE_FIELD(this, kUnusedPropertyFieldsOffset, Min(value, 255));
+}
+
+
+byte Map::bit_field() {
+ return READ_BYTE_FIELD(this, kBitFieldOffset);
+}
+
+
+void Map::set_bit_field(byte value) {
+ WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
+}
+
+
+// Sets/clears the bit recording that the function's "prototype" property
+// is not a JSObject (the value then lives in the map's constructor slot).
+void Map::set_non_instance_prototype(bool value) {
+ if (value) {
+ set_bit_field(bit_field() | (1 << kHasNonInstancePrototype));
+ } else {
+ set_bit_field(bit_field() & ~(1 << kHasNonInstancePrototype));
+ }
+}
+
+
+bool Map::has_non_instance_prototype() {
+ return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
+}
+
+
+// The flags word packs kind, IC state, property type and argument count;
+// the Extract*/Compute* helpers below define the encoding.
+Code::Flags Code::flags() {
+ return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
+}
+
+
+void Code::set_flags(Code::Flags flags) {
+ // Make sure that all call stubs have an arguments count.
+ ASSERT(ExtractKindFromFlags(flags) != CALL_IC ||
+ ExtractArgumentsCountFromFlags(flags) >= 0);
+ WRITE_INT_FIELD(this, kFlagsOffset, flags);
+}
+
+
+Code::Kind Code::kind() {
+ return ExtractKindFromFlags(flags());
+}
+
+
+InlineCacheState Code::state() {
+ InlineCacheState result = ExtractStateFromFlags(flags());
+ // Only allow uninitialized or debugger states for non-IC code
+ // objects. This is used in the debugger to determine whether or not
+ // a call to code object has been replaced with a debug break call.
+ ASSERT(is_inline_cache_stub() ||
+ result == UNINITIALIZED ||
+ result == DEBUG_BREAK ||
+ result == DEBUG_PREPARE_STEP_IN);
+ return result;
+}
+
+
+// The property type is only meaningful for monomorphic IC stubs.
+PropertyType Code::type() {
+ ASSERT(state() == MONOMORPHIC);
+ return ExtractTypeFromFlags(flags());
+}
+
+
+int Code::arguments_count() {
+ ASSERT(is_call_stub() || kind() == STUB);
+ return ExtractArgumentsCountFromFlags(flags());
+}
+
+
+// For STUB code the state and type bits are reused to store the stub's
+// major key (3 low bits from state, the rest from type).
+CodeStub::Major Code::major_key() {
+ // TODO(1238541): Simplify this somewhat complicated encoding.
+ ASSERT(kind() == STUB);
+ int low = ExtractStateFromFlags(flags());
+ int high = ExtractTypeFromFlags(flags());
+ return static_cast<CodeStub::Major>(high << 3 | low);
+}
+
+
+// IC kinds occupy a contiguous range of the Kind enum.
+bool Code::is_inline_cache_stub() {
+ Kind kind = this->kind();
+ return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
+}
+
+
+// Packs the four components into a flags word; debug mode verifies the
+// encoding round-trips through the extractors.
+Code::Flags Code::ComputeFlags(Kind kind,
+ InlineCacheState state,
+ PropertyType type,
+ int argc) {
+ // Compute the bit mask.
+ int bits = kind << kFlagsKindShift;
+ bits |= state << kFlagsStateShift;
+ bits |= type << kFlagsTypeShift;
+ bits |= argc << kFlagsArgumentsCountShift;
+ // Cast to flags and validate result before returning it.
+ Flags result = static_cast<Flags>(bits);
+ ASSERT(ExtractKindFromFlags(result) == kind);
+ ASSERT(ExtractStateFromFlags(result) == state);
+ ASSERT(ExtractTypeFromFlags(result) == type);
+ ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
+ return result;
+}
+
+
+Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
+ PropertyType type,
+ int argc) {
+ return ComputeFlags(kind, MONOMORPHIC, type, argc);
+}
+
+
+Code::Kind Code::ExtractKindFromFlags(Flags flags) {
+ int bits = (flags & kFlagsKindMask) >> kFlagsKindShift;
+ return static_cast<Kind>(bits);
+}
+
+
+InlineCacheState Code::ExtractStateFromFlags(Flags flags) {
+ int bits = (flags & kFlagsStateMask) >> kFlagsStateShift;
+ return static_cast<InlineCacheState>(bits);
+}
+
+
+PropertyType Code::ExtractTypeFromFlags(Flags flags) {
+ int bits = (flags & kFlagsTypeMask) >> kFlagsTypeShift;
+ return static_cast<PropertyType>(bits);
+}
+
+
+int Code::ExtractArgumentsCountFromFlags(Flags flags) {
+ return (flags & kFlagsArgumentsCountMask) >> kFlagsArgumentsCountShift;
+}
+
+
+// Clears the type bits, e.g. for type-independent cache lookups.
+Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
+ int bits = flags & ~kFlagsTypeMask;
+ return static_cast<Flags>(bits);
+}
+
+
+// Prototype shared by all instances created from this map.
+Object* Map::prototype() {
+ return READ_FIELD(this, kPrototypeOffset);
+}
+
+
+// Prototypes are always JSObjects or null, never primitives.
+void Map::set_prototype(Object* value) {
+ ASSERT(value->IsNull() || value->IsJSObject());
+ WRITE_FIELD(this, kPrototypeOffset, value);
+ WRITE_BARRIER(this, kPrototypeOffset);
+}
+
+
+// Generated getter/setter pairs (ACCESSORS), bit-flag accessors
+// (BOOL_ACCESSORS) and raw int accessors (INT_ACCESSORS) for the heap
+// object fields below.
+ACCESSORS(Map, instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
+ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
+ACCESSORS(Map, constructor, Object, kConstructorOffset)
+
+ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
+ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
+
+ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
+ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
+
+ACCESSORS(JSGlobalObject, security_token, Object, kSecurityTokenOffset)
+
+ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
+ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(AccessorInfo, data, Object, kDataOffset)
+ACCESSORS(AccessorInfo, name, Object, kNameOffset)
+ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+
+ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
+ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
+ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
+
+ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
+ACCESSORS(InterceptorInfo, setter, Object, kSetterOffset)
+ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
+ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
+ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
+ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
+
+ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
+ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
+
+ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
+ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
+
+ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
+ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
+ACCESSORS(FunctionTemplateInfo, internal_field_count, Object,
+ kInternalFieldCountOffset)
+ACCESSORS(FunctionTemplateInfo, property_accessors, Object,
+ kPropertyAccessorsOffset)
+ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
+ kPrototypeTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, named_property_handler, Object,
+ kNamedPropertyHandlerOffset)
+ACCESSORS(FunctionTemplateInfo, indexed_property_handler, Object,
+ kIndexedPropertyHandlerOffset)
+ACCESSORS(FunctionTemplateInfo, instance_template, Object,
+ kInstanceTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, class_name, Object, kClassNameOffset)
+ACCESSORS(FunctionTemplateInfo, signature, Object, kSignatureOffset)
+ACCESSORS(FunctionTemplateInfo, lookup_callback, Object, kLookupCallbackOffset)
+ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
+ kInstanceCallHandlerOffset)
+ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
+ kAccessCheckInfoOffset)
+ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
+
+ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
+
+ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
+ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
+
+ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
+
+ACCESSORS(Script, source, Object, kSourceOffset)
+ACCESSORS(Script, name, Object, kNameOffset)
+ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
+ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
+ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
+ACCESSORS(Script, type, Smi, kTypeOffset)
+
+ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
+ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex)
+ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
+ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
+
+ACCESSORS(BreakPointInfo, code_position, Smi, kCodePositionIndex)
+ACCESSORS(BreakPointInfo, source_position, Smi, kSourcePositionIndex)
+ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
+ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
+
+ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
+ kInstanceClassNameOffset)
+ACCESSORS(SharedFunctionInfo, function_data, Object,
+ kExternalReferenceDataOffset)
+ACCESSORS(SharedFunctionInfo, lazy_load_data, Object, kLazyLoadDataOffset)
+ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
+ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
+
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
+ kHiddenPrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
+ kNeedsAccessCheckBit)
+BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
+ kIsExpressionBit)
+BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
+ kIsTopLevelBit)
+
+INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
+INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
+ kFormalParameterCountOffset)
+INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+ kExpectedNofPropertiesOffset)
+INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+ kStartPositionAndTypeOffset)
+INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
+INT_ACCESSORS(SharedFunctionInfo, function_token_position,
+ kFunctionTokenPositionOffset)
+
+
+// The start position shares an int field with the is_expression /
+// is_toplevel flag bits; extract it by shifting the flags away.
+int SharedFunctionInfo::start_position() {
+ return start_position_and_type() >> kStartPositionShift;
+}
+
+
+// Replaces the position bits while preserving the flag bits.
+void SharedFunctionInfo::set_start_position(int start_position) {
+ set_start_position_and_type((start_position << kStartPositionShift)
+ | (start_position_and_type() & ~kStartPositionMask));
+}
+
+
+Code* SharedFunctionInfo::code() {
+ return Code::cast(READ_FIELD(this, kCodeOffset));
+}
+
+
+void SharedFunctionInfo::set_code(Code* value) {
+ WRITE_FIELD(this, kCodeOffset, value);
+ WRITE_BARRIER(this, kCodeOffset);
+}
+
+
+// Uncompiled functions currently hold a lazy-compile STUB as their code.
+bool SharedFunctionInfo::is_compiled() {
+ // TODO(1242782): Create a code kind for uncompiled code.
+ return code()->kind() != Code::STUB;
+}
+
+
+// Boilerplate functions are the templates that closures are cloned from;
+// they are identified by a dedicated map.
+bool JSFunction::IsBoilerplate() {
+ return map() == Heap::boilerplate_function_map();
+}
+
+
+// A lazily-loaded function still carries its load data; once loaded the
+// field is reset to undefined.
+bool JSFunction::IsLoaded() {
+ return shared()->lazy_load_data() == Heap::undefined_value();
+}
+
+
+// Code lives on the shared function info, not the function itself.
+Code* JSFunction::code() {
+ return shared()->code();
+}
+
+
+void JSFunction::set_code(Code* value) {
+ shared()->set_code(value);
+}
+
+
+Context* JSFunction::context() {
+ return Context::cast(READ_FIELD(this, kContextOffset));
+}
+
+
+// Like context(), but without the checked cast — usable during GC.
+Object* JSFunction::unchecked_context() {
+ return READ_FIELD(this, kContextOffset);
+}
+
+
+void JSFunction::set_context(Object* value) {
+ ASSERT(value == Heap::undefined_value() || value->IsContext());
+ WRITE_FIELD(this, kContextOffset, value);
+ WRITE_BARRIER(this, kContextOffset);
+}
+
+// One slot doubles as either the initial map or the explicit prototype;
+// see has_initial_map()/instance_prototype() below for the distinction.
+ACCESSORS(JSFunction, prototype_or_initial_map, Object,
+ kPrototypeOrInitialMapOffset)
+
+
+// Only valid when has_initial_map() is true.
+Map* JSFunction::initial_map() {
+ return Map::cast(prototype_or_initial_map());
+}
+
+
+void JSFunction::set_initial_map(Map* value) {
+ set_prototype_or_initial_map(value);
+}
+
+
+// The shared slot holds a Map once the function has created an instance.
+bool JSFunction::has_initial_map() {
+ return prototype_or_initial_map()->IsMap();
+}
+
+
+// The hole marks "no prototype set yet".
+bool JSFunction::has_instance_prototype() {
+ return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
+}
+
+
+bool JSFunction::has_prototype() {
+ return map()->has_non_instance_prototype() || has_instance_prototype();
+}
+
+
+Object* JSFunction::instance_prototype() {
+ ASSERT(has_instance_prototype());
+ if (has_initial_map()) return initial_map()->prototype();
+ // When there is no initial map and the prototype is a JSObject, the
+ // initial map field is used for the prototype field.
+ return prototype_or_initial_map();
+}
+
+
+Object* JSFunction::prototype() {
+ ASSERT(has_prototype());
+ // If the function's prototype property has been set to a non-JSObject
+ // value, that value is stored in the constructor field of the map.
+ if (map()->has_non_instance_prototype()) return map()->constructor();
+ return instance_prototype();
+}
+
+
+bool JSFunction::is_compiled() {
+ return shared()->is_compiled();
+}
+
+
+// JavaScript-level builtins are stored as an inline array of fields
+// starting at kJSBuiltinsOffset, indexed by builtin id.
+Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
+ ASSERT(0 <= id && id < kJSBuiltinsCount);
+ return READ_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize));
+}
+
+
+void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
+ Object* value) {
+ ASSERT(0 <= id && id < kJSBuiltinsCount);
+ WRITE_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize), value);
+ WRITE_BARRIER(this, kJSBuiltinsOffset + (id * kPointerSize));
+}
+
+
+// A Proxy wraps a raw address; it is stored as an int field, so no write
+// barrier is involved.
+Address Proxy::proxy() {
+ return AddressFrom<Address>(READ_INT_FIELD(this, kProxyOffset));
+}
+
+
+void Proxy::set_proxy(Address value) {
+ WRITE_INT_FIELD(this, kProxyOffset, OffsetFrom(value));
+}
+
+
+// Reports the embedded external address to the visitor (e.g. for
+// serialization), rather than as an object pointer.
+void Proxy::ProxyIterateBody(ObjectVisitor* visitor) {
+ visitor->VisitExternalReference(
+ reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+}
+
+
+ACCESSORS(JSValue, value, Object, kValueOffset)
+
+
+// Hand-written cast so the debug build can also verify the object size,
+// which CAST_ACCESSOR would not check.
+JSValue* JSValue::cast(Object* obj) {
+ ASSERT(obj->IsJSValue());
+ ASSERT(HeapObject::cast(obj)->Size() == JSValue::kSize);
+ return reinterpret_cast<JSValue*>(obj);
+}
+
+
+INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, relocation_size, kRelocationSizeOffset)
+INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
+
+
+Code::ICTargetState Code::ic_flag() {
+ return static_cast<ICTargetState>(READ_INT_FIELD(this, kICFlagOffset));
+}
+
+
+void Code::set_ic_flag(ICTargetState value) {
+ WRITE_INT_FIELD(this, kICFlagOffset, value);
+}
+
+
+// Instructions start immediately after the Code header.
+byte* Code::instruction_start() {
+ return FIELD_ADDR(this, kHeaderSize);
+}
+
+
+// Instructions plus relocation info, rounded up to object alignment.
+int Code::body_size() {
+ return RoundUp(instruction_size() + relocation_size(), kObjectAlignment);
+}
+
+
+// Relocation info is laid out at the end of the object, just before the
+// scope info (sinfo) section.
+byte* Code::relocation_start() {
+ return FIELD_ADDR(this, CodeSize() - sinfo_size() - relocation_size());
+}
+
+
+byte* Code::entry() {
+ return instruction_start();
+}
+
+
+// True if |pc| points into this code object's instructions.
+bool Code::contains(byte* pc) {
+ return (instruction_start() <= pc) &&
+ (pc < instruction_start() + instruction_size());
+}
+
+
+// Scope info occupies the tail of the object.
+byte* Code::sinfo_start() {
+ return FIELD_ADDR(this, CodeSize() - sinfo_size());
+}
+
+
+ACCESSORS(JSArray, length, Object, kLengthOffset)
+
+
+// Fast elements are a plain FixedArray; slow elements use a Dictionary.
+bool JSObject::HasFastElements() {
+ return !elements()->IsDictionary();
+}
+
+
+bool JSObject::HasNamedInterceptor() {
+ return map()->has_named_interceptor();
+}
+
+
+bool JSObject::HasIndexedInterceptor() {
+ return map()->has_indexed_interceptor();
+}
+
+
+// Only valid in slow-properties mode.
+Dictionary* JSObject::property_dictionary() {
+ ASSERT(!HasFastProperties());
+ return Dictionary::cast(properties());
+}
+
+
+// Only valid in slow-elements mode.
+Dictionary* JSObject::element_dictionary() {
+ ASSERT(!HasFastElements());
+ return Dictionary::cast(elements());
+}
+
+
+// The computed hash (and related flags) is cached in the low bits of the
+// packed length field.
+bool String::HasHashCode() {
+ return (length_field() & kHashComputedMask) != 0;
+}
+
+
+uint32_t String::Hash() {
+ // Fast case: has hash code already been computed?
+ int hash = length_field();
+ if (hash & kHashComputedMask) return hash;
+ // Slow case: compute hash code and set it.
+ return ComputeAndSetHash();
+}
+
+
+// Attempts to interpret this string as an array index; the cached hash
+// flags let non-index strings be rejected without rescanning characters.
+bool String::AsArrayIndex(uint32_t* index) {
+ int hash = length_field();
+ if ((hash & kHashComputedMask) && !(hash & kIsArrayIndexMask)) return false;
+ return SlowAsArrayIndex(index);
+}
+
+
+// An object's prototype is stored on its map.
+Object* JSObject::GetPrototype() {
+ return JSObject::cast(this)->map()->prototype();
+}
+
+
+PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
+ return GetPropertyAttributeWithReceiver(this, key);
+}
+
+
+bool JSObject::HasElement(uint32_t index) {
+ return HasElementWithReceiver(this, index);
+}
+
+
+// The flag Smi packs two boolean bits plus the property attributes.
+bool AccessorInfo::all_can_read() {
+ return BooleanBit::get(flag(), kAllCanReadBit);
+}
+
+
+void AccessorInfo::set_all_can_read(bool value) {
+ set_flag(BooleanBit::set(flag(), kAllCanReadBit, value));
+}
+
+
+bool AccessorInfo::all_can_write() {
+ return BooleanBit::get(flag(), kAllCanWriteBit);
+}
+
+
+void AccessorInfo::set_all_can_write(bool value) {
+ set_flag(BooleanBit::set(flag(), kAllCanWriteBit, value));
+}
+
+
+PropertyAttributes AccessorInfo::property_attributes() {
+ return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
+}
+
+
+// Replaces the attribute bits while preserving the boolean flag bits.
+void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
+ ASSERT(AttributesField::is_valid(attributes));
+ int rest_value = flag()->value() & ~AttributesField::mask();
+ set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
+}
+
+// Writes a (key, value, details) triple into the dictionary slot for
+// "entry". The write barrier mode is computed once and reused for both
+// object stores; the details word is a smi and needs no barrier.
+void Dictionary::SetEntry(int entry,
+                          Object* key,
+                          Object* value,
+                          PropertyDetails details) {
+  // String keys must carry a positive enumeration index in their details.
+  ASSERT(!key->IsString() || details.index() > 0);
+  int index = EntryToIndex(entry);
+  WriteBarrierMode mode = GetWriteBarrierMode();
+  set(index, key, mode);
+  set(index+1, value, mode);
+  fast_set(this, index+2, details.AsSmi());
+}
+
+
+// Resets this map's code cache to the canonical empty fixed array.
+void Map::ClearCodeCache() {
+  // No write barrier is needed since empty_fixed_array is not in new space.
+  // Please note this function is used during marking:
+  // - MarkCompactCollector::MarkUnmarkedObject
+  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+  WRITE_FIELD(this, kCodeCacheOffset, Heap::empty_fixed_array());
+}
+
+
+// Undefine the field-access helper macros defined earlier in this header
+// so they cannot leak into files that include objects-inl.h.
+#undef CAST_ACCESSOR
+#undef INT_ACCESSORS
+#undef SMI_ACCESSORS
+#undef ACCESSORS
+#undef FIELD_ADDR
+#undef READ_FIELD
+#undef WRITE_FIELD
+#undef WRITE_BARRIER
+#undef READ_MEMADDR_FIELD
+#undef WRITE_MEMADDR_FIELD
+#undef READ_DOUBLE_FIELD
+#undef WRITE_DOUBLE_FIELD
+#undef READ_INT_FIELD
+#undef WRITE_INT_FIELD
+#undef READ_SHORT_FIELD
+#undef WRITE_SHORT_FIELD
+#undef READ_BYTE_FIELD
+#undef WRITE_BYTE_FIELD
+
+
+} } // namespace v8::internal
+
+#endif // V8_OBJECTS_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "bootstrapper.h"
+#include "debug.h"
+#include "execution.h"
+#include "objects-inl.h"
+#include "macro-assembler.h"
+#include "scanner.h"
+#include "scopeinfo.h"
+#include "string-stream.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+// Debug-only flag: when enabled, logs each time an object's properties
+// are normalized into dictionary (slow) mode.
+DEFINE_bool(trace_normalization,
+            false,
+            "prints when objects are turned into dictionaries.");
+#endif
+
+// Getters and setters are stored in a fixed array property. These are
+// constants for their indices.
+const int kGetterIndex = 0;
+const int kSetterIndex = 1;
+
+// Returns true if this object was created from the given function
+// template or from one of the templates inheriting from it.
+bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
+  // There is a constraint on the object; check
+  if (!this->IsJSObject()) return false;
+  // Fetch the constructor function of the object
+  Object* cons_obj = JSObject::cast(this)->map()->constructor();
+  if (!cons_obj->IsJSFunction()) return false;
+  JSFunction* fun = JSFunction::cast(cons_obj);
+  // Iterate through the chain of inheriting function templates to
+  // see if the required one occurs.
+  for (Object* type = fun->shared()->function_data();
+       type->IsFunctionTemplateInfo();
+       type = FunctionTemplateInfo::cast(type)->parent_template()) {
+    if (type == expected) return true;
+  }
+  // Didn't find the required type in the inheritance chain.
+  return false;
+}
+
+
+// Allocates a JSValue wrapper with the given constructor and stores the
+// primitive "value" in it. Propagates allocation failures.
+static Object* CreateJSValue(JSFunction* constructor, Object* value) {
+  Object* obj = Heap::AllocateJSObject(constructor);
+  if (obj->IsFailure()) return obj;
+  JSValue::cast(obj)->set_value(value);
+  return obj;
+}
+
+
+// Wraps a primitive in the matching JSValue from the given global
+// context; real JS objects pass through unchanged.
+Object* Object::ToObject(Context* global_context) {
+  if (IsNumber()) {
+    return CreateJSValue(global_context->number_function(), this);
+  }
+  if (IsBoolean()) {
+    return CreateJSValue(global_context->boolean_function(), this);
+  }
+  if (IsString()) {
+    return CreateJSValue(global_context->string_function(), this);
+  }
+  ASSERT(IsJSObject());
+  return this;
+}
+
+
+// Like ToObject(Context*), but fetches the current global context itself
+// and signals a failure (to be turned into a type error by the caller)
+// for values that cannot be wrapped.
+Object* Object::ToObject() {
+  if (IsJSObject()) return this;
+  Context* global_context = Top::context()->global_context();
+  if (IsNumber()) {
+    return CreateJSValue(global_context->number_function(), this);
+  }
+  if (IsBoolean()) {
+    return CreateJSValue(global_context->boolean_function(), this);
+  }
+  if (IsString()) {
+    return CreateJSValue(global_context->string_function(), this);
+  }
+  // Throw a type error.
+  return Failure::InternalError();
+}
+
+
+// Maps this value onto the canonical true/false heap objects
+// (ECMA-style ToBoolean).
+Object* Object::ToBoolean() {
+  if (IsTrue()) return Heap::true_value();
+  if (IsFalse()) return Heap::false_value();
+  if (IsSmi()) {
+    // Only the zero smi converts to false.
+    return Heap::ToBoolean(Smi::cast(this)->value() != 0);
+  }
+  if (IsUndefined() || IsNull()) return Heap::false_value();
+  // Undetectable object is false
+  if (IsUndetectableObject()) {
+    return Heap::false_value();
+  }
+  if (IsString()) {
+    // Only the empty string converts to false.
+    return Heap::ToBoolean(String::cast(this)->length() != 0);
+  }
+  if (IsHeapNumber()) {
+    // Delegates NaN and +/-0 handling to the heap number.
+    return HeapNumber::cast(this)->HeapNumberToBoolean();
+  }
+  return Heap::true_value();
+}
+
+
+// Looks up a named property on this value. Primitive values delegate to
+// the prototype of their wrapper function from the current global
+// context; null and undefined are not supported here.
+void Object::Lookup(String* name, LookupResult* result) {
+  if (IsJSObject()) return JSObject::cast(this)->Lookup(name, result);
+  Object* holder = NULL;
+  Context* global_context = Top::context()->global_context();
+  if (IsString()) {
+    holder = global_context->string_function()->instance_prototype();
+  } else if (IsNumber()) {
+    holder = global_context->number_function()->instance_prototype();
+  } else if (IsBoolean()) {
+    holder = global_context->boolean_function()->instance_prototype();
+  }
+  ASSERT(holder != NULL); // cannot handle null or undefined.
+  JSObject::cast(holder)->Lookup(name, result);
+}
+
+
+// Performs a lookup for "name" starting at this value and loads the
+// property on behalf of "receiver". *attributes is filled in from the
+// lookup result.
+Object* Object::GetPropertyWithReceiver(Object* receiver,
+                                        String* name,
+                                        PropertyAttributes* attributes) {
+  LookupResult lookup;
+  Lookup(name, &lookup);
+  Object* value = GetProperty(receiver, &lookup, name, attributes);
+  return value;
+}
+
+
+// Loads the value of a named property through a callback "structure".
+// Three encodings are handled:
+//  - a Proxy wrapping an AccessorDescriptor (old internal api),
+//  - an AccessorInfo holding a v8::AccessorGetter (public api),
+//  - a FixedArray getter/setter pair installed by __defineGetter__.
+Object* Object::GetPropertyWithCallback(Object* receiver,
+                                        Object* structure,
+                                        String* name,
+                                        Object* holder) {
+  // To accommodate both the old and the new api we switch on the
+  // data structure used to store the callbacks. Eventually proxy
+  // callbacks should be phased out.
+  if (structure->IsProxy()) {
+    AccessorDescriptor* callback =
+        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+    Object* value = (callback->getter)(receiver, callback->data);
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    return value;
+  }
+
+  // api style callbacks.
+  if (structure->IsAccessorInfo()) {
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* fun_obj = data->getter();
+    v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+    // Handles are needed because the api callback may allocate or
+    // re-enter JavaScript.
+    HandleScope scope;
+    Handle<JSObject> self(JSObject::cast(receiver));
+    Handle<JSObject> holder_handle(JSObject::cast(holder));
+    Handle<String> key(name);
+    Handle<Object> fun_data(data->data());
+    LOG(ApiNamedPropertyAccess("load", *self, name));
+    v8::AccessorInfo info(v8::Utils::ToLocal(self),
+                          v8::Utils::ToLocal(fun_data),
+                          v8::Utils::ToLocal(holder_handle));
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      result = call_fun(v8::Utils::ToLocal(key), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (result.IsEmpty()) return Heap::undefined_value();
+    return *v8::Utils::OpenHandle(*result);
+  }
+
+  // __defineGetter__ callback
+  if (structure->IsFixedArray()) {
+    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+    if (getter->IsJSFunction()) {
+      HandleScope scope;
+      Handle<JSFunction> fun(JSFunction::cast(getter));
+      Handle<Object> self(receiver);
+      bool has_pending_exception;
+      Object* result =
+          *Execution::Call(fun, self, 0, NULL, &has_pending_exception);
+      // Check for pending exception and return the result.
+      if (has_pending_exception) return Failure::Exception();
+      return result;
+    }
+    // Getter is not a function.
+    return Heap::undefined_value();
+  }
+
+  UNREACHABLE();
+  // Use NULL (not 0) for the unreachable pointer return, matching
+  // Object::GetProperty in this file.
+  return NULL;
+}
+
+
+// Only deal with CALLBACKS and INTERCEPTOR
+Object* JSObject::GetPropertyWithFailedAccessCheck(Object* receiver,
+ LookupResult* result,
+ String* name) {
+ if (result->IsValid()) {
+ switch (result->type()) {
+ case CALLBACKS: {
+ // Only allow API accessors.
+ Object* obj = result->GetCallbackObject();
+ if (obj->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(obj);
+ if (info->all_can_read()) {
+ return GetPropertyWithCallback(receiver,
+ result->GetCallbackObject(),
+ name,
+ result->holder());
+ }
+ }
+ break;
+ }
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION: {
+ // Search ALL_CAN_READ accessors in prototype chain.
+ LookupResult r;
+ result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+ if (r.IsValid()) {
+ return GetPropertyWithFailedAccessCheck(receiver, &r, name);
+ }
+ break;
+ }
+ case INTERCEPTOR: {
+ // If the object has an interceptor, try real named properties.
+ // No access check in GetPropertyAttributeWithInterceptor.
+ LookupResult r;
+ result->holder()->LookupRealNamedProperty(name, &r);
+ if (r.IsValid()) {
+ return GetPropertyWithFailedAccessCheck(receiver, &r, name);
+ }
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+
+ Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
+ return Heap::undefined_value();
+}
+
+
+// Runs LoadLazy on the function stored in the lookup result, then
+// retries the generic property load. Handles are used because loading
+// can allocate and trigger GC.
+Object* JSObject::GetLazyProperty(Object* receiver,
+                                  LookupResult* result,
+                                  String* name,
+                                  PropertyAttributes* attributes) {
+  HandleScope scope;
+  Handle<Object> this_handle(this);
+  Handle<Object> receiver_handle(receiver);
+  Handle<String> name_handle(name);
+  bool pending_exception;
+  LoadLazy(Handle<JSFunction>(JSFunction::cast(result->GetValue())),
+           &pending_exception);
+  if (pending_exception) return Failure::Exception();
+  return this_handle->GetPropertyWithReceiver(*receiver_handle,
+                                              *name_handle,
+                                              attributes);
+}
+
+
+// Runs LoadLazy on the function stored in the lookup result, then
+// retries the property store. Handles are used because loading can
+// allocate and trigger GC.
+Object* JSObject::SetLazyProperty(LookupResult* result,
+                                  String* name,
+                                  Object* value,
+                                  PropertyAttributes attributes) {
+  HandleScope scope;
+  Handle<JSObject> this_handle(this);
+  Handle<String> name_handle(name);
+  Handle<Object> value_handle(value);
+  bool pending_exception;
+  LoadLazy(Handle<JSFunction>(JSFunction::cast(result->GetValue())),
+           &pending_exception);
+  if (pending_exception) return Failure::Exception();
+  return this_handle->SetProperty(*name_handle, *value_handle, attributes);
+}
+
+
+// Runs LoadLazy on the function stored in the lookup result, then
+// retries the property deletion. Handles are used because loading can
+// allocate and trigger GC.
+Object* JSObject::DeleteLazyProperty(LookupResult* result, String* name) {
+  HandleScope scope;
+  Handle<JSObject> this_handle(this);
+  Handle<String> name_handle(name);
+  bool pending_exception;
+  LoadLazy(Handle<JSFunction>(JSFunction::cast(result->GetValue())),
+           &pending_exception);
+  if (pending_exception) return Failure::Exception();
+  return this_handle->DeleteProperty(*name_handle);
+}
+
+
+// Loads the property described by a completed lookup. Performs access
+// checks along the prototype chain up to the result's holder, fills in
+// *attributes, and dispatches on the property type.
+Object* Object::GetProperty(Object* receiver,
+                            LookupResult* result,
+                            String* name,
+                            PropertyAttributes* attributes) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Traverse the prototype chain from the current object (this) to
+  // the holder and check for access rights. This avoid traversing the
+  // objects more than once in case of interceptors, because the
+  // holder will always be the interceptor holder and the search may
+  // only continue with a current object just after the interceptor
+  // holder in the prototype chain.
+  Object* last = result->IsValid() ? result->holder() : Heap::null_value();
+  for (Object* current = this; true; current = current->GetPrototype()) {
+    if (current->IsAccessCheckNeeded()) {
+      // Check if we're allowed to read from the current object. Note
+      // that even though we may not actually end up loading the named
+      // property from the current object, we still check that we have
+      // access to it.
+      JSObject* checked = JSObject::cast(current);
+      if (!Top::MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+        return checked->GetPropertyWithFailedAccessCheck(receiver,
+                                                         result,
+                                                         name);
+      }
+    }
+    // Stop traversing the chain once we reach the last object in the
+    // chain; either the holder of the result or null in case of an
+    // absent property.
+    if (current == last) break;
+  }
+
+  if (!result->IsValid()) {
+    *attributes = ABSENT;
+    return Heap::undefined_value();
+  }
+  *attributes = result->GetAttributes();
+  // Lazily loaded objects must be loaded before the property is read.
+  if (!result->IsLoaded()) {
+    return JSObject::cast(this)->GetLazyProperty(receiver,
+                                                 result,
+                                                 name,
+                                                 attributes);
+  }
+  Object* value;
+  JSObject* holder = result->holder();
+  switch (result->type()) {
+    case NORMAL:
+      // Slow-mode properties live in the property dictionary.
+      value =
+          holder->property_dictionary()->ValueAt(result->GetDictionaryEntry());
+      ASSERT(!value->IsTheHole() || result->IsReadOnly());
+      return value->IsTheHole() ? Heap::undefined_value() : value;
+    case FIELD:
+      // Fast-mode properties live at a fixed index in the properties array.
+      value = holder->properties()->get(result->GetFieldIndex());
+      ASSERT(!value->IsTheHole() || result->IsReadOnly());
+      return value->IsTheHole() ? Heap::undefined_value() : value;
+    case CONSTANT_FUNCTION:
+      return result->GetConstantFunction();
+    case CALLBACKS:
+      return GetPropertyWithCallback(receiver,
+                                     result->GetCallbackObject(),
+                                     name,
+                                     holder);
+    case MAP_TRANSITION:
+    case CONSTANT_TRANSITION:
+      // Transitions are not real properties.
+      *attributes = ABSENT;
+      return Heap::undefined_value();
+    case INTERCEPTOR: {
+      JSObject* recvr = JSObject::cast(receiver);
+      return holder->GetPropertyWithInterceptor(recvr, name, attributes);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+// Loads the indexed property "index" on behalf of "receiver".
+Object* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
+  if (!IsJSObject()) {
+    // Non-JS objects do not have integer indexed properties.
+    return Heap::undefined_value();
+  }
+  JSObject* self = JSObject::cast(this);
+  return self->GetElementWithReceiver(JSObject::cast(receiver), index);
+}
+
+
+// Returns the prototype of this value: the map prototype for JS
+// objects, the wrapper function's instance prototype for primitives,
+// and null for everything else.
+Object* Object::GetPrototype() {
+  // The object is either a number, a string, a boolean, or a real JS object.
+  if (IsJSObject()) return JSObject::cast(this)->map()->prototype();
+  Context* context = Top::context()->global_context();
+
+  if (IsNumber()) return context->number_function()->instance_prototype();
+  if (IsString()) return context->string_function()->instance_prototype();
+  if (IsBoolean()) {
+    return context->boolean_function()->instance_prototype();
+  } else {
+    return Heap::null_value();
+  }
+}
+
+
+// Prints a short description of this value to stdout.
+void Object::ShortPrint() {
+  HeapStringAllocator alloc;
+  StringStream stream(&alloc);
+  ShortPrint(&stream);
+  stream.OutputToStdOut();
+}
+
+
+// Appends a short description of this value to the accumulator,
+// dispatching on the three top-level value kinds.
+void Object::ShortPrint(StringStream* accumulator) {
+  if (IsSmi()) {
+    Smi::cast(this)->SmiPrint(accumulator);
+    return;
+  }
+  if (IsFailure()) {
+    Failure::cast(this)->FailurePrint(accumulator);
+    return;
+  }
+  HeapObject::cast(this)->HeapObjectShortPrint(accumulator);
+}
+
+
+// Prints this smi's integer value to stdout.
+void Smi::SmiPrint() {
+  PrintF("%d", value());
+}
+
+
+// Appends this smi's integer value to the accumulator.
+void Smi::SmiPrint(StringStream* accumulator) {
+  accumulator->Add("%d", value());
+}
+
+
+// Appends this failure's encoded value to the accumulator.
+void Failure::FailurePrint(StringStream* accumulator) {
+  accumulator->Add("Failure(%d)", value());
+}
+
+
+// Prints this failure's encoded value to stdout.
+void Failure::FailurePrint() {
+  PrintF("Failure(%d)", value());
+}
+
+
+// Builds a retry-after-GC failure encoding the requested allocation
+// size and target space. Falls back to an out-of-memory exception when
+// the requested size does not fit in the failure/smi bit fields.
+Failure* Failure::RetryAfterGC(int requested_bytes, AllocationSpace space) {
+  ASSERT((space & ~kSpaceTagMask) == 0);
+  // Sizes are encoded at object-alignment granularity.
+  int requested = requested_bytes >> kObjectAlignmentBits;
+  int value = (requested << kSpaceTagSize) | space;
+  // We can't very well allocate a heap number in this situation, and if the
+  // requested memory is so large it seems reasonable to say that this is an
+  // out of memory situation. This fixes a crash in
+  // js1_5/Regress/regress-303213.js.
+  if (value >> kSpaceTagSize != requested ||
+      !Smi::IsValid(value) ||
+      value != ((value << kFailureTypeTagSize) >> kFailureTypeTagSize) ||
+      !Smi::IsValid(value << kFailureTypeTagSize)) {
+    Top::context()->mark_out_of_memory();
+    return Failure::OutOfMemoryException();
+  }
+  return Construct(RETRY_AFTER_GC, value);
+}
+
+
+// Should a word be prefixed by 'a' or 'an' in order to read naturally in
+// English? Returns false for non-ASCII or words that don't start with
+// a capital letter. The a/an rule follows pronunciation in English.
+// We don't use the BBC's overcorrect "an historic occasion" though if
+// you speak a dialect you may well say "an 'istoric occasion".
+// Used below by JSObjectShortPrint when describing constructor names.
+static bool AnWord(String* str) {
+  if (str->length() == 0) return false; // a nothing
+  int c0 = str->Get(0);
+  // Second character (0 when the word has only one letter).
+  int c1 = str->length() > 1 ? str->Get(1) : 0;
+  if (c0 == 'U') {
+    if (c1 > 'Z') {
+      return true; // an Umpire, but a UTF8String, a U
+    }
+  } else if (c0 == 'A' || c0 == 'E' || c0 == 'I' || c0 == 'O') {
+    return true; // an Ape, an ABCBook
+  } else if ((c1 == 0 || (c1 >= 'A' && c1 <= 'Z')) &&
+             (c0 == 'F' || c0 == 'H' || c0 == 'M' || c0 == 'N' || c0 == 'R' ||
+              c0 == 'S' || c0 == 'X')) {
+    return true; // an MP3File, an M
+  }
+  return false;
+}
+
+
+// Flattens this string in place: cons strings are copied into a fresh
+// sequential string and the cons cell is overwritten to point at it;
+// sliced strings flatten their underlying buffer. Returns this on
+// success or a Failure if allocation fails.
+Object* String::Flatten() {
+#ifdef DEBUG
+  // Do not attempt to flatten in debug mode when allocation is not
+  // allowed. This is to avoid an assertion failure when allocating.
+  // Flattening strings is the only case where we always allow
+  // allocation because no GC is performed if the allocation fails.
+  if (!Heap::IsAllocationAllowed()) return this;
+#endif
+
+  switch (representation_tag()) {
+    case kSlicedStringTag: {
+      SlicedString* ss = SlicedString::cast(this);
+      // The SlicedString constructor should ensure that there are no
+      // SlicedStrings that are constructed directly on top of other
+      // SlicedStrings.
+      ASSERT(!ss->buffer()->IsSlicedString());
+      Object* ok = String::cast(ss->buffer())->Flatten();
+      if (ok->IsFailure()) return ok;
+      return this;
+    }
+    case kConsStringTag: {
+      ConsString* cs = ConsString::cast(this);
+      // An empty second part means the cons string is already flat.
+      if (String::cast(cs->second())->length() == 0) {
+        return this;
+      }
+      // There's little point in putting the flat string in new space if the
+      // cons string is in old space. It can never get GCed until there is
+      // an old space GC.
+      PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED : TENURED;
+      Object* object = IsAscii() ?
+          Heap::AllocateRawAsciiString(length(), tenure) :
+          Heap::AllocateRawTwoByteString(length(), tenure);
+      if (object->IsFailure()) return object;
+      String* result = String::cast(object);
+      Flatten(this, result, 0, length(), 0);
+      // Redirect the cons cell at the flattened copy.
+      cs->set_first(result);
+      cs->set_second(Heap::empty_string());
+      return this;
+    }
+    default:
+      // Sequential and external strings are already flat.
+      return this;
+  }
+}
+
+
+// Appends a single-line, abbreviated description of this string to the
+// accumulator. Output is truncated at 1024 characters, and strings
+// containing bytes outside the printable ASCII range are escaped.
+void String::StringShortPrint(StringStream* accumulator) {
+  int len = length();
+  if (len > kMaxMediumStringSize) {
+    accumulator->Add("<Very long string[%u]>", len);
+    return;
+  }
+
+  if (!LooksValid()) {
+    accumulator->Add("<Invalid String>");
+    return;
+  }
+
+  StringInputBuffer buf(this);
+
+  bool truncated = false;
+  if (len > 1024) {
+    len = 1024;
+    truncated = true;
+  }
+  // First pass: decide whether the (possibly truncated) contents are
+  // printable ASCII.
+  bool ascii = true;
+  for (int i = 0; i < len; i++) {
+    int c = buf.GetNext();
+
+    if (c < 32 || c >= 127) {
+      ascii = false;
+    }
+  }
+  buf.Reset(this);
+  // Second pass: emit either the raw characters or an escaped form.
+  if (ascii) {
+    accumulator->Add("<String[%u]: ", length());
+    for (int i = 0; i < len; i++) {
+      accumulator->Put(buf.GetNext());
+    }
+    accumulator->Put('>');
+  } else {
+    // Backslash indicates that the string contains control
+    // characters and that backslashes are therefore escaped.
+    accumulator->Add("<String[%u]\\: ", length());
+    for (int i = 0; i < len; i++) {
+      int c = buf.GetNext();
+      if (c == '\n') {
+        accumulator->Add("\\n");
+      } else if (c == '\r') {
+        accumulator->Add("\\r");
+      } else if (c == '\\') {
+        accumulator->Add("\\\\");
+      } else if (c < 32 || c > 126) {
+        accumulator->Add("\\x%02x", c);
+      } else {
+        accumulator->Put(c);
+      }
+    }
+    // NOTE(review): the "..." marker is only emitted on this escaped
+    // branch, never on the plain-ASCII branch above — confirm intended.
+    if (truncated) {
+      accumulator->Put('.');
+      accumulator->Put('.');
+      accumulator->Put('.');
+    }
+    accumulator->Put('>');
+  }
+  return;
+}
+
+
+// Appends a short, human-readable description of this JS object to the
+// accumulator (debugging aid). Arrays and functions get dedicated
+// formats; other objects are described by their constructor name.
+void JSObject::JSObjectShortPrint(StringStream* accumulator) {
+  switch (map()->instance_type()) {
+    case JS_ARRAY_TYPE: {
+      double length = JSArray::cast(this)->length()->Number();
+      accumulator->Add("<JS array[%u]>", static_cast<uint32_t>(length));
+      break;
+    }
+    case JS_FUNCTION_TYPE: {
+      Object* fun_name = JSFunction::cast(this)->shared()->name();
+      bool printed = false;
+      if (fun_name->IsString()) {
+        String* str = String::cast(fun_name);
+        if (str->length() > 0) {
+          accumulator->Add("<JS Function ");
+          accumulator->Put(str);
+          accumulator->Put('>');
+          printed = true;
+        }
+      }
+      if (!printed) {
+        // Anonymous or unnamed function.
+        accumulator->Add("<JS Function>");
+      }
+      break;
+    }
+    // All other JSObjects are rather similar to each other (JSObject,
+    // JSGlobalObject, JSUndetectableObject, JSValue).
+    default: {
+      Object* constructor = map()->constructor();
+      bool printed = false;
+      if (constructor->IsHeapObject() &&
+          !Heap::Contains(HeapObject::cast(constructor))) {
+        accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
+      } else {
+        bool global_object = IsJSGlobalObject();
+        if (constructor->IsJSFunction()) {
+          if (!Heap::Contains(JSFunction::cast(constructor)->shared())) {
+            accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
+          } else {
+            Object* constructor_name =
+                JSFunction::cast(constructor)->shared()->name();
+            if (constructor_name->IsString()) {
+              String* str = String::cast(constructor_name);
+              if (str->length() > 0) {
+                bool vowel = AnWord(str);
+                accumulator->Add("<%sa%s ",
+                                 global_object ? "JS Global Object: " : "",
+                                 vowel ? "n" : "");
+                accumulator->Put(str);
+                accumulator->Put('>');
+                printed = true;
+              }
+            }
+          }
+        }
+        if (!printed) {
+          accumulator->Add("<JS %sObject", global_object ? "Global " : "");
+        }
+      }
+      if (IsJSValue()) {
+        accumulator->Add(" value = ");
+        JSValue::cast(this)->value()->ShortPrint(accumulator);
+      }
+      // NOTE(review): when a constructor name was printed a '>' has
+      // already been emitted above, so this can produce a second
+      // trailing '>' — confirm intended.
+      accumulator->Put('>');
+      break;
+    }
+  }
+}
+
+
+// Appends a short description of this heap object to the accumulator.
+// Defensively validates the object and map pointers first so that this
+// can be used from post-crash stack traces.
+void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
+  // if (!Heap::InNewSpace(this)) PrintF("*", this);
+  if (!Heap::Contains(this)) {
+    accumulator->Add("!!!INVALID POINTER!!!");
+    return;
+  }
+  if (!Heap::Contains(map())) {
+    accumulator->Add("!!!INVALID MAP!!!");
+    return;
+  }
+
+  accumulator->Add("%p ", this);
+
+  if (IsString()) {
+    String::cast(this)->StringShortPrint(accumulator);
+    return;
+  }
+  if (IsJSObject()) {
+    JSObject::cast(this)->JSObjectShortPrint(accumulator);
+    return;
+  }
+  switch (map()->instance_type()) {
+    case MAP_TYPE:
+      accumulator->Add("<Map>");
+      break;
+    case FIXED_ARRAY_TYPE:
+      accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
+      break;
+    case BYTE_ARRAY_TYPE:
+      accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
+      break;
+    case SHARED_FUNCTION_INFO_TYPE:
+      accumulator->Add("<SharedFunctionInfo>");
+      break;
+// Struct types print their own class name.
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+  case NAME##_TYPE: \
+    accumulator->Add(#Name); \
+    break;
+  STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+    case CODE_TYPE:
+      accumulator->Add("<Code>");
+      break;
+    case ODDBALL_TYPE: {
+      if (IsUndefined())
+        accumulator->Add("<undefined>");
+      else if (IsTheHole())
+        accumulator->Add("<the hole>");
+      else if (IsNull())
+        accumulator->Add("<null>");
+      else if (IsTrue())
+        accumulator->Add("<true>");
+      else if (IsFalse())
+        accumulator->Add("<false>");
+      else
+        accumulator->Add("<Odd Oddball>");
+      break;
+    }
+    case HEAP_NUMBER_TYPE:
+      accumulator->Add("<Number: ");
+      HeapNumber::cast(this)->HeapNumberPrint(accumulator);
+      accumulator->Put('>');
+      break;
+    case PROXY_TYPE:
+      accumulator->Add("<Proxy>");
+      break;
+    default:
+      accumulator->Add("<Other heap object (%d)>", map()->instance_type());
+      break;
+  }
+}
+
+
+// Computes this object's size using the given map. Intended for use
+// when the object's own map pointer may be unusable (e.g. during GC),
+// so checked <Type>::cast calls are deliberately avoided.
+int HeapObject::SlowSizeFromMap(Map* map) {
+  // Avoid calling functions such as FixedArray::cast during GC, which
+  // read map pointer of this object again.
+  InstanceType instance_type = map->instance_type();
+
+  // Sequential strings have a length-dependent size.
+  if (instance_type < FIRST_NONSTRING_TYPE
+      && (reinterpret_cast<String*>(this)->map_representation_tag(map)
+          == kSeqStringTag)) {
+    if (reinterpret_cast<String*>(this)->is_ascii_map(map)) {
+      return reinterpret_cast<AsciiString*>(this)->AsciiStringSize(map);
+    } else {
+      return reinterpret_cast<TwoByteString*>(this)->TwoByteStringSize(map);
+    }
+  }
+
+  switch (instance_type) {
+    case FIXED_ARRAY_TYPE:
+      return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
+    case BYTE_ARRAY_TYPE:
+      return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
+    case CODE_TYPE:
+      return reinterpret_cast<Code*>(this)->CodeSize();
+    case MAP_TYPE:
+      return Map::kSize;
+    default:
+      // All remaining types have a fixed size recorded on the map.
+      return map->instance_size();
+  }
+}
+
+
+// Visits all pointer fields of this object: the map word first, then
+// the type-specific body.
+void HeapObject::Iterate(ObjectVisitor* v) {
+  // Handle header
+  IteratePointer(v, kMapOffset);
+  // Handle object body
+  Map* m = map();
+  IterateBody(m->instance_type(), SizeFromMap(m), v);
+}
+
+
+// Dispatches to the per-type body visitor for an object of the given
+// instance type and size.
+void HeapObject::IterateBody(InstanceType type, int object_size,
+                             ObjectVisitor* v) {
+  // Avoiding <Type>::cast(this) because it accesses the map pointer field.
+  // During GC, the map pointer field is encoded.
+  if (type < FIRST_NONSTRING_TYPE) {
+    switch (type & kStringRepresentationMask) {
+      case kSeqStringTag:
+        // Sequential strings contain no pointers in their body.
+        break;
+      case kConsStringTag:
+        reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v);
+        break;
+      case kSlicedStringTag:
+        reinterpret_cast<SlicedString*>(this)->SlicedStringIterateBody(v);
+        break;
+    }
+    return;
+  }
+
+  switch (type) {
+    case FIXED_ARRAY_TYPE:
+      reinterpret_cast<FixedArray*>(this)->FixedArrayIterateBody(v);
+      break;
+    case JS_OBJECT_TYPE:
+    case JS_VALUE_TYPE:
+    case JS_ARRAY_TYPE:
+    case JS_FUNCTION_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+      reinterpret_cast<JSObject*>(this)->JSObjectIterateBody(object_size, v);
+      break;
+    case JS_BUILTINS_OBJECT_TYPE:
+      reinterpret_cast<JSObject*>(this)->JSObjectIterateBody(object_size, v);
+      break;
+    case ODDBALL_TYPE:
+      reinterpret_cast<Oddball*>(this)->OddballIterateBody(v);
+      break;
+    case PROXY_TYPE:
+      reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
+      break;
+    case MAP_TYPE:
+      reinterpret_cast<Map*>(this)->MapIterateBody(v);
+      break;
+    case CODE_TYPE:
+      reinterpret_cast<Code*>(this)->CodeIterateBody(v);
+      break;
+    case HEAP_NUMBER_TYPE:
+    case FILLER_TYPE:
+    case BYTE_ARRAY_TYPE:
+      // These contain no pointers in their body.
+      break;
+    case SHARED_FUNCTION_INFO_TYPE: {
+      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
+      shared->SharedFunctionInfoIterateBody(v);
+      break;
+    }
+// All struct types share the generic body iteration below.
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+        case NAME##_TYPE:
+      STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+      IterateStructBody(object_size, v);
+      break;
+    default:
+      PrintF("Unknown type: %d\n", type);
+      UNREACHABLE();
+  }
+}
+
+
+// Generic body iteration for struct types: every field after the header
+// is assumed to be a tagged pointer.
+void HeapObject::IterateStructBody(int object_size, ObjectVisitor* v) {
+  IteratePointers(v, HeapObject::kSize, object_size);
+}
+
+
+// ECMA ToBoolean for heap numbers: NaN and both zeros convert to
+// false, every other value to true.
+Object* HeapNumber::HeapNumberToBoolean() {
+  int kind = fpclassify(value());
+  if (kind == FP_NAN || kind == FP_ZERO) return Heap::false_value();
+  return Heap::true_value();
+}
+
+
+// Prints this number to stdout with 16 significant digits.
+void HeapNumber::HeapNumberPrint() {
+  PrintF("%.16g", Number());
+}
+
+
+// Appends this number to the accumulator, formatting into a fixed-size
+// local buffer first to avoid allocation.
+void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
+  // The Windows version of vsnprintf can allocate when printing a %g string
+  // into a buffer that may not be big enough. We don't want random memory
+  // allocation when producing post-crash stack traces, so we print into a
+  // buffer that is plenty big enough for any floating point number, then
+  // print that using vsnprintf (which may truncate but never allocate if
+  // there is no more space in the buffer).
+  char buffer[100];
+  OS::SNPrintF(buffer, sizeof(buffer), "%.16g", Number());
+  accumulator->Add("%s", buffer);
+}
+
+
+// Returns the class name for this object: "Function" for functions,
+// the constructor's instance class name when a constructor function is
+// present, and "Object" otherwise.
+String* JSObject::class_name() {
+  if (IsJSFunction()) return Heap::function_class_symbol();
+  Object* maybe_constructor = map()->constructor();
+  if (maybe_constructor->IsJSFunction()) {
+    JSFunction* constructor = JSFunction::cast(maybe_constructor);
+    return String::cast(constructor->shared()->instance_class_name());
+  }
+  // If the constructor is not present "Object" is returned.
+  return Heap::Object_symbol();
+}
+
+
+// Visits every pointer field in a JS object's body.
+void JSObject::JSObjectIterateBody(int object_size, ObjectVisitor* v) {
+  // Iterate over all fields in the body. Assumes all are Object*.
+  IteratePointers(v, kPropertiesOffset, object_size);
+}
+
+
+// Creates a shallow copy of this object: the elements and properties
+// arrays are copied, the body is cloned, and the copies are installed on
+// the clone. Any allocation failure is propagated as a Failure.
+Object* JSObject::Copy(PretenureFlag pretenure) {
+  // Copy the elements and properties.
+  Object* elem = FixedArray::cast(elements())->Copy();
+  if (elem->IsFailure()) return elem;
+  Object* prop = properties()->Copy();
+  if (prop->IsFailure()) return prop;
+
+  // Make the clone.
+  Object* clone = (pretenure == NOT_TENURED) ?
+      Heap::Allocate(map(), NEW_SPACE) :
+      Heap::Allocate(map(), OLD_SPACE);
+  if (clone->IsFailure()) return clone;
+  JSObject::cast(clone)->CopyBody(this);
+
+  // Set the new elements and properties.
+  JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
+  JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
+
+  // NOTE: Copy is only used for copying objects and functions from
+  // boilerplates. This means if we have a function the prototype is
+  // not present.
+  ASSERT(!IsJSFunction() || !JSFunction::cast(clone)->has_prototype());
+
+  // Return the new clone.
+  return clone;
+}
+
+
+// Adds a property by following an existing map transition to "new_map".
+// Stores the value in the slot the new map assigns to "name", growing
+// the properties array first when there are no unused slots, then
+// switches the object to the new map. Returns the value, or a Failure
+// if growing the properties array fails.
+Object* JSObject::AddFastPropertyUsingMap(Map* new_map,
+                                          String* name,
+                                          Object* value) {
+  int index = new_map->PropertyIndexFor(name);
+  if (map()->unused_property_fields() > 0) {
+    ASSERT(index < properties()->length());
+    properties()->set(index, value);
+  } else {
+    ASSERT(map()->unused_property_fields() == 0);
+    // Grow the properties array to make room for the new slot plus the
+    // unused fields the new map expects.
+    int new_unused = new_map->unused_property_fields();
+    Object* values =
+        properties()->CopySize(properties()->length() + new_unused + 1);
+    if (values->IsFailure()) return values;
+    FixedArray::cast(values)->set(index, value);
+    set_properties(FixedArray::cast(values));
+  }
+  set_map(new_map);
+  return value;
+}
+
+
+// Adds a new fast-mode property. Falls back to slow (dictionary)
+// properties when the name is not an identifier or the object already
+// has many properties. Otherwise allocates a new map (optionally
+// registering a map transition on the old map) and stores the value in
+// the next free property slot, growing the properties array if needed.
+// Returns the value, or a Failure if any allocation fails.
+Object* JSObject::AddFastProperty(String* name,
+                                  Object* value,
+                                  PropertyAttributes attributes) {
+  // Normalize the object if the name is not a real identifier.
+  StringInputBuffer buffer(name);
+  if (!Scanner::IsIdentifier(&buffer)) {
+    Object* obj = NormalizeProperties();
+    if (obj->IsFailure()) return obj;
+    return AddSlowProperty(name, value, attributes);
+  }
+
+  // Compute the new index for new field.
+  int index = map()->NextFreePropertyIndex();
+
+  // Allocate new instance descriptors with (name, index) added
+  FieldDescriptor fd(name, index, attributes);
+  Object* new_descriptors =
+      map()->instance_descriptors()->CopyInsert(&fd, true);
+  if (new_descriptors->IsFailure()) return new_descriptors;
+
+  // Only allow map transition if the object's map is NOT equal to the
+  // global object_function's map and there is not a transition for name.
+  bool allow_map_transition =
+      !map()->instance_descriptors()->Contains(name) &&
+      (Top::context()->global_context()->object_function()->map() != map());
+
+  if (map()->unused_property_fields() > 0) {
+    ASSERT(index < properties()->length());
+    // Allocate a new map for the object.
+    Object* new_map = map()->Copy();
+    if (new_map->IsFailure()) return new_map;
+    if (allow_map_transition) {
+      // Allocate new instance descriptors for the old map with map transition.
+      MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
+      Object* old_descriptors = map()->instance_descriptors()->CopyInsert(&d);
+      if (old_descriptors->IsFailure()) return old_descriptors;
+      // We have now allocate all the necessary object and change can be
+      // applied.
+      map()->set_instance_descriptors(DescriptorArray::cast(old_descriptors));
+    }
+    Map::cast(new_map)->
+        set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+    Map::cast(new_map)->
+        set_unused_property_fields(map()->unused_property_fields() - 1);
+    set_map(Map::cast(new_map));
+    properties()->set(index, value);
+  } else {
+    ASSERT(map()->unused_property_fields() == 0);
+
+    // Too many fast properties: switch to dictionary mode instead of
+    // growing further.
+    static const int kFastNofProperties = 8;
+    if (properties()->length() > kFastNofProperties) {
+      Object* obj = NormalizeProperties();
+      if (obj->IsFailure()) return obj;
+      return AddSlowProperty(name, value, attributes);
+    }
+
+    // Grow by a few extra slots so subsequent additions are cheap.
+    static const int kExtraFields = 3;
+    // Make room for the new value
+    Object* values =
+        properties()->CopySize(properties()->length() + kExtraFields);
+    if (values->IsFailure()) return values;
+    FixedArray::cast(values)->set(index, value);
+
+    // Allocate a new map for the object.
+    Object* new_map = map()->Copy();
+    if (new_map->IsFailure()) return new_map;
+
+    if (allow_map_transition) {
+      MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
+      // Allocate a new instance descriptors for the old map with map
+      // transition.
+      Object* old_descriptors = map()->instance_descriptors()->CopyInsert(&d);
+      if (old_descriptors->IsFailure()) return old_descriptors;
+
+      // We have now allocate all the necessary object and change can be
+      // applied.
+      map()->set_instance_descriptors(DescriptorArray::cast(old_descriptors));
+    }
+
+    Map::cast(new_map)->
+        set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+    Map::cast(new_map)->
+        set_unused_property_fields(kExtraFields - 1);
+    set_map(Map::cast(new_map));
+    set_properties(FixedArray::cast(values));
+  }
+
+  return value;
+}
+
+
+// Add a property whose value is a constant JSFunction.  The function is
+// stored directly in the new map's instance descriptors instead of in the
+// properties array.  Returns the function, or a Failure on allocation
+// failure (in which case the object is unchanged).
+Object* JSObject::AddConstantFunctionProperty(String* name,
+                                              JSFunction* function,
+                                              PropertyAttributes attributes) {
+  // Allocate new instance descriptors with (name, function) added.
+  ConstantFunctionDescriptor d(name, function, attributes);
+  Object* new_descriptors =
+      map()->instance_descriptors()->CopyInsert(&d, true);
+  if (new_descriptors->IsFailure()) return new_descriptors;
+
+  // Allocate a new map for the object.
+  Object* new_map = map()->Copy();
+  if (new_map->IsFailure()) return new_map;
+
+  // All allocations succeeded; install the descriptors and switch maps.
+  DescriptorArray* descriptors = DescriptorArray::cast(new_descriptors);
+  Map::cast(new_map)->set_instance_descriptors(descriptors);
+  set_map(Map::cast(new_map));
+
+  return function;
+}
+
+
+// Replace an existing CONSTANT_FUNCTION property with a new value.
+// Returns the value, or a Failure on allocation failure.
+Object* JSObject::ReplaceConstantFunctionProperty(String* name,
+                                                  Object* value) {
+  // There are two situations to handle here:
+  // 1: Replace a constant function with another function.
+  // 2: Replace a constant function with an object.
+  if (value->IsJSFunction()) {
+    JSFunction* function = JSFunction::cast(value);
+
+    // Copy the instance descriptors so the old map stays valid.
+    Object* new_descriptors = map()->instance_descriptors()->Copy();
+    if (new_descriptors->IsFailure()) return new_descriptors;
+
+    // Replace the function entry in the copied descriptors.
+    DescriptorArray* p = DescriptorArray::cast(new_descriptors);
+    for (DescriptorReader r(p); !r.eos(); r.advance()) {
+      if (r.Equals(name)) r.ReplaceConstantFunction(function);
+    }
+
+    // Allocate a new map for the object.
+    Object* new_map = map()->Copy();
+    if (new_map->IsFailure()) return new_map;
+
+    Map::cast(new_map)->
+        set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+    set_map(Map::cast(new_map));
+  } else {
+    // The value is not a function, so the property becomes an ordinary
+    // FIELD: allocate new instance descriptors with an updated property
+    // index and store the value in the properties array.
+    int index = map()->NextFreePropertyIndex();
+    Object* new_descriptors =
+        map()->instance_descriptors()->CopyReplace(name, index, NONE);
+    if (new_descriptors->IsFailure()) return new_descriptors;
+
+    if (map()->unused_property_fields() > 0) {
+      ASSERT(index < properties()->length());
+
+      // Allocate a new map for the object.
+      Object* new_map = map()->Copy();
+      if (new_map->IsFailure()) return new_map;
+
+      Map::cast(new_map)->
+          set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+      Map::cast(new_map)->
+          set_unused_property_fields(map()->unused_property_fields()-1);
+      set_map(Map::cast(new_map));
+      properties()->set(index, value);
+    } else {
+      ASSERT(map()->unused_property_fields() == 0);
+      // Too many fast properties: normalize and retry in dictionary mode.
+      static const int kFastNofProperties = 20;
+      if (properties()->length() > kFastNofProperties) {
+        Object* obj = NormalizeProperties();
+        if (obj->IsFailure()) return obj;
+        return SetProperty(name, value, NONE);
+      }
+
+      // Grow the properties array with slack for future additions.
+      static const int kExtraFields = 5;
+      // Make room for the more properties.
+      Object* values =
+          properties()->CopySize(properties()->length() + kExtraFields);
+      if (values->IsFailure()) return values;
+      FixedArray::cast(values)->set(index, value);
+
+      // Allocate a new map for the object.
+      Object* new_map = map()->Copy();
+      if (new_map->IsFailure()) return new_map;
+
+      Map::cast(new_map)->
+          set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+      Map::cast(new_map)->
+          set_unused_property_fields(kExtraFields - 1);
+      set_map(Map::cast(new_map));
+      set_properties(FixedArray::cast(values));
+    }
+  }
+  return value;
+}
+
+
+// Add property in slow mode
+Object* JSObject::AddSlowProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes) {
+ PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ Object* result = property_dictionary()->AddStringEntry(name, value, details);
+ if (result->IsFailure()) return result;
+ if (property_dictionary() != result) {
+ set_properties(Dictionary::cast(result));
+ }
+ return value;
+}
+
+
+// Add a property that is known not to exist yet, dispatching to the
+// fast, constant-function, or slow variant depending on the object's
+// representation and the value's type.
+Object* JSObject::AddProperty(String* name,
+                              Object* value,
+                              PropertyAttributes attributes) {
+  if (HasFastProperties()) {
+    // Ensure the descriptor array does not get too big.
+    if (map()->instance_descriptors()->number_of_descriptors() <
+        DescriptorArray::kMaxNumberOfDescriptors) {
+      if (value->IsJSFunction()) {
+        // Functions are stored as constant-function descriptors.
+        return AddConstantFunctionProperty(name,
+                                           JSFunction::cast(value),
+                                           attributes);
+      } else {
+        return AddFastProperty(name, value, attributes);
+      }
+    } else {
+      // Normalize the object to prevent very large instance descriptors.
+      // This eliminates unwanted N^2 allocation and lookup behavior.
+      Object* obj = NormalizeProperties();
+      if (obj->IsFailure()) return obj;
+    }
+  }
+  return AddSlowProperty(name, value, attributes);
+}
+
+
+// Set a property after a named interceptor declined to handle the store.
+// Looks up the real property (ignoring the interceptor); updates it when
+// found, otherwise adds a fresh property.
+Object* JSObject::SetPropertyPostInterceptor(String* name,
+                                             Object* value,
+                                             PropertyAttributes attributes) {
+  // Check local property, ignore interceptor.
+  LookupResult lookup;
+  LocalLookupRealNamedProperty(name, &lookup);
+  if (!lookup.IsValid()) {
+    // Add real property.
+    return AddProperty(name, value, attributes);
+  }
+  return SetProperty(&lookup, name, value, attributes);
+}
+
+
+// Set a property through a named-property interceptor.  The embedder's
+// setter callback gets first refusal; if it returns a non-empty handle
+// the store is considered handled and the value is returned.  Otherwise
+// the store falls through to SetPropertyPostInterceptor.
+Object* JSObject::SetPropertyWithInterceptor(String* name,
+                                             Object* value,
+                                             PropertyAttributes attributes) {
+  // Handles protect the raw pointers across the embedder callback, which
+  // may allocate and trigger GC.
+  HandleScope scope;
+  Handle<JSObject> this_handle(this);
+  Handle<String> name_handle(name);
+  Handle<Object> value_handle(value);
+  Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+  if (!interceptor->setter()->IsUndefined()) {
+    Handle<Object> data_handle(interceptor->data());
+    LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
+    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
+                          v8::Utils::ToLocal(data_handle),
+                          v8::Utils::ToLocal(this_handle));
+    v8::NamedPropertySetter setter =
+        v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      // Never expose the internal hole value to the embedder; pass
+      // undefined instead.
+      Handle<Object> value_unhole(value->IsTheHole() ?
+                                  Heap::undefined_value() :
+                                  value);
+      result = setter(v8::Utils::ToLocal(name_handle),
+                      v8::Utils::ToLocal(value_unhole),
+                      info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    // A non-empty result means the interceptor handled the store.
+    if (!result.IsEmpty()) return *value_handle;
+  }
+  Object* raw_result = this_handle->SetPropertyPostInterceptor(*name_handle,
+                                                               *value_handle,
+                                                               attributes);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+// Convenience overload: perform a local lookup for the name and delegate
+// to the LookupResult-based SetProperty.
+Object* JSObject::SetProperty(String* name,
+                              Object* value,
+                              PropertyAttributes attributes) {
+  LookupResult lookup;
+  LocalLookup(name, &lookup);
+  return SetProperty(&lookup, name, value, attributes);
+}
+
+
+// Invoke the setter side of a CALLBACKS property.  The callback can be
+// stored in one of three representations: a Proxy wrapping a native
+// AccessorDescriptor (old internal API), an AccessorInfo (public API
+// accessor), or a FixedArray holding a JS getter/setter pair.
+Object* JSObject::SetPropertyWithCallback(Object* structure,
+                                          String* name,
+                                          Object* value,
+                                          JSObject* holder) {
+  HandleScope scope;
+
+  // We should never get here to initialize a const with the hole
+  // value since a const declaration would conflict with the setter.
+  ASSERT(!value->IsTheHole());
+  Handle<Object> value_handle(value);
+
+  // To accommodate both the old and the new api we switch on the
+  // data structure used to store the callbacks. Eventually proxy
+  // callbacks should be phased out.
+  if (structure->IsProxy()) {
+    AccessorDescriptor* callback =
+        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+    Object* obj = (callback->setter)(this, value, callback->data);
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (obj->IsFailure()) return obj;
+    return *value_handle;
+  }
+
+  if (structure->IsAccessorInfo()) {
+    // api style callbacks
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* call_obj = data->setter();
+    v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+    // A missing setter makes the store a silent no-op.
+    if (call_fun == NULL) return value;
+    Handle<JSObject> self(this);
+    Handle<JSObject> holder_handle(JSObject::cast(holder));
+    Handle<String> key(name);
+    Handle<Object> fun_data(data->data());
+    LOG(ApiNamedPropertyAccess("store", this, name));
+    v8::AccessorInfo info(v8::Utils::ToLocal(self),
+                          v8::Utils::ToLocal(fun_data),
+                          v8::Utils::ToLocal(holder_handle));
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      call_fun(v8::Utils::ToLocal(key),
+               v8::Utils::ToLocal(value_handle),
+               info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    return *value_handle;
+  }
+
+  if (structure->IsFixedArray()) {
+    // JS-level accessor pair: call the setter function if there is one,
+    // otherwise throw a TypeError.
+    Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+    if (setter->IsJSFunction()) {
+      Handle<JSFunction> fun(JSFunction::cast(setter));
+      Handle<JSObject> self(this);
+      bool has_pending_exception;
+      Object** argv[] = { value_handle.location() };
+      Execution::Call(fun, self, 1, argv, &has_pending_exception);
+      // Check for pending exception and return the result.
+      if (has_pending_exception) return Failure::Exception();
+    } else {
+      Handle<String> key(name);
+      Handle<Object> holder_handle(holder);
+      Handle<Object> args[2] = { key, holder_handle };
+      return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
+                                               HandleVector(args, 2)));
+    }
+    return *value_handle;
+  }
+
+  // No other callback representation exists.
+  UNREACHABLE();
+  return 0;
+}
+
+
+// Search the prototype chain (excluding the receiver itself) for a
+// CALLBACKS property with the given name that could intercept a store.
+// A read-only non-transition property shadows any setter further up the
+// chain, so the search stops with NotFound in that case.
+void JSObject::LookupCallbackSetterInPrototypes(String* name,
+                                                LookupResult* result) {
+  for (Object* pt = GetPrototype();
+       pt != Heap::null_value();
+       pt = pt->GetPrototype()) {
+    JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
+    if (result->IsValid()) {
+      // A read-only property blocks the store; report not-found.
+      if (!result->IsTransitionType() && result->IsReadOnly()) {
+        result->NotFound();
+        return;
+      }
+      if (result->type() == CALLBACKS) {
+        return;
+      }
+    }
+  }
+  result->NotFound();
+}
+
+
+// Look the name up in this object's instance descriptors and record the
+// outcome in *result.
+void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
+  DescriptorArray* descriptors = map()->instance_descriptors();
+  int entry = descriptors->Search(name);
+  if (entry == DescriptorArray::kNotFound) {
+    result->NotFound();
+    return;
+  }
+  result->DescriptorResult(this, descriptors->GetDetails(entry), entry);
+}
+
+
+// Look up a "real" named property on this object only — descriptors for
+// fast objects, the property dictionary for slow ones — never consulting
+// interceptors or the prototype chain.  Caching is disallowed for
+// uninitialized constants (hole values) and for dictionary-mode objects.
+void JSObject::LocalLookupRealNamedProperty(String* name,
+                                            LookupResult* result) {
+  if (HasFastProperties()) {
+    LookupInDescriptor(name, result);
+    if (result->IsValid()) {
+      ASSERT(result->holder() == this && result->type() != NORMAL);
+      // Disallow caching for uninitialized constants. These can only
+      // occur as fields.
+      if (result->IsReadOnly() && result->type() == FIELD &&
+          properties()->get(result->GetFieldIndex())->IsTheHole()) {
+        result->DisallowCaching();
+      }
+      return;
+    }
+  } else {
+    int entry = property_dictionary()->FindStringEntry(name);
+    if (entry != -1) {
+      // Make sure to disallow caching for uninitialized constants
+      // found in the dictionary-mode objects.
+      if (property_dictionary()->ValueAt(entry)->IsTheHole()) {
+        result->DisallowCaching();
+      }
+      result->DictionaryResult(this, entry);
+      return;
+    }
+    // Slow case object skipped during lookup. Do not use inline caching.
+    result->DisallowCaching();
+  }
+  result->NotFound();
+}
+
+
+// Look up a real named property on this object, falling back to the
+// prototype chain when the local lookup finds nothing.
+void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
+  LocalLookupRealNamedProperty(name, result);
+  if (!result->IsProperty()) {
+    LookupRealNamedPropertyInPrototypes(name, result);
+  }
+}
+
+
+// Walk the prototype chain looking for a real named property; stops at
+// the first NORMAL, FIELD, CONSTANT_FUNCTION, or CALLBACKS result.
+void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
+                                                   LookupResult* result) {
+  Object* current = GetPrototype();
+  while (current != Heap::null_value()) {
+    JSObject* holder = JSObject::cast(current);
+    holder->LocalLookupRealNamedProperty(name, result);
+    if (result->IsValid() &&
+        (result->type() == NORMAL ||
+         result->type() == FIELD ||
+         result->type() == CONSTANT_FUNCTION ||
+         result->type() == CALLBACKS)) {
+      return;
+    }
+    current = holder->GetPrototype();
+  }
+  result->NotFound();
+}
+
+
+// We only need to deal with CALLBACKS and INTERCEPTORS
+Object* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
+ String* name,
+ Object* value) {
+ if (!result->IsProperty()) {
+ LookupCallbackSetterInPrototypes(name, result);
+ }
+
+ if (result->IsProperty()) {
+ if (!result->IsReadOnly()) {
+ switch (result->type()) {
+ case CALLBACKS: {
+ Object* obj = result->GetCallbackObject();
+ if (obj->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(obj);
+ if (info->all_can_write()) {
+ return SetPropertyWithCallback(result->GetCallbackObject(),
+ name,
+ value,
+ result->holder());
+ }
+ }
+ break;
+ }
+ case INTERCEPTOR: {
+ // Try lookup real named properties. Note that only property can be
+ // set is callbacks marked as ALL_CAN_WRITE on the prototype chain.
+ LookupResult r;
+ LookupRealNamedProperty(name, &r);
+ if (r.IsProperty()) {
+ return SetPropertyWithFailedAccessCheck(&r, name, value);
+ }
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+ }
+
+ Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return value;
+}
+
+
+// Core property-store dispatcher.  Using the lookup result for name,
+// routes the store to the matching mechanism: dictionary slot, field,
+// map transition, constant-function replacement, callback, or
+// interceptor.  Falls back to prototype-chain setters and finally to
+// adding a new property.
+Object* JSObject::SetProperty(LookupResult* result,
+                              String* name,
+                              Object* value,
+                              PropertyAttributes attributes) {
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded()
+      && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    return SetPropertyWithFailedAccessCheck(result, name, value);
+  }
+
+  if (result->IsValid()) {
+    // Lazily-loaded objects must be materialized before the store.
+    if (!result->IsLoaded()) {
+      return SetLazyProperty(result, name, value, attributes);
+    }
+    // Writing to a read-only property is a silent no-op.
+    if (result->IsReadOnly() && !result->IsTransitionType()) return value;
+    switch (result->type()) {
+      case NORMAL:
+        property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
+        return value;
+      case FIELD:
+        properties()->set(result->GetFieldIndex(), value);
+        return value;
+      case MAP_TRANSITION:
+        if (attributes == result->GetAttributes()) {
+          // Only use map transition if attributes matches.
+          return AddFastPropertyUsingMap(result->GetTransitionMap(),
+                                         name,
+                                         value);
+        } else {
+          return AddFastProperty(name, value, attributes);
+        }
+      case CONSTANT_FUNCTION:
+        if (value == result->GetConstantFunction()) return this;
+        // Only replace the function if necessary.
+        return ReplaceConstantFunctionProperty(name, value);
+      case CALLBACKS:
+        return SetPropertyWithCallback(result->GetCallbackObject(),
+                                       name,
+                                       value,
+                                       result->holder());
+      case INTERCEPTOR:
+        return SetPropertyWithInterceptor(name, value, attributes);
+      case CONSTANT_TRANSITION:
+        break;
+    }
+  }
+
+  // We could not find a local property so let's check whether there is an
+  // accessor that wants to handle the property.
+  LookupResult accessor_result;
+  LookupCallbackSetterInPrototypes(name, &accessor_result);
+  if (accessor_result.IsValid()) {
+    return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+                                   name,
+                                   value,
+                                   accessor_result.holder());
+  }
+
+  // The property was not found.
+  return AddProperty(name, value, attributes);
+}
+
+
+// Set a real local property, even if it is READ_ONLY. If the property is not
+// present, add it with attributes NONE. This code is the same as in
+// SetProperty, except for the check for IsReadOnly and the check for a
+// callback setter.
+// Set a real local property, even if it is READ_ONLY. If the property is
+// not present, add it with attributes NONE. This code is the same as in
+// SetProperty, except for the check for IsReadOnly and the check for a
+// callback setter in the prototype chain.
+Object* JSObject::IgnoreAttributesAndSetLocalProperty(String* name,
+                                                      Object* value) {
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  LookupResult result;
+  LocalLookup(name, &result);
+
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+    return value;
+  }
+
+  if (result.IsValid()) {
+    // Dispatch on the representation of the existing property; see
+    // SetProperty for the meaning of each case.
+    switch (result.type()) {
+      case NORMAL:
+        property_dictionary()->ValueAtPut(result.GetDictionaryEntry(), value);
+        return value;
+      case FIELD:
+        properties()->set(result.GetFieldIndex(), value);
+        return value;
+      case MAP_TRANSITION:
+        return AddFastPropertyUsingMap(result.GetTransitionMap(), name, value);
+      case CONSTANT_FUNCTION:
+        return ReplaceConstantFunctionProperty(name, value);
+      case CALLBACKS:
+        return SetPropertyWithCallback(result.GetCallbackObject(), name, value,
+                                       result.holder());
+      case INTERCEPTOR:
+        return SetPropertyWithInterceptor(name, value, NONE);
+      case CONSTANT_TRANSITION:
+        break;
+    }
+  }
+
+  // The property was not found.
+  return AddProperty(name, value, NONE);
+}
+
+
+// Compute a property's attributes after a named interceptor declined.
+// Checks the real local property first; optionally continues up the
+// prototype chain when continue_search is set.
+PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
+    JSObject* receiver,
+    String* name,
+    bool continue_search) {
+  // Check local property, ignore interceptor.
+  LookupResult lookup;
+  LocalLookupRealNamedProperty(name, &lookup);
+  if (lookup.IsValid()) return lookup.GetAttributes();
+
+  if (continue_search) {
+    // Continue searching via the prototype chain.
+    Object* proto = GetPrototype();
+    if (proto != Heap::null_value()) {
+      JSObject* proto_object = JSObject::cast(proto);
+      return proto_object->GetPropertyAttributeWithReceiver(receiver, name);
+    }
+  }
+  return ABSENT;
+}
+
+
+// Compute a property's attributes through a named-property interceptor.
+// Prefers the embedder's query callback; falls back to probing with the
+// getter callback (a non-empty getter result maps to attributes NONE),
+// and finally to the post-interceptor lookup.
+PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
+    JSObject* receiver,
+    String* name,
+    bool continue_search) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+  Handle<String> name_handle(name);
+  Handle<Object> data_handle(interceptor->data());
+  v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
+                        v8::Utils::ToLocal(data_handle),
+                        v8::Utils::ToLocal(holder_handle));
+  if (!interceptor->query()->IsUndefined()) {
+    v8::NamedPropertyQuery query =
+        v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
+    LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
+    v8::Handle<v8::Boolean> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      result = query(v8::Utils::ToLocal(name_handle), info);
+    }
+    if (!result.IsEmpty()) {
+      // Convert the boolean result to a property attribute
+      // specification.
+      return result->IsTrue() ? NONE : ABSENT;
+    }
+  } else if (!interceptor->getter()->IsUndefined()) {
+    // No query callback: probe existence through the getter instead.
+    v8::NamedPropertyGetter getter =
+        v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+    LOG(ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      result = getter(v8::Utils::ToLocal(name_handle), info);
+    }
+    if (!result.IsEmpty()) return NONE;
+  }
+  return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
+                                                            *name_handle,
+                                                            continue_search);
+}
+
+
+// Compute the attributes of a property seen through a given receiver.
+// Array-index keys are treated as elements; named keys go through the
+// full lookup.
+PropertyAttributes JSObject::GetPropertyAttributeWithReceiver(
+    JSObject* receiver,
+    String* key) {
+  // Array indices are elements, not named properties.
+  uint32_t element_index;
+  if (key->AsArrayIndex(&element_index)) {
+    return HasElementWithReceiver(receiver, element_index) ? NONE : ABSENT;
+  }
+  // Named property.
+  LookupResult lookup;
+  Lookup(key, &lookup);
+  return GetPropertyAttribute(receiver, &lookup, key, true);
+}
+
+
+// Compute a property's attributes from a lookup result, honoring access
+// checks and delegating INTERCEPTOR results to the interceptor path.
+// Returns ABSENT when the property does not exist or access is denied.
+PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
+                                                  LookupResult* result,
+                                                  String* name,
+                                                  bool continue_search) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return ABSENT;
+  }
+  if (result->IsValid()) {
+    switch (result->type()) {
+      case NORMAL:  // fall through
+      case FIELD:
+      case CONSTANT_FUNCTION:
+      case CALLBACKS:
+        return result->GetAttributes();
+      case INTERCEPTOR:
+        return result->holder()->
+          GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
+      default:
+        break;
+    }
+  }
+  return ABSENT;
+}
+
+
+// Compute the attributes of a local (own) property; does not search the
+// prototype chain.
+PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
+  // Check whether the name is an array index.
+  uint32_t element_index;
+  if (name->AsArrayIndex(&element_index)) {
+    return HasLocalElement(element_index) ? NONE : ABSENT;
+  }
+  // Named property.
+  LookupResult lookup;
+  LocalLookup(name, &lookup);
+  return GetPropertyAttribute(this, &lookup, name, false);
+}
+
+
+// Convert a fast-mode object to slow (dictionary) mode: copy every real
+// descriptor-based property into a freshly allocated dictionary, then
+// install a new map with empty instance descriptors.  No-op for objects
+// already in slow mode.  Returns this, or a Failure on allocation failure
+// (in which case the object is unchanged).
+Object* JSObject::NormalizeProperties() {
+  if (!HasFastProperties()) return this;
+
+  // Allocate new content.  Size with slack so the initial inserts do not
+  // immediately force a rehash.
+  Object* obj =
+      Dictionary::Allocate(map()->NumberOfDescribedProperties() * 2 + 4);
+  if (obj->IsFailure()) return obj;
+  Dictionary* dictionary = Dictionary::cast(obj);
+
+  for (DescriptorReader r(map()->instance_descriptors());
+       !r.eos();
+       r.advance()) {
+    PropertyDetails details = r.GetDetails();
+    switch (details.type()) {
+      case CONSTANT_FUNCTION: {
+        // Constant functions become NORMAL dictionary entries.
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), NORMAL, details.index());
+        Object* value = r.GetConstantFunction();
+        Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
+        if (result->IsFailure()) return result;
+        dictionary = Dictionary::cast(result);
+        break;
+      }
+      case FIELD: {
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), NORMAL, details.index());
+        Object* value = properties()->get(r.GetFieldIndex());
+        Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
+        if (result->IsFailure()) return result;
+        dictionary = Dictionary::cast(result);
+        break;
+      }
+      case CALLBACKS: {
+        // Callbacks keep their CALLBACKS type in the dictionary.
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), CALLBACKS, details.index());
+        Object* value = r.GetCallbacksObject();
+        Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
+        if (result->IsFailure()) return result;
+        dictionary = Dictionary::cast(result);
+        break;
+      }
+      default:
+        break;
+    }
+  }
+
+  // Copy the next enumeration index from instance descriptor.
+  int index = map()->instance_descriptors()->NextEnumerationIndex();
+  dictionary->SetNextEnumerationIndex(index);
+
+  // Descriptors with type MAP_TRANSITION are ignored.
+
+  // Allocate new map.
+  obj = map()->Copy();
+  if (obj->IsFailure()) return obj;
+
+  set_map(Map::cast(obj));
+  map()->
+    set_instance_descriptors(DescriptorArray::cast(Heap::empty_fixed_array()));
+
+  // We have now allocated all the necessary objects, so the change can be
+  // applied.
+  map()->set_unused_property_fields(0);
+  set_properties(dictionary);
+
+  Counters::props_to_dictionary.Increment();
+
+#ifdef DEBUG
+  if (FLAG_trace_normalization) {
+    PrintF("Object properties have been normalized:\n");
+    Print();
+  }
+#endif
+  return this;
+}
+
+
+// Convert a dictionary-mode object back to fast properties, leaving the
+// requested number of unused in-object fields.  No-op for objects that
+// are already fast.
+Object* JSObject::TransformToFastProperties(int unused_property_fields) {
+  if (HasFastProperties()) return this;
+  Dictionary* dictionary = property_dictionary();
+  return dictionary->TransformPropertiesToFastFor(this,
+                                                  unused_property_fields);
+}
+
+
+// Convert fast elements (a FixedArray) to a number dictionary, copying
+// every non-hole entry.  No-op when elements are already a dictionary.
+// Returns this, or a Failure on allocation failure.
+Object* JSObject::NormalizeElements() {
+  if (!HasFastElements()) return this;
+
+  // Get number of entries.
+  FixedArray* array = FixedArray::cast(elements());
+
+  // Compute the effective length: JSArrays may use only a prefix of the
+  // backing store.
+  int length = IsJSArray() ?
+               Smi::cast(JSArray::cast(this)->length())->value() :
+               array->length();
+  Object* obj = Dictionary::Allocate(length);
+  if (obj->IsFailure()) return obj;
+  Dictionary* dictionary = Dictionary::cast(obj);
+  // Copy entries.
+  for (int i = 0; i < length; i++) {
+    Object* value = array->get(i);
+    if (!value->IsTheHole()) {
+      PropertyDetails details = PropertyDetails(NONE, NORMAL);
+      Object* result = dictionary->AddNumberEntry(i, array->get(i), details);
+      if (result->IsFailure()) return result;
+      dictionary = Dictionary::cast(result);
+    }
+  }
+  // Switch to using the dictionary as the backing storage for elements.
+  set_elements(dictionary);
+
+  Counters::elements_to_dictionary.Increment();
+
+#ifdef DEBUG
+  if (FLAG_trace_normalization) {
+    PrintF("Object elements have been normalized:\n");
+    Print();
+  }
+#endif
+
+  return this;
+}
+
+
+// Delete a named property after the interceptor declined.  Normalizes
+// the object first so the removal can be done on the dictionary.
+// Returns true_value when the property is gone (including when it never
+// existed), or a Failure on allocation failure.
+Object* JSObject::DeletePropertyPostInterceptor(String* name) {
+  // Check local property, ignore interceptor.
+  LookupResult result;
+  LocalLookupRealNamedProperty(name, &result);
+  if (!result.IsValid()) return Heap::true_value();
+
+  // Normalize object if needed.
+  Object* obj = NormalizeProperties();
+  if (obj->IsFailure()) return obj;
+
+  ASSERT(!HasFastProperties());
+  // Attempt to remove the property from the property dictionary.
+  Dictionary* dictionary = property_dictionary();
+  int entry = dictionary->FindStringEntry(name);
+  if (entry != -1) return dictionary->DeleteProperty(entry);
+  return Heap::true_value();
+}
+
+
+// Delete a named property through the named-property interceptor.  A
+// non-empty boolean result from the embedder's deleter callback is the
+// final answer; otherwise deletion falls through to the post-interceptor
+// path.
+Object* JSObject::DeletePropertyWithInterceptor(String* name) {
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+  Handle<String> name_handle(name);
+  Handle<JSObject> this_handle(this);
+  if (!interceptor->deleter()->IsUndefined()) {
+    v8::NamedPropertyDeleter deleter =
+        v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
+    Handle<Object> data_handle(interceptor->data());
+    LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
+    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
+                          v8::Utils::ToLocal(data_handle),
+                          v8::Utils::ToLocal(this_handle));
+    v8::Handle<v8::Boolean> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      result = deleter(v8::Utils::ToLocal(name_handle), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!result.IsEmpty()) {
+      ASSERT(result->IsBoolean());
+      return *v8::Utils::OpenHandle(*result);
+    }
+  }
+  Object* raw_result = this_handle->DeletePropertyPostInterceptor(*name_handle);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+// Delete an element after the indexed interceptor declined.  Fast
+// elements are deleted by writing the hole; dictionary elements are
+// removed from the dictionary.  Always reports success for in-range or
+// absent indices.
+Object* JSObject::DeleteElementPostInterceptor(uint32_t index) {
+  if (HasFastElements()) {
+    uint32_t length = IsJSArray() ?
+      static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
+      static_cast<uint32_t>(FixedArray::cast(elements())->length());
+    if (index < length) {
+      FixedArray::cast(elements())->set_the_hole(index);
+    }
+    return Heap::true_value();
+  }
+  ASSERT(!HasFastElements());
+  Dictionary* dictionary = element_dictionary();
+  int entry = dictionary->FindNumberEntry(index);
+  if (entry != -1) return dictionary->DeleteProperty(entry);
+  return Heap::true_value();
+}
+
+
+// Delete an element through the indexed-property interceptor.  A missing
+// deleter callback refuses the deletion (false_value); a non-empty
+// boolean result from the callback is final; otherwise deletion falls
+// through to the post-interceptor path.
+Object* JSObject::DeleteElementWithInterceptor(uint32_t index) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+  if (interceptor->deleter()->IsUndefined()) return Heap::false_value();
+  v8::IndexedPropertyDeleter deleter =
+      v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
+  Handle<JSObject> this_handle(this);
+  Handle<Object> data_handle(interceptor->data());
+  LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
+  v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
+                        v8::Utils::ToLocal(data_handle),
+                        v8::Utils::ToLocal(this_handle));
+  v8::Handle<v8::Boolean> result;
+  {
+    // Leaving JavaScript.
+    VMState state(OTHER);
+    result = deleter(index, info);
+  }
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  if (!result.IsEmpty()) {
+    ASSERT(result->IsBoolean());
+    return *v8::Utils::OpenHandle(*result);
+  }
+  Object* raw_result = this_handle->DeleteElementPostInterceptor(index);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+// Delete an element, going through the indexed interceptor when one is
+// installed.  Fast elements are deleted by writing the hole; dictionary
+// elements are removed from the dictionary.
+Object* JSObject::DeleteElement(uint32_t index) {
+  if (HasIndexedInterceptor()) {
+    return DeleteElementWithInterceptor(index);
+  }
+
+  if (HasFastElements()) {
+    uint32_t length = IsJSArray() ?
+      static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
+      static_cast<uint32_t>(FixedArray::cast(elements())->length());
+    if (index < length) {
+      FixedArray::cast(elements())->set_the_hole(index);
+    }
+    return Heap::true_value();
+  } else {
+    Dictionary* dictionary = element_dictionary();
+    int entry = dictionary->FindNumberEntry(index);
+    if (entry != -1) return dictionary->DeleteProperty(entry);
+  }
+  return Heap::true_value();
+}
+
+
+// Implements the [[Delete]] internal method (ECMA-262, 3rd, 8.6.2.5).
+// Returns true_value / false_value for success / refusal, or a Failure on
+// allocation failure.  DontDelete properties refuse; interceptors and
+// lazily-loaded objects take their dedicated paths; otherwise the object
+// is normalized and the entry removed from the dictionary.
+Object* JSObject::DeleteProperty(String* name) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+    return Heap::false_value();
+  }
+
+  // ECMA-262, 3rd, 8.6.2.5
+  ASSERT(name->IsString());
+
+  uint32_t index;
+  if (name->AsArrayIndex(&index)) {
+    return DeleteElement(index);
+  } else {
+    LookupResult result;
+    LocalLookup(name, &result);
+    // Deleting a non-existent property succeeds trivially.
+    if (!result.IsValid()) return Heap::true_value();
+    if (result.IsDontDelete()) return Heap::false_value();
+    // Check for interceptor.
+    if (result.type() == INTERCEPTOR) {
+      return DeletePropertyWithInterceptor(name);
+    }
+    if (!result.IsLoaded()) {
+      return JSObject::cast(this)->DeleteLazyProperty(&result, name);
+    }
+    // Normalize object if needed.
+    Object* obj = NormalizeProperties();
+    if (obj->IsFailure()) return obj;
+    // Make sure the properties are normalized before removing the entry.
+    Dictionary* dictionary = property_dictionary();
+    int entry = dictionary->FindStringEntry(name);
+    if (entry != -1) return dictionary->DeleteProperty(entry);
+    return Heap::true_value();
+  }
+}
+
+
+// Check whether this object references another object.
+bool JSObject::ReferencesObject(Object* obj) {
+ AssertNoAllocation no_alloc;
+
+ // Is the object the constructor for this object?
+ if (map()->constructor() == obj) {
+ return true;
+ }
+
+ // Is the object the prototype for this object?
+ if (map()->prototype() == obj) {
+ return true;
+ }
+
+ // Check if the object is among the named properties.
+ Object* key = SlowReverseLookup(obj);
+ if (key != Heap::undefined_value()) {
+ return true;
+ }
+
+ // Check if the object is among the indexed properties.
+ if (HasFastElements()) {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : FixedArray::cast(elements())->length();
+ for (int i = 0; i < length; i++) {
+ Object* element = FixedArray::cast(elements())->get(i);
+ if (!element->IsTheHole() && element == obj) {
+ return true;
+ }
+ }
+ } else {
+ key = element_dictionary()->SlowReverseLookup(obj);
+ if (key != Heap::undefined_value()) {
+ return true;
+ }
+ }
+
+ // For functions check the context. Boilerplate functions do
+ // not have to be traversed since they have no real context.
+ if (IsJSFunction() && !JSFunction::cast(this)->IsBoilerplate()) {
+ // Get the constructor function for arguments array.
+ JSObject* arguments_boilerplate =
+ Top::context()->global_context()->arguments_boilerplate();
+ JSFunction* arguments_function =
+ JSFunction::cast(arguments_boilerplate->map()->constructor());
+
+ // Get the context and don't check if it is the global context.
+ JSFunction* f = JSFunction::cast(this);
+ Context* context = f->context();
+ if (context->IsGlobalContext()) {
+ return false;
+ }
+
+ // Check the non-special context slots.
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < context->length(); i++) {
+ // Only check JS objects.
+ if (context->get(i)->IsJSObject()) {
+ JSObject* ctxobj = JSObject::cast(context->get(i));
+ // If it is an arguments array check the content.
+ if (ctxobj->map()->constructor() == arguments_function) {
+ if (ctxobj->ReferencesObject(obj)) {
+ return true;
+ }
+ } else if (ctxobj == obj) {
+ return true;
+ }
+ }
+ }
+
+ // Check the context extension if any.
+ if (context->extension() != NULL) {
+ return context->extension()->ReferencesObject(obj);
+ }
+ }
+
+ // No references to object.
+ return false;
+}
+
+
// Tests for the fast common case for property enumeration:
// - this object has an enum cache
// - this object has no elements
// - no prototype has enumerable properties/elements
// - neither this object nor any prototype has interceptors
bool JSObject::IsSimpleEnum() {
  // Arguments objects are excluded from the fast case outright.
  JSObject* arguments_boilerplate =
      Top::context()->global_context()->arguments_boilerplate();
  JSFunction* arguments_function =
      JSFunction::cast(arguments_boilerplate->map()->constructor());
  if (IsAccessCheckNeeded()) return false;
  if (map()->constructor() == arguments_function) return false;

  // Walk the prototype chain (terminated by null) and bail out on the
  // first object that breaks one of the conditions above.
  for (Object* o = this;
       o != Heap::null_value();
       o = JSObject::cast(o)->GetPrototype()) {
    JSObject* curr = JSObject::cast(o);
    if (!curr->HasFastProperties()) return false;
    if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
    if (curr->NumberOfEnumElements() > 0) return false;
    if (curr->HasNamedInterceptor()) return false;
    if (curr->HasIndexedInterceptor()) return false;
    // Prototypes may not contribute any enumerable properties; only the
    // receiver itself may have a non-empty enum cache.
    if (curr != this) {
      FixedArray* curr_fixed_array =
          FixedArray::cast(curr->map()->instance_descriptors()->GetEnumCache());
      if (curr_fixed_array->length() > 0) {
        return false;
      }
    }
  }
  return true;
}
+
+
+int Map::NumberOfDescribedProperties() {
+ int result = 0;
+ for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
+ if (!r.IsTransition()) result++;
+ }
+ return result;
+}
+
+
+int Map::PropertyIndexFor(String* name) {
+ for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
+ if (r.Equals(name)) return r.GetFieldIndex();
+ }
+ return -1;
+}
+
+
+int Map::NextFreePropertyIndex() {
+ int index = -1;
+ for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
+ if (r.type() == FIELD) {
+ if (r.GetFieldIndex() > index) index = r.GetFieldIndex();
+ }
+ }
+ return index+1;
+}
+
// Rebuilds this map's instance descriptors without any map transition
// entries. Returns this map, or an allocation failure if the new
// descriptor array cannot be allocated.
Object* Map::EnsureNoMapTransitions() {
  // Remove all map transitions.

  // Compute the size of the map transition entries to be removed.
  int nof = 0;
  for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
    if (r.IsTransition()) nof++;
  }

  // Nothing to strip; keep the existing descriptor array untouched.
  if (nof == 0) return this;

  // Allocate the new descriptor array.
  Object* result = DescriptorArray::Allocate(
      instance_descriptors()->number_of_descriptors() - nof);
  if (result->IsFailure()) return result;

  // Copy the content, skipping transitions.
  DescriptorWriter w(DescriptorArray::cast(result));
  for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
    if (!r.IsTransition()) w.WriteFrom(&r);
  }
  // The writer must be exactly full: nof transitions were removed.
  ASSERT(w.eos());

  set_instance_descriptors(DescriptorArray::cast(result));

  return this;
}
+
+
+AccessorDescriptor* Map::FindAccessor(String* name) {
+ for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
+ if (r.Equals(name) && r.type() == CALLBACKS) return r.GetCallbacks();
+ }
+ return NULL;
+}
+
+
// Looks up |name| on this object only (no prototype chain walk),
// recording the outcome in |result|.
void JSObject::LocalLookup(String* name, LookupResult* result) {
  ASSERT(name->IsString());

  // Do not use inline caching if the object is a non-global object
  // that requires access checks.
  if (!IsJSGlobalObject() && IsAccessCheckNeeded()) {
    result->DisallowCaching();
  }

  // Check __proto__ before interceptor.
  if (name->Equals(Heap::Proto_symbol())) {
    result->ConstantResult(this);
    return;
  }

  // Check for lookup interceptor except when bootstrapping.
  if (HasNamedInterceptor() && !Bootstrapper::IsActive()) {
    result->InterceptorResult(this);
    return;
  }

  // Fall back to the object's real named properties.
  LocalLookupRealNamedProperty(name, result);
}
+
+
// Looks up |name| along the prototype chain, stopping at the first
// valid non-transition result. Marks |result| not-found otherwise.
void JSObject::Lookup(String* name, LookupResult* result) {
  // Ecma-262 3rd 8.6.2.4
  for (Object* current = this;
       current != Heap::null_value();
       current = JSObject::cast(current)->GetPrototype()) {
    JSObject::cast(current)->LocalLookup(name, result);
    if (result->IsValid() && !result->IsTransitionType()) {
      return;
    }
  }
  result->NotFound();
}
+
+
// Returns the two-element fixed array holding the [getter, setter]
// pair registered for |name|, creating it (and normalizing this
// object's properties) when necessary. Returns undefined when the
// property cannot be (re)defined, or a failure on allocation problems.
Object* JSObject::DefineGetterSetter(String* name,
                                     PropertyAttributes attributes) {
  // Make sure that the top context does not change when doing callbacks or
  // interceptor calls.
  AssertNoContextChange ncc;

  // Check access rights if needed.
  if (IsAccessCheckNeeded() &&
      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
    return Heap::undefined_value();
  }

  // TryFlatten before operating on the string.
  name->TryFlatten();

  // Make sure name is not an index.
  uint32_t index;
  if (name->AsArrayIndex(&index)) return Heap::undefined_value();

  // Lookup the name.
  LookupResult result;
  LocalLookup(name, &result);
  if (result.IsValid()) {
    if (result.IsReadOnly()) return Heap::undefined_value();
    if (result.type() == CALLBACKS) {
      Object* obj = result.GetCallbackObject();
      // A fixed array means an accessor pair already exists; reuse it
      // so the other half of the pair is preserved.
      if (obj->IsFixedArray()) return obj;
    }
  }

  // Normalize object to make this operation simple.
  Object* ok = NormalizeProperties();
  if (ok->IsFailure()) return ok;

  // Allocate the fixed array to hold getter and setter.
  Object* array = Heap::AllocateFixedArray(2);
  if (array->IsFailure()) return array;

  // Update the dictionary with the new CALLBACKS property.
  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
  Object* dict =
      property_dictionary()->SetOrAddStringEntry(name, array, details);
  if (dict->IsFailure()) return dict;

  // Set the potential new dictionary on the object.
  set_properties(Dictionary::cast(dict));
  return array;
}
+
+
+Object* JSObject::DefineAccessor(String* name, bool is_getter, JSFunction* fun,
+ PropertyAttributes attributes) {
+ Object* array = DefineGetterSetter(name, attributes);
+ if (array->IsFailure() || array->IsUndefined()) return array;
+ FixedArray::cast(array)->set(is_getter ? 0 : 1, fun);
+ return this;
+}
+
+
+Object* JSObject::LookupAccessor(String* name, bool is_getter) {
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc;
+
+ // Check access rights if needed.
+ if (IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+ Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return Heap::undefined_value();
+ }
+
+ // Make sure name is not an index.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) return Heap::undefined_value();
+
+ // Make the lookup and include prototypes.
+ for (Object* obj = this;
+ obj != Heap::null_value();
+ obj = JSObject::cast(obj)->GetPrototype()) {
+ LookupResult result;
+ JSObject::cast(obj)->LocalLookup(name, &result);
+ if (result.IsValid()) {
+ if (result.IsReadOnly()) return Heap::undefined_value();
+ if (result.type() == CALLBACKS) {
+ Object* obj = result.GetCallbackObject();
+ if (obj->IsFixedArray()) {
+ return FixedArray::cast(obj)->get(is_getter
+ ? kGetterIndex
+ : kSetterIndex);
+ }
+ }
+ }
+ }
+ return Heap::undefined_value();
+}
+
+
+Object* JSObject::SlowReverseLookup(Object* value) {
+ if (HasFastProperties()) {
+ for (DescriptorReader r(map()->instance_descriptors());
+ !r.eos();
+ r.advance()) {
+ if (r.type() == FIELD) {
+ if (properties()->get(r.GetFieldIndex()) == value) {
+ return r.GetKey();
+ }
+ } else if (r.type() == CONSTANT_FUNCTION) {
+ if (r.GetConstantFunction() == value) {
+ return r.GetKey();
+ }
+ }
+ }
+ return Heap::undefined_value();
+ } else {
+ return property_dictionary()->SlowReverseLookup(value);
+ }
+}
+
+
+Object* Map::Copy() {
+ Object* result = Heap::AllocateMap(instance_type(), instance_size());
+ if (result->IsFailure()) return result;
+ Map::cast(result)->set_prototype(prototype());
+ Map::cast(result)->set_constructor(constructor());
+ Map::cast(result)->set_instance_descriptors(instance_descriptors());
+ // Please note instance_type and instance_size are set when allocated.
+ Map::cast(result)->set_unused_property_fields(unused_property_fields());
+ Map::cast(result)->set_bit_field(bit_field());
+ Map::cast(result)->ClearCodeCache();
+ return result;
+}
+
+
// Inserts the (name, code) pair into this map's code cache,
// overwriting an existing entry with the same name and flag class, or
// growing the cache when it is full. Returns this map, or an
// allocation failure when the cache cannot be extended.
Object* Map::UpdateCodeCache(String* name, Code* code) {
  ASSERT(code->state() == MONOMORPHIC);
  FixedArray* cache = code_cache();

  // When updating the code cache we disregard the type encoded in the
  // flags. This allows call constant stubs to overwrite call field
  // stubs, etc.
  Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());

  // First check whether we can update existing code cache without
  // extending it.
  int length = cache->length();
  for (int i = 0; i < length; i += 2) {
    Object* key = cache->get(i);
    if (key->IsUndefined()) {
      // Entries are filled front to back; the first undefined key is a
      // free slot and nothing follows it.
      cache->set(i + 0, name);
      cache->set(i + 1, code);
      return this;
    }
    if (name->Equals(String::cast(key))) {
      Code::Flags found = Code::cast(cache->get(i + 1))->flags();
      if (Code::RemoveTypeFromFlags(found) == flags) {
        cache->set(i + 1, code);
        return this;
      }
    }
  }

  // Extend the code cache with some new entries (at least one).
  int new_length = length + ((length >> 1) & ~1) + 2;
  ASSERT((new_length & 1) == 0);  // must be a multiple of two
  Object* result = cache->CopySize(new_length);
  if (result->IsFailure()) return result;

  // Add the (name, code) pair to the new cache.
  cache = FixedArray::cast(result);
  cache->set(length + 0, name);
  cache->set(length + 1, code);
  set_code_cache(cache);
  return this;
}
+
+
+Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
+ FixedArray* cache = code_cache();
+ int length = cache->length();
+ for (int i = 0; i < length; i += 2) {
+ Object* key = cache->get(i);
+ if (key->IsUndefined()) {
+ return key;
+ }
+ if (name->Equals(String::cast(key))) {
+ Code* code = Code::cast(cache->get(i + 1));
+ if (code->flags() == flags) return code;
+ }
+ }
+ return Heap::undefined_value();
+}
+
+
+bool Map::IncludedInCodeCache(Code* code) {
+ FixedArray* array = code_cache();
+ int len = array->length();
+ for (int i = 0; i < len; i += 2) {
+ if (array->get(i+1) == code) return true;
+ }
+ return false;
+}
+
+
// Visits every element pointer of this array: the pointer-sized words
// between the header and header + length.
void FixedArray::FixedArrayIterateBody(ObjectVisitor* v) {
  IteratePointers(v, kHeaderSize, kHeaderSize + length() * kPointerSize);
}
+
+
+static bool HasKey(FixedArray* array, Object* key) {
+ int len0 = array->length();
+ for (int i = 0; i < len0; i++) {
+ Object* element = array->get(i);
+ if (element->IsSmi() && key->IsSmi() && (element == key)) return true;
+ if (element->IsString() &&
+ key->IsString() && String::cast(element)->Equals(String::cast(key))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
// Returns the union of this array's keys and the elements of |array|
// (holes removed first). May return an allocation failure.
Object* FixedArray::AddKeysFromJSArray(JSArray* array) {
  // Remove array holes from array if any.
  Object* object = array->RemoveHoles();
  if (object->IsFailure()) return object;
  JSArray* compacted_array = JSArray::cast(object);

  // Allocate a temporary fixed array.
  int compacted_array_length = Smi::cast(compacted_array->length())->value();
  object = Heap::AllocateFixedArray(compacted_array_length);
  if (object->IsFailure()) return object;
  FixedArray* key_array = FixedArray::cast(object);

  // Copy the elements from the JSArray to the temporary fixed array.
  for (int i = 0; i < compacted_array_length; i++) {
    key_array->set(i, compacted_array->GetElement(i));
  }

  // Compute the union of this and the temporary fixed array.
  return UnionOfKeys(key_array);
}
+
+
// Returns a new array containing this array's elements followed by the
// elements of |other| not already present (per HasKey). Two passes are
// made over |other|: the first to size the allocation, the second to
// fill it. May return an allocation failure.
Object* FixedArray::UnionOfKeys(FixedArray* other) {
  int len0 = length();
  int len1 = other->length();
  // Optimize if either is empty.
  if (len0 == 0) return other;
  if (len1 == 0) return this;

  // Compute how many elements are not in this.
  int extra = 0;
  for (int y = 0; y < len1; y++) {
    if (!HasKey(this, other->get(y))) extra++;
  }

  // Allocate the result
  Object* obj = Heap::AllocateFixedArray(len0 + extra);
  if (obj->IsFailure()) return obj;
  // Fill in the content
  FixedArray* result = FixedArray::cast(obj);
  for (int i = 0; i < len0; i++) {
    result->set(i, get(i));
  }
  // Fill in the extra keys.
  int index = 0;
  for (int y = 0; y < len1; y++) {
    if (!HasKey(this, other->get(y))) {
      result->set(len0 + index, other->get(y));
      index++;
    }
  }
  // Both passes must agree on the number of new keys.
  ASSERT(extra == index);
  return result;
}
+
+
+Object* FixedArray::Copy() {
+ int len = length();
+ if (len == 0) return this;
+ Object* obj = Heap::AllocateFixedArray(len);
+ if (obj->IsFailure()) return obj;
+ FixedArray* result = FixedArray::cast(obj);
+ WriteBarrierMode mode = result->GetWriteBarrierMode();
+ // Copy the content
+ for (int i = 0; i < len; i++) {
+ result->set(i, get(i), mode);
+ }
+ result->set_map(map());
+ return result;
+}
+
+Object* FixedArray::CopySize(int new_length) {
+ if (new_length == 0) return Heap::empty_fixed_array();
+ Object* obj = Heap::AllocateFixedArray(new_length);
+ if (obj->IsFailure()) return obj;
+ FixedArray* result = FixedArray::cast(obj);
+ WriteBarrierMode mode = result->GetWriteBarrierMode();
+ // Copy the content
+ int len = length();
+ if (new_length < len) len = new_length;
+ for (int i = 0; i < len; i++) {
+ result->set(i, get(i), mode);
+ }
+ result->set_map(map());
+ return result;
+}
+
+
+void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
+ WriteBarrierMode mode = dest->GetWriteBarrierMode();
+ for (int index = 0; index < len; index++) {
+ dest->set(dest_pos+index, get(pos+index), mode);
+ }
+}
+
+
// Allocates a descriptor array with room for |number_of_descriptors|
// descriptors plus its content array (two slots per descriptor), and
// initializes the enumeration index. May return an allocation failure.
Object* DescriptorArray::Allocate(int number_of_descriptors) {
  // Allocate the descriptor array.
  Object* array = Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
  if (array->IsFailure()) return array;
  DescriptorArray* result = DescriptorArray::cast(array);

  // Allocate the content array and set it in the descriptor array.
  array = Heap::AllocateFixedArray(number_of_descriptors << 1);
  if (array->IsFailure()) return array;
  result->set(kContentArrayIndex, array);

  // Initialize the next enumeration index.
  result->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);

  return result;
}
+
+
// Installs |new_cache| as this array's enum cache. When no cache
// exists yet, |bridge_storage| becomes the bridge that holds both the
// cache and the previous enumeration index slot value.
void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
                                   FixedArray* new_cache) {
  ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
  if (HasEnumCache()) {
    // Reuse the existing bridge; just swap in the new cache.
    FixedArray::cast(get(kEnumerationIndexIndex))->
        set(kEnumCacheBridgeCacheIndex, new_cache);
  } else {
    if (length() == 0) return;  // Do nothing for empty descriptor array.
    FixedArray::cast(bridge_storage)->
        set(kEnumCacheBridgeCacheIndex, new_cache);
    // Preserve the old enumeration index inside the bridge before the
    // bridge replaces it in this array.
    fast_set(FixedArray::cast(bridge_storage),
             kEnumCacheBridgeEnumIndex,
             get(kEnumerationIndexIndex));
    set(kEnumerationIndexIndex, bridge_storage);
  }
}
+
+
+void DescriptorArray::ReplaceConstantFunction(int descriptor_number,
+ JSFunction* value) {
+ ASSERT(!Heap::InNewSpace(value));
+ FixedArray* content_array = GetContentArray();
+ fast_set(content_array, ToValueIndex(descriptor_number), value);
+}
+
+
// Returns a copy of this descriptor array with |desc| inserted in
// hash order, optionally dropping all map transition entries.
// May return an allocation failure.
Object* DescriptorArray::CopyInsert(Descriptor* desc,
                                    bool remove_map_transitions) {
  int transitions = 0;
  if (remove_map_transitions) {
    // Compute space from map transitions.
    for (DescriptorReader r(this); !r.eos(); r.advance()) {
      if (r.IsTransition()) transitions++;
    }
  }

  // Ensure the key is a symbol.
  Object* result = desc->KeyToSymbol();
  if (result->IsFailure()) return result;

  result = Allocate(number_of_descriptors() - transitions + 1);
  if (result->IsFailure()) return result;

  // Set the enumeration index in the descriptors and set the enumeration index
  // in the result.
  int index = NextEnumerationIndex();
  desc->SetEnumerationIndex(index);
  DescriptorArray::cast(result)->SetNextEnumerationIndex(index + 1);

  // Write the old content and the descriptor information.
  // Copy descriptors with hash <= the new key's hash, then the new
  // descriptor, then the rest, preserving hash order throughout.
  DescriptorWriter w(DescriptorArray::cast(result));
  DescriptorReader r(this);
  while (!r.eos() && r.GetKey()->Hash() <= desc->key()->Hash()) {
    if (!r.IsTransition() || !remove_map_transitions) {
      w.WriteFrom(&r);
    }
    r.advance();
  }
  w.Write(desc);
  while (!r.eos()) {
    if (!r.IsTransition() || !remove_map_transitions) {
      w.WriteFrom(&r);
    }
    r.advance();
  }
  // The writer must be exactly full.
  ASSERT(w.eos());

  return result;
}
+
+
// Returns a copy of this descriptor array in which the descriptor for
// |name| is replaced by a FIELD descriptor with the given field index
// and attributes (keeping its enumeration index). May return an
// allocation failure.
Object* DescriptorArray::CopyReplace(String* name,
                                     int index,
                                     PropertyAttributes attributes) {
  // Allocate the new descriptor array.
  Object* result = DescriptorArray::Allocate(number_of_descriptors());
  if (result->IsFailure()) return result;

  // Make sure only symbols are added to the instance descriptor.
  if (!name->IsSymbol()) {
    Object* result = Heap::LookupSymbol(name);
    if (result->IsFailure()) return result;
    name = String::cast(result);
  }

  DescriptorWriter w(DescriptorArray::cast(result));
  for (DescriptorReader r(this); !r.eos(); r.advance()) {
    if (r.Equals(name)) {
      // Substitute the replacement, preserving the enumeration index.
      FieldDescriptor d(name, index, attributes);
      d.SetEnumerationIndex(r.GetDetails().index());
      w.Write(&d);
    } else {
      w.WriteFrom(&r);
    }
  }

  // Copy the next enumeration index.
  DescriptorArray::cast(result)->
      SetNextEnumerationIndex(NextEnumerationIndex());

  ASSERT(w.eos());
  return result;
}
+
+
+bool DescriptorArray::IsSortedNoDuplicates() {
+ String* current_key = NULL;
+ uint32_t current = 0;
+ for (DescriptorReader r(this); !r.eos(); r.advance()) {
+ String* key = r.GetKey();
+ if (key == current_key) return false;
+ current_key = key;
+ uint32_t hash = r.GetKey()->Hash();
+ if (hash < current) return false;
+ current = hash;
+ }
+ return true;
+}
+
+
+void DescriptorArray::Sort() {
+ // In-place heap sort.
+ int len = number_of_descriptors();
+
+ // Bottom-up max-heap construction.
+ for (int i = 1; i < len; ++i) {
+ int child_index = i;
+ while (child_index > 0) {
+ int parent_index = ((child_index + 1) >> 1) - 1;
+ uint32_t parent_hash = GetKey(parent_index)->Hash();
+ uint32_t child_hash = GetKey(child_index)->Hash();
+ if (parent_hash < child_hash) {
+ Swap(parent_index, child_index);
+ } else {
+ break;
+ }
+ child_index = parent_index;
+ }
+ }
+
+ // Extract elements and create sorted array.
+ for (int i = len - 1; i > 0; --i) {
+ // Put max element at the back of the array.
+ Swap(0, i);
+ // Sift down the new top element.
+ int parent_index = 0;
+ while (true) {
+ int child_index = ((parent_index + 1) << 1) - 1;
+ if (child_index >= i) break;
+ uint32_t child1_hash = GetKey(child_index)->Hash();
+ uint32_t child2_hash = GetKey(child_index + 1)->Hash();
+ uint32_t parent_hash = GetKey(parent_index)->Hash();
+ if (child_index + 1 >= i || child1_hash > child2_hash) {
+ if (parent_hash > child1_hash) break;
+ Swap(parent_index, child_index);
+ parent_index = child_index;
+ } else {
+ if (parent_hash > child2_hash) break;
+ Swap(parent_index, child_index + 1);
+ parent_index = child_index + 1;
+ }
+ }
+ }
+
+ SLOW_ASSERT(IsSortedNoDuplicates());
+}
+
+
// Binary search for |name| among the descriptors in [low, high] by
// hash code, followed by a linear scan of the run of equal hashes to
// find an exact key match. Returns the descriptor index or kNotFound.
int DescriptorArray::BinarySearch(String* name, int low, int high) {
  uint32_t hash = name->Hash();

  while (low <= high) {
    int mid = (low + high) / 2;
    String* mid_name = GetKey(mid);
    uint32_t mid_hash = mid_name->Hash();

    if (mid_hash > hash) {
      high = mid - 1;
      continue;
    }
    if (mid_hash < hash) {
      low = mid + 1;
      continue;
    }
    // Found an element with the same hash-code.
    ASSERT(hash == mid_hash);
    // There might be more, so we find the first one and
    // check them all to see if we have a match.
    if (name == mid_name) return mid;
    // Back up to the first descriptor sharing this hash, then scan
    // forward comparing keys by content.
    while ((mid > low) && (GetKey(mid - 1)->Hash() == hash)) mid--;
    for (; (mid <= high) && (GetKey(mid)->Hash() == hash); mid++) {
      if (GetKey(mid)->Equals(name)) return mid;
    }
    break;
  }
  return kNotFound;
}
+
+
// Shared scratch buffer for the string traversal/conversion routines
// below; acquired exclusively through Access<StringInputBuffer>.
static StaticResource<StringInputBuffer> string_input_buffer;
+
+
+bool String::LooksValid() {
+ if (!Heap::Contains(this))
+ return false;
+ switch (representation_tag()) {
+ case kSeqStringTag:
+ case kConsStringTag:
+ case kSlicedStringTag:
+ case kExternalStringTag:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
// Converts the substring [offset, offset + length) of this string to a
// NUL-terminated, heap-allocated UTF-8 buffer. Returns NULL under
// ROBUST_STRING_TRAVERSAL when the string looks invalid. With
// DISALLOW_NULLS, embedded NUL characters are replaced by spaces. If
// |length_return| is non-NULL, it receives the UTF-8 byte count
// (excluding the terminator).
SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
                                     RobustnessFlag robust_flag,
                                     int offset,
                                     int length,
                                     int* length_return) {
  ASSERT(NativeAllocationChecker::allocation_allowed());
  if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
    return SmartPointer<char>(NULL);
  }

  // Negative length means to the end of the string.
  if (length < 0) length = kMaxInt - offset;

  // First pass: compute the size of the UTF-8 string. Start at the
  // specified offset.
  Access<StringInputBuffer> buffer(&string_input_buffer);
  buffer->Reset(offset, this);
  int character_position = offset;
  int utf8_bytes = 0;
  while (buffer->has_more()) {
    uint16_t character = buffer->GetNext();
    if (character_position < offset + length) {
      utf8_bytes += unibrow::Utf8::Length(character);
    }
    character_position++;
  }

  if (length_return) {
    *length_return = utf8_bytes;
  }

  char* result = NewArray<char>(utf8_bytes + 1);

  // Second pass: convert the UTF-16 string to a UTF-8 buffer. Start at
  // the specified offset.
  buffer->Rewind();
  buffer->Seek(offset);
  character_position = offset;
  int utf8_byte_position = 0;
  while (buffer->has_more()) {
    uint16_t character = buffer->GetNext();
    if (character_position < offset + length) {
      if (allow_nulls == DISALLOW_NULLS && character == 0) {
        character = ' ';
      }
      utf8_byte_position +=
          unibrow::Utf8::Encode(result + utf8_byte_position, character);
    }
    character_position++;
  }
  result[utf8_byte_position] = 0;
  return SmartPointer<char>(result);
}
+
+
+SmartPointer < char >String::ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robust_flag,
+ int* length_return) {
+ return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
+}
+
+
// Returns a pointer to this string's uc16 character data, starting at
// the first character.
const uc16* String::GetTwoByteData() {
  return GetTwoByteData(0);
}
+
+
// Returns a pointer to the uc16 character data starting at character
// |start|. Only non-ASCII, flat representations can yield a direct
// pointer; a cons string here is unreachable.
const uc16* String::GetTwoByteData(unsigned start) {
  ASSERT(!IsAscii());
  switch (representation_tag()) {
    case kSeqStringTag:
      return TwoByteString::cast(this)->TwoByteStringGetData(start);
    case kExternalStringTag:
      return ExternalTwoByteString::cast(this)->
          ExternalTwoByteStringGetData(start);
    case kSlicedStringTag: {
      SlicedString* sliced_string = SlicedString::cast(this);
      String* buffer = String::cast(sliced_string->buffer());
      if (buffer->StringIsConsString()) {
        ConsString* cons_string = ConsString::cast(buffer);
        // Flattened string: the second half must be empty, so the data
        // lives entirely in the first half.
        ASSERT(String::cast(cons_string->second())->length() == 0);
        buffer = String::cast(cons_string->first());
      }
      // Recurse into the backing store with the slice offset applied.
      return buffer->GetTwoByteData(start + sliced_string->start());
    }
    case kConsStringTag:
      UNREACHABLE();
      return NULL;
  }
  UNREACHABLE();
  return NULL;
}
+
+
+uc16* String::ToWideCString(RobustnessFlag robust_flag) {
+ ASSERT(NativeAllocationChecker::allocation_allowed());
+
+ if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
+ return NULL;
+ }
+
+ Access<StringInputBuffer> buffer(&string_input_buffer);
+ buffer->Reset(this);
+
+ uc16* result = NewArray<uc16>(length() + 1);
+
+ int i = 0;
+ while (buffer->has_more()) {
+ uint16_t character = buffer->GetNext();
+ result[i++] = character;
+ }
+ result[i] = 0;
+ return result;
+}
+
+
// Returns a raw pointer into this string's in-heap character payload
// (untagging the heap pointer and skipping the header), starting at
// character |start|.
const uc16* TwoByteString::TwoByteStringGetData(unsigned start) {
  return reinterpret_cast<uc16*>(
      reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
}
+
+
// Encodes up to |max_chars| characters starting at *offset_ptr into
// rbb's util_buffer, advancing *offset_ptr and rbb->remaining by the
// number of characters consumed. Stops early when the buffer is full.
void TwoByteString::TwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
                                                     unsigned* offset_ptr,
                                                     unsigned max_chars) {
  unsigned chars_read = 0;
  unsigned offset = *offset_ptr;
  while (chars_read < max_chars) {
    // Read the character directly from the in-heap payload.
    uint16_t c = *reinterpret_cast<uint16_t*>(
        reinterpret_cast<char*>(this) -
        kHeapObjectTag + kHeaderSize + offset * kShortSize);
    if (c <= kMaxAsciiCharCode) {
      // Fast case for ASCII characters. Cursor is an input output argument.
      if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
                                                          rbb->util_buffer,
                                                          rbb->capacity,
                                                          rbb->cursor)) {
        break;
      }
    } else {
      if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
                                                             rbb->util_buffer,
                                                             rbb->capacity,
                                                             rbb->cursor)) {
        break;
      }
    }
    offset++;
    chars_read++;
  }
  *offset_ptr = offset;
  rbb->remaining += chars_read;
}
+
+
// Returns a direct pointer to |max_chars| bytes of in-heap ASCII data
// starting at *offset_ptr, advancing the offset past them. No copying
// takes place; the pointer is only valid until the next GC.
const unibrow::byte* AsciiString::AsciiStringReadBlock(unsigned* remaining,
                                                       unsigned* offset_ptr,
                                                       unsigned max_chars) {
  // Cast const char* to unibrow::byte* (signedness difference).
  const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) -
      kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize;
  *remaining = max_chars;
  *offset_ptr += max_chars;
  return b;
}
+
+
// This will iterate unless the block of string data spans two 'halves' of
// a ConsString, in which case it will recurse. Since the block of string
// data to be read has a maximum size this limits the maximum recursion
// depth to something sane. Since C++ does not have tail call recursion
// elimination, the iteration must be explicit. Since this is not an
// -IntoBuffer method it can delegate to one of the efficient
// *AsciiStringReadBlock routines.
// Returns a pointer to the data (which may be rbb->util_buffer), and
// leaves *offset_ptr positioned after the characters read.
const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
                                                     unsigned* offset_ptr,
                                                     unsigned max_chars) {
  ConsString* current = this;
  unsigned offset = *offset_ptr;
  // Tracks how much of the original offset was consumed while
  // descending into right-hand sides, so *offset_ptr can be restored
  // relative to the original receiver.
  int offset_correction = 0;

  while (true) {
    String* left = String::cast(current->first());
    unsigned left_length = (unsigned)left->length();
    if (left_length > offset &&
        (max_chars <= left_length - offset ||
         (rbb->capacity <= left_length - offset &&
          (max_chars = left_length - offset, true)))) {  // comma operator!
      // Left hand side only - iterate unless we have reached the bottom of
      // the cons tree. The assignment on the left of the comma operator is
      // in order to make use of the fact that the -IntoBuffer routines can
      // produce at most 'capacity' characters. This enables us to postpone
      // the point where we switch to the -IntoBuffer routines (below) in order
      // to maximize the chances of delegating a big chunk of work to the
      // efficient *AsciiStringReadBlock routines.
      if (left->StringIsConsString()) {
        current = ConsString::cast(left);
        continue;
      } else {
        const unibrow::byte* answer =
            String::ReadBlock(left, rbb, &offset, max_chars);
        *offset_ptr = offset + offset_correction;
        return answer;
      }
    } else if (left_length <= offset) {
      // Right hand side only - iterate unless we have reached the bottom of
      // the cons tree.
      String* right = String::cast(current->second());
      offset -= left_length;
      offset_correction += left_length;
      if (right->StringIsConsString()) {
        current = ConsString::cast(right);
        continue;
      } else {
        const unibrow::byte* answer =
            String::ReadBlock(right, rbb, &offset, max_chars);
        *offset_ptr = offset + offset_correction;
        return answer;
      }
    } else {
      // The block to be read spans two sides of the ConsString, so we call the
      // -IntoBuffer version, which will recurse. The -IntoBuffer methods
      // are able to assemble data from several part strings because they use
      // the util_buffer to store their data and never return direct pointers
      // to their storage. We don't try to read more than the buffer capacity
      // here or we can get too much recursion.
      ASSERT(rbb->remaining == 0);
      ASSERT(rbb->cursor == 0);
      current->ConsStringReadBlockIntoBuffer(
          rbb,
          &offset,
          max_chars > rbb->capacity ? rbb->capacity : max_chars);
      *offset_ptr = offset + offset_correction;
      return rbb->util_buffer;
    }
  }
}
+
+
// Reads a block from the slice's backing store, translating the
// slice-relative offset to a backing-store offset and back.
const unibrow::byte* SlicedString::SlicedStringReadBlock(ReadBlockBuffer* rbb,
                                                         unsigned* offset_ptr,
                                                         unsigned max_chars) {
  String* backing = String::cast(buffer());
  unsigned offset = start() + *offset_ptr;
  unsigned length = backing->length();
  // Clamp to the end of the backing string. NOTE(review): assumes
  // offset <= length; otherwise the unsigned subtraction wraps —
  // presumably guaranteed by the slice invariants, confirm at callers.
  if (max_chars > length - offset) {
    max_chars = length - offset;
  }
  const unibrow::byte* answer =
      String::ReadBlock(backing, rbb, &offset, max_chars);
  *offset_ptr = offset - start();
  return answer;
}
+
+
// Returns the character at |index| from the externally held data.
uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
  ASSERT(index >= 0 && index < length());
  return resource()->data()[index];
}
+
+
// Returns a direct pointer into the external ASCII data starting at
// *offset_ptr, advancing the offset past |max_chars| characters.
const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
      unsigned* remaining,
      unsigned* offset_ptr,
      unsigned max_chars) {
  // Cast const char* to unibrow::byte* (signedness difference).
  const unibrow::byte* b =
      reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
  *remaining = max_chars;
  *offset_ptr += max_chars;
  return b;
}
+
+
// Returns a pointer into the external two-byte data, starting at
// character |start|.
const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
      unsigned start) {
  return resource()->data() + start;
}
+
+
// Returns the character at |index| from the externally held data.
uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
  ASSERT(index >= 0 && index < length());
  return resource()->data()[index];
}
+
+
+void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
+ ReadBlockBuffer* rbb,
+ unsigned* offset_ptr,
+ unsigned max_chars) {
+ unsigned chars_read = 0;
+ unsigned offset = *offset_ptr;
+ const uint16_t* data = resource()->data();
+ while (chars_read < max_chars) {
+ uint16_t c = data[offset];
+ if (c <= kMaxAsciiCharCode) {
+ // Fast case for ASCII characters. Cursor is an input output argument.
+ if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
+ rbb->util_buffer,
+ rbb->capacity,
+ rbb->cursor))
+ break;
+ } else {
+ if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
+ rbb->util_buffer,
+ rbb->capacity,
+ rbb->cursor))
+ break;
+ }
+ offset++;
+ chars_read++;
+ }
+ *offset_ptr = offset;
+ rbb->remaining += chars_read;
+}
+
+
// Bulk-copies up to |max_chars| in-heap ASCII characters, starting at
// *offset_ptr, into rbb's util_buffer at the current cursor, clamped
// to the remaining buffer capacity.
void AsciiString::AsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
                                                 unsigned* offset_ptr,
                                                 unsigned max_chars) {
  unsigned capacity = rbb->capacity - rbb->cursor;
  if (max_chars > capacity) max_chars = capacity;
  memcpy(rbb->util_buffer + rbb->cursor,
         reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize +
             *offset_ptr * kCharSize,
         max_chars);
  rbb->remaining += max_chars;
  *offset_ptr += max_chars;
  rbb->cursor += max_chars;
}
+
+
// Bulk-copies up to |max_chars| external ASCII characters, starting at
// *offset_ptr, into rbb's util_buffer at the current cursor, clamped
// to the remaining buffer capacity.
void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer(
      ReadBlockBuffer* rbb,
      unsigned* offset_ptr,
      unsigned max_chars) {
  unsigned capacity = rbb->capacity - rbb->cursor;
  if (max_chars > capacity) max_chars = capacity;
  memcpy(rbb->util_buffer + rbb->cursor,
         resource()->data() + *offset_ptr,
         max_chars);
  rbb->remaining += max_chars;
  *offset_ptr += max_chars;
  rbb->cursor += max_chars;
}
+
+
// This method determines the type of string involved and then copies
// a whole chunk of characters into a buffer, or returns a pointer to a buffer
// where they can be found. The pointer is not necessarily valid across a GC
// (see AsciiStringReadBlock). Flat ASCII representations return direct
// pointers; everything else goes through rbb->util_buffer.
const unibrow::byte* String::ReadBlock(String* input,
                                       ReadBlockBuffer* rbb,
                                       unsigned* offset_ptr,
                                       unsigned max_chars) {
  ASSERT(*offset_ptr <= static_cast<unsigned>(input->length()));
  if (max_chars == 0) {
    rbb->remaining = 0;
    return NULL;
  }
  switch (input->representation_tag()) {
    case kSeqStringTag:
      if (input->is_ascii()) {
        return AsciiString::cast(input)->AsciiStringReadBlock(&rbb->remaining,
                                                              offset_ptr,
                                                              max_chars);
      } else {
        TwoByteString::cast(input)->TwoByteStringReadBlockIntoBuffer(rbb,
                                                                     offset_ptr,
                                                                     max_chars);
        return rbb->util_buffer;
      }
    case kConsStringTag:
      return ConsString::cast(input)->ConsStringReadBlock(rbb,
                                                          offset_ptr,
                                                          max_chars);
    case kSlicedStringTag:
      return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
                                                              offset_ptr,
                                                              max_chars);
    case kExternalStringTag:
      if (input->is_ascii()) {
        return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
            &rbb->remaining,
            offset_ptr,
            max_chars);
      } else {
        ExternalTwoByteString::cast(input)->
            ExternalTwoByteStringReadBlockIntoBuffer(rbb,
                                                     offset_ptr,
                                                     max_chars);
        return rbb->util_buffer;
      }
    default:
      break;
  }

  UNREACHABLE();
  return 0;
}
+
+
// Repositions the buffer at character |pos| of the current input.
void StringInputBuffer::Seek(unsigned pos) {
  Reset(pos, input_);
}
+
+
// Repositions the buffer at character |pos| of the current input.
void SafeStringInputBuffer::Seek(unsigned pos) {
  Reset(pos, input_);
}
+
+
// This method determines the type of string involved and then copies
// a whole chunk of characters into a buffer. It can be used with strings
// that have been glued together to form a ConsString and which must cooperate
// to fill up a buffer. Unlike ReadBlock above, data always lands in
// rbb's util_buffer; no direct pointers are handed out.
void String::ReadBlockIntoBuffer(String* input,
                                 ReadBlockBuffer* rbb,
                                 unsigned* offset_ptr,
                                 unsigned max_chars) {
  ASSERT(*offset_ptr <= (unsigned)input->length());
  if (max_chars == 0) return;

  switch (input->representation_tag()) {
    case kSeqStringTag:
      if (input->is_ascii()) {
        AsciiString::cast(input)->AsciiStringReadBlockIntoBuffer(rbb,
                                                                 offset_ptr,
                                                                 max_chars);
        return;
      } else {
        TwoByteString::cast(input)->TwoByteStringReadBlockIntoBuffer(rbb,
                                                                     offset_ptr,
                                                                     max_chars);
        return;
      }
    case kConsStringTag:
      ConsString::cast(input)->ConsStringReadBlockIntoBuffer(rbb,
                                                             offset_ptr,
                                                             max_chars);
      return;
    case kSlicedStringTag:
      SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
                                                                 offset_ptr,
                                                                 max_chars);
      return;
    case kExternalStringTag:
      if (input->is_ascii()) {
        ExternalAsciiString::cast(input)->
            ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
      } else {
        ExternalTwoByteString::cast(input)->
            ExternalTwoByteStringReadBlockIntoBuffer(rbb,
                                                     offset_ptr,
                                                     max_chars);
      }
      return;
    default:
      break;
  }

  UNREACHABLE();
  return;
}
+
+
// Convenience wrapper: reads characters starting at *offset_ptr using
// |util_buffer| of the given |capacity| as scratch space.  On return
// *remaining holds the number of characters made available and
// *offset_ptr has been advanced past them.
const unibrow::byte* String::ReadBlock(String* input,
                                       unibrow::byte* util_buffer,
                                       unsigned capacity,
                                       unsigned* remaining,
                                       unsigned* offset_ptr) {
  ASSERT(*offset_ptr <= (unsigned)input->length());
  // Request everything up to the end of the string; the ReadBlockBuffer's
  // capacity bounds how much actually gets buffered per call.
  unsigned chars = input->length() - *offset_ptr;
  ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
  const unibrow::byte* answer = ReadBlock(input, &rbb, offset_ptr, chars);
  ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
  *remaining = rbb.remaining;
  return answer;
}
+
+
// Handle-based variant: |raw_input| is accessed through a Handle,
// presumably so the string stays valid if reading triggers a GC (see the
// GC caveat on the pointer-returning overload) -- TODO confirm.  Always
// copies into |util_buffer|, never returns an interior pointer.
const unibrow::byte* String::ReadBlock(String** raw_input,
                                       unibrow::byte* util_buffer,
                                       unsigned capacity,
                                       unsigned* remaining,
                                       unsigned* offset_ptr) {
  Handle<String> input(raw_input);
  ASSERT(*offset_ptr <= (unsigned)input->length());
  unsigned chars = input->length() - *offset_ptr;
  // Clamp: the result must fit entirely in util_buffer.
  if (chars > capacity) chars = capacity;
  ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
  ReadBlockIntoBuffer(*input, &rbb, offset_ptr, chars);
  ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
  *remaining = rbb.remaining;
  return rbb.util_buffer;
}
+
+
// This will iterate unless the block of string data spans two 'halves' of
// a ConsString, in which case it will recurse.  Since the block of string
// data to be read has a maximum size this limits the maximum recursion
// depth to something sane.  Since C++ does not have tail call recursion
// elimination, the iteration must be explicit.
void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
                                               unsigned* offset_ptr,
                                               unsigned max_chars) {
  ConsString* current = this;
  unsigned offset = *offset_ptr;
  // Total amount subtracted from offset while descending into right
  // children; added back so the caller sees a tree-relative offset.
  int offset_correction = 0;

  while (true) {
    String* left = String::cast(current->first());
    unsigned left_length = (unsigned)left->length();
    if (left_length > offset &&
        max_chars <= left_length - offset) {
      // Left hand side only - iterate unless we have reached the bottom of
      // the cons tree.
      if (left->StringIsConsString()) {
        current = ConsString::cast(left);
        continue;
      } else {
        String::ReadBlockIntoBuffer(left, rbb, &offset, max_chars);
        *offset_ptr = offset + offset_correction;
        return;
      }
    } else if (left_length <= offset) {
      // Right hand side only - iterate unless we have reached the bottom of
      // the cons tree.
      offset -= left_length;
      offset_correction += left_length;
      String* right = String::cast(current->second());
      if (right->StringIsConsString()) {
        current = ConsString::cast(right);
        continue;
      } else {
        String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
        *offset_ptr = offset + offset_correction;
        return;
      }
    } else {
      // The block to be read spans two sides of the ConsString, so we
      // recurse.  First recurse on the left.
      max_chars -= left_length - offset;
      String::ReadBlockIntoBuffer(left, rbb, &offset, left_length - offset);
      // We may have reached the max or there may not have been enough space
      // in the buffer for the characters in the left hand side.
      if (offset == left_length) {
        // Recurse on the right.
        String* right = String::cast(current->second());
        offset -= left_length;
        offset_correction += left_length;
        String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
      }
      *offset_ptr = offset + offset_correction;
      return;
    }
  }
}
+
+
// Reads characters from the backing string, translating positions by the
// slice's start() so the caller works with slice-relative offsets.
void SlicedString::SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
                                                   unsigned* offset_ptr,
                                                   unsigned max_chars) {
  String* backing = String::cast(buffer());
  unsigned offset = start() + *offset_ptr;  // Position in the backing string.
  unsigned length = backing->length();
  // Clamp the request to the end of the backing string.
  if (max_chars > length - offset) {
    max_chars = length - offset;
  }
  String::ReadBlockIntoBuffer(backing, rbb, &offset, max_chars);
  *offset_ptr = offset - start();  // Translate back to slice-relative.
}
+
+
// GC/visitor support: a ConsString's pointer fields are first and second,
// laid out contiguously.
void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
  IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize);
}
+
+
// Returns the character at |index| by walking down the cons tree
// iteratively, adjusting the index when descending into the right child.
uint16_t ConsString::ConsStringGet(int index) {
  ASSERT(index >= 0 && index < this->length());

  // Check for a flattened cons string: an empty second part means all
  // characters live in first.
  if (String::cast(second())->length() == 0) {
    return String::cast(first())->Get(index);
  }

  String* string = String::cast(this);

  while (true) {
    if (string->StringIsConsString()) {
      ConsString* cons_string = ConsString::cast(string);
      String* left = String::cast(cons_string->first());
      if (left->length() > index) {
        // Target is in the left part; index is already relative to it.
        string = left;
      } else {
        // Target is in the right part; make index relative to it.
        index -= left->length();
        string = String::cast(cons_string->second());
      }
    } else {
      return string->Get(index);
    }
  }

  // Never reached; keeps the compiler satisfied about the return path.
  UNREACHABLE();
  return 0;
}
+
+
// Flattening a SlicedString only requires flattening its backing string;
// the slice object itself is returned unchanged.
Object* SlicedString::SlicedStringFlatten() {
  // The SlicedString constructor should ensure that there are no
  // SlicedStrings that are constructed directly on top of other
  // SlicedStrings.
  String* buf = String::cast(buffer());
  ASSERT(!buf->StringIsSlicedString());
  if (buf->StringIsConsString()) {
    Object* ok = buf->Flatten();
    // Propagate allocation failure (e.g. when a GC is needed).
    if (ok->IsFailure()) return ok;
  }
  return this;
}
+
+
// Copies characters [f, t) of |src| into |sink| starting at offset |so|.
// Cons strings are handled by iterating over the longer half and recursing
// on the shorter one, bounding the recursion depth; slices are handled by
// translating the range into the backing string and iterating.
void String::Flatten(String* src, String* sink, int f, int t, int so) {
  String* source = src;
  int from = f;
  int to = t;
  int sink_offset = so;
  while (true) {
    ASSERT(0 <= from && from <= to && to <= source->length());
    ASSERT(0 <= sink_offset && sink_offset < sink->length());
    switch (source->representation_tag()) {
      case kSeqStringTag:
      case kExternalStringTag: {
        // Flat source: copy character by character via an input buffer.
        Access<StringInputBuffer> buffer(&string_input_buffer);
        buffer->Reset(from, source);
        int j = sink_offset;
        for (int i = from; i < to; i++) {
          sink->Set(j++, buffer->GetNext());
        }
        return;
      }
      case kSlicedStringTag: {
        // Translate the range into the backing string and keep iterating.
        SlicedString* sliced_string = SlicedString::cast(source);
        int start = sliced_string->start();
        from += start;
        to += start;
        source = String::cast(sliced_string->buffer());
      }
        break;
      case kConsStringTag: {
        ConsString* cons_string = ConsString::cast(source);
        String* first = String::cast(cons_string->first());
        int boundary = first->length();
        if (to - boundary > boundary - from) {
          // Right hand side is longer.  Recurse over left.
          if (from < boundary) {
            Flatten(first, sink, from, boundary, sink_offset);
            sink_offset += boundary - from;
            from = 0;
          } else {
            from -= boundary;
          }
          to -= boundary;
          source = String::cast(cons_string->second());
        } else {
          // Left hand side is longer.  Recurse over right.
          if (to > boundary) {
            String* second = String::cast(cons_string->second());
            Flatten(second,
                    sink,
                    0,
                    to - boundary,
                    sink_offset + boundary - from);
            to = boundary;
          }
          source = first;
        }
      }
        break;
    }
  }
}
+
+
// GC/visitor support: a SlicedString's only pointer field is its backing
// buffer.
void SlicedString::SlicedStringIterateBody(ObjectVisitor* v) {
  IteratePointer(v, kBufferOffset);
}
+
+
// Returns the character at slice-relative |index|.
uint16_t SlicedString::SlicedStringGet(int index) {
  ASSERT(index >= 0 && index < this->length());
  // Delegate to the buffer string, translating by the slice start.
  return String::cast(buffer())->Get(start() + index);
}
+
+
+bool String::SlowEquals(String* other) {
+ // Fast check: negative check with lengths.
+ int len = length();
+ if (len != other->length()) return false;
+ if (len == 0) return true;
+
+ // Fast check: if hash code is computed for both strings
+ // a fast negative check can be performed.
+ if (HasHashCode() && other->HasHashCode()) {
+ if (Hash() != other->Hash()) return false;
+ }
+
+ // Fast case: avoid input buffers for small strings.
+ const int kMaxLenthForFastCaseCheck = 5;
+ for (int i = 0; i < kMaxLenthForFastCaseCheck; i++) {
+ if (Get(i) != other->Get(i)) return false;
+ if (i + 1 == len) return true;
+ }
+
+ // General slow case check.
+ static StringInputBuffer buf1;
+ static StringInputBuffer buf2;
+ buf1.Reset(kMaxLenthForFastCaseCheck, this);
+ buf2.Reset(kMaxLenthForFastCaseCheck, other);
+ while (buf1.has_more()) {
+ if (buf1.GetNext() != buf2.GetNext()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool String::MarkAsUndetectable() {
+ if (this->IsSymbol()) return false;
+
+ Map* map = this->map();
+ if (map == Heap::short_string_map()) {
+ this->set_map(Heap::undetectable_short_string_map());
+ return true;
+ } else if (map == Heap::medium_string_map()) {
+ this->set_map(Heap::undetectable_medium_string_map());
+ return true;
+ } else if (map == Heap::long_string_map()) {
+ this->set_map(Heap::undetectable_long_string_map());
+ return true;
+ } else if (map == Heap::short_ascii_string_map()) {
+ this->set_map(Heap::undetectable_short_ascii_string_map());
+ return true;
+ } else if (map == Heap::medium_ascii_string_map()) {
+ this->set_map(Heap::undetectable_medium_ascii_string_map());
+ return true;
+ } else if (map == Heap::long_ascii_string_map()) {
+ this->set_map(Heap::undetectable_long_ascii_string_map());
+ return true;
+ }
+ // Rest cannot be marked as undetectable
+ return false;
+}
+
+
+bool String::IsEqualTo(Vector<const char> str) {
+ int slen = length();
+ Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
+ decoder->Reset(str.start(), str.length());
+ int i;
+ for (i = 0; i < slen && decoder->has_more(); i++) {
+ uc32 r = decoder->GetNext();
+ if (Get(i) != r) return false;
+ }
+ return i == slen && !decoder->has_more();
+}
+
+
+uint32_t String::ComputeAndSetHash() {
+ // Should only be call if hash code has not yet been computed.
+ ASSERT(!(length_field() & kHashComputedMask));
+
+ // Compute the hash code.
+ StringInputBuffer buffer(this);
+ int hash = ComputeHashCode(&buffer, length());
+
+ // Store the hash code in the object.
+ set_length_field(hash);
+
+ // Check the hash code is there.
+ ASSERT(length_field() & kHashComputedMask);
+ return hash;
+}
+
+
// Attempts to interpret |length| characters from |buffer| as a decimal
// uint32 array index.  On success stores the value in *index and returns
// true.  A leading '0' is only legal for the index 0 itself.
bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
                               uint32_t* index,
                               int length) {
  if (length == 0) return false;
  uc32 ch = buffer->GetNext();

  // If the string begins with a '0' character, it must only consist
  // of it to be a legal array index.
  if (ch == '0') {
    *index = 0;
    return length == 1;
  }

  // Convert string to uint32 array index; character by character.
  int d = ch - '0';
  if (d < 0 || d > 9) return false;
  uint32_t result = d;
  while (buffer->has_more()) {
    d = buffer->GetNext() - '0';
    if (d < 0 || d > 9) return false;
    // Check that the new result is below the 32 bit limit:
    // 429496729 == 0xFFFFFFFF / 10, so result * 10 + d overflows exactly
    // when result exceeds it, or equals it with a digit above 5.
    if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
    result = (result * 10) + d;
  }

  *index = result;
  return true;
}
+
+
// Slow path: scans the characters to decide whether this string is a
// valid uint32 array index, storing the value in *index if so.
bool String::SlowAsArrayIndex(uint32_t* index) {
  StringInputBuffer buffer(this);
  return ComputeArrayIndex(&buffer, index, length());
}
+
+
// Packs a raw hash value into the combined length/hash field: hash in the
// high bits, flags in the low bits.  NOTE(review): based on the
// kHashComputedMask usage nearby, bit 0 appears to mean "hash computed"
// and bit 1 "is array index" (3 = both) -- confirm against field layout.
static inline uint32_t HashField(uint32_t hash, bool is_array_index) {
  return (hash << String::kLongLengthShift) | (is_array_index ? 3 : 1);
}
+
+
// Computes the hash field value for a string of the given length read
// from |buffer|.  Short strings additionally encode whether they are an
// array index; short and medium strings pack their length into the
// high bits of the result.
uint32_t String::ComputeHashCode(unibrow::CharacterStream* buffer,
                                 int length) {
  // Large string (please note large strings cannot be an array index).
  if (length > kMaxMediumStringSize) return HashField(length, false);

  // Note: the Jenkins one-at-a-time hash function
  uint32_t hash = 0;
  while (buffer->has_more()) {
    uc32 r = buffer->GetNext();
    hash += r;
    hash += (hash << 10);
    hash ^= (hash >> 6);
  }
  // Final avalanche of the one-at-a-time hash.
  hash += (hash << 3);
  hash ^= (hash >> 11);
  hash += (hash << 15);

  // Short string.
  if (length <= kMaxShortStringSize) {
    // Make hash value consistent with value returned from String::Hash.
    buffer->Rewind();
    uint32_t index;
    hash = HashField(hash, ComputeArrayIndex(buffer, &index, length));
    // Keep the low 24 bits of hash; the length goes in the top bits.
    hash = (hash & 0x00FFFFFF) | (length << kShortLengthShift);
    return hash;
  }

  // Medium string (please note medium strings cannot be an array index).
  ASSERT(length <= kMaxMediumStringSize);
  // Make hash value consistent with value returned from String::Hash.
  hash = HashField(hash, false);
  // Keep the low 16 bits of hash; the length goes in the top bits.
  hash = (hash & 0x0000FFFF) | (length << kMediumLengthShift);
  return hash;
}
+
+
// Returns a string covering characters [start, end) of this string.
// May allocate and therefore return a Failure.
Object* String::Slice(int start, int end) {
  if (start == 0 && end == length()) return this;
  int representation = representation_tag();
  if (representation == kSlicedStringTag) {
    // Translate slices of a SlicedString into slices of the
    // underlying string buffer.
    SlicedString* str = SlicedString::cast(this);
    return Heap::AllocateSlicedString(String::cast(str->buffer()),
                                      str->start() + start,
                                      str->start() + end);
  }
  Object* answer = Heap::AllocateSlicedString(this, start, end);
  if (answer->IsFailure()) {
    return answer;
  }
  // Due to the way we retry after GC on allocation failure we are not allowed
  // to fail on allocation after this point. This is the one-allocation rule.

  // Try to flatten a cons string that is under the sliced string.
  // This is to avoid memory leaks and possible stack overflows caused by
  // building 'towers' of sliced strings on cons strings.
  // This may fail due to an allocation failure (when a GC is needed), but it
  // will succeed often enough to avoid the problem. We only have to do this
  // if Heap::AllocateSlicedString actually returned a SlicedString. It will
  // return flat strings for small slices for efficiency reasons.
  if (String::cast(answer)->StringIsSlicedString() &&
      representation == kConsStringTag) {
    TryFlatten();
    // If the flatten succeeded we might as well make the sliced string point
    // to the flat string rather than the cons string.
    if (String::cast(ConsString::cast(this)->second())->length() == 0) {
      SlicedString::cast(answer)->set_buffer(ConsString::cast(this)->first());
    }
  }
  return answer;
}
+
+
+void String::PrintOn(FILE* file) {
+ int length = this->length();
+ for (int i = 0; i < length; i++) {
+ fprintf(file, "%c", Get(i));
+ }
+}
+
+
// GC/visitor support: visit all pointer fields of a Map.
void Map::MapIterateBody(ObjectVisitor* v) {
  // Assumes all Object* members are contiguously allocated!
  IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize);
}
+
+
// Returns the number of entries in this function's literals array.
int JSFunction::NumberOfLiterals() {
  return literals()->length();
}
+
+
// Sets the prototype used when this function is used as a constructor.
// |value| must be a JSObject; returns |value|.
Object* JSFunction::SetInstancePrototype(Object* value) {
  ASSERT(value->IsJSObject());

  if (has_initial_map()) {
    initial_map()->set_prototype(value);
  } else {
    // Put the value in the initial map field until an initial map is
    // needed. At that point, a new initial map is created and the
    // prototype is put into the initial map where it belongs.
    set_prototype_or_initial_map(value);
  }
  return value;
}
+
+
+
// Sets this function's "prototype" property.  Non-JSObject values are
// stored on a private copy of the map (see ECMA-262 13.2.2); may allocate
// and therefore return a Failure.
Object* JSFunction::SetPrototype(Object* value) {
  Object* construct_prototype = value;

  // If the value is not a JSObject, store the value in the map's
  // constructor field so it can be accessed. Also, set the prototype
  // used for constructing objects to the original object prototype.
  // See ECMA-262 13.2.2.
  if (!value->IsJSObject()) {
    // Copy the map so this does not affect unrelated functions.
    // Remove map transitions so we do not lose the prototype
    // information on map transitions.
    Object* copy = map()->Copy();
    if (copy->IsFailure()) return copy;
    Object* new_map = Map::cast(copy)->EnsureNoMapTransitions();
    if (new_map->IsFailure()) return new_map;
    set_map(Map::cast(new_map));

    map()->set_constructor(value);
    map()->set_non_instance_prototype(true);
    construct_prototype = *Top::initial_object_prototype();
  } else {
    map()->set_non_instance_prototype(false);
  }

  return SetInstancePrototype(construct_prototype);
}
+
+
// Records the class name used for instances created by this constructor.
Object* JSFunction::SetInstanceClassName(String* name) {
  shared()->set_instance_class_name(name);
  return this;
}
+
+
// GC/visitor support: visit all pointer fields of an Oddball.
void Oddball::OddballIterateBody(ObjectVisitor* v) {
  // Assumes all Object* members are contiguously allocated!
  IteratePointers(v, kToStringOffset, kToNumberOffset + kPointerSize);
}
+
+
// Initializes an oddball with its printable name (interned as a symbol)
// and numeric conversion value.  May return a Failure if symbol lookup
// allocates and fails.
Object* Oddball::Initialize(const char* to_string, Object* to_number) {
  Object* symbol = Heap::LookupAsciiSymbol(to_string);
  if (symbol->IsFailure()) return symbol;
  set_to_string(String::cast(symbol));
  set_to_number(to_number);
  return this;
}
+
+
+bool SharedFunctionInfo::HasSourceCode() {
+ return !script()->IsUndefined() &&
+ !Script::cast(script())->source()->IsUndefined();
+}
+
+
// Returns this function's source as a string, or undefined when no
// script/source is attached.
Object* SharedFunctionInfo::GetSourceCode() {
  // NOTE(review): the returned raw Object* is dereferenced from a handle
  // before this HandleScope closes, but it is not protected against a
  // subsequent GC -- callers must not allocate before using it; confirm.
  HandleScope scope;
  if (script()->IsUndefined()) return Heap::undefined_value();
  Object* source = Script::cast(script())->source();
  if (source->IsUndefined()) return Heap::undefined_value();
  return *SubString(Handle<String>(String::cast(source)),
                    start_position(), end_position());
}
+
+
// Support function for printing the source code to a StringStream
// without any allocation in the heap.
void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
                                         int max_length) {
  // For some native functions there is no source.
  if (script()->IsUndefined() ||
      Script::cast(script())->source()->IsUndefined()) {
    accumulator->Add("<No Source>");
    return;
  }

  // Get the slice of the source for this function.
  // Don't use String::cast because we don't want more assertion errors while
  // we are already creating a stack dump.
  String* script_source =
      reinterpret_cast<String*>(Script::cast(script())->source());

  if (!script_source->LooksValid()) {
    accumulator->Add("<Invalid Source>");
    return;
  }

  if (!is_toplevel()) {
    accumulator->Add("function ");
    Object* name = this->name();
    // Only print the name when it is a non-empty string.
    if (name->IsString() && String::cast(name)->length() > 0) {
      accumulator->PrintName(name);
    }
  }

  // Truncate the body to max_length characters if necessary.
  int len = end_position() - start_position();
  if (len > max_length) {
    accumulator->Put(script_source,
                     start_position(),
                     start_position() + max_length);
    accumulator->Add("...\n");
  } else {
    accumulator->Put(script_source, start_position(), end_position());
  }
}
+
+
// GC/visitor support: visit the pointer fields of a SharedFunctionInfo
// (two contiguous ranges plus the debug info slot).
void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
  IteratePointers(v, kNameOffset, kCodeOffset + kPointerSize);
  IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize);
  IteratePointer(v, kDebugInfoOffset);
}
+
+
// Default hook called before iterating a code object; verifies the code's
// IC targets are in object form (required for pointer visiting).
void ObjectVisitor::BeginCodeIteration(Code* code) {
  ASSERT(code->ic_flag() == Code::IC_TARGET_IS_OBJECT);
}
+
+
// Default handling of a code-target relocation: visit the embedded object
// slot.
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
  ASSERT(is_code_target(rinfo->rmode()));
  VisitPointer(rinfo->target_object_address());
}
+
+
// Default handling of a debug break target (a call patched in at a JS
// return site): visit the call's object slot.
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
  ASSERT(is_js_return(rinfo->rmode()) && rinfo->is_call_instruction());
  VisitPointer(rinfo->call_object_address());
}
+
+
// Convert relocatable targets from address to code object address. This is
// mainly IC call targets but for debugging straight-line code can be replaced
// with a call instruction which also has to be relocated.
void Code::ConvertICTargetsFromAddressToObject() {
  ASSERT(ic_flag() == IC_TARGET_IS_ADDRESS);

  for (RelocIterator it(this, RelocInfo::kCodeTargetMask);
       !it.done(); it.next()) {
    Address ic_addr = it.rinfo()->target_address();
    ASSERT(ic_addr != NULL);
    // The target address points at the first instruction; step back over
    // the code header to recover the Code object.
    HeapObject* code = HeapObject::FromAddress(ic_addr - Code::kHeaderSize);
    ASSERT(code->IsHeapObject());
    it.rinfo()->set_target_object(code);
  }

  if (Debug::has_break_points()) {
    // Also convert call targets patched in at JS return sites.
    for (RelocIterator it(this, RelocMask(js_return)); !it.done(); it.next()) {
      if (it.rinfo()->is_call_instruction()) {
        Address addr = it.rinfo()->call_address();
        ASSERT(addr != NULL);
        HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
        ASSERT(code->IsHeapObject());
        it.rinfo()->set_call_object(code);
      }
    }
  }
  set_ic_flag(IC_TARGET_IS_OBJECT);
}
+
+
// Visits every embedded reference in this code object: IC/code targets,
// embedded objects, external references, debug call sites, runtime
// entries, and finally the scope info.
void Code::CodeIterateBody(ObjectVisitor* v) {
  v->BeginCodeIteration(this);

  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocMask(embedded_object) |
                  RelocMask(external_reference) |
                  RelocMask(js_return) |
                  RelocMask(runtime_entry);

  for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
    RelocMode rmode = it.rinfo()->rmode();
    if (rmode == embedded_object) {
      v->VisitPointer(it.rinfo()->target_object_address());
    } else if (is_code_target(rmode)) {
      v->VisitCodeTarget(it.rinfo());
    } else if (rmode == external_reference) {
      v->VisitExternalReference(it.rinfo()->target_reference_address());
    } else if (Debug::has_break_points() &&
               is_js_return(rmode) && it.rinfo()->is_call_instruction()) {
      // JS return sites only carry a target while break points are active.
      v->VisitDebugTarget(it.rinfo());
    } else if (rmode == runtime_entry) {
      v->VisitRuntimeEntry(it.rinfo());
    }
  }

  ScopeInfo<>::IterateScopeInfo(this, v);

  v->EndCodeIteration(this);
}
+
+
// Inverse of ConvertICTargetsFromAddressToObject: rewrite code-object
// references back into raw instruction addresses.
void Code::ConvertICTargetsFromObjectToAddress() {
  ASSERT(ic_flag() == IC_TARGET_IS_OBJECT);

  for (RelocIterator it(this, RelocInfo::kCodeTargetMask);
       !it.done(); it.next()) {
    // We cannot use the safe cast (Code::cast) here, because we may be in
    // the middle of relocating old objects during GC and the map pointer in
    // the code object may be mangled
    Code* code = reinterpret_cast<Code*>(it.rinfo()->target_object());
    ASSERT((code != NULL) && code->IsHeapObject());
    it.rinfo()->set_target_address(code->instruction_start());
  }

  if (Debug::has_break_points()) {
    // Also convert call targets patched in at JS return sites.
    for (RelocIterator it(this, RelocMask(js_return)); !it.done(); it.next()) {
      if (it.rinfo()->is_call_instruction()) {
        Code* code = reinterpret_cast<Code*>(it.rinfo()->call_object());
        ASSERT((code != NULL) && code->IsHeapObject());
        it.rinfo()->set_call_address(code->instruction_start());
      }
    }
  }
  set_ic_flag(IC_TARGET_IS_ADDRESS);
}
+
+
// Shifts the code by |delta| bytes by applying the delta to every
// relocation entry that participates in relocation (kApplyMask).
void Code::Relocate(int delta) {
  for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
    it.rinfo()->apply(delta);
  }
}
+
+
+void Code::CopyFrom(const CodeDesc& desc) {
+ // copy code
+ memmove(instruction_start(), desc.buffer, desc.instr_size);
+
+ // fill gap with zero bytes
+ { byte* p = instruction_start() + desc.instr_size;
+ byte* q = relocation_start();
+ while (p < q) {
+ *p++ = 0;
+ }
+ }
+
+ // copy reloc info
+ memmove(relocation_start(),
+ desc.buffer + desc.buffer_size - desc.reloc_size,
+ desc.reloc_size);
+
+ // unbox handles and relocate
+ int delta = instruction_start() - desc.buffer;
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocMask(embedded_object) |
+ RelocInfo::kApplyMask;
+ for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ RelocMode mode = it.rinfo()->rmode();
+ if (mode == embedded_object) {
+ Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+ it.rinfo()->set_target_object(*p);
+ } else if (is_code_target(mode)) {
+ // rewrite code handles in inline cache targets to direct
+ // pointers to the first instruction in the code object
+ Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+ Code* code = Code::cast(*p);
+ it.rinfo()->set_target_address(code->instruction_start());
+ } else {
+ it.rinfo()->apply(delta);
+ }
+ }
+}
+
+
+// Locate the source position which is closest to the address in the code. This
+// is using the source position information embedded in the relocation info.
+// The position returned is relative to the beginning of the script where the
+// source for this function is found.
+int Code::SourcePosition(Address pc) {
+ int distance = kMaxInt;
+ int position = kNoPosition; // Initially no position found.
+ // Run through all the relocation info to find the best matching source
+ // position. All the code needs to be considered as the sequence of the
+ // instructions in the code does not necessarily follow the same order as the
+ // source.
+ RelocIterator it(this, RelocInfo::kPositionMask);
+ while (!it.done()) {
+ if (it.rinfo()->pc() < pc && (pc - it.rinfo()->pc()) < distance) {
+ position = it.rinfo()->data();
+ distance = pc - it.rinfo()->pc();
+ }
+ it.next();
+ }
+ return position;
+}
+
+
// Same as Code::SourcePosition above except it only looks for statement
// positions.
int Code::SourceStatementPosition(Address pc) {
  // First find the position as close as possible using all position
  // information.
  int position = SourcePosition(pc);
  // Now find the closest statement position before the position.
  int statement_position = 0;
  RelocIterator it(this, RelocInfo::kPositionMask);
  while (!it.done()) {
    if (is_statement_position(it.rinfo()->rmode())) {
      int p = it.rinfo()->data();
      // Keep the largest statement position not beyond |position|.
      if (statement_position < p && p <= position) {
        statement_position = p;
      }
    }
    it.next();
  }
  return statement_position;
}
+
+
// Installs |elems| (which must be pre-filled with holes) as this object's
// fast elements backing store, copying over the existing fast elements or
// the numeric entries of the element dictionary.
void JSObject::SetFastElements(FixedArray* elems) {
#ifdef DEBUG
  // Check the provided array is filled with the_hole.
  uint32_t len = static_cast<uint32_t>(elems->length());
  for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole());
#endif
  FixedArray::WriteBarrierMode mode = elems->GetWriteBarrierMode();
  if (HasFastElements()) {
    FixedArray* old_elements = FixedArray::cast(elements());
    uint32_t old_length = static_cast<uint32_t>(old_elements->length());
    // Fill out the new array with this content and array holes.
    for (uint32_t i = 0; i < old_length; i++) {
      elems->set(i, old_elements->get(i), mode);
    }
  } else {
    // Copy each numeric key of the dictionary into its slot.
    Dictionary* dictionary = Dictionary::cast(elements());
    for (int i = 0; i < dictionary->Capacity(); i++) {
      Object* key = dictionary->KeyAt(i);
      if (key->IsNumber()) {
        uint32_t entry = static_cast<uint32_t>(key->Number());
        elems->set(entry, dictionary->ValueAt(i), mode);
      }
    }
  }
  set_elements(elems);
}
+
+
+Object* JSObject::SetSlowElements(Object* len) {
+ uint32_t new_length = static_cast<uint32_t>(len->Number());
+
+ if (!HasFastElements()) {
+ if (IsJSArray()) {
+ uint32_t old_length =
+ static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+ element_dictionary()->RemoveNumberEntries(new_length, old_length),
+ JSArray::cast(this)->set_length(len);
+ }
+ return this;
+ }
+
+ // Make sure we never try to shrink dense arrays into sparse arrays.
+ ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
+ new_length);
+ Object* obj = NormalizeElements();
+ if (obj->IsFailure()) return obj;
+
+ // Update length for JSArrays.
+ if (IsJSArray()) JSArray::cast(this)->set_length(len);
+ return this;
+}
+
+
// Initializes the array: length 0 and a hole-filled backing store of the
// given capacity (the canonical empty array when capacity is 0).  Returns
// a Failure if allocating the backing store fails.
Object* JSArray::Initialize(int capacity) {
  ASSERT(capacity >= 0);
  set_length(Smi::FromInt(0));
  FixedArray* new_elements;
  if (capacity == 0) {
    new_elements = Heap::empty_fixed_array();
  } else {
    Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
    if (obj->IsFailure()) return obj;
    new_elements = FixedArray::cast(obj);
  }
  set_elements(new_elements);
  return this;
}
+
+
// Replaces the array's elements with |storage| and sets the length to
// match.
void JSArray::SetContent(FixedArray* storage) {
  set_length(Smi::FromInt(storage->length()));
  set_elements(storage);
}
+
+
// Computes the new capacity when expanding the elements of a JSObject.
static int NewElementsCapacity(int old_capacity) {
  // Grow by half the current capacity plus a fixed headroom of 16 slots:
  // (old_capacity + 50%) + 16.
  int growth = (old_capacity >> 1) + 16;
  return old_capacity + growth;
}
+
+
// Throws a RangeError for an invalid array length and returns the
// resulting failure object.
static Object* ArrayLengthRangeError() {
  HandleScope scope;
  return Top::Throw(*Factory::NewRangeError("invalid_array_length",
                                            HandleVector<Object>(NULL, 0)));
}
+
+
// Sets the length of this object's element store, handling both fast and
// slow element modes (growing, truncating, or normalizing the backing
// store as required).  A non-numeric |len| yields a one-element array
// holding len.  May return a Failure on allocation.
Object* JSObject::SetElementsLength(Object* len) {
  // Fast path: length representable as a Smi.
  Object* smi_length = len->ToSmi();
  if (smi_length->IsSmi()) {
    int value = Smi::cast(smi_length)->value();
    if (value < 0) return ArrayLengthRangeError();
    if (HasFastElements()) {
      int old_capacity = FixedArray::cast(elements())->length();
      if (value <= old_capacity) {
        if (IsJSArray()) {
          int old_length = FastD2I(JSArray::cast(this)->length()->Number());
          // NOTE: We may be able to optimize this by removing the
          // last part of the elements backing storage array and
          // setting the capacity to the new size.
          for (int i = value; i < old_length; i++) {
            FixedArray::cast(elements())->set_the_hole(i);
          }
          JSArray::cast(this)->set_length(smi_length);
        }
        return this;
      }
      // Growing: allocate at least 50%+16 more than the old capacity.
      int min = NewElementsCapacity(old_capacity);
      int new_capacity = value > min ? value : min;
      if (KeepInFastCase(new_capacity) ||
          new_capacity <= kMaxFastElementsLength) {
        Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
        if (obj->IsFailure()) return obj;
        if (IsJSArray()) JSArray::cast(this)->set_length(smi_length);
        SetFastElements(FixedArray::cast(obj));
        return this;
      }
    } else {
      if (IsJSArray()) {
        if (value == 0) {
          // If the length of a slow array is reset to zero, we clear
          // the array and flush backing storage. This has the added
          // benefit that the array returns to fast mode.
          initialize_elements();
        } else {
          // Remove deleted elements.
          uint32_t old_length =
              static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
          element_dictionary()->RemoveNumberEntries(value, old_length);
        }
        JSArray::cast(this)->set_length(smi_length);
      }
      return this;
    }
  }

  // General slow case.
  if (len->IsNumber()) {
    uint32_t length;
    if (Array::IndexFromObject(len, &length)) {
      return SetSlowElements(len);
    } else {
      return ArrayLengthRangeError();
    }
  }

  // len is not a number so make the array size one and
  // set only element to len.
  Object* obj = Heap::AllocateFixedArray(1);
  if (obj->IsFailure()) return obj;
  FixedArray::cast(obj)->set(0, len);
  if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
  set_elements(FixedArray::cast(obj));
  return this;
}
+
+
// Element-existence check used after an interceptor has declined: looks
// at own elements, string-wrapper characters, then continues up the
// prototype chain with the original |receiver|.
bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
  if (HasFastElements()) {
    // For JSArrays the length property bounds the elements; otherwise the
    // backing store capacity does.
    uint32_t length = IsJSArray() ?
        static_cast<uint32_t>(
            Smi::cast(JSArray::cast(this)->length())->value()) :
        static_cast<uint32_t>(FixedArray::cast(elements())->length());
    if ((index < length) &&
        !FixedArray::cast(elements())->get(index)->IsTheHole()) {
      return true;
    }
  } else {
    if (element_dictionary()->FindNumberEntry(index) != -1) return true;
  }

  // Handle [] on String objects.
  if (this->IsStringObjectWithCharacterAt(index)) return true;

  Object* pt = GetPrototype();
  if (pt == Heap::null_value()) return false;
  return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
+
+
// Asks the indexed interceptor whether an element exists, preferring the
// query callback and falling back to the getter; if neither answers,
// continues with the normal post-interceptor lookup.
bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
  // Make sure that the top context does not change when doing
  // callbacks or interceptor calls.
  AssertNoContextChange ncc;
  HandleScope scope;
  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
  Handle<JSObject> receiver_handle(receiver);
  Handle<JSObject> holder_handle(this);
  Handle<Object> data_handle(interceptor->data());
  v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
                        v8::Utils::ToLocal(data_handle),
                        v8::Utils::ToLocal(holder_handle));
  if (!interceptor->query()->IsUndefined()) {
    v8::IndexedPropertyQuery query =
        v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
    LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
    v8::Handle<v8::Boolean> result;
    {
      // Leaving JavaScript.
      VMState state(OTHER);
      result = query(index, info);
    }
    if (!result.IsEmpty()) return result->IsTrue();
  } else if (!interceptor->getter()->IsUndefined()) {
    // No query callback: a non-undefined getter result counts as presence.
    v8::IndexedPropertyGetter getter =
        v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
    LOG(ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
    v8::Handle<v8::Value> result;
    {
      // Leaving JavaScript.
      VMState state(OTHER);
      result = getter(index, info);
    }
    if (!result.IsEmpty()) return !result->IsUndefined();
  }
  // Interceptor gave no answer; fall through to the regular lookup.
  return holder_handle->HasElementPostInterceptor(*receiver_handle, index);
}
+
+
// Returns whether this object itself (no prototype walk) has an element
// at |index|, honoring access checks, indexed interceptors and string
// wrappers.
bool JSObject::HasLocalElement(uint32_t index) {
  // Check access rights if needed.
  if (IsAccessCheckNeeded() &&
      !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
    return false;
  }

  // Check for lookup interceptor
  if (HasIndexedInterceptor()) {
    return HasElementWithInterceptor(this, index);
  }

  // Handle [] on String objects.
  if (this->IsStringObjectWithCharacterAt(index)) return true;

  if (HasFastElements()) {
    // For JSArrays the length property bounds the elements; otherwise the
    // backing store capacity does.
    uint32_t length = IsJSArray() ?
        static_cast<uint32_t>(
            Smi::cast(JSArray::cast(this)->length())->value()) :
        static_cast<uint32_t>(FixedArray::cast(elements())->length());
    return (index < length) &&
           !FixedArray::cast(elements())->get(index)->IsTheHole();
  } else {
    return element_dictionary()->FindNumberEntry(index) != -1;
  }
}
+
+
+// Returns true if |receiver| has an element at |index|, searching this
+// object first and then its prototype chain.
+bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
+  // Objects under access control only expose what the embedder allows.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  // An indexed interceptor gets the first word.
+  if (HasIndexedInterceptor()) {
+    return HasElementWithInterceptor(receiver, index);
+  }
+
+  if (HasFastElements()) {
+    FixedArray* backing = FixedArray::cast(elements());
+    uint32_t limit = IsJSArray()
+        ? static_cast<uint32_t>(
+              Smi::cast(JSArray::cast(this)->length())->value())
+        : static_cast<uint32_t>(backing->length());
+    if (index < limit && !backing->get(index)->IsTheHole()) return true;
+  } else if (element_dictionary()->FindNumberEntry(index) != -1) {
+    return true;
+  }
+
+  // Handle [] on String objects.
+  if (this->IsStringObjectWithCharacterAt(index)) return true;
+
+  // Not found locally; continue along the prototype chain.
+  Object* proto = GetPrototype();
+  if (proto == Heap::null_value()) return false;
+  return JSObject::cast(proto)->HasElementWithReceiver(receiver, index);
+}
+
+
+// Stores |value| at |index| after the indexed interceptor (if any) has
+// declined to handle the store.  May reallocate the backing store.
+// Returns the value stored (or |this| via SetFastElement), or a
+// Failure on allocation failure.
+Object* JSObject::SetElementPostInterceptor(uint32_t index, Object* value) {
+  if (HasFastElements()) return SetFastElement(index, value);
+
+  // Dictionary case.
+  ASSERT(!HasFastElements());
+
+  FixedArray* elms = FixedArray::cast(elements());
+  Object* result = Dictionary::cast(elms)->AtNumberPut(index, value);
+  if (result->IsFailure()) return result;
+  // AtNumberPut may have grown the dictionary; install the new one.
+  if (elms != FixedArray::cast(result)) {
+    set_elements(FixedArray::cast(result));
+  }
+
+  if (IsJSArray()) {
+    // Writing past the current length must bump JSArray::length.
+    return JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
+  }
+
+  return value;
+}
+
+
+// Routes an element store through the indexed interceptor's setter, if
+// present.  A non-empty setter result means the interceptor handled
+// the store; otherwise the store is performed normally.  Returns the
+// stored value or a Failure.
+Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+  Handle<JSObject> this_handle(this);
+  Handle<Object> value_handle(value);
+  if (!interceptor->setter()->IsUndefined()) {
+    v8::IndexedPropertySetter setter =
+        v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
+    Handle<Object> data_handle(interceptor->data());
+    LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
+    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
+                          v8::Utils::ToLocal(data_handle),
+                          v8::Utils::ToLocal(this_handle));
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      result = setter(index, v8::Utils::ToLocal(value_handle), info);
+    }
+    // The callback may have scheduled an exception; propagate it first.
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    // A non-empty result means the interceptor consumed the store.
+    if (!result.IsEmpty()) return *value_handle;
+  }
+  Object* raw_result =
+      this_handle->SetElementPostInterceptor(index, *value_handle);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+// Adding n elements in fast case is O(n*n).
+// Note: revisit design to have dual undefined values to capture absent
+// elements.
+//
+// Stores |value| at |index| in a fast-case backing store.  Grows the
+// store (leaving holes) for moderately out-of-bounds indices, and
+// normalizes to dictionary elements when the gap would be too large or
+// growth is not worthwhile.  Returns |this|, or a Failure on
+// allocation failure.
+Object* JSObject::SetFastElement(uint32_t index, Object* value) {
+  ASSERT(HasFastElements());
+
+  FixedArray* elms = FixedArray::cast(elements());
+  uint32_t elms_length = static_cast<uint32_t>(elms->length());
+
+  // Check whether there is extra space in fixed array..
+  if (index < elms_length) {
+    elms->set(index, value);
+    if (IsJSArray()) {
+      // Update the length of the array if needed.
+      uint32_t array_length = 0;
+      CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
+                                   &array_length));
+      if (index >= array_length) {
+        JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+      }
+    }
+    return this;
+  }
+
+  // Allow gap in fast case.
+  if ((index - elms_length) < kMaxGap) {
+    // Try allocating extra space.
+    int new_capacity = NewElementsCapacity(index+1);
+    if (KeepInFastCase(new_capacity) ||
+        new_capacity <= kMaxFastElementsLength) {
+      ASSERT(static_cast<uint32_t>(new_capacity) > index);
+      Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+      if (obj->IsFailure()) return obj;
+      // SetFastElements copies existing elements into the new store.
+      SetFastElements(FixedArray::cast(obj));
+      if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+      FixedArray::cast(elements())->set(index, value);
+      return this;
+    }
+  }
+
+  // Otherwise default to slow case.
+  Object* obj = NormalizeElements();
+  if (obj->IsFailure()) return obj;
+  ASSERT(!HasFastElements());
+  return SetElement(index, value);
+}
+
+
+// Generic element store.  Performs access checks, consults any indexed
+// interceptor, then stores in either the fast or dictionary backing
+// store, possibly converting between the two representations.
+// Returns |value| (or |this| from the fast path) or a Failure.
+Object* JSObject::SetElement(uint32_t index, Object* value) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+    // A denied store is silently dropped; the value is still returned.
+    return value;
+  }
+
+  // Check for lookup interceptor
+  if (HasIndexedInterceptor()) {
+    return SetElementWithInterceptor(index, value);
+  }
+
+  // Fast case.
+  if (HasFastElements()) return SetFastElement(index, value);
+
+  // Dictionary case.
+  ASSERT(!HasFastElements());
+
+  // Insert element in the dictionary.
+  FixedArray* elms = FixedArray::cast(elements());
+  Dictionary* dictionary = Dictionary::cast(elms);
+  Object* result = dictionary->AtNumberPut(index, value);
+  if (result->IsFailure()) return result;
+  // The dictionary may have been reallocated during insertion.
+  if (elms != FixedArray::cast(result)) {
+    set_elements(FixedArray::cast(result));
+  }
+
+  // Update the array length if this JSObject is an array.
+  if (IsJSArray()) {
+    JSArray* array = JSArray::cast(this);
+    Object* return_value = array->JSArrayUpdateLengthFromIndex(index, value);
+    if (return_value->IsFailure()) return return_value;
+  }
+
+  // Attempt to put this object back in fast case.
+  if (ShouldHaveFastElements()) {
+    uint32_t new_length = 0;
+    if (IsJSArray()) {
+      CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &new_length));
+    } else {
+      new_length = Dictionary::cast(elements())->max_number_key() + 1;
+    }
+    Object* obj = Heap::AllocateFixedArrayWithHoles(new_length);
+    if (obj->IsFailure()) return obj;
+    SetFastElements(FixedArray::cast(obj));
+#ifdef DEBUG
+    if (FLAG_trace_normalization) {
+      PrintF("Object elements are fast case again:\n");
+      Print();
+    }
+#endif
+  }
+
+  return value;
+}
+
+
+// Grows this array's length to index + 1 when a store at |index| went
+// past the current length.  Returns |value|, or a Failure if the new
+// length object could not be allocated.
+Object* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, Object* value) {
+  uint32_t current_length = 0;
+  CHECK(Array::IndexFromObject(length(), &current_length));
+  // Check to see if we need to update the length. For now, we make
+  // sure that the length stays within 32-bits (unsigned), so the
+  // largest valid index (0xffffffff) does not grow the length.
+  bool needs_growth = (index >= current_length) && (index != 0xffffffff);
+  if (needs_growth) {
+    double new_length = static_cast<double>(index) + 1;
+    Object* length_object = Heap::NumberFromDouble(new_length);
+    if (length_object->IsFailure()) return length_object;
+    set_length(length_object);
+  }
+  return value;
+}
+
+
+// Looks up element |index| locally (fast or dictionary backing store)
+// and otherwise continues along the prototype chain.  Interceptors
+// have already had their chance when this is called.
+Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
+                                            uint32_t index) {
+  // Get element works for both JSObject and JSArray since
+  // JSArray::length cannot change.
+  if (HasFastElements()) {
+    FixedArray* backing = FixedArray::cast(elements());
+    if (index < static_cast<uint32_t>(backing->length())) {
+      Object* element = backing->get(index);
+      if (!element->IsTheHole()) return element;
+    }
+  } else {
+    Dictionary* numbers = element_dictionary();
+    int entry = numbers->FindNumberEntry(index);
+    if (entry != -1) return numbers->ValueAt(entry);
+  }
+
+  // Continue searching via the prototype chain.
+  Object* proto = GetPrototype();
+  return proto == Heap::null_value()
+      ? Heap::undefined_value()
+      : proto->GetElementWithReceiver(receiver, index);
+}
+
+
+// Loads element |index| via the indexed interceptor's getter, falling
+// back to a normal lookup when the getter is absent or yields nothing.
+Object* JSObject::GetElementWithInterceptor(JSObject* receiver,
+                                            uint32_t index) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+  Handle<JSObject> this_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+
+  if (!interceptor->getter()->IsUndefined()) {
+    Handle<Object> data_handle(interceptor->data());
+    v8::IndexedPropertyGetter getter =
+        v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+    LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
+    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
+                          v8::Utils::ToLocal(data_handle),
+                          v8::Utils::ToLocal(holder_handle));
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      result = getter(index, info);
+    }
+    // Propagate any exception the callback scheduled.
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    // A non-empty result means the interceptor produced the value.
+    if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+  }
+
+  Object* raw_result =
+      holder_handle->GetElementPostInterceptor(*this_handle, index);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+// Generic element load on behalf of |receiver|: performs access
+// checks, consults any indexed interceptor, then searches the local
+// backing store and finally the prototype chain.  Returns undefined
+// when the element is absent or access is denied.
+Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_GET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
+    return Heap::undefined_value();
+  }
+
+  if (HasIndexedInterceptor()) {
+    return GetElementWithInterceptor(receiver, index);
+  }
+
+  // Get element works for both JSObject and JSArray since
+  // JSArray::length cannot change.
+  if (HasFastElements()) {
+    FixedArray* elms = FixedArray::cast(elements());
+    if (index < static_cast<uint32_t>(elms->length())) {
+      Object* value = elms->get(index);
+      // Holes mean "absent"; fall through to the prototype chain.
+      if (!value->IsTheHole()) return value;
+    }
+  } else {
+    Dictionary* dictionary = element_dictionary();
+    int entry = dictionary->FindNumberEntry(index);
+    if (entry != -1) {
+      return dictionary->ValueAt(entry);
+    }
+  }
+
+  Object* pt = GetPrototype();
+  if (pt == Heap::null_value()) return Heap::undefined_value();
+  return pt->GetElementWithReceiver(receiver, index);
+}
+
+
+// Returns true if more than half of the element capacity is occupied.
+// Used to decide whether the object should use (or return to) a fast
+// backing store.
+bool JSObject::HasDenseElements() {
+  int capacity;
+  int used;
+
+  if (HasFastElements()) {
+    FixedArray* backing = FixedArray::cast(elements());
+    capacity = backing->length();
+    used = 0;
+    // Count the non-hole slots.
+    for (int i = 0; i < capacity; i++) {
+      if (!backing->get(i)->IsTheHole()) used++;
+    }
+  } else {
+    Dictionary* numbers = Dictionary::cast(elements());
+    capacity = numbers->Capacity();
+    used = numbers->NumberOfElements();
+  }
+
+  // An empty backing store counts as dense.
+  if (capacity == 0) return true;
+  return used > (capacity / 2);
+}
+
+
+// Decides whether growing the fast backing store to |new_capacity| is
+// worthwhile: only if the current store is densely used and the growth
+// amounts to at most a doubling of the current capacity.
+bool JSObject::KeepInFastCase(int new_capacity) {
+  ASSERT(HasFastElements());
+  int current_capacity = FixedArray::cast(elements())->length();
+  if (!HasDenseElements()) return false;
+  return (new_capacity / 2) <= current_capacity;
+}
+
+
+// Returns true if this dictionary-elements object would be better off
+// with a fast (contiguous) backing store again.
+bool JSObject::ShouldHaveFastElements() {
+  ASSERT(!HasFastElements());
+  Dictionary* dictionary = Dictionary::cast(elements());
+  // If the elements are sparse, we should not go back to fast case.
+  if (!HasDenseElements()) return false;
+  // If an element has been added at a very high index in the elements
+  // dictionary, we cannot go back to fast case.
+  if (dictionary->requires_slow_elements()) return false;
+  // An object requiring access checks is never allowed to have fast
+  // elements. If it had fast elements we would skip security checks.
+  if (IsAccessCheckNeeded()) return false;
+  // If the dictionary backing storage takes up roughly half as much
+  // space as a fast-case backing storage would the array should have
+  // fast elements.
+  uint32_t length = 0;
+  if (IsJSArray()) {
+    CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &length));
+  } else {
+    // For non-arrays the fast store would need max_number_key() slots.
+    length = dictionary->max_number_key();
+  }
+  return static_cast<uint32_t>(dictionary->Capacity()) >=
+      (length / (2 * Dictionary::kElementSize));
+}
+
+
+// Builds a fresh dictionary in which the surviving entries of this one
+// are renumbered with consecutive keys 0, 1, 2, ...  Returns the new
+// dictionary, or a Failure if allocation fails.
+Object* Dictionary::RemoveHoles() {
+  Object* obj = Allocate(NumberOfElements());
+  if (obj->IsFailure()) return obj;
+  Dictionary* compacted = Dictionary::cast(obj);
+  uint32_t next_key = 0;
+  int capacity = Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* key = KeyAt(i);
+    if (!IsKey(key)) continue;  // Skip free/deleted slots.
+    compacted->AddNumberEntry(next_key++, ValueAt(i), DetailsAt(i));
+  }
+  return compacted;
+}
+
+
+// Writes the values of all live entries into |elements| in table
+// order.  |elements| must have exactly NumberOfElements() slots.
+void Dictionary::CopyValuesTo(FixedArray* elements) {
+  int capacity = Capacity();
+  int dest = 0;
+  for (int i = 0; i < capacity; i++) {
+    if (IsKey(KeyAt(i))) elements->set(dest++, ValueAt(i));
+  }
+  ASSERT(dest == elements->length());
+}
+
+
+// Compacts the array by squeezing out holes: live elements are moved
+// to the front (preserving order) and the length is reduced to the
+// number of live elements.  Dictionary-backed arrays are converted
+// back to fast arrays when small enough, or renumbered with
+// consecutive indices otherwise.
+Object* JSArray::RemoveHoles() {
+  if (HasFastElements()) {
+    int len = Smi::cast(length())->value();
+    int pos = 0;
+    FixedArray* elms = FixedArray::cast(elements());
+    // Compact live elements towards the front.
+    for (int index = 0; index < len; index++) {
+      Object* e = elms->get(index);
+      if (!e->IsTheHole()) {
+        if (index != pos) elms->set(pos, e);
+        pos++;
+      }
+    }
+    set_length(Smi::FromInt(pos));
+    // Clear the tail so no stale references remain.
+    for (int index = pos; index < len; index++) {
+      elms->set_the_hole(index);
+    }
+    return this;
+  }
+
+  // Compact the sparse array if possible.
+  Dictionary* dict = element_dictionary();
+  int length = dict->NumberOfElements();
+
+  // Try to make this a fast array again.
+  if (length <= kMaxFastElementsLength) {
+    Object* obj = Heap::AllocateFixedArray(length);
+    if (obj->IsFailure()) return obj;
+    dict->CopyValuesTo(FixedArray::cast(obj));
+    set_length(Smi::FromInt(length));
+    set_elements(FixedArray::cast(obj));
+    return this;
+  }
+
+  // Make another dictionary with smaller indices.
+  Object* obj = dict->RemoveHoles();
+  if (obj->IsFailure()) return obj;
+  set_length(Smi::FromInt(length));
+  set_elements(Dictionary::cast(obj));
+  return this;
+}
+
+
+// Fetches the named-property interceptor from the function template
+// that was used to construct this object.
+InterceptorInfo* JSObject::GetNamedInterceptor() {
+  ASSERT(map()->has_named_interceptor());
+  JSFunction* constructor = JSFunction::cast(map()->constructor());
+  FunctionTemplateInfo* info =
+      FunctionTemplateInfo::cast(constructor->shared()->function_data());
+  return InterceptorInfo::cast(info->named_property_handler());
+}
+
+
+// Fetches the indexed-property interceptor from the function template
+// that was used to construct this object.
+InterceptorInfo* JSObject::GetIndexedInterceptor() {
+  ASSERT(map()->has_indexed_interceptor());
+  JSFunction* constructor = JSFunction::cast(map()->constructor());
+  FunctionTemplateInfo* info =
+      FunctionTemplateInfo::cast(constructor->shared()->function_data());
+  return InterceptorInfo::cast(info->indexed_property_handler());
+}
+
+
+// Named-property load after the interceptor has declined: looks up a
+// real local property on the holder, then continues on the prototype
+// chain.  Sets *attributes to ABSENT before delegating to the chain.
+Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver,
+                                             String* name,
+                                             PropertyAttributes* attributes) {
+  // Check local property in holder, ignore interceptor.
+  LookupResult result;
+  LocalLookupRealNamedProperty(name, &result);
+  if (result.IsValid()) return GetProperty(receiver, &result, name, attributes);
+  // Continue searching via the prototype chain.
+  Object* pt = GetPrototype();
+  *attributes = ABSENT;
+  if (pt == Heap::null_value()) return Heap::undefined_value();
+  return pt->GetPropertyWithReceiver(receiver, name, attributes);
+}
+
+
+// Named-property load through the named interceptor: a non-empty
+// getter result is returned directly (with attributes NONE), otherwise
+// the lookup proceeds normally via GetPropertyPostInterceptor.
+Object* JSObject::GetPropertyWithInterceptor(JSObject* receiver,
+                                             String* name,
+                                             PropertyAttributes* attributes) {
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+  Handle<String> name_handle(name);
+  Handle<Object> data_handle(interceptor->data());
+
+  if (!interceptor->getter()->IsUndefined()) {
+    v8::NamedPropertyGetter getter =
+        v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+    LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
+    v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
+                          v8::Utils::ToLocal(data_handle),
+                          v8::Utils::ToLocal(holder_handle));
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(OTHER);
+      result = getter(v8::Utils::ToLocal(name_handle), info);
+    }
+    // Propagate any exception the callback scheduled.
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!result.IsEmpty()) {
+      *attributes = NONE;
+      return *v8::Utils::OpenHandle(*result);
+    }
+  }
+
+  Object* raw_result = holder_handle->GetPropertyPostInterceptor(
+      *receiver_handle,
+      *name_handle,
+      attributes);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+// Returns true if the object itself has a real (non-interceptor) local
+// property with the given name.
+bool JSObject::HasRealNamedProperty(String* key) {
+  // Objects under access control only expose what the embedder allows.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  LookupResult result;
+  LocalLookupRealNamedProperty(key, &result);
+  if (!result.IsValid()) return false;
+  // Only genuine property kinds count; transitions and the like do not.
+  return result.type() == NORMAL ||
+         result.type() == FIELD ||
+         result.type() == CALLBACKS ||
+         result.type() == CONSTANT_FUNCTION;
+}
+
+
+// Returns true if the object itself has a real (non-interceptor)
+// element at |index|, including characters of a wrapped string value.
+bool JSObject::HasRealElementProperty(uint32_t index) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  // Handle [] on String objects.
+  if (this->IsStringObjectWithCharacterAt(index)) return true;
+
+  if (HasFastElements()) {
+    // For arrays the in-use region is bounded by length, otherwise by
+    // the backing store's capacity.
+    uint32_t length = IsJSArray() ?
+        static_cast<uint32_t>(
+            Smi::cast(JSArray::cast(this)->length())->value()) :
+        static_cast<uint32_t>(FixedArray::cast(elements())->length());
+    return (index < length) &&
+        !FixedArray::cast(elements())->get(index)->IsTheHole();
+  }
+  return element_dictionary()->FindNumberEntry(index) != -1;
+}
+
+
+// Returns true if the object itself has a real local property named
+// |key| that is implemented as an accessor callback.
+bool JSObject::HasRealNamedCallbackProperty(String* key) {
+  // Objects under access control only expose what the embedder allows.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  LookupResult result;
+  LocalLookupRealNamedProperty(key, &result);
+  if (!result.IsValid()) return false;
+  return result.type() == CALLBACKS;
+}
+
+
+// Counts local properties whose attributes contain none of the bits in
+// |filter|.  Map transitions in the descriptor array are not real
+// properties and are skipped.
+int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
+  if (HasFastProperties()) {
+    int result = 0;
+    for (DescriptorReader r(map()->instance_descriptors());
+         !r.eos();
+         r.advance()) {
+      PropertyDetails details = r.GetDetails();
+      if (!details.IsTransition() && (details.attributes() & filter) == 0) {
+        result++;
+      }
+    }
+    return result;
+  } else {
+    return property_dictionary()->NumberOfElementsFilterAttributes(filter);
+  }
+}
+
+
+// Returns the number of enumerable local properties, i.e. those not
+// marked DONT_ENUM.
+int JSObject::NumberOfEnumProperties() {
+  return NumberOfLocalProperties(static_cast<PropertyAttributes>(DONT_ENUM));
+}
+
+
+// Exchanges the elements at positions |i| and |j|.
+void FixedArray::Swap(int i, int j) {
+  Object* first = get(i);
+  Object* second = get(j);
+  set(i, second);
+  set(j, first);
+}
+
+
+// Sorts |content| and |smis| in lockstep by ascending smi value using
+// insertion sort; efficient for the short arrays it is used on.
+static void InsertionSortPairs(FixedArray* content, FixedArray* smis) {
+  int len = smis->length();
+  for (int i = 1; i < len; i++) {
+    // Sift element i towards the front until its predecessor is not
+    // larger.
+    for (int j = i;
+         j > 0 &&
+         Smi::cast(smis->get(j - 1))->value() >
+             Smi::cast(smis->get(j))->value();
+         j--) {
+      smis->Swap(j - 1, j);
+      content->Swap(j - 1, j);
+    }
+  }
+}
+
+
+// Sorts |content| and |smis| in lockstep by ascending smi value using
+// in-place heap sort.  Chosen by FixedArray::SortPairs for long
+// arrays; must order identically to InsertionSortPairs (the short
+// path), so smi values are compared as signed ints here too — the
+// previous uint32_t comparison ordered negative smis after positive
+// ones.  The second child is also only read once it is known to be
+// inside the unsorted prefix.
+void HeapSortPairs(FixedArray* content, FixedArray* smis) {
+  // In-place heap sort.
+  ASSERT(content->length() == smis->length());
+  int len = smis->length();
+
+  // Bottom-up max-heap construction.
+  for (int i = 1; i < len; ++i) {
+    int child_index = i;
+    // Bubble the new element up while it exceeds its parent.
+    while (child_index > 0) {
+      int parent_index = ((child_index + 1) >> 1) - 1;
+      int parent_value = Smi::cast(smis->get(parent_index))->value();
+      int child_value = Smi::cast(smis->get(child_index))->value();
+      if (parent_value < child_value) {
+        content->Swap(parent_index, child_index);
+        smis->Swap(parent_index, child_index);
+      } else {
+        break;
+      }
+      child_index = parent_index;
+    }
+  }
+
+  // Extract elements and create sorted array.
+  for (int i = len - 1; i > 0; --i) {
+    // Put max element at the back of the array.
+    content->Swap(0, i);
+    smis->Swap(0, i);
+    // Sift down the new top element.
+    int parent_index = 0;
+    while (true) {
+      int child_index = ((parent_index + 1) << 1) - 1;
+      if (child_index >= i) break;
+      int child1_value = Smi::cast(smis->get(child_index))->value();
+      // Swap with the larger of the two children; a right child only
+      // exists (inside the unsorted prefix) when child_index + 1 < i.
+      if (child_index + 1 >= i ||
+          child1_value > Smi::cast(smis->get(child_index + 1))->value()) {
+        if (Smi::cast(smis->get(parent_index))->value() > child1_value) break;
+        content->Swap(parent_index, child_index);
+        smis->Swap(parent_index, child_index);
+        parent_index = child_index;
+      } else {
+        int child2_value = Smi::cast(smis->get(child_index + 1))->value();
+        if (Smi::cast(smis->get(parent_index))->value() > child2_value) break;
+        content->Swap(parent_index, child_index + 1);
+        smis->Swap(parent_index, child_index + 1);
+        parent_index = child_index + 1;
+      }
+    }
+  }
+}
+
+
+// Sort this array and the smis as pairs wrt. the (distinct) smis.
+// Uses insertion sort for short arrays, an in-place linear-time cycle
+// sort when the smi values form a contiguous range, and heap sort
+// otherwise.
+void FixedArray::SortPairs(FixedArray* smis) {
+  ASSERT(this->length() == smis->length());
+  int len = smis->length();
+  // For small arrays, simply use insertion sort.
+  if (len <= 10) {
+    InsertionSortPairs(this, smis);
+    return;
+  }
+  // Check the range of indices.
+  int min_index = Smi::cast(smis->get(0))->value();
+  int max_index = min_index;
+  int i;
+  for (i = 1; i < len; i++) {
+    if (Smi::cast(smis->get(i))->value() < min_index) {
+      min_index = Smi::cast(smis->get(i))->value();
+    } else if (Smi::cast(smis->get(i))->value() > max_index) {
+      max_index = Smi::cast(smis->get(i))->value();
+    }
+  }
+  if (max_index - min_index + 1 == len) {
+    // Indices form a contiguous range, unless there are duplicates.
+    // Do an in-place linear time sort assuming distinct smis, but
+    // avoid hanging in case they are not.
+    for (i = 0; i < len; i++) {
+      int p;
+      int j = 0;  // Bounds the inner loop in case of duplicate smis.
+      // While the current element at i is not at its correct position p,
+      // swap the elements at these two positions.
+      while ((p = Smi::cast(smis->get(i))->value() - min_index) != i &&
+             j++ < len) {
+        this->Swap(i, p);
+        smis->Swap(i, p);
+      }
+    }
+  } else {
+    HeapSortPairs(this, smis);
+    return;
+  }
+}
+
+
+// Fill in the names of local properties into the supplied storage. The main
+// purpose of this function is to provide reflection information for the object
+// mirrors.  |storage| must be sized to NumberOfLocalProperties(NONE).
+void JSObject::GetLocalPropertyNames(FixedArray* storage) {
+  ASSERT(storage->length() ==
+         NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE)));
+  int index = 0;
+  if (HasFastProperties()) {
+    for (DescriptorReader r(map()->instance_descriptors());
+         !r.eos();
+         r.advance()) {
+      // Map transitions are not real properties; skip them.
+      if (!r.IsTransition()) {
+        storage->set(index++, r.GetKey());
+      }
+    }
+    ASSERT(storage->length() == index);
+  } else {
+    property_dictionary()->CopyKeysTo(storage);
+  }
+}
+
+
+// Returns the number of local elements passing |filter| without
+// collecting them (NULL storage means "count only").
+int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
+  return GetLocalElementKeys(NULL, filter);
+}
+
+
+// Returns the number of enumerable local elements, i.e. those not
+// marked DONT_ENUM.
+int JSObject::NumberOfEnumElements() {
+  return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM));
+}
+
+
+// Writes the keys of all local elements passing |filter| into
+// |storage| (when non-NULL) and returns their number.  Pass NULL
+// storage to count only.  For wrapped strings the character indices
+// are appended as well.
+int JSObject::GetLocalElementKeys(FixedArray* storage,
+                                  PropertyAttributes filter) {
+  int counter = 0;
+  if (HasFastElements()) {
+    int length = IsJSArray()
+        ? Smi::cast(JSArray::cast(this)->length())->value()
+        : FixedArray::cast(elements())->length();
+    for (int i = 0; i < length; i++) {
+      if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
+        if (storage) {
+          // Keys are smis, so no write barrier is needed.
+          storage->set(counter,
+                       Smi::FromInt(i),
+                       FixedArray::SKIP_WRITE_BARRIER);
+        }
+        counter++;
+      }
+    }
+    ASSERT(!storage || storage->length() >= counter);
+  } else {
+    if (storage)
+      element_dictionary()->CopyKeysTo(storage, filter);
+    counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
+  }
+
+  // A JSValue wrapping a string also exposes its characters as
+  // elements.
+  if (this->IsJSValue()) {
+    Object* val = JSValue::cast(this)->value();
+    if (val->IsString()) {
+      String* str = String::cast(val);
+      if (storage) {
+        for (int i = 0; i < str->length(); i++) {
+          storage->set(counter + i,
+                       Smi::FromInt(i),
+                       FixedArray::SKIP_WRITE_BARRIER);
+        }
+      }
+      counter += str->length();
+    }
+  }
+  ASSERT(!storage || storage->length() == counter);
+  return counter;
+}
+
+
+// Collects the keys of all enumerable local elements into |storage|
+// and returns the number of keys written.
+int JSObject::GetEnumElementKeys(FixedArray* storage) {
+  return GetLocalElementKeys(storage,
+                             static_cast<PropertyAttributes>(DONT_ENUM));
+}
+
+
+// The NumberKey carries the uint32_t as key.
+// This avoids allocation in HasProperty.
+class Dictionary::NumberKey : public Dictionary::Key {
+ public:
+  explicit NumberKey(uint32_t number) {
+    number_ = number;
+  }
+
+ private:
+  // Keys match when the stored number equals the other key's numeric
+  // value truncated to uint32.
+  bool IsMatch(Object* other) {
+    return number_ == ToUint32(other);
+  }
+
+  // Thomas Wang, Integer Hash Functions.
+  // http://www.concentric.net/~Ttwang/tech/inthash.htm
+  static uint32_t ComputeHash(uint32_t key) {
+    uint32_t hash = key;
+    hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
+    hash = hash ^ (hash >> 12);
+    hash = hash + (hash << 2);
+    hash = hash ^ (hash >> 4);
+    hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
+    hash = hash ^ (hash >> 16);
+    return hash;
+  }
+
+  uint32_t Hash() { return ComputeHash(number_); }
+
+  HashFunction GetHashFunction() { return NumberHash; }
+
+  // Materializes the key as a number object; may allocate (Failure).
+  Object* GetObject() {
+    return Heap::NumberFromDouble(number_);
+  }
+
+  static uint32_t NumberHash(Object* obj) {
+    return ComputeHash(ToUint32(obj));
+  }
+
+  static uint32_t ToUint32(Object* obj) {
+    ASSERT(obj->IsNumber());
+    return static_cast<uint32_t>(obj->Number());
+  }
+
+  bool IsStringKey() { return false; }
+
+  uint32_t number_;
+};
+
+// StringKey simply carries a string object as key.
+class Dictionary::StringKey : public Dictionary::Key {
+ public:
+  explicit StringKey(String* string) {
+    string_ = string;
+  }
+
+ private:
+  // Keys match on string content equality, not object identity.
+  bool IsMatch(Object* other) {
+    if (!other->IsString()) return false;
+    return string_->Equals(String::cast(other));
+  }
+
+  uint32_t Hash() { return StringHash(string_); }
+
+  HashFunction GetHashFunction() { return StringHash; }
+
+  // The key object already exists, so no allocation is needed.
+  Object* GetObject() { return string_; }
+
+  static uint32_t StringHash(Object* obj) {
+    return String::cast(obj)->Hash();
+  }
+
+  bool IsStringKey() { return true; }
+
+  String* string_;
+};
+
+// Utf8Key carries a vector of chars as key.
+class SymbolTable::Utf8Key : public SymbolTable::Key {
+ public:
+  explicit Utf8Key(Vector<const char> string)
+      : string_(string), hash_(0) { }
+
+  bool IsMatch(Object* other) {
+    if (!other->IsString()) return false;
+    return String::cast(other)->IsEqualTo(string_);
+  }
+
+  HashFunction GetHashFunction() {
+    return StringHash;
+  }
+
+  // Computes the hash lazily and caches it (hash_ == 0 doubles as
+  // "not yet computed"); also records the character count used by
+  // GetObject.
+  uint32_t Hash() {
+    if (hash_ != 0) return hash_;
+    unibrow::Utf8InputBuffer<> buffer(string_.start(),
+                                      static_cast<unsigned>(string_.length()));
+    chars_ = buffer.Length();
+    hash_ = String::ComputeHashCode(&buffer, chars_);
+    return hash_;
+  }
+
+  // Allocates a fresh symbol for this string; may return a Failure.
+  Object* GetObject() {
+    if (hash_ == 0) Hash();
+    unibrow::Utf8InputBuffer<> buffer(string_.start(),
+                                      static_cast<unsigned>(string_.length()));
+    return Heap::AllocateSymbol(&buffer, chars_, hash_);
+  }
+
+  static uint32_t StringHash(Object* obj) {
+    return String::cast(obj)->Hash();
+  }
+
+  bool IsStringKey() { return true; }
+
+  Vector<const char> string_;
+  uint32_t hash_;
+  int chars_;  // Caches the number of characters when computing the hash code.
+};
+
+
+// StringKey carries a string object as key.
+class SymbolTable::StringKey : public SymbolTable::Key {
+ public:
+  explicit StringKey(String* string) : string_(string) { }
+
+  HashFunction GetHashFunction() {
+    return StringHash;
+  }
+
+  bool IsMatch(Object* other) {
+    if (!other->IsString()) return false;
+    return String::cast(other)->Equals(string_);
+  }
+
+  uint32_t Hash() { return string_->Hash(); }
+
+  Object* GetObject() {
+    // Transform string to symbol if possible.
+    Map* map = Heap::SymbolMapForString(string_);
+    if (map != NULL) {
+      // In-place transformation: the existing string object becomes
+      // the symbol by swapping its map.
+      string_->set_map(map);
+      return string_;
+    }
+    // Otherwise allocate a new symbol.
+    StringInputBuffer buffer(string_);
+    return Heap::AllocateSymbol(&buffer, string_->length(), string_->Hash());
+  }
+
+  static uint32_t StringHash(Object* obj) {
+    return String::cast(obj)->Hash();
+  }
+
+  bool IsStringKey() { return true; }
+
+  String* string_;
+};
+
+
+// Visits the pointer-valued fields preceding the first element slot.
+template<int prefix_size, int element_size>
+void HashTable<prefix_size, element_size>::IteratePrefix(ObjectVisitor* v) {
+  IteratePointers(v, 0, kElementsStartOffset);
+}
+
+
+// Visits all element slots of the table.
+template<int prefix_size, int element_size>
+void HashTable<prefix_size, element_size>::IterateElements(ObjectVisitor* v) {
+  IteratePointers(v,
+                  kElementsStartOffset,
+                  kHeaderSize + length() * kPointerSize);
+}
+
+
+// Allocates an empty hash table with room for |at_least_space_for|
+// entries, rounded up to a power of two with a minimum of 4.  Returns
+// a Failure when the heap allocation fails.
+template<int prefix_size, int element_size>
+Object* HashTable<prefix_size, element_size>::Allocate(int at_least_space_for) {
+  int capacity = NextPowerOf2(at_least_space_for);
+  if (capacity < 4) capacity = 4;  // Guarantee min capacity.
+  Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
+  if (obj->IsFailure()) return obj;
+  HashTable* table = HashTable::cast(obj);
+  table->SetNumberOfElements(0);
+  table->SetCapacity(capacity);
+  return table;
+}
+
+
+// Find entry for key otherwise return -1.
+// Follows the key's probe sequence; an undefined slot terminates the
+// sequence, while null slots are stepped over without terminating it.
+// Bails out early once as many non-null slots have been inspected as
+// the table has elements.
+template <int prefix_size, int element_size>
+int HashTable<prefix_size, element_size>::FindEntry(Key* key) {
+  uint32_t nof = NumberOfElements();
+  if (nof == 0) return -1;  // Bail out if empty.
+
+  uint32_t capacity = Capacity();
+  uint32_t hash = key->Hash();
+  uint32_t entry = GetProbe(hash, 0, capacity);
+
+  Object* element = KeyAt(entry);
+  uint32_t passed_elements = 0;
+  if (!element->IsNull()) {
+    if (!element->IsUndefined() && key->IsMatch(element)) return entry;
+    if (++passed_elements == nof) return -1;
+  }
+  for (uint32_t i = 1; !element->IsUndefined(); i++) {
+    entry = GetProbe(hash, i, capacity);
+    element = KeyAt(entry);
+    if (!element->IsNull()) {
+      if (!element->IsUndefined() && key->IsMatch(element)) return entry;
+      if (++passed_elements == nof) return -1;
+    }
+  }
+  return -1;
+}
+
+
+// Ensures the table has room for |n| more elements while keeping at
+// least 20% of the capacity free.  Returns this table unchanged when
+// there is room, otherwise a freshly allocated, rehashed copy (or a
+// Failure on allocation failure).  |key| supplies the hash function
+// used for rehashing.
+template<int prefix_size, int element_size>
+Object* HashTable<prefix_size, element_size>::EnsureCapacity(int n, Key* key) {
+  int capacity = Capacity();
+  int nof = NumberOfElements() + n;
+  // Make sure 20% is free
+  if (nof + (nof >> 2) <= capacity) return this;
+
+  Object* obj = Allocate(nof * 2);
+  if (obj->IsFailure()) return obj;
+  HashTable* dict = HashTable::cast(obj);
+  WriteBarrierMode mode = dict->GetWriteBarrierMode();
+
+  // Copy prefix to new array.
+  for (int i = kPrefixStartIndex; i < kPrefixStartIndex + prefix_size; i++) {
+    dict->set(i, get(i), mode);
+  }
+  // Rehash the elements.
+  uint32_t (*Hash)(Object* key) = key->GetHashFunction();
+  for (int i = 0; i < capacity; i++) {
+    uint32_t from_index = EntryToIndex(i);
+    Object* key = get(from_index);
+    if (IsKey(key)) {
+      // Re-insert the whole entry: the key plus its element_size - 1
+      // associated slots.
+      uint32_t insertion_index =
+          EntryToIndex(dict->FindInsertionEntry(key, Hash(key)));
+      for (int j = 0; j < element_size; j++) {
+        dict->set(insertion_index + j, get(from_index + j), mode);
+      }
+    }
+  }
+  dict->SetNumberOfElements(NumberOfElements());
+  return dict;
+}
+
+
+template<int prefix_size, int element_size>
+uint32_t HashTable<prefix_size, element_size>::FindInsertionEntry(
+ Object* key,
+ uint32_t hash) {
+ uint32_t capacity = Capacity();
+ uint32_t entry = GetProbe(hash, 0, capacity);
+ Object* element = KeyAt(entry);
+
+ for (uint32_t i = 1; !(element->IsUndefined() || element->IsNull()); i++) {
+ entry = GetProbe(hash, i, capacity);
+ element = KeyAt(entry);
+ }
+
+ return entry;
+}
+
+
+// Force instantiation of SymbolTable's base class
+// (HashTable with prefix_size = 0, element_size = 1).
+template class HashTable<0, 1>;
+
+
+// Force instantiation of Dictionary's base class
+// (HashTable with prefix_size = 2, element_size = 3).
+template class HashTable<2, 3>;
+
+
+// Looks up (interning if needed) the symbol for |string|.  The symbol
+// is stored in *s and the (possibly reallocated) table is returned.
+Object* SymbolTable::LookupString(String* string, Object** s) {
+  StringKey key(string);
+  return LookupKey(&key, s);
+}
+
+
+// Looks up (interning if needed) the symbol for the UTF-8 chars in
+// |str|.  The symbol is stored in *s and the (possibly reallocated)
+// table is returned.
+Object* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
+  Utf8Key key(str);
+  return LookupKey(&key, s);
+}
+
+
+// Returns the symbol table containing the symbol for |key|, creating
+// the symbol and growing the table as necessary.  The symbol itself is
+// passed out through *s; the result is a Failure when an allocation
+// fails.
+Object* SymbolTable::LookupKey(Key* key, Object** s) {
+  int entry = FindEntry(key);
+
+  // Symbol already in table.
+  if (entry != -1) {
+    *s = KeyAt(entry);
+    return this;
+  }
+
+  // Adding new symbol. Grow table if needed.
+  Object* obj = EnsureCapacity(1, key);
+  if (obj->IsFailure()) return obj;
+
+  // Create symbol object.
+  Object* symbol = key->GetObject();
+  if (symbol->IsFailure()) return symbol;
+
+  // If the symbol table grew as part of EnsureCapacity, obj is not
+  // the current symbol table and therefore we cannot use
+  // SymbolTable::cast here.
+  SymbolTable* table = reinterpret_cast<SymbolTable*>(obj);
+
+  // Add the new symbol and return it along with the symbol table.
+  entry = table->FindInsertionEntry(symbol, key->Hash());
+  table->set(EntryToIndex(entry), symbol);
+  table->ElementAdded();
+  *s = symbol;
+  return table;
+}
+
+
+// Allocate a dictionary with room for |at_least_space_for| elements and
+// seed its enumeration-index counter.  Allocation failures propagate.
+Object* Dictionary::Allocate(int at_least_space_for) {
+  Object* result = DictionaryBase::Allocate(at_least_space_for);
+  if (result->IsFailure()) return result;
+  Dictionary::cast(result)->
+      SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
+  return result;
+}
+
+// Reassign compact enumeration indices (starting at kInitialIndex) to all
+// properties while preserving their current relative enumeration order.
+// Called when the enumeration-index space is close to overflowing.
+// Returns this dictionary, or a Failure object if a temporary array
+// cannot be allocated.
+Object* Dictionary::GenerateNewEnumerationIndices() {
+  int length = NumberOfElements();
+
+  // Allocate and initialize iteration order array.
+  Object* obj = Heap::AllocateFixedArray(length);
+  if (obj->IsFailure()) return obj;
+  FixedArray* iteration_order = FixedArray::cast(obj);
+  for (int i = 0; i < length; i++) iteration_order->set(i, Smi::FromInt(i));
+
+  // Allocate array with enumeration order.
+  obj = Heap::AllocateFixedArray(length);
+  if (obj->IsFailure()) return obj;
+  FixedArray* enumeration_order = FixedArray::cast(obj);
+
+  // Fill the enumeration order array with property details.
+  int capacity = Capacity();
+  int pos = 0;
+  for (int i = 0; i < capacity; i++) {
+    if (IsKey(KeyAt(i))) {
+      enumeration_order->set(pos++, Smi::FromInt(DetailsAt(i).index()));
+    }
+  }
+
+  // Sort the arrays wrt. enumeration order.
+  iteration_order->SortPairs(enumeration_order);
+
+  // Overwrite the enumeration_order with the enumeration indices.
+  for (int i = 0; i < length; i++) {
+    int index = Smi::cast(iteration_order->get(i))->value();
+    int enum_index = PropertyDetails::kInitialIndex + i;
+    enumeration_order->set(index, Smi::FromInt(enum_index));
+  }
+
+  // Update the dictionary with new indices.
+  capacity = Capacity();
+  pos = 0;
+  for (int i = 0; i < capacity; i++) {
+    if (IsKey(KeyAt(i))) {
+      int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
+      PropertyDetails details = DetailsAt(i);
+      PropertyDetails new_details =
+          PropertyDetails(details.attributes(), details.type(), enum_index);
+      DetailsAtPut(i, new_details);
+    }
+  }
+
+  // Set the next enumeration index.
+  SetNextEnumerationIndex(PropertyDetails::kInitialIndex+length);
+  return this;
+}
+
+
+// Make room for |n| additional elements.  For string keys this first
+// regenerates enumeration indices when adding n entries could overflow
+// the index bit field.
+Object* Dictionary::EnsureCapacity(int n, Key* key) {
+  bool indices_exhausted =
+      key->IsStringKey() &&
+      !PropertyDetails::IsValidIndex(NextEnumerationIndex() + n);
+  if (indices_exhausted) {
+    Object* result = GenerateNewEnumerationIndices();
+    if (result->IsFailure()) return result;
+  }
+  return DictionaryBase::EnsureCapacity(n, key);
+}
+
+
+// Remove every entry whose numeric key lies in the half-open interval
+// [from, to) and update the element count accordingly.
+void Dictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
+  if (from >= to) return;  // Empty interval: nothing to do.
+
+  Object* sentinel = Heap::null_value();
+  int capacity = Capacity();
+  int removed = 0;
+  for (int entry = 0; entry < capacity; entry++) {
+    Object* k = KeyAt(entry);
+    if (!k->IsNumber()) continue;
+    uint32_t number = static_cast<uint32_t>(k->Number());
+    if (number < from || number >= to) continue;
+    // Mark the slot deleted by writing the null sentinel.
+    SetEntry(entry, sentinel, sentinel, Smi::FromInt(0));
+    removed++;
+  }
+
+  SetNumberOfElements(NumberOfElements() - removed);
+}
+
+
+// Delete the property at |entry| unless it is marked DONT_DELETE.
+// Returns true_value on success, false_value when deletion is forbidden.
+Object* Dictionary::DeleteProperty(int entry) {
+  if (DetailsAt(entry).IsDontDelete()) return Heap::false_value();
+  Object* sentinel = Heap::null_value();
+  SetEntry(entry, sentinel, sentinel, Smi::FromInt(0));
+  ElementRemoved();
+  return Heap::true_value();
+}
+
+
+// Find the entry index for a string key; -1 when absent.
+int Dictionary::FindStringEntry(String* key) {
+  StringKey string_key(key);
+  return FindEntry(&string_key);
+}
+
+
+// Find the entry index for a numeric key; -1 when absent.
+int Dictionary::FindNumberEntry(uint32_t index) {
+  NumberKey number_key(index);
+  return FindEntry(&number_key);
+}
+
+
+// Set |key| to |value|, overwriting an existing entry or adding a new one
+// with NONE attributes and NORMAL type.  Returns the dictionary — possibly
+// a new object after growth — or a Failure object on allocation failure.
+Object* Dictionary::AtPut(Key* key, Object* value) {
+  int entry = FindEntry(key);
+
+  // If the entry is present, just overwrite the value.
+  if (entry != -1) {
+    ValueAtPut(entry, value);
+    return this;
+  }
+
+  // Check whether the dictionary should be extended.
+  Object* obj = EnsureCapacity(1, key);
+  if (obj->IsFailure()) return obj;
+  // The key object is created only after growth; obj may be a new table,
+  // so the entry is added through obj rather than this.
+  Object* k = key->GetObject();
+  if (k->IsFailure()) return k;
+  PropertyDetails details = PropertyDetails(NONE, NORMAL);
+  Dictionary::cast(obj)->AddEntry(k, value, details, key->Hash());
+  return obj;
+}
+
+
+// Insert a new (key, value, details) triple.  The table is grown first so
+// the key object is created against the final table; the caller receives
+// the (possibly new) dictionary or a Failure object.
+Object* Dictionary::Add(Key* key, Object* value, PropertyDetails details) {
+  Object* table = EnsureCapacity(1, key);
+  if (table->IsFailure()) return table;
+  Object* key_object = key->GetObject();
+  if (key_object->IsFailure()) return key_object;
+  Dictionary::cast(table)->AddEntry(key_object, value, details, key->Hash());
+  return table;
+}
+
+
+// Add a key, value pair to the dictionary.
+// Add a key, value pair to the dictionary.
+// Callers (Add, AtPut) grow the table beforehand.  A string key whose
+// details carry no enumeration index yet (index() == 0) is assigned the
+// next available index.
+void Dictionary::AddEntry(Object* key,
+                          Object* value,
+                          PropertyDetails details,
+                          uint32_t hash) {
+  uint32_t entry = FindInsertionEntry(key, hash);
+  // Insert element at empty or deleted entry
+  if (details.index() == 0 && key->IsString()) {
+    // Assign an enumeration index to the property and update
+    // SetNextEnumerationIndex.
+    int index = NextEnumerationIndex();
+    details = PropertyDetails(details.attributes(), details.type(), index);
+    SetNextEnumerationIndex(index + 1);
+  }
+  SetEntry(entry, key, value, details);
+  ASSERT(KeyAt(entry)->IsNumber() || KeyAt(entry)->IsString());
+  ElementAdded();
+}
+
+
+// Record |key| as a candidate for the largest numeric key seen.  The Smi
+// at kPrefixStartIndex stores the max key shifted left by
+// kRequiresSlowElementsTagSize; keys above kRequiresSlowElementsLimit set
+// the requires-slow-elements mask there permanently instead.
+void Dictionary::UpdateMaxNumberKey(uint32_t key) {
+  // If the dictionary requires slow elements an element has already
+  // been added at a high index.
+  if (requires_slow_elements()) return;
+  // Check if this index is high enough that we should require slow
+  // elements.
+  if (key > kRequiresSlowElementsLimit) {
+    set(kPrefixStartIndex, Smi::FromInt(kRequiresSlowElementsMask));
+    return;
+  }
+  // Update max key value.
+  Object* max_index_object = get(kPrefixStartIndex);
+  if (!max_index_object->IsSmi() || max_number_key() < key) {
+    set(kPrefixStartIndex, Smi::FromInt(key << kRequiresSlowElementsTagSize));
+  }
+}
+
+
+// Insert a fresh string-keyed property; the key must not be present yet.
+Object* Dictionary::AddStringEntry(String* key,
+                                   Object* value,
+                                   PropertyDetails details) {
+  StringKey string_key(key);
+  SLOW_ASSERT(FindEntry(&string_key) == -1);
+  return Add(&string_key, value, details);
+}
+
+
+// Insert a fresh numeric-keyed property; the key must not be present yet.
+// Also tracks the maximum numeric key seen.
+Object* Dictionary::AddNumberEntry(uint32_t key,
+                                   Object* value,
+                                   PropertyDetails details) {
+  NumberKey number_key(key);
+  UpdateMaxNumberKey(key);
+  SLOW_ASSERT(FindEntry(&number_key) == -1);
+  return Add(&number_key, value, details);
+}
+
+
+// dictionary[key] = value for a string key (add or overwrite).
+Object* Dictionary::AtStringPut(String* key, Object* value) {
+  StringKey string_key(key);
+  return AtPut(&string_key, value);
+}
+
+
+// dictionary[key] = value for a numeric key (add or overwrite); also
+// tracks the maximum numeric key seen.
+Object* Dictionary::AtNumberPut(uint32_t key, Object* value) {
+  NumberKey number_key(key);
+  UpdateMaxNumberKey(key);
+  return AtPut(&number_key, value);
+}
+
+
+// Overwrite an existing string-keyed property (keeping its enumeration
+// index) or add a new one if the key is absent.
+Object* Dictionary::SetOrAddStringEntry(String* key,
+                                        Object* value,
+                                        PropertyDetails details) {
+  StringKey string_key(key);
+  int entry = FindEntry(&string_key);
+  if (entry == -1) return AddStringEntry(key, value, details);
+  // Preserve the enumeration index of the existing entry.
+  PropertyDetails updated_details(details.attributes(),
+                                  details.type(),
+                                  DetailsAt(entry).index());
+  SetEntry(entry, key, value, updated_details);
+  return this;
+}
+
+
+// Count the live entries whose attributes contain none of the bits in
+// |filter|.
+int Dictionary::NumberOfElementsFilterAttributes(PropertyAttributes filter) {
+  int count = 0;
+  int capacity = Capacity();
+  for (int entry = 0; entry < capacity; entry++) {
+    Object* k = KeyAt(entry);
+    if (!IsKey(k)) continue;
+    if ((DetailsAt(entry).attributes() & filter) == 0) count++;
+  }
+  return count;
+}
+
+
+// Number of live entries visible to enumeration (not DONT_ENUM).
+int Dictionary::NumberOfEnumElements() {
+  PropertyAttributes hidden = static_cast<PropertyAttributes>(DONT_ENUM);
+  return NumberOfElementsFilterAttributes(hidden);
+}
+
+
+// Copy every live key whose attributes pass |filter| into |storage|.
+void Dictionary::CopyKeysTo(FixedArray* storage, PropertyAttributes filter) {
+  ASSERT(storage->length() >= NumberOfEnumElements());
+  int pos = 0;
+  int capacity = Capacity();
+  for (int entry = 0; entry < capacity; entry++) {
+    Object* k = KeyAt(entry);
+    if (!IsKey(k)) continue;
+    if ((DetailsAt(entry).attributes() & filter) != 0) continue;
+    storage->set(pos++, k);
+  }
+  ASSERT(storage->length() >= pos);
+}
+
+
+// Copy the enumerable keys into |storage| and their enumeration indices
+// into |sort_array|, then sort the two arrays in tandem so the keys end
+// up in enumeration order.
+void Dictionary::CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array) {
+  ASSERT(storage->length() >= NumberOfEnumElements());
+  int pos = 0;
+  int capacity = Capacity();
+  for (int entry = 0; entry < capacity; entry++) {
+    Object* k = KeyAt(entry);
+    if (!IsKey(k)) continue;
+    PropertyDetails details = DetailsAt(entry);
+    if (details.IsDontEnum()) continue;
+    storage->set(pos, k);
+    sort_array->set(pos, Smi::FromInt(details.index()));
+    pos++;
+  }
+  storage->SortPairs(sort_array);
+  ASSERT(storage->length() >= pos);
+}
+
+
+// Copy all live keys into |storage|, unfiltered.
+void Dictionary::CopyKeysTo(FixedArray* storage) {
+  ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
+      static_cast<PropertyAttributes>(NONE)));
+  int pos = 0;
+  int capacity = Capacity();
+  for (int entry = 0; entry < capacity; entry++) {
+    Object* k = KeyAt(entry);
+    if (IsKey(k)) storage->set(pos++, k);
+  }
+  ASSERT(storage->length() >= pos);
+}
+
+
+// Backwards lookup (slow): linear scan for the first key mapped to
+// |value|; undefined when no entry has that value.
+Object* Dictionary::SlowReverseLookup(Object* value) {
+  int capacity = Capacity();
+  for (int entry = 0; entry < capacity; entry++) {
+    Object* k = KeyAt(entry);
+    if (IsKey(k) && ValueAt(entry) == value) return k;
+  }
+  return Heap::undefined_value();
+}
+
+
+// Convert this property dictionary into fast (descriptor-based)
+// properties on |obj|, leaving |unused_property_fields| spare field
+// slots.  Bails out, returning |obj| unchanged, when there are too many
+// properties for a descriptor array.  Returns a Failure object if any
+// required allocation fails.
+Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj,
+                                                 int unused_property_fields) {
+  // Make sure we preserve dictionary representation if there are too many
+  // descriptors.
+  if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
+
+  // Figure out if it is necessary to generate new enumeration indices.
+  int max_enumeration_index =
+      NextEnumerationIndex() +
+      (DescriptorArray::kMaxNumberOfDescriptors - NumberOfElements());
+  if (!PropertyDetails::IsValidIndex(max_enumeration_index)) {
+    Object* result = GenerateNewEnumerationIndices();
+    if (result->IsFailure()) return result;
+  }
+
+  int instance_descriptor_length = 0;
+  int number_of_fields = 0;
+
+  // Compute the length of the instance descriptor.
+  int capacity = Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = KeyAt(i);
+    if (IsKey(k)) {
+      Object* value = ValueAt(i);
+      PropertyType type = DetailsAt(i).type();
+      ASSERT(type != FIELD);
+      instance_descriptor_length++;
+      // Only non-function NORMAL properties need a field slot; functions
+      // become constant-function descriptors below.
+      if (type == NORMAL && !value->IsJSFunction()) number_of_fields += 1;
+    }
+  }
+
+  // Allocate the instance descriptor.
+  Object* instance_descriptors =
+      DescriptorArray::Allocate(instance_descriptor_length);
+  if (instance_descriptors->IsFailure()) return instance_descriptors;
+
+  int number_of_allocated_fields = number_of_fields + unused_property_fields;
+
+  // Allocate the fixed array for the fields.
+  Object* fields = Heap::AllocateFixedArray(number_of_allocated_fields);
+  if (fields->IsFailure()) return fields;
+
+  // Fill in the instance descriptor and the fields.
+  DescriptorWriter w(DescriptorArray::cast(instance_descriptors));
+  int current_offset = 0;
+  for (int i = 0; i < capacity; i++) {
+    Object* k = KeyAt(i);
+    if (IsKey(k)) {
+      Object* value = ValueAt(i);
+      // Ensure the key is a symbol before writing into the instance descriptor.
+      Object* key = Heap::LookupSymbol(String::cast(k));
+      if (key->IsFailure()) return key;
+      PropertyDetails details = DetailsAt(i);
+      PropertyType type = details.type();
+      if (value->IsJSFunction()) {
+        ConstantFunctionDescriptor d(String::cast(key),
+                                     JSFunction::cast(value),
+                                     details.attributes(),
+                                     details.index());
+        w.Write(&d);
+      } else if (type == NORMAL) {
+        // Plain values move into the fields array.
+        FixedArray::cast(fields)->set(current_offset, value);
+        FieldDescriptor d(String::cast(key),
+                          current_offset++,
+                          details.attributes(),
+                          details.index());
+        w.Write(&d);
+      } else if (type == CALLBACKS) {
+        CallbacksDescriptor d(String::cast(key),
+                              value,
+                              details.attributes(),
+                              details.index());
+        w.Write(&d);
+      } else {
+        UNREACHABLE();
+      }
+    }
+  }
+  ASSERT(current_offset == number_of_fields);
+
+  // Sort the instance descriptors.
+  DescriptorArray::cast(instance_descriptors)->Sort();
+
+  // Allocate new map.
+  Object* new_map = obj->map()->Copy();
+  if (new_map->IsFailure()) return new_map;
+
+  // Transform the object.
+  Map::cast(new_map)->
+      set_instance_descriptors(DescriptorArray::cast(instance_descriptors));
+  Map::cast(new_map)->set_unused_property_fields(unused_property_fields);
+  obj->set_map(Map::cast(new_map));
+  obj->set_properties(FixedArray::cast(fields));
+  ASSERT(obj->IsJSObject());
+
+  // Transfer next enumeration index from dictionary to instance descriptors.
+  DescriptorArray::cast(instance_descriptors)->
+      SetNextEnumerationIndex(NextEnumerationIndex());
+
+  // Check it really works.
+  ASSERT(obj->HasFastProperties());
+  return obj;
+}
+
+
+// Check if there is a break point at this code position.
+bool DebugInfo::HasBreakPoint(int code_position) {
+  Object* info = GetBreakPointInfo(code_position);
+  // No break point info object means no break points here.
+  if (info->IsUndefined()) return false;
+  // Otherwise there is a break point only if the info holds at least one.
+  return BreakPointInfo::cast(info)->GetBreakPointCount() > 0;
+}
+
+
+// Get the break point info object for this code position.
+Object* DebugInfo::GetBreakPointInfo(int code_position) {
+ // Find the index of the break point info object for this code position.
+ int index = GetBreakPointInfoIndex(code_position);
+
+ // Return the break point info object if any.
+ if (index == kNoBreakPointInfo) return Heap::undefined_value();
+ return BreakPointInfo::cast(break_points()->get(index));
+}
+
+
+// Clear a break point at the specified code position.
+void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
+                                int code_position,
+                                Handle<Object> break_point_object) {
+  Handle<Object> info(debug_info->GetBreakPointInfo(code_position));
+  // Nothing to clear if no break point info exists for this position.
+  if (info->IsUndefined()) return;
+  BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo>::cast(info),
+                                  break_point_object);
+}
+
+
+// Add a break point object at the given code position.  If break point
+// info already exists for the position the object is added to it;
+// otherwise a free (undefined) slot in break_points() is reused — growing
+// the array when full — and a fresh BreakPointInfo struct is installed.
+void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
+                              int code_position,
+                              int source_position,
+                              int statement_position,
+                              Handle<Object> break_point_object) {
+  Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
+  if (!break_point_info->IsUndefined()) {
+    BreakPointInfo::SetBreakPoint(
+        Handle<BreakPointInfo>::cast(break_point_info),
+        break_point_object);
+    return;
+  }
+
+  // Adding a new break point for a code position which did not have any
+  // break points before. Try to find a free slot.
+  int index = kNoBreakPointInfo;
+  for (int i = 0; i < debug_info->break_points()->length(); i++) {
+    if (debug_info->break_points()->get(i)->IsUndefined()) {
+      index = i;
+      break;
+    }
+  }
+  if (index == kNoBreakPointInfo) {
+    // No free slot - extend break point info array.
+    Handle<FixedArray> old_break_points =
+        Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+    debug_info->set_break_points(*Factory::NewFixedArray(
+        old_break_points->length() +
+        Debug::kEstimatedNofBreakPointsInFunction));
+    Handle<FixedArray> new_break_points =
+        Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+    // Copy the existing entries into the grown array.
+    for (int i = 0; i < old_break_points->length(); i++) {
+      new_break_points->set(i, old_break_points->get(i));
+    }
+    index = old_break_points->length();
+  }
+  ASSERT(index != kNoBreakPointInfo);
+
+  // Allocate new BreakPointInfo object and set the break point.
+  Handle<BreakPointInfo> new_break_point_info =
+      Handle<BreakPointInfo>::cast(Factory::NewStruct(BREAK_POINT_INFO_TYPE));
+  new_break_point_info->set_code_position(Smi::FromInt(code_position));
+  new_break_point_info->set_source_position(Smi::FromInt(source_position));
+  new_break_point_info->
+      set_statement_position(Smi::FromInt(statement_position));
+  new_break_point_info->set_break_point_objects(Heap::undefined_value());
+  BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
+  debug_info->break_points()->set(index, *new_break_point_info);
+}
+
+
+// Get the break point objects for a code position.
+Object* DebugInfo::GetBreakPointObjects(int code_position) {
+ Object* break_point_info = GetBreakPointInfo(code_position);
+ if (break_point_info->IsUndefined()) {
+ return Heap::undefined_value();
+ }
+ return BreakPointInfo::cast(break_point_info)->break_point_objects();
+}
+
+
+// Get the total number of break points, summed over every break point
+// info object in break_points().
+int DebugInfo::GetBreakPointCount() {
+  if (break_points()->IsUndefined()) return 0;
+  int total = 0;
+  int length = break_points()->length();
+  for (int i = 0; i < length; i++) {
+    Object* slot = break_points()->get(i);
+    if (slot->IsUndefined()) continue;
+    total += BreakPointInfo::cast(slot)->GetBreakPointCount();
+  }
+  return total;
+}
+
+
+// Find the break point info object that holds |break_point_object|;
+// undefined when no info object contains it.
+Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
+                                      Handle<Object> break_point_object) {
+  if (debug_info->break_points()->IsUndefined()) return Heap::undefined_value();
+  int length = debug_info->break_points()->length();
+  for (int i = 0; i < length; i++) {
+    Object* slot = debug_info->break_points()->get(i);
+    if (slot->IsUndefined()) continue;
+    Handle<BreakPointInfo> info(BreakPointInfo::cast(slot));
+    if (BreakPointInfo::HasBreakPointObject(info, break_point_object)) {
+      return *info;
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
+// Find the index of the break point info object for the specified code
+// position; kNoBreakPointInfo when there is none.
+int DebugInfo::GetBreakPointInfoIndex(int code_position) {
+  if (break_points()->IsUndefined()) return kNoBreakPointInfo;
+  int length = break_points()->length();
+  for (int i = 0; i < length; i++) {
+    Object* slot = break_points()->get(i);
+    if (slot->IsUndefined()) continue;
+    if (BreakPointInfo::cast(slot)->code_position()->value() == code_position) {
+      return i;
+    }
+  }
+  return kNoBreakPointInfo;
+}
+
+
+// Remove the specified break point object from this break point info.
+// Silently ignores objects that are not registered.
+void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
+                                     Handle<Object> break_point_object) {
+  // If there are no break points just ignore.
+  if (break_point_info->break_point_objects()->IsUndefined()) return;
+  // If there is a single break point clear it if it is the same.
+  if (!break_point_info->break_point_objects()->IsFixedArray()) {
+    if (break_point_info->break_point_objects() == *break_point_object) {
+      break_point_info->set_break_point_objects(Heap::undefined_value());
+    }
+    return;
+  }
+  // If there are multiple break points shrink the array.
+  ASSERT(break_point_info->break_point_objects()->IsFixedArray());
+  Handle<FixedArray> old_array =
+      Handle<FixedArray>(
+          FixedArray::cast(break_point_info->break_point_objects()));
+  Handle<FixedArray> new_array =
+      Factory::NewFixedArray(old_array->length() - 1);
+  int found_count = 0;
+  for (int i = 0; i < old_array->length(); i++) {
+    if (old_array->get(i) == *break_point_object) {
+      // Each break point object is stored at most once (see SetBreakPoint).
+      ASSERT(found_count == 0);
+      found_count++;
+    } else {
+      // BUG FIX: if |break_point_object| is not in the array at all,
+      // copying every old entry would write one element past the end of
+      // new_array.  Bail out instead; the break point objects are left
+      // unchanged, which matches the found_count == 0 case below.
+      if (i - found_count >= new_array->length()) return;
+      new_array->set(i - found_count, old_array->get(i));
+    }
+  }
+  // If the break point was found in the list change it.
+  if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
+}
+
+
+// Add the specified break point object.  A position starts with no
+// objects (undefined), is promoted to a single object, then to a
+// FixedArray that grows by one per additional object.  Duplicates are
+// ignored.
+void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
+                                   Handle<Object> break_point_object) {
+  // First break point for this position: store it directly.
+  if (break_point_info->break_point_objects()->IsUndefined()) {
+    break_point_info->set_break_point_objects(*break_point_object);
+    return;
+  }
+  // Already stored as the single break point: nothing to do.
+  if (break_point_info->break_point_objects() == *break_point_object) return;
+  // Single existing break point: promote to a two-element array.
+  if (!break_point_info->break_point_objects()->IsFixedArray()) {
+    Handle<FixedArray> pair = Factory::NewFixedArray(2);
+    pair->set(0, break_point_info->break_point_objects());
+    pair->set(1, *break_point_object);
+    break_point_info->set_break_point_objects(*pair);
+    return;
+  }
+  // Multiple existing break points: copy into a one-larger array.
+  Handle<FixedArray> old_array =
+      Handle<FixedArray>(
+          FixedArray::cast(break_point_info->break_point_objects()));
+  int old_length = old_array->length();
+  Handle<FixedArray> new_array = Factory::NewFixedArray(old_length + 1);
+  for (int i = 0; i < old_length; i++) {
+    // Already present: keep the old array untouched.
+    if (old_array->get(i) == *break_point_object) return;
+    new_array->set(i, old_array->get(i));
+  }
+  new_array->set(old_length, *break_point_object);
+  break_point_info->set_break_point_objects(*new_array);
+}
+
+
+// Test whether |break_point_object| is registered in this break point info.
+bool BreakPointInfo::HasBreakPointObject(
+    Handle<BreakPointInfo> break_point_info,
+    Handle<Object> break_point_object) {
+  Object* objects = break_point_info->break_point_objects();
+  // No break point.
+  if (objects->IsUndefined()) return false;
+  // Single break point.
+  if (!objects->IsFixedArray()) return objects == *break_point_object;
+  // Multiple break points.
+  FixedArray* array = FixedArray::cast(objects);
+  for (int i = 0; i < array->length(); i++) {
+    if (array->get(i) == *break_point_object) return true;
+  }
+  return false;
+}
+
+
+// Get the number of break points registered in this break point info.
+int BreakPointInfo::GetBreakPointCount() {
+  Object* objects = break_point_objects();
+  if (objects->IsUndefined()) return 0;    // No break point.
+  if (!objects->IsFixedArray()) return 1;  // Single break point.
+  return FixedArray::cast(objects)->length();  // Multiple break points.
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OBJECTS_H_
+#define V8_OBJECTS_H_
+
+#include "builtins.h"
+#include "code-stubs.h"
+#include "smart-pointer.h"
+#include "unicode-inl.h"
+
+//
+// All object types in the V8 JavaScript are described in this file.
+//
+// Inheritance hierarchy:
+// - Object
+// - Smi (immediate small integer)
+// - Failure (immediate for marking failed operation)
+// - HeapObject (superclass for everything allocated in the heap)
+// - JSObject
+// - JSArray
+// - JSFunction
+// - GlobalObject
+// - JSGlobalObject
+// - JSBuiltinsObject
+// - JSValue
+// - Script
+// - Array
+// - ByteArray
+// - FixedArray
+// - HashTable
+// - Dictionary
+// - SymbolTable
+// - Context
+// - GlobalContext
+// - String
+// - SeqString
+// - AsciiString
+// - TwoByteString
+// - ConsString
+// - SlicedString
+// - ExternalString
+// - ExternalAsciiString
+// - ExternalTwoByteString
+// - HeapNumber
+// - Code
+// - Map
+// - Oddball
+// - Proxy
+// - SharedFunctionInfo
+// - Struct
+// - AccessorInfo
+// - AccessCheckInfo
+// - InterceptorInfo
+// - CallHandlerInfo
+// - FunctionTemplateInfo
+// - ObjectTemplateInfo
+// - SignatureInfo
+// - TypeSwitchInfo
+// - DebugInfo
+// - BreakPointInfo
+//
+// Formats of Object*:
+// Smi: [31 bit signed int] 0
+// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
+// Failure: [30 bit signed int] 11
+
+
+// Ecma-262 3rd 8.6.1
+// Property attribute bits.  NONE through DONT_DELETE mirror the public
+// v8::PropertyAttribute values so the two can be used interchangeably;
+// the remaining values are internal to the runtime.
+enum PropertyAttributes {
+  NONE = v8::None,
+  READ_ONLY = v8::ReadOnly,
+  DONT_ENUM = v8::DontEnum,
+  DONT_DELETE = v8::DontDelete,
+  INTERCEPTED = 1 << 3,
+  ABSENT = 16  // Used in runtime to indicate a property is absent.
+};
+
+namespace v8 { namespace internal {
+
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+// The data is packed into a single 32-bit word:
+//   bits 0-2  : PropertyType
+//   bits 3-5  : PropertyAttributes
+//   bits 6-31 : enumeration index (0 means not yet assigned)
+class PropertyDetails BASE_EMBEDDED {
+ public:
+
+  PropertyDetails(PropertyAttributes attributes,
+                  PropertyType type,
+                  int index = 0) {
+    ASSERT(TypeField::is_valid(type));
+    ASSERT(AttributesField::is_valid(attributes));
+    ASSERT(IndexField::is_valid(index));
+
+    value_ = TypeField::encode(type)
+        | AttributesField::encode(attributes)
+        | IndexField::encode(index);
+
+    // Verify the encoding round-trips.
+    ASSERT(type == this->type());
+    ASSERT(attributes == this->attributes());
+    ASSERT(index == this->index());
+  }
+
+  // Conversion for storing details as Object*.
+  inline PropertyDetails(Smi* smi);
+  inline Smi* AsSmi();
+
+  PropertyType type() { return TypeField::decode(value_); }
+
+  // True for map and constant-function transitions.
+  bool IsTransition() {
+    PropertyType t = type();
+    ASSERT(t != INTERCEPTOR);
+    if (t == MAP_TRANSITION || t == CONSTANT_TRANSITION) return true;
+    return false;
+  }
+
+  PropertyAttributes attributes() { return AttributesField::decode(value_); }
+
+  // Enumeration index of the property; determines for-in order.
+  int index() { return IndexField::decode(value_); }
+
+  static bool IsValidIndex(int index) { return IndexField::is_valid(index); }
+
+  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
+  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
+  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+
+  // Bit fields in value_ (type, shift, size). Must be public so the
+  // constants can be embedded in generated code.
+  class TypeField: public BitField<PropertyType, 0, 3> {};
+  class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+  class IndexField: public BitField<uint32_t, 6, 32-6> {};
+
+  // First enumeration index handed out (0 is reserved for "unassigned").
+  static const int kInitialIndex = 1;
+
+ private:
+  uint32_t value_;
+};
+
+// All Maps have a field instance_type containing an InstanceType.
+// It describes the type of the instances.
+//
+// As an example, a JavaScript object is a heap object and its map
+// instance_type is JS_OBJECT_TYPE.
+//
+// The names of the string instance types are intended to systematically
+// mirror their encoding in the instance_type field of the map. The length
+// (SHORT, MEDIUM, or LONG) is always mentioned. The default encoding is
+// considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is
+// mentioned explicitly in the name. Likewise, the default representation is
+// considered sequential. It is not mentioned in the name. The other
+// representations (eg, CONS, SLICED, EXTERNAL) are explicitly mentioned.
+// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
+// STRING_TYPE (if it is not a symbol).
+//
+// NOTE: The following things are some that depend on the string types having
+// instance_types that are less than those of all other types:
+// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
+// Object::IsString.
+//
+// NOTE: Everything following JS_OBJECT_TYPE is considered a
+// JSObject for GC purposes. The first four entries here have typeof
+// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
+// X-macro: INSTANCE_TYPE_LIST(V) expands V(name) once per instance type.
+// The backslash continuations make the whole list a single preprocessor
+// definition, so no comments may appear inside the macro body.
+#define INSTANCE_TYPE_LIST(V) \
+  V(SHORT_SYMBOL_TYPE) \
+  V(MEDIUM_SYMBOL_TYPE) \
+  V(LONG_SYMBOL_TYPE) \
+  V(SHORT_ASCII_SYMBOL_TYPE) \
+  V(MEDIUM_ASCII_SYMBOL_TYPE) \
+  V(LONG_ASCII_SYMBOL_TYPE) \
+  V(SHORT_CONS_SYMBOL_TYPE) \
+  V(MEDIUM_CONS_SYMBOL_TYPE) \
+  V(LONG_CONS_SYMBOL_TYPE) \
+  V(SHORT_CONS_ASCII_SYMBOL_TYPE) \
+  V(MEDIUM_CONS_ASCII_SYMBOL_TYPE) \
+  V(LONG_CONS_ASCII_SYMBOL_TYPE) \
+  V(SHORT_SLICED_SYMBOL_TYPE) \
+  V(MEDIUM_SLICED_SYMBOL_TYPE) \
+  V(LONG_SLICED_SYMBOL_TYPE) \
+  V(SHORT_SLICED_ASCII_SYMBOL_TYPE) \
+  V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE) \
+  V(LONG_SLICED_ASCII_SYMBOL_TYPE) \
+  V(SHORT_EXTERNAL_SYMBOL_TYPE) \
+  V(MEDIUM_EXTERNAL_SYMBOL_TYPE) \
+  V(LONG_EXTERNAL_SYMBOL_TYPE) \
+  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE) \
+  V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE) \
+  V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE) \
+  V(SHORT_STRING_TYPE) \
+  V(MEDIUM_STRING_TYPE) \
+  V(LONG_STRING_TYPE) \
+  V(SHORT_ASCII_STRING_TYPE) \
+  V(MEDIUM_ASCII_STRING_TYPE) \
+  V(LONG_ASCII_STRING_TYPE) \
+  V(SHORT_CONS_STRING_TYPE) \
+  V(MEDIUM_CONS_STRING_TYPE) \
+  V(LONG_CONS_STRING_TYPE) \
+  V(SHORT_CONS_ASCII_STRING_TYPE) \
+  V(MEDIUM_CONS_ASCII_STRING_TYPE) \
+  V(LONG_CONS_ASCII_STRING_TYPE) \
+  V(SHORT_SLICED_STRING_TYPE) \
+  V(MEDIUM_SLICED_STRING_TYPE) \
+  V(LONG_SLICED_STRING_TYPE) \
+  V(SHORT_SLICED_ASCII_STRING_TYPE) \
+  V(MEDIUM_SLICED_ASCII_STRING_TYPE) \
+  V(LONG_SLICED_ASCII_STRING_TYPE) \
+  V(SHORT_EXTERNAL_STRING_TYPE) \
+  V(MEDIUM_EXTERNAL_STRING_TYPE) \
+  V(LONG_EXTERNAL_STRING_TYPE) \
+  V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
+  V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE) \
+  V(LONG_EXTERNAL_ASCII_STRING_TYPE) \
+  V(LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
+  \
+  V(MAP_TYPE) \
+  V(HEAP_NUMBER_TYPE) \
+  V(FIXED_ARRAY_TYPE) \
+  V(CODE_TYPE) \
+  V(ODDBALL_TYPE) \
+  V(PROXY_TYPE) \
+  V(BYTE_ARRAY_TYPE) \
+  V(FILLER_TYPE) \
+  \
+  V(ACCESSOR_INFO_TYPE) \
+  V(ACCESS_CHECK_INFO_TYPE) \
+  V(INTERCEPTOR_INFO_TYPE) \
+  V(SHARED_FUNCTION_INFO_TYPE) \
+  V(CALL_HANDLER_INFO_TYPE) \
+  V(FUNCTION_TEMPLATE_INFO_TYPE) \
+  V(OBJECT_TEMPLATE_INFO_TYPE) \
+  V(SIGNATURE_INFO_TYPE) \
+  V(TYPE_SWITCH_INFO_TYPE) \
+  V(DEBUG_INFO_TYPE) \
+  V(BREAK_POINT_INFO_TYPE) \
+  V(SCRIPT_TYPE) \
+  \
+  V(JS_OBJECT_TYPE) \
+  V(JS_GLOBAL_OBJECT_TYPE) \
+  V(JS_BUILTINS_OBJECT_TYPE) \
+  V(JS_VALUE_TYPE) \
+  V(JS_ARRAY_TYPE) \
+  \
+  V(JS_FUNCTION_TYPE) \
+
+
+// Since string types are not consecutive, this macro is used to
+// iterate over them.
+// Each entry is V(instance type tag, instance size in bytes used for
+// allocation, lowercase name used when generating per-type helpers).
#define STRING_TYPE_LIST(V) \
+ V(SHORT_SYMBOL_TYPE, TwoByteString::kHeaderSize, short_symbol) \
+ V(MEDIUM_SYMBOL_TYPE, TwoByteString::kHeaderSize, medium_symbol) \
+ V(LONG_SYMBOL_TYPE, TwoByteString::kHeaderSize, long_symbol) \
+ V(SHORT_ASCII_SYMBOL_TYPE, AsciiString::kHeaderSize, short_ascii_symbol) \
+ V(MEDIUM_ASCII_SYMBOL_TYPE, AsciiString::kHeaderSize, medium_ascii_symbol) \
+ V(LONG_ASCII_SYMBOL_TYPE, AsciiString::kHeaderSize, long_ascii_symbol) \
+ V(SHORT_CONS_SYMBOL_TYPE, ConsString::kSize, short_cons_symbol) \
+ V(MEDIUM_CONS_SYMBOL_TYPE, ConsString::kSize, medium_cons_symbol) \
+ V(LONG_CONS_SYMBOL_TYPE, ConsString::kSize, long_cons_symbol) \
+ V(SHORT_CONS_ASCII_SYMBOL_TYPE, ConsString::kSize, short_cons_ascii_symbol) \
+ V(MEDIUM_CONS_ASCII_SYMBOL_TYPE, ConsString::kSize, medium_cons_ascii_symbol)\
+ V(LONG_CONS_ASCII_SYMBOL_TYPE, ConsString::kSize, long_cons_ascii_symbol) \
+ V(SHORT_SLICED_SYMBOL_TYPE, SlicedString::kSize, short_sliced_symbol) \
+ V(MEDIUM_SLICED_SYMBOL_TYPE, SlicedString::kSize, medium_sliced_symbol) \
+ V(LONG_SLICED_SYMBOL_TYPE, SlicedString::kSize, long_sliced_symbol) \
+ V(SHORT_SLICED_ASCII_SYMBOL_TYPE, \
+ SlicedString::kSize, \
+ short_sliced_ascii_symbol) \
+ V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE, \
+ SlicedString::kSize, \
+ medium_sliced_ascii_symbol) \
+ V(LONG_SLICED_ASCII_SYMBOL_TYPE, \
+ SlicedString::kSize, \
+ long_sliced_ascii_symbol) \
+ V(SHORT_EXTERNAL_SYMBOL_TYPE, \
+ ExternalTwoByteString::kSize, \
+ short_external_symbol) \
+ V(MEDIUM_EXTERNAL_SYMBOL_TYPE, \
+ ExternalTwoByteString::kSize, \
+ medium_external_symbol) \
+ V(LONG_EXTERNAL_SYMBOL_TYPE, \
+ ExternalTwoByteString::kSize, \
+ long_external_symbol) \
+ V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \
+ ExternalAsciiString::kSize, \
+ short_external_ascii_symbol) \
+ V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE, \
+ ExternalAsciiString::kSize, \
+ medium_external_ascii_symbol) \
+ V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE, \
+ ExternalAsciiString::kSize, \
+ long_external_ascii_symbol) \
+ V(SHORT_STRING_TYPE, TwoByteString::kHeaderSize, short_string) \
+ V(MEDIUM_STRING_TYPE, TwoByteString::kHeaderSize, medium_string) \
+ V(LONG_STRING_TYPE, TwoByteString::kHeaderSize, long_string) \
+ V(SHORT_ASCII_STRING_TYPE, AsciiString::kHeaderSize, short_ascii_string) \
+ V(MEDIUM_ASCII_STRING_TYPE, AsciiString::kHeaderSize, medium_ascii_string) \
+ V(LONG_ASCII_STRING_TYPE, AsciiString::kHeaderSize, long_ascii_string) \
+ V(SHORT_CONS_STRING_TYPE, ConsString::kSize, short_cons_string) \
+ V(MEDIUM_CONS_STRING_TYPE, ConsString::kSize, medium_cons_string) \
+ V(LONG_CONS_STRING_TYPE, ConsString::kSize, long_cons_string) \
+ V(SHORT_CONS_ASCII_STRING_TYPE, ConsString::kSize, short_cons_ascii_string) \
+ V(MEDIUM_CONS_ASCII_STRING_TYPE, ConsString::kSize, medium_cons_ascii_string)\
+ V(LONG_CONS_ASCII_STRING_TYPE, ConsString::kSize, long_cons_ascii_string) \
+ V(SHORT_SLICED_STRING_TYPE, SlicedString::kSize, short_sliced_string) \
+ V(MEDIUM_SLICED_STRING_TYPE, SlicedString::kSize, medium_sliced_string) \
+ V(LONG_SLICED_STRING_TYPE, SlicedString::kSize, long_sliced_string) \
+ V(SHORT_SLICED_ASCII_STRING_TYPE, \
+ SlicedString::kSize, \
+ short_sliced_ascii_string) \
+ V(MEDIUM_SLICED_ASCII_STRING_TYPE, \
+ SlicedString::kSize, \
+ medium_sliced_ascii_string) \
+ V(LONG_SLICED_ASCII_STRING_TYPE, \
+ SlicedString::kSize, \
+ long_sliced_ascii_string) \
+ V(SHORT_EXTERNAL_STRING_TYPE, \
+ ExternalTwoByteString::kSize, \
+ short_external_string) \
+ V(MEDIUM_EXTERNAL_STRING_TYPE, \
+ ExternalTwoByteString::kSize, \
+ medium_external_string) \
+ V(LONG_EXTERNAL_STRING_TYPE, \
+ ExternalTwoByteString::kSize, \
+ long_external_string) \
+ V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
+ ExternalAsciiString::kSize, \
+ short_external_ascii_string) \
+ V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE, \
+ ExternalAsciiString::kSize, \
+ medium_external_ascii_string) \
+ V(LONG_EXTERNAL_ASCII_STRING_TYPE, \
+ ExternalAsciiString::kSize, \
+ long_external_ascii_string)
+
+// A struct is a simple object containing a set of object-valued fields.
+// Including an object type in this causes the compiler to generate most of
+// the boilerplate code for the class including allocation and garbage
+// collection routines, casts and predicates. All you need to define is the
+// class, methods and object verification routines. Easy, no?
+//
+// Note that for subtle reasons related to the ordering or numerical values of
+// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
+// manually.
+// Each entry is V(TYPE_TAG_PREFIX, ClassName, lowercase_name).
+#define STRUCT_LIST(V) \
+ V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
+ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
+ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
+ V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
+ V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
+ V(SIGNATURE_INFO, SignatureInfo, signature_info) \
+ V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
+ V(DEBUG_INFO, DebugInfo, debug_info) \
+ V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) \
+ V(SCRIPT, Script, script)
+
+
+// We use the full 8 bits of the instance_type field to encode heap object
+// instance types. The high-order bit (bit 7) is set if the object is not a
+// string, and cleared if it is a string.
+// A string instance type is assembled by OR-ing together one tag from each
+// of the groups below (see the string entries of InstanceType).
+const uint32_t kIsNotStringMask = 0x80;
+const uint32_t kStringTag = 0x0;
+const uint32_t kNotStringTag = 0x80;
+
+// If bit 7 is clear, bits 5 and 6 are the string's size (short, medium, or
+// long).
+const uint32_t kStringSizeMask = 0x60;
+const uint32_t kShortStringTag = 0x0;
+const uint32_t kMediumStringTag = 0x20;
+const uint32_t kLongStringTag = 0x40;
+
+// If bit 7 is clear, bit 4 indicates that the string is a symbol (if set) or
+// not (if cleared).
+const uint32_t kIsSymbolMask = 0x10;
+const uint32_t kNotSymbolTag = 0x0;
+const uint32_t kSymbolTag = 0x10;
+
+// If bit 7 is clear, and the string representation is a sequential string,
+// then bit 3 indicates whether the string consists of two-byte characters or
+// one-byte characters.
+const uint32_t kStringEncodingMask = 0x8;
+const uint32_t kTwoByteStringTag = 0x0;
+const uint32_t kAsciiStringTag = 0x8;
+
+// If bit 7 is clear, the low-order 3 bits indicate the representation
+// of the string.
+const uint32_t kStringRepresentationMask = 0x07;
+enum StringRepresentationTag {
+ kSeqStringTag = 0x0, // flat, contiguous character storage
+ kConsStringTag = 0x1, // concatenation of two strings
+ kSlicedStringTag = 0x2, // substring view of another string
+ kExternalStringTag = 0x3 // character storage owned outside the heap
+};
+
+enum InstanceType {
+ // String instance types, assembled by OR-ing a size tag, an optional
+ // symbol tag, an optional ASCII encoding tag and a representation tag
+ // (see the kString* constants above).
+ SHORT_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSeqStringTag,
+ MEDIUM_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSeqStringTag,
+ LONG_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSeqStringTag,
+ SHORT_ASCII_SYMBOL_TYPE =
+ kShortStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
+ MEDIUM_ASCII_SYMBOL_TYPE =
+ kMediumStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
+ LONG_ASCII_SYMBOL_TYPE =
+ kLongStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
+ SHORT_CONS_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kConsStringTag,
+ MEDIUM_CONS_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kConsStringTag,
+ LONG_CONS_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kConsStringTag,
+ SHORT_CONS_ASCII_SYMBOL_TYPE =
+ kShortStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
+ MEDIUM_CONS_ASCII_SYMBOL_TYPE =
+ kMediumStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
+ LONG_CONS_ASCII_SYMBOL_TYPE =
+ kLongStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
+ SHORT_SLICED_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSlicedStringTag,
+ MEDIUM_SLICED_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSlicedStringTag,
+ LONG_SLICED_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSlicedStringTag,
+ SHORT_SLICED_ASCII_SYMBOL_TYPE =
+ kShortStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
+ MEDIUM_SLICED_ASCII_SYMBOL_TYPE =
+ kMediumStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
+ LONG_SLICED_ASCII_SYMBOL_TYPE =
+ kLongStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
+ SHORT_EXTERNAL_SYMBOL_TYPE =
+ kShortStringTag | kSymbolTag | kExternalStringTag,
+ MEDIUM_EXTERNAL_SYMBOL_TYPE =
+ kMediumStringTag | kSymbolTag | kExternalStringTag,
+ LONG_EXTERNAL_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kExternalStringTag,
+ SHORT_EXTERNAL_ASCII_SYMBOL_TYPE =
+ kShortStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
+ MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE =
+ kMediumStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
+ LONG_EXTERNAL_ASCII_SYMBOL_TYPE =
+ kLongStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
+ SHORT_STRING_TYPE = kShortStringTag | kSeqStringTag,
+ MEDIUM_STRING_TYPE = kMediumStringTag | kSeqStringTag,
+ LONG_STRING_TYPE = kLongStringTag | kSeqStringTag,
+ SHORT_ASCII_STRING_TYPE = kShortStringTag | kAsciiStringTag | kSeqStringTag,
+ MEDIUM_ASCII_STRING_TYPE = kMediumStringTag | kAsciiStringTag | kSeqStringTag,
+ LONG_ASCII_STRING_TYPE = kLongStringTag | kAsciiStringTag | kSeqStringTag,
+ SHORT_CONS_STRING_TYPE = kShortStringTag | kConsStringTag,
+ MEDIUM_CONS_STRING_TYPE = kMediumStringTag | kConsStringTag,
+ LONG_CONS_STRING_TYPE = kLongStringTag | kConsStringTag,
+ SHORT_CONS_ASCII_STRING_TYPE =
+ kShortStringTag | kAsciiStringTag | kConsStringTag,
+ MEDIUM_CONS_ASCII_STRING_TYPE =
+ kMediumStringTag | kAsciiStringTag | kConsStringTag,
+ LONG_CONS_ASCII_STRING_TYPE =
+ kLongStringTag | kAsciiStringTag | kConsStringTag,
+ SHORT_SLICED_STRING_TYPE = kShortStringTag | kSlicedStringTag,
+ MEDIUM_SLICED_STRING_TYPE = kMediumStringTag | kSlicedStringTag,
+ LONG_SLICED_STRING_TYPE = kLongStringTag | kSlicedStringTag,
+ SHORT_SLICED_ASCII_STRING_TYPE =
+ kShortStringTag | kAsciiStringTag | kSlicedStringTag,
+ MEDIUM_SLICED_ASCII_STRING_TYPE =
+ kMediumStringTag | kAsciiStringTag | kSlicedStringTag,
+ LONG_SLICED_ASCII_STRING_TYPE =
+ kLongStringTag | kAsciiStringTag | kSlicedStringTag,
+ SHORT_EXTERNAL_STRING_TYPE = kShortStringTag | kExternalStringTag,
+ MEDIUM_EXTERNAL_STRING_TYPE = kMediumStringTag | kExternalStringTag,
+ LONG_EXTERNAL_STRING_TYPE = kLongStringTag | kExternalStringTag,
+ SHORT_EXTERNAL_ASCII_STRING_TYPE =
+ kShortStringTag | kAsciiStringTag | kExternalStringTag,
+ MEDIUM_EXTERNAL_ASCII_STRING_TYPE =
+ kMediumStringTag | kAsciiStringTag | kExternalStringTag,
+ LONG_EXTERNAL_ASCII_STRING_TYPE =
+ kLongStringTag | kAsciiStringTag | kExternalStringTag,
+ // Alias of LONG_EXTERNAL_ASCII_STRING_TYPE (same tag value).
+ LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE = LONG_EXTERNAL_ASCII_STRING_TYPE,
+
+ // Non-string types; values are consecutive starting at kNotStringTag
+ // (bit 7 set).
+ MAP_TYPE = kNotStringTag,
+ HEAP_NUMBER_TYPE,
+ FIXED_ARRAY_TYPE,
+ CODE_TYPE,
+ ODDBALL_TYPE,
+ PROXY_TYPE,
+ BYTE_ARRAY_TYPE,
+ FILLER_TYPE,
+ SMI_TYPE,
+
+ // Struct types (see STRUCT_LIST above).
+ ACCESSOR_INFO_TYPE,
+ ACCESS_CHECK_INFO_TYPE,
+ INTERCEPTOR_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ CALL_HANDLER_INFO_TYPE,
+ FUNCTION_TEMPLATE_INFO_TYPE,
+ OBJECT_TEMPLATE_INFO_TYPE,
+ SIGNATURE_INFO_TYPE,
+ TYPE_SWITCH_INFO_TYPE,
+ DEBUG_INFO_TYPE,
+ BREAK_POINT_INFO_TYPE,
+ SCRIPT_TYPE,
+
+ JS_OBJECT_TYPE,
+ JS_GLOBAL_OBJECT_TYPE,
+ JS_BUILTINS_OBJECT_TYPE,
+ JS_VALUE_TYPE,
+ JS_ARRAY_TYPE,
+
+ JS_FUNCTION_TYPE,
+
+ // Pseudo-types
+ FIRST_NONSTRING_TYPE = MAP_TYPE,
+ FIRST_TYPE = 0x0,
+ LAST_TYPE = JS_FUNCTION_TYPE,
+ // Boundaries for testing the type is a JavaScript "object". Note that
+ // function objects are not counted as objects, even though they are
+ // implemented as such; only values whose typeof is "object" are included.
+ FIRST_JS_OBJECT_TYPE = JS_OBJECT_TYPE,
+ LAST_JS_OBJECT_TYPE = JS_ARRAY_TYPE
+};
+
+
+// Result of a three-way comparison.
+enum CompareResult {
+ LESS = -1,
+ EQUAL = 0,
+ GREATER = 1,
+
+ // For binary (equal / not-equal) comparisons; aliases GREATER (value 1).
+ NOT_EQUAL = GREATER
+};
+
+
+// Declares a pair of inlined accessors (getter and setter) for a
+// boolean-valued field. Note: no trailing backslash after the last
+// declaration -- a line continuation there would silently splice the
+// source line following the macro definition into the macro.
+#define DECL_BOOLEAN_ACCESSORS(name) \
+ inline bool name(); \
+ inline void set_##name(bool value);
+
+
+// Declares a pair of inlined accessors (getter and setter) for a
+// pointer-valued field of the given type.
+#define DECL_ACCESSORS(name, type) \
+ inline type* name(); \
+ inline void set_##name(type* value);
+
+
+class StringStream;
+class ObjectVisitor;
+
+// Heap-allocated (Malloced) holder for a value together with its
+// instance type. NOTE(review): which of ptr/str/number is meaningful
+// presumably depends on `type` -- confirm against callers.
+struct ValueInfo : public Malloced {
+ ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
+ InstanceType type; // instance type describing the value
+ Object* ptr; // heap object value, if any
+ const char* str; // C string value, if any
+ double number; // numeric value, if any
+};
+
+
+// A template-ized version of the IsXXX functions.
+template <class C> static inline bool Is(Object* obj);
+
+
+// Object is the abstract superclass for all classes in the
+// object hierarchy.
+// Object does not use any virtual functions to avoid the
+// allocation of the C++ vtable.
+// Since Smi and Failure are subclasses of Object no
+// data members can be present in Object.
+class Object BASE_EMBEDDED {
+ public:
+ // Type testing.
+ inline bool IsSmi();
+ inline bool IsHeapObject();
+ inline bool IsHeapNumber();
+ inline bool IsString();
+ inline bool IsSeqString();
+ inline bool IsAsciiString();
+ inline bool IsTwoByteString();
+ inline bool IsConsString();
+ inline bool IsSlicedString();
+ inline bool IsExternalString();
+ inline bool IsExternalAsciiString();
+ inline bool IsExternalTwoByteString();
+ inline bool IsShortString();
+ inline bool IsMediumString();
+ inline bool IsLongString();
+ inline bool IsSymbol();
+ inline bool IsNumber();
+ inline bool IsByteArray();
+ inline bool IsFailure();
+ inline bool IsRetryAfterGC();
+ inline bool IsException();
+ inline bool IsJSObject();
+ inline bool IsMap();
+ inline bool IsFixedArray();
+ inline bool IsDescriptorArray();
+ inline bool IsContext();
+ inline bool IsGlobalContext();
+ inline bool IsJSFunction();
+ inline bool IsCode();
+ inline bool IsOddball();
+ inline bool IsSharedFunctionInfo();
+ inline bool IsJSValue();
+ inline bool IsProxy();
+ inline bool IsBoolean();
+ inline bool IsJSArray();
+ inline bool IsHashTable();
+ inline bool IsDictionary();
+ inline bool IsSymbolTable();
+ inline bool IsPrimitive();
+ inline bool IsGlobalObject();
+ inline bool IsJSGlobalObject();
+ inline bool IsJSBuiltinsObject();
+ inline bool IsUndetectableObject();
+ inline bool IsAccessCheckNeeded();
+
+ // Returns true if this object is an instance of the specified
+ // function template.
+ bool IsInstanceOf(FunctionTemplateInfo* type);
+
+ // Struct predicates, one Is##Name() per STRUCT_LIST entry.
+ inline bool IsStruct();
+#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
+ STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
+#undef DECLARE_STRUCT_PREDICATE
+
+ // Oddball testing.
+ INLINE(bool IsUndefined());
+ INLINE(bool IsTheHole());
+ INLINE(bool IsNull());
+ INLINE(bool IsTrue());
+ INLINE(bool IsFalse());
+
+ // Extract the number.
+ inline double Number();
+
+ Object* ToObject(); // ECMA-262 9.9.
+ Object* ToBoolean(); // ECMA-262 9.2.
+
+ // Convert to a JSObject if needed.
+ // global_context is used when creating wrapper object.
+ Object* ToObject(Context* global_context);
+
+ // Converts this to a Smi if possible.
+ // Failure is returned otherwise.
+ inline Object* ToSmi();
+
+ // Fills *result with information about the named property.
+ // NOTE(review): presumably searches the prototype chain -- confirm in
+ // the implementation file.
+ void Lookup(String* name, LookupResult* result);
+
+ // Property access.
+ inline Object* GetProperty(String* key);
+ inline Object* GetProperty(String* key, PropertyAttributes* attributes);
+ Object* GetPropertyWithReceiver(Object* receiver,
+ String* key,
+ PropertyAttributes* attributes);
+ Object* GetProperty(Object* receiver,
+ LookupResult* result,
+ String* key,
+ PropertyAttributes* attributes);
+ Object* GetPropertyWithCallback(Object* receiver,
+ Object* structure,
+ String* name,
+ Object* holder);
+
+ inline Object* GetElement(uint32_t index);
+ Object* GetElementWithReceiver(Object* receiver, uint32_t index);
+
+ // Return the object's prototype (might be Heap::null_value()).
+ Object* GetPrototype();
+
+ // Returns true if this is a JSValue containing a string and the index is
+ // < the length of the string. Used to implement [] on strings.
+ inline bool IsStringObjectWithCharacterAt(uint32_t index);
+
+#ifdef DEBUG
+ // Prints this object with details.
+ void Print();
+ void PrintLn();
+ // Verifies the object.
+ void Verify();
+
+ // Verify a pointer is a valid object pointer.
+ static void VerifyPointer(Object* p);
+#endif
+
+ // Prints this object without details.
+ void ShortPrint();
+
+ // Prints this object without details to a message accumulator.
+ void ShortPrint(StringStream* accumulator);
+
+ // Casting: This cast is only needed to satisfy macros in objects-inl.h.
+ static Object* cast(Object* value) { return value; }
+
+ // Layout description.
+ static const int kSize = 0; // Object does not take up any space.
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
+};
+
+
+// Smi represents integer Numbers that can be stored in 31 bits.
+// Smis are immediate which means they are NOT allocated in the heap.
+// The this pointer has the following format: [31 bit signed int] 0
+// Smi stands for small integer.
+class Smi: public Object {
+ public:
+ // Returns the integer value.
+ inline int value();
+
+ // Convert a value to a Smi object.
+ static inline Smi* FromInt(int value);
+
+ // Returns whether value can be represented in a Smi.
+ static inline bool IsValid(int value);
+
+ // Casting.
+ static inline Smi* cast(Object* object);
+
+ // Dispatched behavior.
+ void SmiPrint();
+ void SmiPrint(StringStream* accumulator);
+#ifdef DEBUG
+ void SmiVerify();
+#endif
+
+ // Min and max limits for Smi values. A Smi occupies one pointer word
+ // with a kSmiTagSize-bit tag, leaving kBitsPerPointer - (kSmiTagSize + 1)
+ // magnitude bits plus a sign bit.
+ static const int kMinValue = -(1 << (kBitsPerPointer - (kSmiTagSize + 1)));
+ static const int kMaxValue = (1 << (kBitsPerPointer - (kSmiTagSize + 1))) - 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
+};
+
+
+// Failure is used for reporting out of memory situations and
+// propagating exceptions through the runtime system. Failure objects
+// are transient and cannot occur as part of the objects graph.
+//
+// Failures are a single word, encoded as follows:
+// +-------------------------+---+--+--+
+// |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
+// +-------------------------+---+--+--+
+//
+// The low two bits, 0-1, are the failure tag, 11. The next two bits,
+// 2-3, are a failure type tag 'tt' with possible values:
+// 00 RETRY_AFTER_GC
+// 01 EXCEPTION
+// 10 INTERNAL_ERROR
+// 11 OUT_OF_MEMORY_EXCEPTION
+//
+// The next three bits, 4-6, are an allocation space tag 'sss'. The
+// allocation space tag is 000 for all failure types except
+// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are
+// (the encoding is found in globals.h):
+// 000 NEW_SPACE
+// 001 OLD_SPACE
+// 010 CODE_SPACE
+// 011 MAP_SPACE
+// 100 LO_SPACE
+//
+// The remaining bits are the number of words requested by the
+// allocation request that failed, and are zeroed except for
+// RETRY_AFTER_GC failures. The 25 bits (on a 32 bit platform) gives
+// a representable range of 2^27 bytes (128MB).
+
+// Failure type tag info.
+const int kFailureTypeTagSize = 2;
+const int kFailureTypeTagMask = (1 << kFailureTypeTagSize) - 1;
+
+class Failure: public Object {
+ public:
+ // RuntimeStubs assumes EXCEPTION = 1 in the compiler-generated code.
+ enum Type {
+ RETRY_AFTER_GC = 0,
+ EXCEPTION = 1, // Returning this marker tells the real exception
+ // is in Top::pending_exception.
+ INTERNAL_ERROR = 2,
+ OUT_OF_MEMORY_EXCEPTION = 3
+ };
+
+ inline Type type() const;
+
+ // Returns the space that needs to be collected for RetryAfterGC failures.
+ inline AllocationSpace allocation_space() const;
+
+ // Returns the number of bytes requested (up to the representable maximum)
+ // for RetryAfterGC failures.
+ inline int requested() const;
+
+ inline bool IsInternalError() const;
+ inline bool IsOutOfMemoryException() const;
+
+ // Factory methods, one per failure type.
+ static Failure* RetryAfterGC(int requested_bytes, AllocationSpace space);
+ static inline Failure* Exception();
+ static inline Failure* InternalError();
+ static inline Failure* OutOfMemoryException();
+ // Casting.
+ static inline Failure* cast(Object* object);
+
+ // Dispatched behavior.
+ void FailurePrint();
+ void FailurePrint(StringStream* accumulator);
+#ifdef DEBUG
+ void FailureVerify();
+#endif
+
+ private:
+ inline int value() const;
+ static inline Failure* Construct(Type type, int value = 0);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Failure);
+};
+
+
+// HeapObject is the superclass for all classes describing heap allocated
+// objects.
+class HeapObject: public Object {
+ public:
+ // [map]: contains a Map which contains the objects reflective information.
+ inline Map* map();
+ inline void set_map(Map* value);
+
+ // Converts an address to a HeapObject pointer.
+ static inline HeapObject* FromAddress(Address address);
+
+ // Returns the address of this HeapObject.
+ inline Address address();
+
+ // Iterates over pointers contained in the object (including the Map)
+ void Iterate(ObjectVisitor* v);
+
+ // Iterates over all pointers contained in the object except the
+ // first map pointer. The object type is given in the first
+ // parameter. This function does not access the map pointer in the
+ // object, and so is safe to call while the map pointer is modified.
+ void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
+
+ // This method only applies to struct objects. Iterates over all the fields
+ // of this struct.
+ void IterateStructBody(int object_size, ObjectVisitor* v);
+
+ // Copy the body from the 'from' object to this.
+ // Please note the two objects must have the same map prior to the call.
+ inline void CopyBody(JSObject* from);
+
+ // Returns the heap object's size in bytes
+ inline int Size();
+
+ // Given a heap object's map pointer, returns the heap size in bytes
+ // Useful when the map pointer field is used for other purposes.
+ // GC internal.
+ inline int SizeFromMap(Map* map);
+
+ // NOTE(review): presumably returns the Object stored in obj's index'th
+ // pointer-sized field -- confirm in objects-inl.h.
+ static inline Object* GetHeapObjectField(HeapObject* obj, int index);
+
+ // Casting.
+ static inline HeapObject* cast(Object* obj);
+
+ // Dispatched behavior.
+ void HeapObjectShortPrint(StringStream* accumulator);
+#ifdef DEBUG
+ void HeapObjectPrint();
+ void HeapObjectVerify();
+ inline void VerifyObjectField(int offset);
+
+ void PrintHeader(const char* id);
+
+ // Verify a pointer is a valid HeapObject pointer that points to object
+ // areas in the heap.
+ static void VerifyHeapPointer(Object* p);
+#endif
+
+ // Layout description.
+ // First field in a heap object is map.
+ static const int kMapOffset = Object::kSize;
+ static const int kSize = kMapOffset + kPointerSize;
+
+ protected:
+ // helpers for calling an ObjectVisitor to iterate over pointers in the
+ // half-open range [start, end) specified as integer offsets
+ inline void IteratePointers(ObjectVisitor* v, int start, int end);
+ // as above, for the single element at "offset"
+ inline void IteratePointer(ObjectVisitor* v, int offset);
+
+ // Computes the object size from the map.
+ // Should only be used from SizeFromMap.
+ int SlowSizeFromMap(Map* map);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
+};
+
+
+// The HeapNumber class describes heap allocated numbers that cannot be
+// represented in a Smi (small integer)
+class HeapNumber: public HeapObject {
+ public:
+ // [value]: number value.
+ inline double value();
+ inline void set_value(double value);
+
+ // Casting.
+ static inline HeapNumber* cast(Object* obj);
+
+ // Dispatched behavior.
+ Object* HeapNumberToBoolean();
+ void HeapNumberPrint();
+ void HeapNumberPrint(StringStream* accumulator);
+#ifdef DEBUG
+ void HeapNumberVerify();
+#endif
+
+ // Layout description: the double value is stored directly after the
+ // HeapObject header.
+ static const int kValueOffset = HeapObject::kSize;
+ static const int kSize = kValueOffset + kDoubleSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
+};
+
+
+// The JSObject describes real heap allocated JavaScript objects with
+// properties.
+// Note that the map of JSObject changes during execution to enable inline
+// caching.
+class JSObject: public HeapObject {
+ public:
+ // [properties]: Backing storage for properties.
+ DECL_ACCESSORS(properties, FixedArray)
+ inline void initialize_properties();
+
+ // [elements]: The elements in the fast case.
+ DECL_ACCESSORS(elements, HeapObject)
+ inline void initialize_elements();
+
+ // Accessors for properties.
+ inline bool HasFastProperties();
+
+ // Do we want to keep the elements in fast case when increasing the
+ // capacity?
+ bool KeepInFastCase(int new_capacity);
+
+ // Accessors for slow properties
+ inline Dictionary* property_dictionary(); // asserts !HasFastProperties
+ inline Dictionary* element_dictionary(); // asserts !HasFastElements
+
+ // Property setters; the overloads differ in how much lookup work the
+ // caller has already done and whether interceptors/callbacks run.
+ Object* SetProperty(String* key,
+ Object* value,
+ PropertyAttributes attributes);
+ Object* SetProperty(LookupResult* result,
+ String* key,
+ Object* value,
+ PropertyAttributes attributes);
+ Object* SetPropertyWithFailedAccessCheck(LookupResult* result,
+ String* name,
+ Object* value);
+ Object* SetPropertyWithCallback(Object* structure,
+ String* name,
+ Object* value,
+ JSObject* holder);
+ Object* SetPropertyWithInterceptor(String* name,
+ Object* value,
+ PropertyAttributes attributes);
+ Object* SetPropertyPostInterceptor(String* name,
+ Object* value,
+ PropertyAttributes attributes);
+ Object* IgnoreAttributesAndSetLocalProperty(String* key,
+ Object* value);
+ Object* SetLazyProperty(LookupResult* result,
+ String* name,
+ Object* value,
+ PropertyAttributes attributes);
+
+ // Returns the class name ([[Class]] property in the specification).
+ String* class_name();
+
+ // Retrieve interceptors.
+ InterceptorInfo* GetNamedInterceptor();
+ InterceptorInfo* GetIndexedInterceptor();
+
+ inline PropertyAttributes GetPropertyAttribute(String* name);
+ PropertyAttributes GetPropertyAttributeWithReceiver(JSObject* receiver,
+ String* name);
+ PropertyAttributes GetLocalPropertyAttribute(String* name);
+
+ Object* DefineAccessor(String* name, bool is_getter, JSFunction* fun,
+ PropertyAttributes attributes);
+ Object* LookupAccessor(String* name, bool is_getter);
+
+ // Used from Object::GetProperty().
+ Object* GetPropertyWithFailedAccessCheck(Object* receiver,
+ LookupResult* result,
+ String* name);
+ Object* GetPropertyWithInterceptor(JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes);
+ Object* GetPropertyPostInterceptor(JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes);
+ Object* GetLazyProperty(Object* receiver,
+ LookupResult* result,
+ String* name,
+ PropertyAttributes* attributes);
+
+ bool HasProperty(String* name) {
+ return GetPropertyAttribute(name) != ABSENT;
+ }
+
+ bool HasLocalProperty(String* name) {
+ return GetLocalPropertyAttribute(name) != ABSENT;
+ }
+
+ Object* DeleteProperty(String* name);
+ Object* DeleteElement(uint32_t index);
+ Object* DeleteLazyProperty(LookupResult* result, String* name);
+
+ // Tests for the fast common case for property enumeration.
+ bool IsSimpleEnum();
+
+ // Tells whether the backing storage for elements is fast (FixedArray).
+ inline bool HasFastElements();
+
+ // Returns true if the backing storage for the slow-case elements of
+ // this object takes up nearly as much space as a fast-case backing
+ // storage would. In that case the JSObject should have fast
+ // elements.
+ bool ShouldHaveFastElements();
+
+ // Return the object's prototype (might be Heap::null_value()).
+ inline Object* GetPrototype();
+
+ // Tells whether the index'th element is present.
+ inline bool HasElement(uint32_t index);
+ bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
+ bool HasLocalElement(uint32_t index);
+
+ bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
+ bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
+
+ Object* SetFastElement(uint32_t index, Object* value);
+
+ // Set the index'th array element.
+ // A Failure object is returned if GC is needed.
+ Object* SetElement(uint32_t index, Object* value);
+
+ // Returns the index'th element.
+ // The undefined object if index is out of bounds.
+ Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
+
+ void SetFastElements(FixedArray* elements);
+ Object* SetSlowElements(Object* length);
+
+ // Lookup interceptors are used for handling properties controlled by host
+ // objects.
+ inline bool HasNamedInterceptor();
+ inline bool HasIndexedInterceptor();
+
+ // Support functions for v8 api (needed for correct interceptor behavior).
+ bool HasRealNamedProperty(String* key);
+ bool HasRealElementProperty(uint32_t index);
+ bool HasRealNamedCallbackProperty(String* key);
+
+ // Initializes the array to a certain length
+ Object* SetElementsLength(Object* length);
+
+ // Get the header size for a JSObject. Used to compute the index of
+ // internal fields as well as the number of internal fields.
+ inline int GetHeaderSize();
+
+ inline int GetInternalFieldCount();
+ inline Object* GetInternalField(int index);
+ inline void SetInternalField(int index, Object* value);
+
+ // Returns a deep copy of the JavaScript object.
+ // Properties and elements are copied too.
+ // Returns failure if allocation failed.
+ Object* Copy(PretenureFlag pretenure = NOT_TENURED);
+
+ // Lookup a property. If found, the result is valid and has
+ // detailed information.
+ void LocalLookup(String* name, LookupResult* result);
+ void Lookup(String* name, LookupResult* result);
+
+ // The following lookup functions skip interceptors.
+ void LocalLookupRealNamedProperty(String* name, LookupResult* result);
+ void LookupRealNamedProperty(String* name, LookupResult* result);
+ void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
+ void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
+
+ // Returns the number of properties on this object filtering out properties
+ // with the specified attributes (ignoring interceptors).
+ int NumberOfLocalProperties(PropertyAttributes filter);
+ // Returns the number of enumerable properties (ignoring interceptors).
+ int NumberOfEnumProperties();
+ // Fill in details for properties into storage.
+ void GetLocalPropertyNames(FixedArray* storage);
+
+ // Returns the number of properties on this object filtering out properties
+ // with the specified attributes (ignoring interceptors).
+ int NumberOfLocalElements(PropertyAttributes filter);
+ // Returns the number of enumerable elements (ignoring interceptors).
+ int NumberOfEnumElements();
+ // Returns the number of elements on this object filtering out elements
+ // with the specified attributes (ignoring interceptors).
+ int GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter);
+ // Count and fill in the enumerable elements into storage.
+ // (storage->length() == NumberOfEnumElements()).
+ // If storage is NULL, will count the elements without adding
+ // them to any storage.
+ // Returns the number of enumerable elements.
+ int GetEnumElementKeys(FixedArray* storage);
+
+ // Add a property to a fast-case object using a map transition to
+ // new_map.
+ Object* AddFastPropertyUsingMap(Map* new_map,
+ String* name,
+ Object* value);
+
+ // Add a constant function property to a fast-case object.
+ Object* AddConstantFunctionProperty(String* name,
+ JSFunction* function,
+ PropertyAttributes attributes);
+
+ // Replace a constant function property on a fast-case object.
+ Object* ReplaceConstantFunctionProperty(String* name,
+ Object* value);
+
+ // Add a property to a fast-case object.
+ Object* AddFastProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes);
+
+ // Add a property to a slow-case object.
+ Object* AddSlowProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes);
+
+ // Add a property to an object.
+ Object* AddProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes);
+
+ // Convert the object to use the canonical dictionary
+ // representation.
+ Object* NormalizeProperties();
+ Object* NormalizeElements();
+
+ // Transform slow named properties to fast variants.
+ // Returns failure if allocation failed.
+ Object* TransformToFastProperties(int unused_property_fields);
+
+ // initializes the body after properties slot, properties slot is
+ // initialized by set_properties
+ // Note: this call does not update write barrier, it is caller's
+ // responsibility to ensure that *v* can be collected without WB here.
+ inline void InitializeBody(int object_size);
+
+ // Check whether this object references another object
+ bool ReferencesObject(Object* obj);
+
+ // Casting.
+ static inline JSObject* cast(Object* obj);
+
+ // Dispatched behavior.
+ void JSObjectIterateBody(int object_size, ObjectVisitor* v);
+ void JSObjectShortPrint(StringStream* accumulator);
+#ifdef DEBUG
+ void JSObjectPrint();
+ void JSObjectVerify();
+ void PrintProperties();
+ void PrintElements();
+
+ // Structure for collecting spill information about JSObjects.
+ class SpillInformation {
+ public:
+ void Clear();
+ void Print();
+ int number_of_objects_;
+ int number_of_objects_with_fast_properties_;
+ int number_of_objects_with_fast_elements_;
+ int number_of_fast_used_fields_;
+ int number_of_fast_unused_fields_;
+ int number_of_slow_used_properties_;
+ int number_of_slow_unused_properties_;
+ int number_of_fast_used_elements_;
+ int number_of_fast_unused_elements_;
+ int number_of_slow_used_elements_;
+ int number_of_slow_unused_elements_;
+ };
+
+ void IncrementSpillStatistics(SpillInformation* info);
+#endif
+ Object* SlowReverseLookup(Object* value);
+
+ // Tuning limits for the fast-elements representation.
+ static const uint32_t kMaxGap = 1024;
+ static const int kMaxFastElementsLength = 5000;
+
+ // Layout description.
+ static const int kPropertiesOffset = HeapObject::kSize;
+ static const int kElementsOffset = kPropertiesOffset + kPointerSize;
+ static const int kHeaderSize = kElementsOffset + kPointerSize;
+
+ Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
+
+ private:
+ Object* SetElementWithInterceptor(uint32_t index, Object* value);
+ Object* SetElementPostInterceptor(uint32_t index, Object* value);
+
+ Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
+
+ Object* DeletePropertyPostInterceptor(String* name);
+ Object* DeletePropertyWithInterceptor(String* name);
+
+ Object* DeleteElementPostInterceptor(uint32_t index);
+ Object* DeleteElementWithInterceptor(uint32_t index);
+
+ PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
+ String* name,
+ bool continue_search);
+ PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
+ String* name,
+ bool continue_search);
+ PropertyAttributes GetPropertyAttribute(JSObject* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search);
+
+ // Returns true if most of the elements backing storage is used.
+ bool HasDenseElements();
+
+ Object* DefineGetterSetter(String* name, PropertyAttributes attributes);
+
+ void LookupInDescriptor(String* name, LookupResult* result);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
+};
+
+
+// Abstract superclass for arrays. It provides length behavior.
+class Array: public HeapObject {
+ public:
+ // [length]: length of the array.
+ inline int length();
+ inline void set_length(int value);
+
+ // Convert an object to an array index.
+ // Returns true if the conversion succeeded.
+ static inline bool IndexFromObject(Object* object, uint32_t* index);
+
+ // Layout descriptor.
+ static const int kLengthOffset = HeapObject::kSize;
+ static const int kHeaderSize = kLengthOffset + kIntSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
+};
+
+
+// FixedArray describes fixed-sized arrays whose element
+// type is Object*.
+
+class FixedArray: public Array {
+ public:
+
+ // Setter and getter for elements.
+ inline Object* get(int index);
+ inline void set(int index, Object* value);
+
+ // Setters for frequently used oddballs located in old space.
+ inline void set_undefined(int index);
+ inline void set_the_hole(int index);
+
+ // Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
+ enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
+ inline void set(int index, Object* value, WriteBarrierMode mode);
+ // Return the write barrier mode for this array.
+ inline WriteBarrierMode GetWriteBarrierMode();
+
+ // Copy operations. May return a failure object if allocation fails.
+ Object* Copy();
+ Object* CopySize(int new_length);
+
+ // Add the elements of a JSArray to this FixedArray.
+ Object* AddKeysFromJSArray(JSArray* array);
+
+ // Compute the union of this and other.
+ Object* UnionOfKeys(FixedArray* other);
+
+ // Copy a sub array from the receiver to dest.
+ void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
+
+ // Garbage collection support.
+ static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
+
+ // Casting.
+ static inline FixedArray* cast(Object* obj);
+
+ // Dispatched behavior.
+ int FixedArraySize() { return SizeFor(length()); }
+ void FixedArrayIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+ void FixedArrayPrint();
+ void FixedArrayVerify();
+#endif
+
+ // Swap two elements.
+ void Swap(int i, int j);
+
+ // Sort this array and the smis as pairs wrt. the smis.
+ void SortPairs(FixedArray* smis);
+
+ protected:
+ // Set operation on FixedArray without using write barriers.
+ static inline void fast_set(FixedArray* array, int index, Object* value);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
+};
+
+
+// DescriptorArrays are fixed arrays used to hold instance descriptors.
+// The format of these objects is:
+// [0]: point to a fixed array with (value, detail) pairs.
+// [1]: next enumeration index (Smi), or pointer to small fixed array:
+// [0]: next enumeration index (Smi)
+// [1]: pointer to fixed array with enum cache
+// [2]: first key
+// [length() - 1]: last key
+//
+class DescriptorArray: public FixedArray {
+ public:
+ // Returns the number of descriptors in the array.
+ int number_of_descriptors() {
+ int len = length();
+ return len == 0 ? 0 : len - kFirstIndex;
+ }
+
+ // Returns the next enumeration index, reading through the enum-cache
+ // bridge if one is installed.
+ int NextEnumerationIndex() {
+ if (length() == 0) return PropertyDetails::kInitialIndex;
+ Object* obj = get(kEnumerationIndexIndex);
+ if (obj->IsSmi()) {
+ return Smi::cast(obj)->value();
+ } else {
+ Object* index = FixedArray::cast(obj)->get(kEnumCacheBridgeEnumIndex);
+ return Smi::cast(index)->value();
+ }
+ }
+
+ // Set next enumeration index and flush any enum cache.
+ void SetNextEnumerationIndex(int value) {
+ fast_set(this, kEnumerationIndexIndex, Smi::FromInt(value));
+ }
+
+ // An enum cache is present iff slot [1] holds the bridge array
+ // rather than a Smi.
+ bool HasEnumCache() {
+ return length() > 0 && !get(kEnumerationIndexIndex)->IsSmi();
+ }
+
+ Object* GetEnumCache() {
+ ASSERT(HasEnumCache());
+ FixedArray* bridge = FixedArray::cast(get(kEnumerationIndexIndex));
+ return bridge->get(kEnumCacheBridgeCacheIndex);
+ }
+
+ // Initialize or change the enum cache,
+ // using the supplied storage for the small "bridge".
+ void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
+
+ // Accessors for fetching instance descriptor at descriptor number.
+ inline String* GetKey(int descriptor_number);
+ inline Object* GetValue(int descriptor_number);
+ inline Smi* GetDetails(int descriptor_number);
+
+ // Accessor for complete descriptor.
+ inline void Get(int descriptor_number, Descriptor* desc);
+ inline void Set(int descriptor_number, Descriptor* desc);
+
+ void ReplaceConstantFunction(int descriptor_number, JSFunction* value);
+
+ // Copy the descriptor array, insert a new descriptor and optionally
+ // remove map transitions.
+ Object* CopyInsert(Descriptor* desc, bool remove_map_transitions = false);
+
+ // Copy the descriptor array, replace the property index and attributes
+ // of the named property, but preserve its enumeration index.
+ Object* CopyReplace(String* name, int index, PropertyAttributes attributes);
+
+ // Sort the instance descriptors by the hash codes of their keys.
+ void Sort();
+
+ // Is the descriptor array sorted and without duplicates?
+ bool IsSortedNoDuplicates();
+
+ // Search the instance descriptors for given name.
+ inline int Search(String* name);
+
+ // Tells whether the name is present in the array.
+ bool Contains(String* name) { return kNotFound != Search(name); }
+
+ // Perform a binary search in the instance descriptors represented
+ // by this fixed array. low and high are descriptor indices. If there
+ // are three instance descriptors in this array it should be called
+ // with low=0 and high=2.
+ int BinarySearch(String* name, int low, int high);
+
+ // Allocate a new descriptor array with room for the given number of
+ // descriptors. May return a failure object.
+ static Object* Allocate(int number_of_descriptors);
+
+ // Casting.
+ static inline DescriptorArray* cast(Object* obj);
+
+ // Constant for denoting key was not found.
+ static const int kNotFound = -1;
+
+ static const int kContentArrayIndex = 0;
+ static const int kEnumerationIndexIndex = 1;
+ static const int kFirstIndex = 2;
+
+ // The length of the "bridge" to the enum cache.
+ static const int kEnumCacheBridgeLength = 2;
+ static const int kEnumCacheBridgeEnumIndex = 0;
+ static const int kEnumCacheBridgeCacheIndex = 1;
+
+ // Layout description.
+ static const int kContentArrayOffset = FixedArray::kHeaderSize;
+ static const int kEnumerationIndexOffset = kContentArrayOffset + kPointerSize;
+ static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
+
+ // Layout description for the bridge array.
+ static const int kEnumCacheBridgeEnumOffset = FixedArray::kHeaderSize;
+ static const int kEnumCacheBridgeCacheOffset =
+ kEnumCacheBridgeEnumOffset + kPointerSize;
+
+#ifdef DEBUG
+ // Print all the descriptors.
+ void PrintDescriptors();
+#endif
+
+ // The maximum number of descriptors we want in a descriptor array (should
+ // fit in a page).
+ static const int kMaxNumberOfDescriptors = 1024 + 512;
+
+ private:
+ // Conversion from descriptor number to array indices.
+ // Keys live in this array itself (after the two header slots);
+ // values and details live as pairs in the content array.
+ static int ToKeyIndex(int descriptor_number) {
+ return descriptor_number+kFirstIndex;
+ }
+ static int ToValueIndex(int descriptor_number) {
+ return descriptor_number << 1;
+ }
+ static int ToDetailsIndex(int descriptor_number) {
+ return( descriptor_number << 1) + 1;
+ }
+
+ // Swap operation on FixedArray without using write barriers.
+ static inline void fast_swap(FixedArray* array, int first, int second);
+
+ // Swap descriptor first and second.
+ inline void Swap(int first, int second);
+
+ FixedArray* GetContentArray() {
+ return FixedArray::cast(get(kContentArrayIndex));
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DescriptorArray);
+};
+
+
+// HashTable is a subclass of FixedArray that implements a hash table
+// that uses open addressing and quadratic probing.
+//
+// In order for the quadratic probing to work, elements that have not
+// yet been used and elements that have been deleted are
+// distinguished. Probing continues when deleted elements are
+// encountered and stops when unused elements are encountered.
+//
+// - Elements with key == undefined have not been used yet.
+// - Elements with key == null have been deleted.
+//
+// The hash table class is parameterized with a prefix size and with
+// the size, including the key size, of the elements held in the hash
+// table. The prefix size indicates an amount of memory in the
+// beginning of the backing storage that can be used for non-element
+// information by subclasses.
+template<int prefix_size, int element_size>
+class HashTable: public FixedArray {
+ public:
+ // Returns the number of elements in the dictionary.
+ int NumberOfElements() {
+ return Smi::cast(get(kNumberOfElementsIndex))->value();
+ }
+
+ // Returns the capacity of the dictionary.
+ int Capacity() {
+ return Smi::cast(get(kCapacityIndex))->value();
+ }
+
+ // ElementAdded should be called whenever an element is added to a
+ // dictionary.
+ void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }
+
+ // ElementRemoved should be called whenever an element is removed from
+ // a dictionary.
+ void ElementRemoved() { SetNumberOfElements(NumberOfElements() - 1); }
+ void ElementsRemoved(int n) { SetNumberOfElements(NumberOfElements() - n); }
+
+ // Returns a new array for dictionary usage. Might return Failure.
+ static Object* Allocate(int at_least_space_for);
+
+ // Returns the key at entry.
+ Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
+
+ // Tells whether k is a real key. Null and undefined are not allowed
+ // as keys and can be used to indicate missing or deleted elements.
+ bool IsKey(Object* k) {
+ return !k->IsNull() && !k->IsUndefined();
+ }
+
+ // Garbage collection support.
+ void IteratePrefix(ObjectVisitor* visitor);
+ void IterateElements(ObjectVisitor* visitor);
+
+ // Casting.
+ static inline HashTable* cast(Object* obj);
+
+ // Key is an abstract superclass for keys.
+ class Key {
+ public:
+ // Returns whether the other object matches this key.
+ virtual bool IsMatch(Object* other) = 0;
+ typedef uint32_t (*HashFunction)(Object* obj);
+ // Returns the hash function used for this key.
+ virtual HashFunction GetHashFunction() = 0;
+ // Returns the hash value for this key.
+ virtual uint32_t Hash() = 0;
+ // Returns the key object for storing into the dictionary.
+ // If allocation fails a failure object is returned.
+ virtual Object* GetObject() = 0;
+ virtual bool IsStringKey() = 0;
+ // Required.
+ virtual ~Key() {}
+ };
+
+ // Compute the probe offset (quadratic probing).
+ INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
+ return (n + n * n) >> 1;
+ }
+
+ static const int kNumberOfElementsIndex = 0;
+ static const int kCapacityIndex = 1;
+ static const int kPrefixStartIndex = 2;
+ static const int kElementsStartIndex = kPrefixStartIndex + prefix_size;
+ static const int kElementSize = element_size;
+ static const int kElementsStartOffset =
+ kHeaderSize + kElementsStartIndex * kPointerSize;
+
+ protected:
+ // Find entry for key otherwise return -1.
+ int FindEntry(Key* key);
+
+ // Find the entry at which to insert element with the given key that
+ // has the given hash value.
+ uint32_t FindInsertionEntry(Object* key, uint32_t hash);
+
+ // Returns the index for an entry (of the key)
+ static inline int EntryToIndex(int entry) {
+ return (entry * kElementSize) + kElementsStartIndex;
+ }
+
+ // Update the number of elements in the dictionary.
+ void SetNumberOfElements(int nof) {
+ fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
+ }
+
+ // Sets the capacity of the hash table.
+ void SetCapacity(int capacity) {
+ // To scale a computed hash code to fit within the hash table, we
+ // use bit-wise AND with a mask, so the capacity must be positive
+ // and non-zero.
+ ASSERT(capacity > 0);
+ fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
+ }
+
+
+ // Returns probe entry.
+ static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
+ ASSERT(IsPowerOf2(size));
+ return (hash + GetProbeOffset(number)) & (size - 1);
+ }
+
+ // Ensure enough space for n additional elements.
+ Object* EnsureCapacity(int n, Key* key);
+};
+
+
+// SymbolTable.
+//
+// No special elements in the prefix and the element size is 1
+// because only the symbol itself (the key) needs to be stored.
+class SymbolTable: public HashTable<0, 1> {
+ public:
+ // Find symbol in the symbol table. If it is not there yet, it is
+ // added. The return value is the symbol table which might have
+ // been enlarged. If the return value is not a failure, the symbol
+ // pointer *s is set to the symbol found.
+ Object* LookupSymbol(Vector<const char> str, Object** s);
+ Object* LookupString(String* key, Object** s);
+
+ // Casting.
+ static inline SymbolTable* cast(Object* obj);
+
+ private:
+ // Shared lookup used by both public lookup variants above.
+ Object* LookupKey(Key* key, Object** s);
+ class Utf8Key; // Key based on utf8 string.
+ class StringKey; // Key based on String*.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SymbolTable);
+};
+
+
+// Dictionary for keeping properties and elements in slow case.
+//
+// One element in the prefix is used for storing non-element
+// information about the dictionary.
+//
+// The rest of the array embeds triples of (key, value, details).
+// If key == undefined the triple is empty.
+// If key == null the triple has been deleted.
+// Otherwise key contains the name of a property.
+class DictionaryBase: public HashTable<2, 3> {};
+
+class Dictionary: public DictionaryBase {
+ public:
+ // Returns the value at entry.
+ Object* ValueAt(int entry) { return get(EntryToIndex(entry)+1); }
+
+ // Set the value for entry.
+ void ValueAtPut(int entry, Object* value) {
+ set(EntryToIndex(entry)+1, value);
+ }
+
+ // Returns the property details for the property at entry.
+ PropertyDetails DetailsAt(int entry) {
+ return PropertyDetails(Smi::cast(get(EntryToIndex(entry) + 2)));
+ }
+
+ // Set the details for entry.
+ void DetailsAtPut(int entry, PropertyDetails value) {
+ set(EntryToIndex(entry) + 2, value.AsSmi());
+ }
+
+ // Remove all entries where key is a number and (from <= key && key < to).
+ void RemoveNumberEntries(uint32_t from, uint32_t to);
+
+ // Sorting support
+ Object* RemoveHoles();
+ void CopyValuesTo(FixedArray* elements);
+
+ // Casting.
+ static inline Dictionary* cast(Object* obj);
+
+ // Find entry for string key otherwise return -1.
+ int FindStringEntry(String* key);
+
+ // Find entry for number key otherwise return -1.
+ int FindNumberEntry(uint32_t index);
+
+ // Delete a property from the dictionary.
+ Object* DeleteProperty(int entry);
+
+ // Type specific at put (default NONE attributes is used when adding).
+ Object* AtStringPut(String* key, Object* value);
+ Object* AtNumberPut(uint32_t key, Object* value);
+
+ Object* AddStringEntry(String* key, Object* value, PropertyDetails details);
+ Object* AddNumberEntry(uint32_t key, Object* value, PropertyDetails details);
+
+ // Set an existing string entry or add a new one if needed.
+ Object* SetOrAddStringEntry(String* key,
+ Object* value,
+ PropertyDetails details);
+
+ // Returns the number of elements in the dictionary filtering out properties
+ // with the specified attributes.
+ int NumberOfElementsFilterAttributes(PropertyAttributes filter);
+
+ // Returns the number of enumerable elements in the dictionary.
+ int NumberOfEnumElements();
+
+ // Copies keys to preallocated fixed array.
+ void CopyKeysTo(FixedArray* storage, PropertyAttributes filter);
+ // Copies enumerable keys to preallocated fixed array.
+ void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array);
+ // Fill in details for properties into storage.
+ void CopyKeysTo(FixedArray* storage);
+
+ // Returns the backing-array index of the value slot for entry.
+ static int ValueIndexFor(int entry) { return EntryToIndex(entry)+1; }
+
+ // For transforming properties of a JSObject.
+ Object* TransformPropertiesToFastFor(JSObject* obj,
+ int unused_property_fields);
+
+ // If slow elements are required we will never go back to fast-case
+ // for the elements kept in this dictionary. We require slow
+ // elements if an element has been added at an index larger than
+ // kRequiresSlowElementsLimit.
+ inline bool requires_slow_elements();
+
+ // Get the value of the max number key that has been added to this
+ // dictionary. max_number_key can only be called if
+ // requires_slow_elements returns false.
+ inline uint32_t max_number_key();
+
+ // Accessors for next enumeration index.
+ void SetNextEnumerationIndex(int index) {
+ fast_set(this, kNextEnumnerationIndexIndex, Smi::FromInt(index));
+ }
+
+ int NextEnumerationIndex() {
+ return Smi::cast(get(kNextEnumnerationIndexIndex))->value();
+ }
+
+ // Returns a new array for dictionary usage. Might return Failure.
+ static Object* Allocate(int at_least_space_for);
+
+ // Ensure enough space for n additional elements.
+ Object* EnsureCapacity(int n, Key* key);
+
+#ifdef DEBUG
+ void Print();
+#endif
+ // Returns the key (slow).
+ Object* SlowReverseLookup(Object* value);
+
+ // Bit masks.
+ static const int kRequiresSlowElementsMask = 1;
+ static const int kRequiresSlowElementsTagSize = 1;
+ static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1;
+
+ private:
+ // Generic at put operation.
+ Object* AtPut(Key* key, Object* value);
+
+ Object* Add(Key* key, Object* value, PropertyDetails details);
+
+ // Add entry to dictionary.
+ void AddEntry(Object* key,
+ Object* value,
+ PropertyDetails details,
+ uint32_t hash);
+
+ // Sets the entry to (key, value) pair.
+ inline void SetEntry(int entry,
+ Object* key,
+ Object* value,
+ PropertyDetails details);
+
+ void UpdateMaxNumberKey(uint32_t key);
+
+ // Generate new enumeration indices to avoid enumeration index overflow.
+ Object* GenerateNewEnumerationIndices();
+
+ static const int kMaxNumberKeyIndex = kPrefixStartIndex;
+ static const int kNextEnumnerationIndexIndex = kMaxNumberKeyIndex + 1;
+
+ class NumberKey; // Key containing uint32_t.
+ class StringKey; // Key containing String*.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Dictionary);
+};
+
+
+// ByteArray represents fixed sized byte arrays. Used by the outside world,
+// such as PCRE, and also by the memory allocator and garbage collector to
+// fill in free blocks in the heap.
+class ByteArray: public Array {
+ public:
+ // Setter and getter.
+ inline byte get(int index);
+ inline void set(int index, byte value);
+
+ // Treat contents as an int array.
+ inline int get_int(int index);
+
+ static int SizeFor(int length) {
+ return kHeaderSize + OBJECT_SIZE_ALIGN(length);
+ }
+ // We use byte arrays for free blocks in the heap. Given a desired size in
+ // bytes that is a multiple of the word size and big enough to hold a byte
+ // array, this function returns the number of elements a byte array should
+ // have.
+ static int LengthFor(int size_in_bytes) {
+ ASSERT(IsAligned(size_in_bytes, kPointerSize));
+ ASSERT(size_in_bytes >= kHeaderSize);
+ return size_in_bytes - kHeaderSize;
+ }
+
+ // Returns data start address.
+ inline Address GetDataStartAddress();
+
+ // Returns a pointer to the ByteArray object for a given data start address.
+ static inline ByteArray* FromDataStartAddress(Address address);
+
+ // Casting.
+ static inline ByteArray* cast(Object* obj);
+
+ // Dispatched behavior.
+ int ByteArraySize() { return SizeFor(length()); }
+#ifdef DEBUG
+ void ByteArrayPrint();
+ void ByteArrayVerify();
+#endif
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
+};
+
+
+// Code describes objects with on-the-fly generated machine code.
+class Code: public HeapObject {
+ public:
+ // Opaque data type for encapsulating code flags like kind, inline
+ // cache state, and arguments count.
+ enum Flags { };
+
+ enum Kind {
+ FUNCTION,
+ STUB,
+ BUILTIN,
+ LOAD_IC,
+ KEYED_LOAD_IC,
+ CALL_IC,
+ STORE_IC,
+ KEYED_STORE_IC,
+
+ // Pseudo-kinds.
+ FIRST_IC_KIND = LOAD_IC,
+ LAST_IC_KIND = KEYED_STORE_IC
+ };
+
+ enum {
+ NUMBER_OF_KINDS = LAST_IC_KIND + 1
+ };
+
+ // A state indicates that inline cache in this Code object contains
+ // objects or relative instruction addresses.
+ enum ICTargetState {
+ IC_TARGET_IS_ADDRESS,
+ IC_TARGET_IS_OBJECT
+ };
+
+#ifdef DEBUG
+ static const char* Kind2String(Kind kind);
+#endif
+
+ // [instruction_size]: Size of the native instructions.
+ inline int instruction_size();
+ inline void set_instruction_size(int value);
+
+ // [relocation_size]: Size of relocation information.
+ inline int relocation_size();
+ inline void set_relocation_size(int value);
+
+ // [sinfo_size]: Size of scope information.
+ inline int sinfo_size();
+ inline void set_sinfo_size(int value);
+
+ // [flags]: Various code flags.
+ inline Flags flags();
+ inline void set_flags(Flags flags);
+
+ // [flags]: Access to specific code flags.
+ inline Kind kind();
+ inline InlineCacheState state(); // only valid for IC stubs
+ inline PropertyType type(); // only valid for monomorphic IC stubs
+ inline int arguments_count(); // only valid for call IC stubs
+ inline CodeStub::Major major_key(); // only valid for kind STUB
+
+ // Testers for IC stub kinds.
+ inline bool is_inline_cache_stub();
+ inline bool is_load_stub() { return kind() == LOAD_IC; }
+ inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
+ inline bool is_store_stub() { return kind() == STORE_IC; }
+ inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
+ inline bool is_call_stub() { return kind() == CALL_IC; }
+
+ // [ic_flag]: State of inline cache targets. The flag is set to the
+ // object variant in ConvertICTargetsFromAddressToObject, and set to
+ // the address variant in ConvertICTargetsFromObjectToAddress.
+ inline ICTargetState ic_flag();
+ inline void set_ic_flag(ICTargetState value);
+
+ // Flags operations.
+ static inline Flags ComputeFlags(Kind kind,
+ InlineCacheState state = UNINITIALIZED,
+ PropertyType type = NORMAL,
+ int argc = -1);
+
+ static inline Flags ComputeMonomorphicFlags(Kind kind,
+ PropertyType type,
+ int argc = -1);
+
+ static inline Kind ExtractKindFromFlags(Flags flags);
+ static inline InlineCacheState ExtractStateFromFlags(Flags flags);
+ static inline PropertyType ExtractTypeFromFlags(Flags flags);
+ static inline int ExtractArgumentsCountFromFlags(Flags flags);
+ static inline Flags RemoveTypeFromFlags(Flags flags);
+
+
+ // Returns the address of the first instruction.
+ inline byte* instruction_start();
+
+ // Returns the size of the instructions, padding, and relocation information.
+ inline int body_size();
+
+ // Returns the address of the first relocation info (read backwards!).
+ inline byte* relocation_start();
+
+ // Code entry point.
+ inline byte* entry();
+
+ // Returns true if pc is inside this object's instructions.
+ inline bool contains(byte* pc);
+
+ // Returns the address of the scope information.
+ inline byte* sinfo_start();
+
+ // Convert inline cache target from address to code object before GC.
+ void ConvertICTargetsFromAddressToObject();
+
+ // Convert inline cache target from code object to address after GC.
+ void ConvertICTargetsFromObjectToAddress();
+
+ // Relocate the code by delta bytes. Called to signal that this code
+ // object has been moved by delta bytes.
+ void Relocate(int delta);
+
+ // Migrate code described by desc.
+ void CopyFrom(const CodeDesc& desc);
+
+ // Returns the object size for a given body and sinfo size (Used for
+ // allocation).
+ static int SizeFor(int body_size, int sinfo_size) {
+ ASSERT_SIZE_TAG_ALIGNED(body_size);
+ ASSERT_SIZE_TAG_ALIGNED(sinfo_size);
+ return kHeaderSize + body_size + sinfo_size;
+ }
+
+ // Locating source position.
+ int SourcePosition(Address pc);
+ int SourceStatementPosition(Address pc);
+
+ // Casting.
+ static inline Code* cast(Object* obj);
+
+ // Dispatched behavior.
+ int CodeSize() { return SizeFor(body_size(), sinfo_size()); }
+ void CodeIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+ void CodePrint();
+ void CodeVerify();
+#endif
+
+ // Layout description.
+ static const int kInstructionSizeOffset = HeapObject::kSize;
+ static const int kRelocationSizeOffset = kInstructionSizeOffset + kIntSize;
+ static const int kSInfoSizeOffset = kRelocationSizeOffset + kIntSize;
+ static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
+ static const int kICFlagOffset = kFlagsOffset + kIntSize;
+ static const int kHeaderSize = kICFlagOffset + kIntSize;
+
+ // Flags layout.
+ static const int kFlagsStateShift = 0;
+ static const int kFlagsKindShift = 3;
+ static const int kFlagsTypeShift = 6;
+ static const int kFlagsArgumentsCountShift = 9;
+
+ static const int kFlagsStateMask = 0x00000007; // 000000111
+ static const int kFlagsKindMask = 0x00000038; // 000111000
+ static const int kFlagsTypeMask = 0x000001C0; // 111000000
+ static const int kFlagsArgumentsCountMask = 0xFFFFFE00;
+
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
+};
+
+
+// All heap objects have a Map that describes their structure.
+// A Map contains information about:
+// - Size information about the object
+// - How to iterate over an object (for garbage collection)
+class Map: public HeapObject {
+ public:
+ // instance size.
+ inline int instance_size();
+ inline void set_instance_size(int value);
+
+ // instance type.
+ inline InstanceType instance_type();
+ inline void set_instance_type(InstanceType value);
+
+ // tells how many unused property fields are available in the instance.
+ // (only used for JSObject in fast mode).
+ inline int unused_property_fields();
+ inline void set_unused_property_fields(int value);
+
+ // bit field.
+ inline byte bit_field();
+ inline void set_bit_field(byte value);
+
+ // Tells whether this object has a special lookup behavior.
+ void set_special_lookup() {
+ set_bit_field(bit_field() | (1 << kHasSpecialLookup));
+ }
+
+ bool has_special_lookup() {
+ return ((1 << kHasSpecialLookup) & bit_field()) != 0;
+ }
+
+ // Tells whether the object in the prototype property will be used
+ // for instances created from this function. If the prototype
+ // property is set to a value that is not a JSObject, the prototype
+ // property will not be used to create instances of the function.
+ // See ECMA-262, 13.2.2.
+ inline void set_non_instance_prototype(bool value);
+ inline bool has_non_instance_prototype();
+
+ // Tells whether the instance with this map should be ignored by the
+ // __proto__ accessor.
+ inline void set_is_hidden_prototype() {
+ set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
+ }
+
+ inline bool is_hidden_prototype() {
+ return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
+ }
+
+ // Tells whether the instance has a named interceptor.
+ inline void set_has_named_interceptor() {
+ set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
+ }
+
+ inline bool has_named_interceptor() {
+ return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
+ }
+
+ // Tells whether the instance has an indexed interceptor.
+ inline void set_has_indexed_interceptor() {
+ set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
+ }
+
+ inline bool has_indexed_interceptor() {
+ return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
+ }
+
+ // Tells whether the instance is undetectable.
+ // An undetectable object is a special class of JSObject: 'typeof' operator
+ // returns undefined, ToBoolean returns false. Otherwise it behaves like
+ // a normal JS object. It is useful for implementing undetectable
+ // document.all in Firefox & Safari.
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
+ inline void set_is_undetectable() {
+ set_bit_field(bit_field() | (1 << kIsUndetectable));
+ }
+
+ inline bool is_undetectable() {
+ return ((1 << kIsUndetectable) & bit_field()) != 0;
+ }
+
+ // Tells whether the instance has a call-as-function handler.
+ inline void set_has_instance_call_handler() {
+ set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
+ }
+
+ inline bool has_instance_call_handler() {
+ return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
+ }
+
+ // Tells whether the instance needs security checks when accessing its
+ // properties.
+ inline void set_needs_access_check() {
+ set_bit_field(bit_field() | (1 << kNeedsAccessCheck));
+ }
+
+ inline bool needs_access_check() {
+ return ((1 << kNeedsAccessCheck) & bit_field()) != 0;
+ }
+
+ // [prototype]: implicit prototype object.
+ DECL_ACCESSORS(prototype, Object)
+
+ // [constructor]: points back to the function responsible for this map.
+ DECL_ACCESSORS(constructor, Object)
+
+ // [instance descriptors]: describes the object.
+ DECL_ACCESSORS(instance_descriptors, DescriptorArray)
+
+ // [stub cache]: contains stubs compiled for this map.
+ DECL_ACCESSORS(code_cache, FixedArray)
+
+ // Returns a copy of the map.
+ Object* Copy();
+
+ // Returns the property index for name (only valid for FAST MODE).
+ int PropertyIndexFor(String* name);
+
+ // Returns the next free property index (only valid for FAST MODE).
+ int NextFreePropertyIndex();
+
+ // Returns the number of properties described in instance_descriptors.
+ int NumberOfDescribedProperties();
+
+ // Casting.
+ static inline Map* cast(Object* obj);
+
+ // Locate an accessor in the instance descriptor.
+ AccessorDescriptor* FindAccessor(String* name);
+
+ // Make sure the instance descriptor has no map transitions.
+ Object* EnsureNoMapTransitions();
+
+ // Code cache operations.
+
+ // Clears the code cache.
+ inline void ClearCodeCache();
+
+ // Update code cache.
+ Object* UpdateCodeCache(String* name, Code* code);
+
+ // Returns the found code or undefined if absent.
+ Object* FindInCodeCache(String* name, Code::Flags flags);
+
+ // Tells whether code is in the code cache.
+ bool IncludedInCodeCache(Code* code);
+
+ // Dispatched behavior.
+ void MapIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+ void MapPrint();
+ void MapVerify();
+#endif
+
+ // Layout description.
+ static const int kInstanceAttributesOffset = HeapObject::kSize;
+ static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
+ static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
+ static const int kInstanceDescriptorsOffset =
+ kConstructorOffset + kPointerSize;
+ static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
+ static const int kSize = kCodeCacheOffset + kIntSize;
+
+ // Byte offsets within kInstanceAttributesOffset attributes.
+ static const int kInstanceSizeOffset = kInstanceAttributesOffset + 0;
+ static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
+ static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 2;
+ static const int kBitFieldOffset = kInstanceAttributesOffset + 3;
+
+ // Bit positions for bit field.
+ static const int kHasSpecialLookup = 0;
+ static const int kHasNonInstancePrototype = 1;
+ static const int kIsHiddenPrototype = 2;
+ static const int kHasNamedInterceptor = 3;
+ static const int kHasIndexedInterceptor = 4;
+ static const int kIsUndetectable = 5;
+ static const int kHasInstanceCallHandler = 6;
+ static const int kNeedsAccessCheck = 7;
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
+};
+
+
+// An abstract superclass, a marker class really, for simple structure classes.
+// It doesn't carry much functionality but allows struct classes to be
+// identified in the type system.
+class Struct: public HeapObject {
+ public:
+ inline void InitializeBody(int object_size);
+ static inline Struct* cast(Object* that);
+};
+
+
+// Script types, stored as a Smi in a Script's [type] field.
+enum ScriptType {
+ SCRIPT_TYPE_NATIVE,
+ SCRIPT_TYPE_EXTENSION,
+ SCRIPT_TYPE_NORMAL
+};
+
+
+// Script describes a script which has been added to the VM.
+class Script: public Struct {
+ public:
+ // [source]: the script source.
+ DECL_ACCESSORS(source, Object)
+
+ // [name]: the script name.
+ DECL_ACCESSORS(name, Object)
+
+ // [line_offset]: script line offset in resource from where it was extracted.
+ DECL_ACCESSORS(line_offset, Smi)
+
+ // [column_offset]: script column offset in resource from where it was
+ // extracted.
+ DECL_ACCESSORS(column_offset, Smi)
+
+ // [wrapper]: the wrapper cache.
+ DECL_ACCESSORS(wrapper, Proxy)
+
+ // [type]: the script type (one of the ScriptType values).
+ DECL_ACCESSORS(type, Smi)
+
+ static inline Script* cast(Object* obj);
+
+#ifdef DEBUG
+ void ScriptPrint();
+ void ScriptVerify();
+#endif
+
+ // Layout description.
+ static const int kSourceOffset = HeapObject::kSize;
+ static const int kNameOffset = kSourceOffset + kPointerSize;
+ static const int kLineOffsetOffset = kNameOffset + kPointerSize;
+ static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
+ static const int kWrapperOffset = kColumnOffsetOffset + kPointerSize;
+ static const int kTypeOffset = kWrapperOffset + kPointerSize;
+ static const int kSize = kTypeOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
+};
+
+
+// SharedFunctionInfo describes the JSFunction information that can be
+// shared by multiple instances of the function.
+class SharedFunctionInfo: public HeapObject {
+ public:
+ // [name]: Function name.
+ DECL_ACCESSORS(name, Object)
+
+ // [code]: Function code.
+ DECL_ACCESSORS(code, Code)
+
+ // Returns whether this function has been compiled to native code yet.
+ inline bool is_compiled();
+
+ // [length]: The function length - usually the number of declared parameters.
+ // Use up to 2^30 parameters.
+ inline int length();
+ inline void set_length(int value);
+
+ // [formal parameter count]: The declared number of parameters.
+ inline int formal_parameter_count();
+ inline void set_formal_parameter_count(int value);
+
+ // [expected_nof_properties]: Expected number of properties for the function.
+ inline int expected_nof_properties();
+ inline void set_expected_nof_properties(int value);
+
+ // [instance class name]: class name for instances.
+ DECL_ACCESSORS(instance_class_name, Object)
+
+ // [function data]: This field has been added for the benefit of the API.
+ // In the long run we don't want all functions to have this field but
+ // we can fix that when we have a better model for storing hidden data
+ // on objects.
+ DECL_ACCESSORS(function_data, Object)
+
+ // [lazy load data]: If the function has lazy loading, this field
+ // contains contexts and other data needed to load it.
+ DECL_ACCESSORS(lazy_load_data, Object)
+
+ // [script info]: Script from which the function originates.
+ DECL_ACCESSORS(script, Object)
+
+ // [start_position_and_type]: Field used to store both the source code
+ // position, whether or not the function is a function expression,
+ // and whether or not the function is a toplevel function. The two
+ // least significant bits indicate whether the function is an
+ // expression and toplevel; the rest contains the source code position.
+ inline int start_position_and_type();
+ inline void set_start_position_and_type(int value);
+
+ // [debug info]: Debug information.
+ DECL_ACCESSORS(debug_info, Object)
+
+ // Position of the 'function' token in the script source.
+ inline int function_token_position();
+ inline void set_function_token_position(int function_token_position);
+
+ // Position of this function in the script source.
+ inline int start_position();
+ inline void set_start_position(int start_position);
+
+ // End position of this function in the script source.
+ inline int end_position();
+ inline void set_end_position(int end_position);
+
+ // Is this function a function expression in the source code.
+ inline bool is_expression();
+ inline void set_is_expression(bool value);
+
+ // Is this function a top-level function. Used for accessing the
+ // caller of functions. Top-level functions (scripts, evals) are
+ // returned as null; see JSFunction::GetCallerAccessor(...).
+ inline bool is_toplevel();
+ inline void set_is_toplevel(bool value);
+
+ // [source code]: Source code for the function.
+ bool HasSourceCode();
+ Object* GetSourceCode();
+
+ // Dispatched behavior.
+ void SharedFunctionInfoIterateBody(ObjectVisitor* v);
+ // Set max_length to -1 for unlimited length.
+ void SourceCodePrint(StringStream* accumulator, int max_length);
+#ifdef DEBUG
+ void SharedFunctionInfoPrint();
+ void SharedFunctionInfoVerify();
+#endif
+
+ // Casting.
+ static inline SharedFunctionInfo* cast(Object* obj);
+
+ // Layout description.
+ static const int kNameOffset = HeapObject::kSize;
+ static const int kCodeOffset = kNameOffset + kPointerSize;
+ static const int kLengthOffset = kCodeOffset + kPointerSize;
+ static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
+ static const int kExpectedNofPropertiesOffset =
+ kFormalParameterCountOffset + kIntSize;
+ static const int kInstanceClassNameOffset =
+ kExpectedNofPropertiesOffset + kIntSize;
+ static const int kExternalReferenceDataOffset =
+ kInstanceClassNameOffset + kPointerSize;
+ static const int kLazyLoadDataOffset =
+ kExternalReferenceDataOffset + kPointerSize;
+ static const int kScriptOffset = kLazyLoadDataOffset + kPointerSize;
+ static const int kStartPositionAndTypeOffset = kScriptOffset + kPointerSize;
+ static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
+ static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
+ static const int kDebugInfoOffset = kFunctionTokenPositionOffset + kIntSize;
+ static const int kAccessAttributesOffset = kDebugInfoOffset + kPointerSize;
+ static const int kSize = kAccessAttributesOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
+
+ // Bit positions in length_and_flg.
+ // The least significant bit is used as the flag.
+ static const int kFlagBit = 0;
+ static const int kLengthShift = 1;
+ static const int kLengthMask = ~((1 << kLengthShift) - 1);
+
+ // Bit positions in start_position_and_type.
+ // The source code start position is in the 30 most significant bits of
+ // the start_position_and_type field.
+ static const int kIsExpressionBit = 0;
+ static const int kIsTopLevelBit = 1;
+ static const int kStartPositionShift = 2;
+ static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
+};
+
+
+// JSFunction describes JavaScript functions.
+class JSFunction: public JSObject {
+ public:
+ // [prototype_or_initial_map]:
+ DECL_ACCESSORS(prototype_or_initial_map, Object)
+
+ // [shared_function_info]: The information about the function that
+ // can be shared by instances.
+ DECL_ACCESSORS(shared, SharedFunctionInfo)
+
+ // [context]: The context for this function.
+ inline Context* context();
+ inline Object* unchecked_context();
+ inline void set_context(Object* context);
+
+ // [code]: The generated code object for this function. Executed
+ // when the function is invoked, e.g. foo() or new foo(). See
+ // [[Call]] and [[Construct]] description in ECMA-262, section
+ // 8.6.2, page 27.
+ inline Code* code();
+ inline void set_code(Code* value);
+
+ // Tells whether this function is a context-independent boilerplate
+ // function.
+ inline bool IsBoilerplate();
+
+ // Tells whether this function needs to be loaded.
+ inline bool IsLoaded();
+
+ // [literals]: Fixed array holding the materialized literals.
+ DECL_ACCESSORS(literals, FixedArray)
+
+ // The initial map for an object created by this constructor.
+ inline Map* initial_map();
+ inline void set_initial_map(Map* value);
+ inline bool has_initial_map();
+
+ // Get and set the prototype property on a JSFunction. If the
+ // function has an initial map the prototype is set on the initial
+ // map. Otherwise, the prototype is put in the initial map field
+ // until an initial map is needed.
+ inline bool has_prototype();
+ inline bool has_instance_prototype();
+ inline Object* prototype();
+ inline Object* instance_prototype();
+ Object* SetInstancePrototype(Object* value);
+ Object* SetPrototype(Object* value);
+
+ // Accessor for this function's initial map's [[class]]
+ // property. This is primarily used by ECMA native functions. This
+ // method sets the class_name field of this function's initial map
+ // to a given value. It creates an initial map if this function does
+ // not have one. Note that this method does not copy the initial map
+ // if it has one already, but simply replaces it with the new value.
+ // Instances created afterwards will have a map whose [[class]] is
+ // set to 'value', but there are no guarantees on instances created
+ // before.
+ Object* SetInstanceClassName(String* name);
+
+ // Returns whether this function has been compiled to native code yet.
+ inline bool is_compiled();
+
+ // Casting.
+ static inline JSFunction* cast(Object* obj);
+
+ // Dispatched behavior.
+#ifdef DEBUG
+ void JSFunctionPrint();
+ void JSFunctionVerify();
+#endif
+
+ // Returns the number of allocated literals.
+ int NumberOfLiterals();
+
+ // Layout descriptors.
+ static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
+ static const int kSharedFunctionInfoOffset =
+ kPrototypeOrInitialMapOffset + kPointerSize;
+ static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
+ static const int kLiteralsOffset = kContextOffset + kPointerSize;
+ static const int kSize = kLiteralsOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
+};
+
+
+// Forward declaration.
+class JSBuiltinsObject;
+
+// Common super class for JavaScript global objects and the special
+// builtins global objects.
+class GlobalObject: public JSObject {
+ public:
+ // [builtins]: the object holding the runtime routines written in JS.
+ DECL_ACCESSORS(builtins, JSBuiltinsObject)
+
+ // [global context]: the global context corresponding to this global object.
+ DECL_ACCESSORS(global_context, Context)
+
+ // Layout description.
+ static const int kBuiltinsOffset = JSObject::kHeaderSize;
+ static const int kGlobalContextOffset = kBuiltinsOffset + kPointerSize;
+ static const int kHeaderSize = kGlobalContextOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
+ friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
+};
+
+
+// JavaScript global object.
+class JSGlobalObject: public GlobalObject {
+ public:
+ // [security token]: the object being used for security checks when
+ // accessing global properties.
+ DECL_ACCESSORS(security_token, Object)
+
+ // Casting.
+ static inline JSGlobalObject* cast(Object* obj);
+
+ // Dispatched behavior.
+#ifdef DEBUG
+ void JSGlobalObjectPrint();
+ void JSGlobalObjectVerify();
+#endif
+
+ // Layout description.
+ static const int kSecurityTokenOffset = GlobalObject::kHeaderSize;
+ static const int kSize = kSecurityTokenOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
+};
+
+
+// Builtins global object which holds the runtime routines written in
+// JavaScript.
+class JSBuiltinsObject: public GlobalObject {
+ public:
+ // Accessors for the runtime routines written in JavaScript.
+ inline Object* javascript_builtin(Builtins::JavaScript id);
+ inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);
+
+ // Casting.
+ static inline JSBuiltinsObject* cast(Object* obj);
+
+ // Dispatched behavior.
+#ifdef DEBUG
+ void JSBuiltinsObjectPrint();
+ void JSBuiltinsObjectVerify();
+#endif
+
+ // Layout description. The size of the builtins object includes
+ // room for one pointer per runtime routine written in JavaScript.
+ static const int kJSBuiltinsCount = Builtins::id_count;
+ static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
+ static const int kSize =
+ kJSBuiltinsOffset + (kJSBuiltinsCount * kPointerSize);
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
+};
+
+
+// Representation for JS wrapper objects, String, Number, Boolean, Date, etc.
+class JSValue: public JSObject {
+ public:
+ // [value]: the object being wrapped.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ static inline JSValue* cast(Object* obj);
+
+ // Dispatched behavior.
+#ifdef DEBUG
+ void JSValuePrint();
+ void JSValueVerify();
+#endif
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
+};
+
+
+// Whether embedded null characters are permitted in string conversions.
+enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
+// Whether string traversal favors robustness (no assert failures, no heap
+// allocations) over speed; see String::ToCString below.
+enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
+
+
+// The String abstract class captures JavaScript string values:
+//
+// Ecma-262:
+// 4.3.16 String Value
+// A string value is a member of the type String and is a finite
+// ordered sequence of zero or more 16-bit unsigned integer values.
+//
+// All string values have a length field.
+class String: public HeapObject {
+ public:
+ // Get and set the length of the string.
+ inline int length();
+ inline void set_length(int value);
+
+ // Get and set the uninterpreted length field of the string. Notice
+ // that the length field is also used to cache the hash value of
+ // strings. In order to get or set the actual length of the string
+ // use the length() and set_length() methods.
+ inline int length_field();
+ inline void set_length_field(int value);
+
+ // Get and set individual two byte chars in the string.
+ inline void Set(int index, uint16_t value);
+ // Get individual two byte char in the string. Repeated calls
+ // to this method are not efficient unless the string is flat.
+ inline uint16_t Get(int index);
+
+ // Flatten the top level ConsString that is hiding behind this
+ // string. This is a no-op unless the string is a ConsString or a
+ // SlicedString. Flatten mutates the ConsString and might return a
+ // failure.
+ Object* Flatten();
+ // Try to flatten the string. Do not allow handling of allocation
+ // failures. After calling TryFlatten, the string could still be a
+ // ConsString.
+ inline void TryFlatten();
+
+ // Is this string an ascii string.
+ inline bool IsAscii();
+
+ // Fast testing routines that assume the receiver is a string and
+ // just check whether it is a certain kind of string.
+ inline bool StringIsSlicedString();
+ inline bool StringIsConsString();
+
+ // Mark the string as an undetectable object. It only applies to
+ // ascii and two byte string types.
+ bool MarkAsUndetectable();
+
+ // Slice the string and return a substring.
+ Object* Slice(int from, int to);
+
+ // String equality operations.
+ inline bool Equals(String* other);
+ bool IsEqualTo(Vector<const char> str);
+
+ // Return a UTF8 representation of the string. The string is null
+ // terminated but may optionally contain nulls. Length is returned
+ // in length_output if length_output is not a null pointer. The string
+ // should be nearly flat, otherwise the performance of this method may
+ // be very slow (quadratic in the length). Setting robustness_flag to
+ // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust. This means it
+ // handles unexpected data without causing assert failures and it does not
+ // do any heap allocations. This is useful when printing stack traces.
+ SmartPointer<char> ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robustness_flag,
+ int offset,
+ int length,
+ int* length_output = 0);
+ SmartPointer<char> ToCString(
+ AllowNullsFlag allow_nulls = DISALLOW_NULLS,
+ RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
+ int* length_output = 0);
+
+ // Return a 16 bit Unicode representation of the string.
+ // The string should be nearly flat, otherwise the performance
+ // of this method may be very bad. Setting robustness_flag to
+ // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust. This means it
+ // handles unexpected data without causing assert failures and it does not
+ // do any heap allocations. This is useful when printing stack traces.
+ uc16* ToWideCString(RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
+
+ // Tells whether the hash code has been computed.
+ inline bool HasHashCode();
+
+ // Returns a hash value used for the property table.
+ inline uint32_t Hash();
+
+ static uint32_t ComputeHashCode(unibrow::CharacterStream* buffer, int length);
+ static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
+ uint32_t* index,
+ int length);
+
+ // Conversion.
+ inline bool AsArrayIndex(uint32_t* index);
+
+ // Casting.
+ static inline String* cast(Object* obj);
+
+ void PrintOn(FILE* out);
+
+ // Get the size tag.
+ inline uint32_t size_tag();
+ static inline uint32_t map_size_tag(Map* map);
+
+ // True if the string is a symbol.
+ inline bool is_symbol();
+ static inline bool is_symbol_map(Map* map);
+
+ // True if the string is ASCII.
+ inline bool is_ascii();
+ static inline bool is_ascii_map(Map* map);
+
+ // Get the representation tag.
+ inline StringRepresentationTag representation_tag();
+ static inline StringRepresentationTag map_representation_tag(Map* map);
+
+ // For use during stack traces. Performs rudimentary sanity check.
+ bool LooksValid();
+
+ // Dispatched behavior.
+ void StringShortPrint(StringStream* accumulator);
+#ifdef DEBUG
+ void StringPrint();
+ void StringVerify();
+#endif
+ inline bool IsFlat();
+
+ // Layout description.
+ static const int kLengthOffset = HeapObject::kSize;
+ static const int kSize = kLengthOffset + kIntSize;
+
+ // Limits on sizes of different types of strings.
+ static const int kMaxShortStringSize = 255;
+ static const int kMaxMediumStringSize = 65535;
+
+ // Max ascii char code.
+ static const int kMaxAsciiCharCode = 127;
+
+ // Shift constants for retrieving length from length/hash field.
+ static const int kShortLengthShift = 3 * kBitsPerByte;
+ static const int kMediumLengthShift = 2 * kBitsPerByte;
+ static const int kLongLengthShift = 2;
+
+ // Mask constant for checking if a string has a computed hash code
+ // and if it is an array index. The least significant bit indicates
+ // whether a hash code has been computed. If the hash code has been
+ // computed the 2nd bit tells whether the string can be used as an
+ // array index.
+ static const int kHashComputedMask = 1;
+ static const int kIsArrayIndexMask = 1 << 1;
+
+ // Support for regular expressions.
+ const uc16* GetTwoByteData();
+ const uc16* GetTwoByteData(unsigned start);
+
+ // Support for StringInputBuffer
+ static const unibrow::byte* ReadBlock(String* input,
+ unibrow::byte* util_buffer,
+ unsigned capacity,
+ unsigned* remaining,
+ unsigned* offset);
+ static const unibrow::byte* ReadBlock(String** input,
+ unibrow::byte* util_buffer,
+ unsigned capacity,
+ unsigned* remaining,
+ unsigned* offset);
+
+ // Helper function for flattening strings.
+ static void Flatten(String* source,
+ String* sink,
+ int from,
+ int to,
+ int sink_offset);
+
+ protected:
+ // Bundles the scratch buffer, cursor and remaining-capacity bookkeeping
+ // passed through the ReadBlock/ReadBlockIntoBuffer traversal routines.
+ class ReadBlockBuffer {
+ public:
+ ReadBlockBuffer(unibrow::byte* util_buffer_,
+ unsigned cursor_,
+ unsigned capacity_,
+ unsigned remaining_) :
+ util_buffer(util_buffer_),
+ cursor(cursor_),
+ capacity(capacity_),
+ remaining(remaining_) {
+ }
+ unibrow::byte* util_buffer;
+ unsigned cursor;
+ unsigned capacity;
+ unsigned remaining;
+ };
+
+ // NOTE: If you call StringInputBuffer routines on strings that are
+ // too deeply nested trees of cons and slice strings, then this
+ // routine will overflow the stack. Strings that are merely deeply
+ // nested trees of cons strings do not have a problem apart from
+ // performance.
+
+ static inline const unibrow::byte* ReadBlock(String* input,
+ ReadBlockBuffer* buffer,
+ unsigned* offset,
+ unsigned max_chars);
+ static void ReadBlockIntoBuffer(String* input,
+ ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned max_chars);
+
+ private:
+ // Slow case of String::Equals. This implementation works on any strings
+ // but it is most efficient on strings that are almost flat.
+ bool SlowEquals(String* other);
+
+ // Slow case of AsArrayIndex.
+ bool SlowAsArrayIndex(uint32_t* index);
+
+ // Compute and set the hash code.
+ uint32_t ComputeAndSetHash();
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(String);
+};
+
+
+// The SeqString abstract class captures sequential string values.
+class SeqString: public String {
+ public:
+
+ // Casting.
+ static inline SeqString* cast(Object* obj);
+
+ // Dispatched behavior.
+ // For regexp code.
+ uint16_t* SeqStringGetTwoByteAddress();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
+};
+
+
+// The AsciiString class captures sequential ascii string objects.
+// Each character in the AsciiString is an ascii character.
+class AsciiString: public SeqString {
+ public:
+ // Dispatched behavior.
+ inline uint16_t AsciiStringGet(int index);
+ inline void AsciiStringSet(int index, uint16_t value);
+
+ // Get the address of the characters in this string.
+ inline Address GetCharsAddress();
+
+ // Casting.
+ static inline AsciiString* cast(Object* obj);
+
+ // Garbage collection support. This method is called by the
+ // garbage collector to compute the actual size of an AsciiString
+ // instance.
+ inline int AsciiStringSize(Map* map);
+
+ // Computes the size for an AsciiString instance of a given length.
+ static int SizeFor(int length) {
+ return kHeaderSize + OBJECT_SIZE_ALIGN(length * kCharSize);
+ }
+
+ // Layout description.
+ static const int kHeaderSize = String::kSize;
+
+ // Support for StringInputBuffer.
+ inline void AsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset,
+ unsigned chars);
+ inline const unibrow::byte* AsciiStringReadBlock(unsigned* remaining,
+ unsigned* offset,
+ unsigned chars);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsciiString);
+};
+
+
+// The TwoByteString class captures sequential unicode string objects.
+// Each character in the TwoByteString is a two-byte uint16_t.
+class TwoByteString: public SeqString {
+ public:
+ // Dispatched behavior.
+ inline uint16_t TwoByteStringGet(int index);
+ inline void TwoByteStringSet(int index, uint16_t value);
+
+ // For regexp code.
+ const uint16_t* TwoByteStringGetData(unsigned start);
+
+ // Casting.
+ static inline TwoByteString* cast(Object* obj);
+
+ // Garbage collection support. This method is called by the
+ // garbage collector to compute the actual size of a TwoByteString
+ // instance.
+ inline int TwoByteStringSize(Map* map);
+
+ // Computes the size for a TwoByteString instance of a given length.
+ static int SizeFor(int length) {
+ return kHeaderSize + OBJECT_SIZE_ALIGN(length * kShortSize);
+ }
+
+ // Layout description.
+ static const int kHeaderSize = String::kSize;
+
+ // Support for StringInputBuffer.
+ inline void TwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned chars);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TwoByteString);
+};
+
+
+// The ConsString class describes string values built by using the
+// addition operator on strings. A ConsString is a pair where the
+// first and second components are pointers to other string values.
+// One or both components of a ConsString can be pointers to other
+// ConsStrings, creating a binary tree of ConsStrings where the leaves
+// are non-ConsString string values. The string value represented by
+// a ConsString can be obtained by concatenating the leaf string
+// values in a left-to-right depth-first traversal of the tree.
+class ConsString: public String {
+ public:
+ // First object of the cons cell.
+ inline Object* first();
+ inline void set_first(Object* first);
+
+ // Second object of the cons cell.
+ inline Object* second();
+ inline void set_second(Object* second);
+
+ // Dispatched behavior.
+ uint16_t ConsStringGet(int index);
+
+ // Casting.
+ static inline ConsString* cast(Object* obj);
+
+ // Garbage collection support. This method is called during garbage
+ // collection to iterate through the heap pointers in the body of
+ // the ConsString.
+ void ConsStringIterateBody(ObjectVisitor* v);
+
+ // Layout description.
+ static const int kFirstOffset = String::kSize;
+ static const int kSecondOffset = kFirstOffset + kPointerSize;
+ static const int kSize = kSecondOffset + kPointerSize;
+
+ // Support for StringInputBuffer.
+ inline const unibrow::byte* ConsStringReadBlock(ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned chars);
+ inline void ConsStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned chars);
+
+
+ // Minimum length for a cons string.
+ static const int kMinLength = 13;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
+};
+
+
+// The SlicedString class describes string values that are slices of
+// some other string. SlicedStrings consist of a reference to an
+// underlying heap-allocated string value, a start index, and the
+// length field common to all strings.
+class SlicedString: public String {
+ public:
+ // The underlying string buffer.
+ inline Object* buffer();
+ inline void set_buffer(Object* buffer);
+
+ // The start index of the slice.
+ inline int start();
+ inline void set_start(int start);
+
+ // Dispatched behavior.
+ uint16_t SlicedStringGet(int index);
+
+ // Flatten any ConsString hiding behind this SlicedString.
+ Object* SlicedStringFlatten();
+
+ // Casting.
+ static inline SlicedString* cast(Object* obj);
+
+ // Garbage collection support.
+ void SlicedStringIterateBody(ObjectVisitor* v);
+
+ // Layout description.
+ static const int kBufferOffset = String::kSize;
+ static const int kStartOffset = kBufferOffset + kPointerSize;
+ static const int kSize = kStartOffset + kIntSize;
+
+ // Support for StringInputBuffer.
+ inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned chars);
+ inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned chars);
+
+ // Minimum length for a sliced string.
+ static const int kMinLength = 13;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
+};
+
+
+// The ExternalString class describes string values that are backed by
+// a string resource that lies outside the V8 heap. ExternalStrings
+// consist of the length field common to all strings and a pointer to
+// the external resource. It is important to ensure (externally) that
+// the resource is not deallocated while the ExternalString is live in
+// the V8 heap.
+//
+// The API expects that all ExternalStrings are created through the
+// API. Therefore, ExternalStrings should not be used internally.
+class ExternalString: public String {
+ public:
+ // Casting.
+ static inline ExternalString* cast(Object* obj);
+
+ // Layout description.
+ static const int kResourceOffset = String::kSize;
+ static const int kSize = kResourceOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
+};
+
+
+// The ExternalAsciiString class is an external string backed by an
+// ASCII string.
+class ExternalAsciiString: public ExternalString {
+ public:
+ // Shorthand for the public API's external ASCII resource type.
+ typedef v8::String::ExternalAsciiStringResource Resource;
+
+ // The underlying resource.
+ inline Resource* resource();
+ inline void set_resource(Resource* buffer);
+
+ // Dispatched behavior.
+ uint16_t ExternalAsciiStringGet(int index);
+
+ // Casting.
+ static inline ExternalAsciiString* cast(Object* obj);
+
+ // Support for StringInputBuffer.
+ const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
+ unsigned* offset,
+ unsigned chars);
+ inline void ExternalAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset,
+ unsigned chars);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
+};
+
+
+// The ExternalTwoByteString class is an external string backed by a UTF-16
+// encoded string.
+class ExternalTwoByteString: public ExternalString {
+ public:
+ // Shorthand for the public API's external two-byte resource type.
+ typedef v8::String::ExternalStringResource Resource;
+
+ // The underlying string resource.
+ inline Resource* resource();
+ inline void set_resource(Resource* buffer);
+
+ // Dispatched behavior.
+ uint16_t ExternalTwoByteStringGet(int index);
+
+ // For regexp code.
+ const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+
+ // Casting.
+ static inline ExternalTwoByteString* cast(Object* obj);
+
+ // Support for StringInputBuffer.
+ void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned chars);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
+};
+
+
+// Note that StringInputBuffers are not valid across a GC! To fix this
+// it would have to store a String Handle instead of a String* and
+// AsciiStringReadBlock would have to be modified to use memcpy.
+//
+// StringInputBuffer is able to traverse any string regardless of how
+// deeply nested a sequence of ConsStrings it is made of. However,
+// performance will be better if deep strings are flattened before they
+// are traversed. Since flattening requires memory allocation this is
+// not always desirable, however (esp. in debugging situations).
+class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
+ public:
+ virtual void Seek(unsigned pos);
+ inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
+ // NOTE(review): single-argument constructor permits implicit conversion
+ // from String* — confirm this is intentional.
+ inline StringInputBuffer(String* backing):
+ unibrow::InputBuffer<String, String*, 1024>(backing) {}
+};
+
+
+// GC-safe variant of StringInputBuffer: backed by a String** (handle
+// location) rather than a raw String*.
+class SafeStringInputBuffer
+ : public unibrow::InputBuffer<String, String**, 256> {
+ public:
+ virtual void Seek(unsigned pos);
+ inline SafeStringInputBuffer()
+ : unibrow::InputBuffer<String, String**, 256>() {}
+ // NOTE(review): single-argument constructor permits implicit conversion
+ // from String** — confirm this is intentional.
+ inline SafeStringInputBuffer(String** backing)
+ : unibrow::InputBuffer<String, String**, 256>(backing) {}
+};
+
+
+// The Oddball describes objects null, undefined, true, and false.
+class Oddball: public HeapObject {
+ public:
+ // [to_string]: Cached to_string computed at startup.
+ DECL_ACCESSORS(to_string, String)
+
+ // [to_number]: Cached to_number computed at startup.
+ DECL_ACCESSORS(to_number, Object)
+
+ // Casting.
+ static inline Oddball* cast(Object* obj);
+
+ // Dispatched behavior.
+ void OddballIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+ void OddballVerify();
+#endif
+
+ // Initialize the fields. Returns an Object* — presumably a failure
+ // object on allocation failure; confirm against the implementation.
+ Object* Initialize(const char* to_string, Object* to_number);
+
+ // Layout description.
+ static const int kToStringOffset = HeapObject::kSize;
+ static const int kToNumberOffset = kToStringOffset + kPointerSize;
+ static const int kSize = kToNumberOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
+};
+
+
+// Proxy describes objects pointing from JavaScript to C structures.
+// The payload is a raw machine Address, not a tagged heap pointer.
+class Proxy: public HeapObject {
+ public:
+ // [proxy]: field containing the address.
+ inline Address proxy();
+ inline void set_proxy(Address value);
+
+ // Casting.
+ static inline Proxy* cast(Object* obj);
+
+ // Dispatched behavior.
+ inline void ProxyIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+ void ProxyPrint();
+ void ProxyVerify();
+#endif
+
+ // Layout description.
+
+ static const int kProxyOffset = HeapObject::kSize;
+ static const int kSize = kProxyOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
+};
+
+
+// The JSArray describes JavaScript Arrays.
+// Such an array can be in one of two modes:
+// - fast, backing storage is a FixedArray and length <= elements.length();
+// Please note: push and pop can be used to grow and shrink the array.
+// - slow, backing storage is a HashTable with numbers as keys.
+class JSArray: public JSObject {
+ public:
+ // [length]: The length property.
+ DECL_ACCESSORS(length, Object)
+
+ Object* JSArrayUpdateLengthFromIndex(uint32_t index, Object* value);
+
+ // Initialize the array with the given capacity. The function may
+ // fail due to out-of-memory situations, but only if the requested
+ // capacity is non-zero.
+ Object* Initialize(int capacity);
+
+ // Set the content of the array to the content of storage.
+ void SetContent(FixedArray* storage);
+
+ // Support for sorting.
+ Object* RemoveHoles();
+
+ // Casting.
+ static inline JSArray* cast(Object* obj);
+
+ // Dispatched behavior.
+#ifdef DEBUG
+ void JSArrayPrint();
+ void JSArrayVerify();
+#endif
+
+ // Layout description.
+ static const int kLengthOffset = JSObject::kHeaderSize;
+ static const int kSize = kLengthOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
+};
+
+
+// An accessor must have a getter, but can have no setter.
+//
+// When setting a property, V8 searches accessors in prototypes.
+// If an accessor was found and it does not have a setter,
+// the request is ignored.
+//
+// To allow shadowing of an accessor property, the accessor can
+// have the READ_ONLY property attribute so that a new value
+// is added to the local object to shadow the accessor
+// in prototypes.
+class AccessorInfo: public Struct {
+ public:
+  // [getter]/[setter]: callbacks invoked on property access.
+  DECL_ACCESSORS(getter, Object)
+  DECL_ACCESSORS(setter, Object)
+  // [data]: payload handed back to the callbacks.
+  DECL_ACCESSORS(data, Object)
+  // [name]: the property name this accessor is attached to.
+  DECL_ACCESSORS(name, Object)
+  // [flag]: packed booleans and property attributes (bit layout below).
+  DECL_ACCESSORS(flag, Smi)
+
+  // NOTE(review): all_can_read/all_can_write presumably let the
+  // callback bypass access checks — confirm at the use sites.
+  inline bool all_can_read();
+  inline void set_all_can_read(bool value);
+
+  inline bool all_can_write();
+  inline void set_all_can_write(bool value);
+
+  inline PropertyAttributes property_attributes();
+  inline void set_property_attributes(PropertyAttributes attributes);
+
+  static inline AccessorInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void AccessorInfoPrint();
+  void AccessorInfoVerify();
+#endif
+
+  // Layout description: five pointer-sized fields after the header.
+  static const int kGetterOffset = HeapObject::kSize;
+  static const int kSetterOffset = kGetterOffset + kPointerSize;
+  static const int kDataOffset = kSetterOffset + kPointerSize;
+  static const int kNameOffset = kDataOffset + kPointerSize;
+  static const int kFlagOffset = kNameOffset + kPointerSize;
+  static const int kSize = kFlagOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
+
+  // Bit positions in flag.
+  static const int kAllCanReadBit = 0;
+  static const int kAllCanWriteBit = 1;
+  class AttributesField: public BitField<PropertyAttributes, 2, 3> {};
+};
+
+
+// Holds the callbacks used for access checks: one for named property
+// access, one for indexed access, plus a data payload passed to both.
+class AccessCheckInfo: public Struct {
+ public:
+  DECL_ACCESSORS(named_callback, Object)
+  DECL_ACCESSORS(indexed_callback, Object)
+  DECL_ACCESSORS(data, Object)
+
+  static inline AccessCheckInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void AccessCheckInfoPrint();
+  void AccessCheckInfoVerify();
+#endif
+
+  // Layout description.
+  static const int kNamedCallbackOffset = HeapObject::kSize;
+  static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
+  static const int kDataOffset = kIndexedCallbackOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessCheckInfo);
+};
+
+
+// Holds the callback set for a property interceptor (getter, setter,
+// query, deleter, enumerator) plus a data payload passed to each.
+class InterceptorInfo: public Struct {
+ public:
+  DECL_ACCESSORS(getter, Object)
+  DECL_ACCESSORS(setter, Object)
+  DECL_ACCESSORS(query, Object)
+  DECL_ACCESSORS(deleter, Object)
+  DECL_ACCESSORS(enumerator, Object)
+  DECL_ACCESSORS(data, Object)
+
+  static inline InterceptorInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void InterceptorInfoPrint();
+  void InterceptorInfoVerify();
+#endif
+
+  // Layout description.
+  static const int kGetterOffset = HeapObject::kSize;
+  static const int kSetterOffset = kGetterOffset + kPointerSize;
+  static const int kQueryOffset = kSetterOffset + kPointerSize;
+  static const int kDeleterOffset = kQueryOffset + kPointerSize;
+  static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
+  static const int kDataOffset = kEnumeratorOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
+};
+
+
+// Pairs a call callback with the data payload passed to it.
+class CallHandlerInfo: public Struct {
+ public:
+  DECL_ACCESSORS(callback, Object)
+  DECL_ACCESSORS(data, Object)
+
+  static inline CallHandlerInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void CallHandlerInfoPrint();
+  void CallHandlerInfoVerify();
+#endif
+
+  // Layout description.
+  static const int kCallbackOffset = HeapObject::kSize;
+  static const int kDataOffset = kCallbackOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
+};
+
+
+// Common base for FunctionTemplateInfo and ObjectTemplateInfo: a tag
+// plus the list of properties added to instances of the template.
+class TemplateInfo: public Struct {
+ public:
+  DECL_ACCESSORS(tag, Object)
+  DECL_ACCESSORS(property_list, Object)
+
+#ifdef DEBUG
+  void TemplateInfoVerify();
+#endif
+
+  // Layout description.  kHeaderSize (not kSize) because subclasses
+  // append their own fields.
+  static const int kTagOffset = HeapObject::kSize;
+  static const int kPropertyListOffset = kTagOffset + kPointerSize;
+  static const int kHeaderSize = kPropertyListOffset + kPointerSize;
+ protected:
+  // The friend declaration works around a GCC-version quirk, as the
+  // friend's name spells out.
+  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
+};
+
+
+// Heap representation of a v8::FunctionTemplate: call code, instance
+// and prototype templates, property handlers and related metadata.
+class FunctionTemplateInfo: public TemplateInfo {
+ public:
+  DECL_ACCESSORS(serial_number, Object)
+  DECL_ACCESSORS(call_code, Object)
+  DECL_ACCESSORS(internal_field_count, Object)
+  DECL_ACCESSORS(property_accessors, Object)
+  DECL_ACCESSORS(prototype_template, Object)
+  DECL_ACCESSORS(parent_template, Object)
+  DECL_ACCESSORS(named_property_handler, Object)
+  DECL_ACCESSORS(indexed_property_handler, Object)
+  DECL_ACCESSORS(instance_template, Object)
+  DECL_ACCESSORS(class_name, Object)
+  DECL_ACCESSORS(signature, Object)
+  DECL_ACCESSORS(lookup_callback, Object)
+  DECL_ACCESSORS(instance_call_handler, Object)
+  DECL_ACCESSORS(access_check_info, Object)
+  DECL_ACCESSORS(flag, Smi)
+
+  // Following properties use flag bits.
+  DECL_BOOLEAN_ACCESSORS(hidden_prototype)
+  DECL_BOOLEAN_ACCESSORS(undetectable)
+  // If the bit is set, object instances created by this function
+  // require an access check.
+  DECL_BOOLEAN_ACCESSORS(needs_access_check)
+
+  static inline FunctionTemplateInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void FunctionTemplateInfoPrint();
+  void FunctionTemplateInfoVerify();
+#endif
+
+  // Layout description: pointer-sized fields in declaration order
+  // after the TemplateInfo header.
+  static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
+  static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
+  static const int kInternalFieldCountOffset = kCallCodeOffset + kPointerSize;
+  static const int kPropertyAccessorsOffset =
+      kInternalFieldCountOffset + kPointerSize;
+  static const int kPrototypeTemplateOffset =
+      kPropertyAccessorsOffset + kPointerSize;
+  static const int kParentTemplateOffset =
+      kPrototypeTemplateOffset + kPointerSize;
+  static const int kNamedPropertyHandlerOffset =
+      kParentTemplateOffset + kPointerSize;
+  static const int kIndexedPropertyHandlerOffset =
+      kNamedPropertyHandlerOffset + kPointerSize;
+  static const int kInstanceTemplateOffset =
+      kIndexedPropertyHandlerOffset + kPointerSize;
+  static const int kClassNameOffset = kInstanceTemplateOffset + kPointerSize;
+  static const int kSignatureOffset = kClassNameOffset + kPointerSize;
+  static const int kLookupCallbackOffset = kSignatureOffset + kPointerSize;
+  static const int kInstanceCallHandlerOffset =
+      kLookupCallbackOffset + kPointerSize;
+  static const int kAccessCheckInfoOffset =
+      kInstanceCallHandlerOffset + kPointerSize;
+  static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
+  static const int kSize = kFlagOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
+
+  // Bit position in the flag, from least significant bit position.
+  static const int kHiddenPrototypeBit = 0;
+  static const int kUndetectableBit = 1;
+  static const int kNeedsAccessCheckBit = 2;
+};
+
+
+// Heap representation of a v8::ObjectTemplate.
+class ObjectTemplateInfo: public TemplateInfo {
+ public:
+  // [constructor]: the function template this object template belongs to.
+  DECL_ACCESSORS(constructor, Object)
+
+  static inline ObjectTemplateInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void ObjectTemplateInfoPrint();
+  void ObjectTemplateInfoVerify();
+#endif
+
+  // Layout description.
+  static const int kConstructorOffset = TemplateInfo::kHeaderSize;
+  // The constructor field is a single pointer, so the object ends one
+  // pointer past its offset.  This was "kConstructorOffset + kHeaderSize",
+  // which over-sized every ObjectTemplateInfo; every other class in this
+  // file adds kPointerSize per field.
+  static const int kSize = kConstructorOffset + kPointerSize;
+};
+
+
+// Describes an expected receiver type and argument types for calls
+// through a template (see v8::Signature).
+class SignatureInfo: public Struct {
+ public:
+  DECL_ACCESSORS(receiver, Object)
+  DECL_ACCESSORS(args, Object)
+
+  static inline SignatureInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void SignatureInfoPrint();
+  void SignatureInfoVerify();
+#endif
+
+  // Layout description.
+  static const int kReceiverOffset = Struct::kSize;
+  static const int kArgsOffset = kReceiverOffset + kPointerSize;
+  static const int kSize = kArgsOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SignatureInfo);
+};
+
+
+// Holds the list of types for a v8::TypeSwitch.
+class TypeSwitchInfo: public Struct {
+ public:
+  DECL_ACCESSORS(types, Object)
+
+  static inline TypeSwitchInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void TypeSwitchInfoPrint();
+  void TypeSwitchInfoVerify();
+#endif
+
+  // Layout description.
+  static const int kTypesOffset = Struct::kSize;
+  static const int kSize = kTypesOffset + kPointerSize;
+};
+
+
+// The DebugInfo class holds additional information for a function being
+// debugged.
+class DebugInfo: public Struct {
+ public:
+  // The shared function info for the source being debugged.
+  DECL_ACCESSORS(shared, SharedFunctionInfo)
+  // Code object for the original code.
+  DECL_ACCESSORS(original_code, Code)
+  // Code object for the patched code. This code object is the code object
+  // currently active for the function.
+  DECL_ACCESSORS(code, Code)
+  // Fixed array holding status information for each active break point.
+  DECL_ACCESSORS(break_points, FixedArray)
+
+  // Check if there is a break point at a code position.
+  bool HasBreakPoint(int code_position);
+  // Get the break point info object for a code position.
+  Object* GetBreakPointInfo(int code_position);
+  // Clear a break point.
+  static void ClearBreakPoint(Handle<DebugInfo> debug_info,
+                              int code_position,
+                              Handle<Object> break_point_object);
+  // Set a break point.
+  static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_position,
+                            int source_position, int statement_position,
+                            Handle<Object> break_point_object);
+  // Get the break point objects for a code position.
+  Object* GetBreakPointObjects(int code_position);
+  // Find the break point info holding this break point object.
+  static Object* FindBreakPointInfo(Handle<DebugInfo> debug_info,
+                                    Handle<Object> break_point_object);
+  // Get the number of break points for this function.
+  int GetBreakPointCount();
+
+  static inline DebugInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void DebugInfoPrint();
+  void DebugInfoVerify();
+#endif
+
+  // Layout description.  NOTE(review): despite the "...Index" names,
+  // these are byte offsets computed with kPointerSize, like the other
+  // layout constants in this file.
+  static const int kSharedFunctionInfoIndex = Struct::kSize;
+  static const int kOriginalCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
+  static const int kPatchedCodeIndex = kOriginalCodeIndex + kPointerSize;
+  static const int kActiveBreakPointsCountIndex =
+      kPatchedCodeIndex + kPointerSize;
+  static const int kBreakPointsStateIndex =
+      kActiveBreakPointsCountIndex + kPointerSize;
+  static const int kSize = kBreakPointsStateIndex + kPointerSize;
+
+ private:
+  static const int kNoBreakPointInfo = -1;
+
+  // Lookup the index in the break_points array for a code position.
+  int GetBreakPointInfoIndex(int code_position);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
+};
+
+
+// The BreakPointInfo class holds information for break points set in a
+// function. The DebugInfo object holds a BreakPointInfo object for each code
+// position with one or more break points.
+class BreakPointInfo: public Struct {
+ public:
+  // The position in the code for the break point.
+  DECL_ACCESSORS(code_position, Smi)
+  // The position in the source for the break position.
+  DECL_ACCESSORS(source_position, Smi)
+  // The position in the source for the last statement before this break
+  // position.
+  DECL_ACCESSORS(statement_position, Smi)
+  // List of related JavaScript break points.
+  DECL_ACCESSORS(break_point_objects, Object)
+
+  // Removes a break point.
+  static void ClearBreakPoint(Handle<BreakPointInfo> info,
+                              Handle<Object> break_point_object);
+  // Set a break point.
+  static void SetBreakPoint(Handle<BreakPointInfo> info,
+                            Handle<Object> break_point_object);
+  // Check if break point info has this break point object.
+  static bool HasBreakPointObject(Handle<BreakPointInfo> info,
+                                  Handle<Object> break_point_object);
+  // Get the number of break points for this code position.
+  int GetBreakPointCount();
+
+  static inline BreakPointInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void BreakPointInfoPrint();
+  void BreakPointInfoVerify();
+#endif
+
+  // Layout description ("...Index" names are byte offsets, as in DebugInfo).
+  static const int kCodePositionIndex = Struct::kSize;
+  static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
+  static const int kStatementPositionIndex =
+      kSourcePositionIndex + kPointerSize;
+  static const int kBreakPointObjectsIndex =
+      kStatementPositionIndex + kPointerSize;
+  static const int kSize = kBreakPointObjectsIndex + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
+};
+
+
+#undef DECL_BOOLEAN_ACCESSORS
+#undef DECL_ACCESSORS
+
+
+// Abstract base class for visiting, and optionally modifying, the
+// pointers contained in Objects. Used in GC and serialization/deserialization.
+class ObjectVisitor BASE_EMBEDDED {
+ public:
+  virtual ~ObjectVisitor() {}
+
+  // Visits a contiguous array of pointers in the half-open range
+  // [start, end). Any or all of the values may be modified on return.
+  virtual void VisitPointers(Object** start, Object** end) = 0;
+
+  // To allow lazy clearing of inline caches the visitor has
+  // a rich interface for iterating over Code objects.
+
+  // Called prior to visiting the body of a Code object.
+  virtual void BeginCodeIteration(Code* code);
+
+  // Visits a code target in the instruction stream.
+  virtual void VisitCodeTarget(RelocInfo* rinfo);
+
+  // Visits a runtime entry in the instruction stream.
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
+
+  // Visits a debug call target in the instruction stream.
+  virtual void VisitDebugTarget(RelocInfo* rinfo);
+
+  // Called after completing visiting the body of a Code object.
+  virtual void EndCodeIteration(Code* code) {}
+
+  // Handy shorthand for visiting a single pointer.
+  virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+
+  // Visits a contiguous array of external references (references to the C++
+  // heap) in the half-open range [start, end). Any or all of the values
+  // may be modified on return.
+  virtual void VisitExternalReferences(Address* start, Address* end) {}
+
+  inline void VisitExternalReference(Address* p) {
+    VisitExternalReferences(p, p + 1);
+  }
+
+#ifdef DEBUG
+  // Intended for serialization/deserialization checking: insert, or
+  // check for the presence of, a tag at this position in the stream.
+  virtual void Synchronize(const char* tag) {}
+#endif
+};
+
+
+// BooleanBit is a helper class for setting and getting a bit in an
+// integer or Smi.
+class BooleanBit : public AllStatic {
+ public:
+  // Reads a bit from a Smi by delegating to the integer overload.
+  static inline bool get(Smi* smi, int bit_position) {
+    return get(smi->value(), bit_position);
+  }
+
+  // Returns whether the bit at bit_position is set in value.
+  static inline bool get(int value, int bit_position) {
+    return ((value >> bit_position) & 1) != 0;
+  }
+
+  // Writes a bit in a Smi by delegating to the integer overload.
+  static inline Smi* set(Smi* smi, int bit_position, bool v) {
+    return Smi::FromInt(set(smi->value(), bit_position, v));
+  }
+
+  // Returns value with the bit at bit_position set to v.
+  static inline int set(int value, int bit_position, bool v) {
+    int mask = 1 << bit_position;
+    return v ? (value | mask) : (value & ~mask);
+  }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_OBJECTS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "ast.h"
+#include "bootstrapper.h"
+#include "platform.h"
+#include "runtime.h"
+#include "parser.h"
+#include "scopes.h"
+
+namespace v8 { namespace internal {
+
+// --lazy is defined elsewhere; --allow_natives_syntax enables the
+// %-prefixed runtime-call syntax handled by ParseV8Intrinsic.
+DECLARE_bool(lazy);
+DEFINE_bool(allow_natives_syntax, false, "allow natives syntax");
+
+
+class ParserFactory;
+class ParserLog;
+class TemporaryScope;
+template <typename T> class ZoneListWrapper;
+
+
+// Shared base of the AST-building parser and the pre-parser.  The
+// concrete behavior (building a syntax tree vs. merely recording
+// function positions) is supplied through the ParserFactory and
+// ParserLog passed to the constructor.
+class Parser {
+ public:
+  Parser(Handle<Script> script, bool allow_natives_syntax,
+         v8::Extension* extension, bool is_pre_parsing,
+         ParserFactory* factory, ParserLog* log, ScriptDataImpl* pre_data);
+  virtual ~Parser() { }
+
+  // Pre-parse the program from the character stream; returns true on
+  // success, false if a stack-overflow happened during parsing.
+  bool PreParseProgram(unibrow::CharacterStream* stream);
+
+  void ReportMessage(const char* message, Vector<const char*> args);
+  virtual void ReportMessageAt(Scanner::Location loc,
+                               const char* message,
+                               Vector<const char*> args) = 0;
+
+
+  // Returns NULL if parsing failed.
+  FunctionLiteral* ParseProgram(Handle<String> source,
+                                unibrow::CharacterStream* stream,
+                                bool in_global_context);
+  FunctionLiteral* ParseLazy(Handle<String> source,
+                             Handle<String> name,
+                             int start_position, bool is_expression);
+
+ protected:
+
+  enum Mode {
+    PARSE_LAZILY,
+    PARSE_EAGERLY
+  };
+
+  // Report syntax error for an unexpected token.
+  void ReportUnexpectedToken(Token::Value token);
+
+  Handle<Script> script_;
+  Scanner scanner_;
+
+  Scope* top_scope_;
+  int with_nesting_level_;
+
+  TemporaryScope* temp_scope_;
+  Mode mode_;
+  List<Node*>* target_stack_;  // for break, continue statements
+  bool allow_natives_syntax_;
+  v8::Extension* extension_;
+  ParserFactory* factory_;
+  ParserLog* log_;
+  bool is_pre_parsing_;
+  ScriptDataImpl* pre_data_;
+
+  bool inside_with() const { return with_nesting_level_ > 0; }
+  ParserFactory* factory() const { return factory_; }
+  ParserLog* log() const { return log_; }
+  Scanner& scanner() { return scanner_; }
+  Mode mode() const { return mode_; }
+  ScriptDataImpl* pre_data() const { return pre_data_; }
+
+  // All ParseXXX functions take as the last argument an *ok parameter
+  // which is set to false if parsing failed; it is unchanged otherwise.
+  // By making the 'exception handling' explicit, we are forced to check
+  // for failure at the call sites.
+  void* ParseSourceElements(ZoneListWrapper<Statement>* processor,
+                            int end_token, bool* ok);
+  Statement* ParseStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseFunctionDeclaration(bool* ok);
+  Statement* ParseNativeDeclaration(bool* ok);
+  Block* ParseBlock(ZoneStringList* labels, bool* ok);
+  Block* ParseVariableStatement(bool* ok);
+  Block* ParseVariableDeclarations(bool accept_IN, Expression** var, bool* ok);
+  Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
+                                                bool* ok);
+  IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseContinueStatement(bool* ok);
+  Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseReturnStatement(bool* ok);
+  Block* WithHelper(Expression* obj, ZoneStringList* labels, bool* ok);
+  Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
+  CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
+  SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
+  LoopStatement* ParseDoStatement(ZoneStringList* labels, bool* ok);
+  LoopStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseForStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseThrowStatement(bool* ok);
+  Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
+  TryStatement* ParseTryStatement(bool* ok);
+  DebuggerStatement* ParseDebuggerStatement(bool* ok);
+
+  // Expression productions, one method per precedence level.
+  Expression* ParseExpression(bool accept_IN, bool* ok);
+  Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
+  Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
+  Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
+  Expression* ParseUnaryExpression(bool* ok);
+  Expression* ParsePostfixExpression(bool* ok);
+  Expression* ParseLeftHandSideExpression(bool* ok);
+  Expression* ParseNewExpression(bool* ok);
+  Expression* ParseMemberExpression(bool* ok);
+  Expression* ParseMemberWithNewPrefixesExpression(List<int>* new_prefixes,
+                                                   bool* ok);
+  Expression* ParsePrimaryExpression(bool* ok);
+  Expression* ParseArrayLiteral(bool* ok);
+  Expression* ParseObjectLiteral(bool* ok);
+  Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
+
+  enum FunctionLiteralType {
+    EXPRESSION,
+    DECLARATION,
+    NESTED
+  };
+
+  ZoneList<Expression*>* ParseArguments(bool* ok);
+  FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
+                                        int function_token_position,
+                                        FunctionLiteralType type,
+                                        bool* ok);
+
+
+  // Magical syntax support.
+  Expression* ParseV8Intrinsic(bool* ok);
+
+  INLINE(Token::Value peek()) { return scanner_.peek(); }
+  INLINE(Token::Value Next()) { return scanner_.Next(); }
+  INLINE(void Consume(Token::Value token));
+  void Expect(Token::Value token, bool* ok);
+  void ExpectSemicolon(bool* ok);
+
+  // Get odd-ball literals.
+  Literal* GetLiteralUndefined();
+  Literal* GetLiteralTheHole();
+  Literal* GetLiteralNumber(double value);
+
+  Handle<String> ParseIdentifier(bool* ok);
+  Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
+                                           bool* is_set,
+                                           bool* ok);
+
+  // Parser support
+  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+                                 FunctionLiteral* fun,
+                                 bool resolve,
+                                 bool* ok) = 0;
+
+  bool TargetStackContainsLabel(Handle<String> label);
+  BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
+  IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
+
+  void RegisterLabelUse(Label* label, int index);
+
+  // Create a number literal.
+  Literal* NewNumberLiteral(double value);
+
+  // Generate AST node that throw a ReferenceError with the given type.
+  Expression* NewThrowReferenceError(Handle<String> type);
+
+  // Generate AST node that throw a SyntaxError with the given
+  // type. The first argument may be null (in the handle sense) in
+  // which case no arguments are passed to the constructor.
+  Expression* NewThrowSyntaxError(Handle<String> type, Handle<Object> first);
+
+  // Generate AST node that throw a TypeError with the given
+  // type. Both arguments must be non-null (in the handle sense).
+  Expression* NewThrowTypeError(Handle<String> type,
+                                Handle<Object> first,
+                                Handle<Object> second);
+
+  // Generic AST generator for throwing errors from compiled code.
+  Expression* NewThrowError(Handle<String> constructor,
+                            Handle<String> type,
+                            Vector< Handle<Object> > arguments);
+
+  friend class Target;
+  friend class TargetScope;
+  friend class LexicalScope;
+  friend class TemporaryScope;
+};
+
+
+// A temporary scope stores information during parsing, just like
+// a plain scope. However, temporary scopes are not kept around
+// after parsing or referenced by syntax trees so they can be stack-
+// allocated and hence used by the pre-parser.
+class TemporaryScope BASE_EMBEDDED {
+ public:
+  explicit TemporaryScope(Parser* parser);
+  ~TemporaryScope();
+  // Hands out the next literal slot index for the current function.
+  int NextMaterializedLiteralIndex() { return materialized_literal_count_++; }
+  void AddProperty() { expected_property_count_++; }
+
+  int materialized_literal_count() { return materialized_literal_count_; }
+  int expected_property_count() { return expected_property_count_; }
+ private:
+  // Captures the number of nodes that need materialization in the
+  // function. regexp literals, boilerplate for array literals, and
+  // boilerplate for object literals.
+  int materialized_literal_count_;
+
+  // Properties count estimation.
+  int expected_property_count_;
+
+  // Bookkeeping: the owning parser and the scope this one shadows.
+  Parser* parser_;
+  TemporaryScope* parent_;
+
+  friend class Parser;
+};
+
+
+TemporaryScope::TemporaryScope(Parser* parser)
+    : materialized_literal_count_(0),
+      expected_property_count_(0),
+      parser_(parser),
+      parent_(parser->temp_scope_) {
+  // Push this scope onto the parser's temporary-scope chain; the
+  // destructor pops it again, so scopes nest strictly.
+  parser->temp_scope_ = this;
+}
+
+
+TemporaryScope::~TemporaryScope() {
+  // Pop this scope off the parser's chain, restoring the shadowed one.
+  parser_->temp_scope_ = parent_;
+}
+
+
+// A zone list wrapper lets code either access a zone list
+// or appear to do so while actually ignoring all operations.
+template <typename T>
+class ZoneListWrapper {
+ public:
+  // The default-constructed wrapper holds no list and ignores Add;
+  // length/elements/at must not be used on it.
+  ZoneListWrapper() : list_(NULL) { }
+  explicit ZoneListWrapper(int size) : list_(new ZoneList<T*>(size)) { }
+  void Add(T* that) { if (list_) list_->Add(that); }
+  int length() { return list_->length(); }
+  ZoneList<T*>* elements() { return list_; }
+  T* at(int index) { return list_->at(index); }
+ private:
+  ZoneList<T*>* list_;
+};
+
+
+// Allocation macro that should be used to allocate objects that must
+// only be allocated in real parsing mode. Note that in preparse mode
+// not only is the syntax tree not created but the constructor
+// arguments are not evaluated.
+#define NEW(expr) (is_pre_parsing_ ? NULL : new expr)
+
+
+// Base factory; its default implementations are the pre-parsing
+// behavior, returning cheap sentinel objects (or NULL) instead of
+// allocating AST nodes.  AstBuildingParserFactory overrides them.
+class ParserFactory BASE_EMBEDDED {
+ public:
+  explicit ParserFactory(bool is_pre_parsing) :
+      is_pre_parsing_(is_pre_parsing) { }
+
+  virtual ~ParserFactory() { }
+
+  virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
+
+  // Symbol lookups return the empty handle in pre-parsing mode.
+  virtual Handle<String> LookupSymbol(const char* string, int length) {
+    return Handle<String>();
+  }
+
+  virtual Handle<String> EmptySymbol() {
+    return Handle<String>();
+  }
+
+  // Properties still distinguish 'this.x' from other expressions so
+  // that left-hand-side checks behave as in full parsing.
+  virtual Expression* NewProperty(Expression* obj, Expression* key, int pos) {
+    if (obj == VariableProxySentinel::this_proxy()) {
+      return Property::this_property();
+    } else {
+      return ValidLeftHandSideSentinel::instance();
+    }
+  }
+
+  virtual Expression* NewCall(Expression* expression,
+                              ZoneList<Expression*>* arguments,
+                              bool is_eval, int pos) {
+    return Call::sentinel();
+  }
+
+  virtual Statement* EmptyStatement() {
+    return NULL;
+  }
+
+  // In pre-parsing mode lists wrap NULL and ignore all operations; in
+  // full mode they wrap a freshly allocated ZoneList.
+  template <typename T> ZoneListWrapper<T> NewList(int size) {
+    return is_pre_parsing_ ? ZoneListWrapper<T>() : ZoneListWrapper<T>(size);
+  }
+
+ private:
+  bool is_pre_parsing_;
+};
+
+
+// Receiver for parser events; the default implementation discards
+// everything.  ParserRecorder persists them as pre-parse data.
+class ParserLog BASE_EMBEDDED {
+ public:
+  virtual ~ParserLog() { }
+
+  // Records the occurrence of a function. The returned object is
+  // only guaranteed to be valid until the next function has been
+  // logged.
+  virtual FunctionEntry LogFunction(int start) { return FunctionEntry(); }
+
+  virtual void LogError() { }
+};
+
+
+// Factory used in full parsing mode: allocates real scopes, symbols
+// and AST nodes instead of the base class's sentinels.
+class AstBuildingParserFactory : public ParserFactory {
+ public:
+  AstBuildingParserFactory() : ParserFactory(false) { }
+
+  virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
+
+  virtual Handle<String> LookupSymbol(const char* string, int length) {
+    return Factory::LookupSymbol(Vector<const char>(string, length));
+  }
+
+  virtual Handle<String> EmptySymbol() {
+    return Factory::empty_symbol();
+  }
+
+  virtual Expression* NewProperty(Expression* obj, Expression* key, int pos) {
+    return new Property(obj, key, pos);
+  }
+
+  virtual Expression* NewCall(Expression* expression,
+                              ZoneList<Expression*>* arguments,
+                              bool is_eval, int pos) {
+    return new Call(expression, arguments, is_eval, pos);
+  }
+
+  virtual Statement* EmptyStatement() {
+    // Use a statically allocated empty statement singleton to avoid
+    // allocating lots and lots of empty statements.
+    static v8::internal::EmptyStatement empty;
+    return &empty;
+  }
+};
+
+
+// ParserLog implementation that records function positions (and a
+// possible error message) into a flat List<unsigned>, producing the
+// script pre-parse data later read back through ScriptDataImpl.
+class ParserRecorder: public ParserLog {
+ public:
+  ParserRecorder();
+  virtual FunctionEntry LogFunction(int start);
+  virtual void LogError() { }
+  virtual void LogMessage(Scanner::Location loc,
+                          const char* message,
+                          Vector<const char*> args);
+  // Appends a length-prefixed string (one word per character) to the store.
+  void WriteString(Vector<const char> str);
+  // Reads a string written by WriteString; returns a NewArray-allocated,
+  // NUL-terminated copy and stores the length in *chars if non-NULL.
+  static const char* ReadString(unsigned* start, int* chars);
+  List<unsigned>* store() { return &store_; }
+ private:
+  bool has_error_;
+  List<unsigned> store_;
+};
+
+
+FunctionEntry ScriptDataImpl::GetFunctionEnd(int start) {
+  // Looks up the pre-parse entry for the function starting at the given
+  // source position.  Lookups are expected to arrive in roughly
+  // increasing position order, so the search resumes from the last hit.
+  // NOTE(review): nth(last_entry_) is read even when there are no
+  // entries; presumably callers only get here with non-empty data —
+  // confirm.
+  if (nth(last_entry_).start_pos() > start) {
+    // If the last entry we looked up is higher than what we're
+    // looking for then it's useless and we reset it.
+    last_entry_ = 0;
+  }
+  for (int i = last_entry_; i < EntryCount(); i++) {
+    FunctionEntry entry = nth(i);
+    if (entry.start_pos() == start) {
+      last_entry_ = i;
+      return entry;
+    }
+  }
+  // No entry for this position: return an invalid (empty) entry.
+  return FunctionEntry();
+}
+
+
+bool ScriptDataImpl::SanityCheck() {
+  // The data is usable only if it can hold the full header and carries
+  // the expected magic number and version.  The length check must come
+  // first so magic()/version() never read past the end.
+  return store_.length() >= static_cast<int>(ScriptDataImpl::kHeaderSize) &&
+         magic() == ScriptDataImpl::kMagicNumber &&
+         version() == ScriptDataImpl::kCurrentVersion;
+}
+
+
+int ScriptDataImpl::EntryCount() {
+  // Everything after the fixed-size header is fixed-size function entries.
+  int data_length = store_.length() - kHeaderSize;
+  return data_length / FunctionEntry::kSize;
+}
+
+
+FunctionEntry ScriptDataImpl::nth(int n) {
+  // Entries are laid out sequentially after the header; wrap the n'th
+  // record in a FunctionEntry view without copying.
+  int offset = kHeaderSize + n * FunctionEntry::kSize;
+  Vector<unsigned> record(store_.start() + offset, FunctionEntry::kSize);
+  return FunctionEntry(record);
+}
+
+
+ParserRecorder::ParserRecorder()
+    : has_error_(false), store_(4) {
+  // Reserve the header up front and stamp it with the magic number,
+  // version, and a cleared error flag.
+  Vector<unsigned> preamble = store()->AddBlock(0, ScriptDataImpl::kHeaderSize);
+  preamble[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
+  preamble[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
+  preamble[ScriptDataImpl::kHasErrorOffset] = false;
+}
+
+
+void ParserRecorder::WriteString(Vector<const char> str) {
+  // Length-prefixed encoding: one word for the count, then one word
+  // per character.
+  int length = str.length();
+  store()->Add(length);
+  for (int i = 0; i < length; i++) {
+    store()->Add(str[i]);
+  }
+}
+
+
+const char* ParserRecorder::ReadString(unsigned* start, int* chars) {
+  // Inverse of WriteString: the first word is the character count, the
+  // characters follow, one per word.  Returns a NUL-terminated copy
+  // allocated with NewArray; the caller owns it.
+  int length = start[0];
+  char* buffer = NewArray<char>(length + 1);
+  for (int i = 0; i < length; i++) {
+    buffer[i] = start[i + 1];
+  }
+  buffer[length] = '\0';
+  if (chars != NULL) *chars = length;
+  return buffer;
+}
+
+
+void ParserRecorder::LogMessage(Scanner::Location loc, const char* message,
+                                Vector<const char*> args) {
+  // Records a syntax error in the pre-parse data: the store is rewound
+  // to just past the header, the error flag is set, and the location,
+  // argument count, message and arguments are written.  Only the first
+  // error is kept.
+  if (has_error_) return;
+  // Remember that an error occurred.  Without this the guard above
+  // never fires and LogFunction's has_error_ check is dead, so later
+  // LogFunction calls would append entries on top of the rewound
+  // error record, corrupting it.
+  has_error_ = true;
+  store()->Rewind(ScriptDataImpl::kHeaderSize);
+  store()->at(ScriptDataImpl::kHasErrorOffset) = true;
+  store()->Add(loc.beg_pos);
+  store()->Add(loc.end_pos);
+  store()->Add(args.length());
+  WriteString(CStrVector(message));
+  for (int i = 0; i < args.length(); i++)
+    WriteString(CStrVector(args[i]));
+}
+
+
+Scanner::Location ScriptDataImpl::MessageLocation() {
+  // Words 0 and 1 after the header hold the error's begin and end
+  // positions (written by ParserRecorder::LogMessage).
+  int begin = Read(0);
+  int end = Read(1);
+  return Scanner::Location(begin, end);
+}
+
+
+const char* ScriptDataImpl::BuildMessage() {
+  // Word 3 onward holds the length-prefixed message string (words 0-2
+  // are the location and argument count written by LogMessage).
+  unsigned* start = ReadAddress(3);
+  return ParserRecorder::ReadString(start, NULL);
+}
+
+
+Vector<const char*> ScriptDataImpl::BuildArgs() {
+  // Rebuilds the error-message argument strings recorded by LogMessage.
+  int arg_count = Read(2);
+  const char** array = NewArray<const char*>(arg_count);
+  // NOTE(review): ReadAddress adds kHeaderSize on top of 'pos', which
+  // already includes it; this arithmetic only lines up if kHeaderSize
+  // happens to equal the number of words preceding the message text —
+  // confirm against kHeaderSize's definition.
+  int pos = ScriptDataImpl::kHeaderSize + Read(3);
+  for (int i = 0; i < arg_count; i++) {
+    int count = 0;
+    array[i] = ParserRecorder::ReadString(ReadAddress(pos), &count);
+    // Advance past the length word plus 'count' character words.
+    pos += count + 1;
+  }
+  return Vector<const char*>(array, arg_count);
+}
+
+
+unsigned ScriptDataImpl::Read(int position) {
+  // Reads one word at the given position relative to the end of the header.
+  int index = ScriptDataImpl::kHeaderSize + position;
+  return store_[index];
+}
+
+
+unsigned* ScriptDataImpl::ReadAddress(int position) {
+  // Address of the word at the given position relative to the end of
+  // the header.
+  int index = ScriptDataImpl::kHeaderSize + position;
+  return &store_[index];
+}
+
+
+FunctionEntry ParserRecorder::LogFunction(int start) {
+  // Once an error has been recorded the function data is useless, so
+  // hand back an invalid entry instead of growing the store.
+  if (has_error_) return FunctionEntry();
+  FunctionEntry entry(store()->AddBlock(0, FunctionEntry::kSize));
+  entry.set_start_pos(start);
+  return entry;
+}
+
+
+// Parser that builds a full AST: errors are reported by throwing a
+// JavaScript SyntaxError and declarations are entered into real scopes.
+class AstBuildingParser : public Parser {
+ public:
+  AstBuildingParser(Handle<Script> script, bool allow_natives_syntax,
+                    v8::Extension* extension, ScriptDataImpl* pre_data)
+      // NOTE: factory() and log() return addresses of members that are
+      // constructed only after the base class; the base constructor must
+      // merely store these pointers, not dereference them.
+      : Parser(script, allow_natives_syntax, extension, false,
+               factory(), log(), pre_data) { }
+  virtual void ReportMessageAt(Scanner::Location loc, const char* message,
+                               Vector<const char*> args);
+  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+                                 FunctionLiteral* fun, bool resolve, bool* ok);
+  AstBuildingParserFactory* factory() { return &factory_; }
+  ParserLog* log() { return &log_; }
+
+ private:
+  ParserLog log_;                    // no-op log used by AST building
+  AstBuildingParserFactory factory_; // factory producing real AST nodes
+};
+
+
+// Parser used for lazy pre-parsing: builds no AST, records function
+// positions and the first error into a ParserRecorder instead.
+class PreParser : public Parser {
+ public:
+  PreParser(Handle<Script> script, bool allow_natives_syntax,
+            v8::Extension* extension)
+      // NOTE: factory() and recorder() return addresses of members that
+      // are constructed only after the base class; the base constructor
+      // must merely store these pointers, not dereference them.
+      : Parser(script, allow_natives_syntax, extension, true,
+               factory(), recorder(), NULL)
+      , factory_(true) { }
+  virtual void ReportMessageAt(Scanner::Location loc, const char* message,
+                               Vector<const char*> args);
+  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+                                 FunctionLiteral* fun, bool resolve, bool* ok);
+  ParserFactory* factory() { return &factory_; }
+  ParserRecorder* recorder() { return &recorder_; }
+
+ private:
+  ParserRecorder recorder_;  // collects function entries / first error
+  ParserFactory factory_;    // dummy factory (is_pre_parsing == true)
+};
+
+
+// Create and initialize a real scope for AST building.
+Scope* AstBuildingParserFactory::NewScope(Scope* parent, Scope::Type type,
+                                          bool inside_with) {
+  Scope* result = new Scope(parent, type);
+  result->Initialize(inside_with);
+  return result;
+}
+
+
+// Pre-parsing does not build real scopes: the parent scope is reused,
+// with only its type overwritten.  This is why the pre-parser needs a
+// pre-existing (dummy) scope at the top of the chain.
+Scope* ParserFactory::NewScope(Scope* parent, Scope::Type type,
+                               bool inside_with) {
+  ASSERT(parent != NULL);
+  parent->type_ = type;
+  return parent;
+}
+
+
+// Declarations have no effect during pre-parsing; no proxy is created.
+VariableProxy* PreParser::Declare(Handle<String> name, Variable::Mode mode,
+                                  FunctionLiteral* fun, bool resolve,
+                                  bool* ok) {
+  return NULL;
+}
+
+
+
+// ----------------------------------------------------------------------------
+// Target is a support class to facilitate manipulation of the
+// Parser's target_stack_ (the stack of potential 'break' and
+// 'continue' statement targets). Upon construction, a new target is
+// added; it is removed upon destruction.
+
+class Target BASE_EMBEDDED {
+ public:
+  // Push *node* onto the parser's current target stack for the lifetime
+  // of this object (typically the extent of parsing one statement).
+  Target(Parser* parser, Node* node) : parser_(parser) {
+    parser_->target_stack_->Add(node);
+  }
+
+  ~Target() {
+    parser_->target_stack_->RemoveLast();
+  }
+
+ private:
+  Parser* parser_;  // not owned
+};
+
+
+// TargetScope swaps in a fresh, empty target stack for the duration of
+// its lifetime, restoring the previous stack on destruction.  This gives
+// each script/function body its own break/continue targets so jumps
+// cannot cross function boundaries.
+class TargetScope BASE_EMBEDDED {
+ public:
+  explicit TargetScope(Parser* parser)
+      : parser_(parser), previous_(parser->target_stack_), stack_(0) {
+    parser_->target_stack_ = &stack_;
+  }
+
+  ~TargetScope() {
+    // Every Target pushed within this scope must have been popped.
+    ASSERT(stack_.is_empty());
+    parser_->target_stack_ = previous_;
+  }
+
+ private:
+  Parser* parser_;         // not owned
+  List<Node*>* previous_;  // target stack to restore on destruction
+  List<Node*> stack_;      // fresh stack used while this scope is live
+};
+
+
+// ----------------------------------------------------------------------------
+// LexicalScope is a support class to facilitate manipulation of the
+// Parser's scope stack. The constructor sets the parser's top scope
+// to the incoming scope, and the destructor resets it.
+
+class LexicalScope BASE_EMBEDDED {
+ public:
+  // Make *scope* the parser's top scope and reset the 'with' nesting
+  // level (a new function/eval scope starts outside any 'with').  Both
+  // are restored on destruction.
+  LexicalScope(Parser* parser, Scope* scope)
+      : parser_(parser),
+        prev_scope_(parser->top_scope_),
+        prev_level_(parser->with_nesting_level_) {
+    parser_->top_scope_ = scope;
+    parser_->with_nesting_level_ = 0;
+  }
+
+  ~LexicalScope() {
+    parser_->top_scope_ = prev_scope_;
+    parser_->with_nesting_level_ = prev_level_;
+  }
+
+ private:
+  Parser* parser_;     // not owned
+  Scope* prev_scope_;  // top scope to restore
+  int prev_level_;     // 'with' nesting level to restore
+};
+
+
+// ----------------------------------------------------------------------------
+// The CHECK_OK macro is a convenient macro to enforce error
+// handling for functions that may fail (by returning !*ok).
+//
+// CAUTION: This macro appends extra statements after a call,
+// thus it must never be used where only a single statement
+// is correct (e.g. an if statement branch w/o braces)!
+
+// CHECK_OK is spliced in as the final argument of a call:
+//   Foo(..., CHECK_OK);
+// expands to
+//   Foo(..., ok); if (!*ok) return NULL; ((void)0);
+// i.e. it both passes the 'ok' out-parameter and bails out on failure.
+#define CHECK_OK ok); \
+  if (!*ok) return NULL; \
+  ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Parser
+
+// Construct a parser.  *factory* and *log* are supplied by the concrete
+// subclass (AstBuildingParser or PreParser) and select between building
+// a real AST and merely recording pre-parse data; *pre_data* is earlier
+// pre-parse output used to skip lazily-compiled function bodies, or NULL.
+Parser::Parser(Handle<Script> script,
+               bool allow_natives_syntax,
+               v8::Extension* extension,
+               bool is_pre_parsing,
+               ParserFactory* factory,
+               ParserLog* log,
+               ScriptDataImpl* pre_data)
+    : script_(script),
+      scanner_(is_pre_parsing),
+      top_scope_(NULL),
+      with_nesting_level_(0),
+      temp_scope_(NULL),
+      target_stack_(NULL),
+      allow_natives_syntax_(allow_natives_syntax),
+      extension_(extension),
+      factory_(factory),
+      log_(log),
+      is_pre_parsing_(is_pre_parsing),
+      pre_data_(pre_data) {
+}
+
+
+// Pre-parse an entire program without allocating handles or zone memory
+// for AST nodes.  Returns true unless the scanner overflowed its stack;
+// syntax errors are recorded via the log, not reported here.
+bool Parser::PreParseProgram(unibrow::CharacterStream* stream) {
+  StatsRateScope timer(&Counters::pre_parse);
+
+  // Pre-parsing must not allocate: these guards assert it.
+  AssertNoZoneAllocation assert_no_zone_allocation;
+  AssertNoAllocation assert_no_allocation;
+  NoHandleAllocation no_handle_allocation;
+  scanner_.Init(Handle<String>(), stream, 0);
+  ASSERT(target_stack_ == NULL);
+  mode_ = PARSE_EAGERLY;
+  // ParserFactory::NewScope reuses its parent, so provide a dummy root.
+  DummyScope top_scope;
+  LexicalScope scope(this, &top_scope);
+  TemporaryScope temp_scope(this);
+  ZoneListWrapper<Statement> processor;
+  bool ok = true;
+  // 'ok' is deliberately ignored: errors were logged by the recorder and
+  // only stack overflow makes the pre-parse data unusable.
+  ParseSourceElements(&processor, Token::EOS, &ok);
+  return !scanner().stack_overflow();
+}
+
+
+// Parse a complete program (global code or eval code) and wrap it in a
+// nameless FunctionLiteral.  Returns NULL on failure; a stack overflow
+// is converted into a pending exception via Top::StackOverflow().
+FunctionLiteral* Parser::ParseProgram(Handle<String> source,
+                                      unibrow::CharacterStream* stream,
+                                      bool in_global_context) {
+  StatsRateScope timer(&Counters::parse);
+  Counters::total_parse_size.Increment(source->length());
+
+  // Initialize parser state.
+  source->TryFlatten();
+  scanner_.Init(source, stream, 0);
+  ASSERT(target_stack_ == NULL);
+
+  // Compute the parsing mode.  Natives syntax and extensions require
+  // eager parsing because lazy reparsing cannot reproduce them.
+  mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+  if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
+
+  Scope::Type type =
+    in_global_context
+      ? Scope::GLOBAL_SCOPE
+      : Scope::EVAL_SCOPE;
+  Handle<String> no_name = factory()->EmptySymbol();
+
+  FunctionLiteral* result = NULL;
+  { Scope* scope = factory()->NewScope(top_scope_, type, inside_with());
+    LexicalScope lexical_scope(this, scope);
+    TemporaryScope temp_scope(this);
+    ZoneListWrapper<Statement> body(16);
+    bool ok = true;
+    ParseSourceElements(&body, Token::EOS, &ok);
+    if (ok) {
+      result = NEW(FunctionLiteral(no_name, top_scope_,
+                                   body.elements(),
+                                   temp_scope.materialized_literal_count(),
+                                   temp_scope.expected_property_count(),
+                                   0, 0, source->length(), false));
+    } else if (scanner().stack_overflow()) {
+      Top::StackOverflow();
+    }
+  }
+
+  // Make sure the target stack is empty.
+  ASSERT(target_stack_ == NULL);
+
+  // If there was a syntax error we have to get rid of the AST
+  // and it is not safe to do so before the scope has been deleted.
+  if (result == NULL) Zone::DeleteAll();
+  return result;
+}
+
+
+// Parse a single function body whose compilation was deferred.  The
+// scanner starts at *start_position* within *source*; *is_expression*
+// distinguishes function expressions from declarations.  Returns NULL
+// only on stack overflow (the function parsed successfully before, so
+// no other errors are expected).
+FunctionLiteral* Parser::ParseLazy(Handle<String> source,
+                                   Handle<String> name,
+                                   int start_position,
+                                   bool is_expression) {
+  StatsRateScope timer(&Counters::parse_lazy);
+  Counters::total_parse_size.Increment(source->length());
+  SafeStringInputBuffer buffer(source.location());
+
+  // Initialize parser state.
+  source->TryFlatten();
+  scanner_.Init(source, &buffer, start_position);
+  ASSERT(target_stack_ == NULL);
+  mode_ = PARSE_EAGERLY;
+
+  // Place holder for the result.
+  FunctionLiteral* result = NULL;
+
+  {
+    // Parse the function literal.
+    Handle<String> no_name = factory()->EmptySymbol();
+    Scope* scope =
+        factory()->NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
+    LexicalScope lexical_scope(this, scope);
+    TemporaryScope temp_scope(this);
+
+    FunctionLiteralType type = is_expression ? EXPRESSION : DECLARATION;
+    bool ok = true;
+    result = ParseFunctionLiteral(name, kNoPosition, type, &ok);
+    // Make sure the results agree.
+    ASSERT(ok == (result != NULL));
+    // The only errors should be stack overflows.
+    ASSERT(ok || scanner_.stack_overflow());
+  }
+
+  // Make sure the target stack is empty.
+  ASSERT(target_stack_ == NULL);
+
+  // If there was a stack overflow we have to get rid of the AST and it
+  // is not safe to do so before the scope has been deleted.
+  if (result == NULL) {
+    Top::StackOverflow();
+    Zone::DeleteAll();
+  }
+  return result;
+}
+
+
+// Report a message at the scanner's current location (the token just
+// consumed), delegating to the subclass-specific ReportMessageAt.
+void Parser::ReportMessage(const char* type, Vector<const char*> args) {
+  ReportMessageAt(scanner_.location(), type, args);
+}
+
+
+// Build a JavaScript SyntaxError from the message template *type* and
+// its arguments, and schedule it to be thrown at *source_location*.
+void AstBuildingParser::ReportMessageAt(Scanner::Location source_location,
+                                        const char* type,
+                                        Vector<const char*> args) {
+  MessageLocation location(script_,
+                           source_location.beg_pos, source_location.end_pos);
+  // Pack the message arguments into a JS array for the error formatter.
+  Handle<JSArray> array = Factory::NewJSArray(args.length());
+  for (int i = 0; i < args.length(); i++) {
+    SetElement(array, i, Factory::NewStringFromUtf8(CStrVector(args[i])));
+  }
+  Handle<Object> result = Factory::NewSyntaxError(type, array);
+  Top::Throw(*result, &location);
+}
+
+
+// During pre-parsing, errors are not thrown; they are recorded into the
+// pre-parse data for replay when the script is actually compiled.
+void PreParser::ReportMessageAt(Scanner::Location source_location,
+                                const char* type,
+                                Vector<const char*> args) {
+  recorder()->LogMessage(source_location, type, args);
+}
+
+
+// Parse statements up to (but not consuming) *end_token*, appending the
+// non-empty ones to *processor*.  Returns NULL both on success and (via
+// CHECK_OK) on failure; callers inspect *ok.
+void* Parser::ParseSourceElements(ZoneListWrapper<Statement>* processor,
+                                  int end_token,
+                                  bool* ok) {
+  // SourceElements ::
+  //   (Statement)* <end_token>
+
+  // Allocate a target stack to use for this set of source
+  // elements. This way, all scripts and functions get their own
+  // target stack thus avoiding illegal breaks and continues across
+  // functions.
+  TargetScope scope(this);
+
+  ASSERT(processor != NULL);
+  while (peek() != end_token) {
+    Statement* stat = ParseStatement(NULL, CHECK_OK);
+    // Drop empty statements; they carry no information.
+    if (stat && !stat->IsEmpty()) processor->Add(stat);
+  }
+  return 0;
+}
+
+
+// Dispatch on the next token to the specific statement parser.  *labels*
+// carries the labels attached to this statement, if any.  Returns NULL
+// on error (with *ok set to false).
+Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
+  // Statement ::
+  //   Block
+  //   VariableStatement
+  //   EmptyStatement
+  //   ExpressionStatement
+  //   IfStatement
+  //   IterationStatement
+  //   ContinueStatement
+  //   BreakStatement
+  //   ReturnStatement
+  //   WithStatement
+  //   LabelledStatement
+  //   SwitchStatement
+  //   ThrowStatement
+  //   TryStatement
+  //   DebuggerStatement
+
+  // Note: Since labels can only be used by 'break' and 'continue'
+  // statements, which themselves are only valid within blocks,
+  // iterations or 'switch' statements (i.e., BreakableStatements),
+  // labels can be simply ignored in all other cases; except for
+  // trivial labelled break statements 'label: break label' which is
+  // parsed into an empty statement.
+
+  // Keep the source position of the statement
+  int statement_pos = scanner().peek_location().beg_pos;
+  Statement* stmt = NULL;
+  switch (peek()) {
+    case Token::LBRACE:
+      // Blocks return directly and so do not get statement_pos set below.
+      return ParseBlock(labels, ok);
+
+    case Token::CONST:  // fall through
+    case Token::VAR:
+      stmt = ParseVariableStatement(ok);
+      break;
+
+    case Token::SEMICOLON:
+      Next();
+      return factory()->EmptyStatement();
+
+    case Token::IF:
+      stmt = ParseIfStatement(labels, ok);
+      break;
+
+    case Token::DO:
+      stmt = ParseDoStatement(labels, ok);
+      break;
+
+    case Token::WHILE:
+      stmt = ParseWhileStatement(labels, ok);
+      break;
+
+    case Token::FOR:
+      stmt = ParseForStatement(labels, ok);
+      break;
+
+    case Token::CONTINUE:
+      stmt = ParseContinueStatement(ok);
+      break;
+
+    case Token::BREAK:
+      stmt = ParseBreakStatement(labels, ok);
+      break;
+
+    case Token::RETURN:
+      stmt = ParseReturnStatement(ok);
+      break;
+
+    case Token::WITH:
+      stmt = ParseWithStatement(labels, ok);
+      break;
+
+    case Token::SWITCH:
+      stmt = ParseSwitchStatement(labels, ok);
+      break;
+
+    case Token::THROW:
+      stmt = ParseThrowStatement(ok);
+      break;
+
+    case Token::TRY: {
+      // NOTE: It is somewhat complicated to have labels on
+      // try-statements. When breaking out of a try-finally statement,
+      // one must take great care not to treat it as a
+      // fall-through. It is much easier just to wrap the entire
+      // try-statement in a statement block and put the labels there.
+      Block* result = NEW(Block(labels, 1, false));
+      Target target(this, result);
+      TryStatement* statement = ParseTryStatement(CHECK_OK);
+      // 'result' is NULL when pre-parsing (NEW returns NULL then).
+      if (result) result->AddStatement(statement);
+      return result;
+    }
+
+    case Token::FUNCTION:
+      return ParseFunctionDeclaration(ok);
+
+    case Token::NATIVE:
+      return ParseNativeDeclaration(ok);
+
+    case Token::DEBUGGER:
+      stmt = ParseDebuggerStatement(ok);
+      break;
+
+    default:
+      stmt = ParseExpressionOrLabelledStatement(labels, ok);
+  }
+
+  // Store the source position of the statement
+  if (stmt != NULL) stmt->set_statement_pos(statement_pos);
+  return stmt;
+}
+
+
+// Declare *name* with the given *mode* in the current scope, optionally
+// with an initial function value *fun* (function declarations only).
+// If *resolve* is true and a variable is available at parse time, the
+// returned proxy is bound to it immediately.  Returns the (possibly
+// bound) proxy for the declared name.
+VariableProxy* AstBuildingParser::Declare(Handle<String> name,
+                                          Variable::Mode mode,
+                                          FunctionLiteral* fun,
+                                          bool resolve,
+                                          bool* ok) {
+  Variable* var = NULL;
+  // If we are inside a function, a declaration of a variable
+  // is a truly local variable, and the scope of the variable
+  // is always the function scope.
+
+  // If a function scope exists, then we can statically declare this
+  // variable and also set its mode. In any case, a Declaration node
+  // will be added to the scope so that the declaration can be added
+  // to the corresponding activation frame at runtime if necessary.
+  // For instance declarations inside an eval scope need to be added
+  // to the calling function context.
+  if (top_scope_->is_function_scope()) {
+    // Declare the variable in the function scope.
+    var = top_scope_->Lookup(name);
+    if (var == NULL) {
+      // Declare the name.
+      var = top_scope_->Declare(name, mode);
+    } else {
+      // The name was declared before; check for conflicting
+      // re-declarations. If the previous declaration was a const or the
+      // current declaration is a const then we have a conflict. There is
+      // similar code in runtime.cc in the Declare functions.
+      if ((mode == Variable::CONST) || (var->mode() == Variable::CONST)) {
+        // We only have vars and consts in declarations.
+        ASSERT(var->mode() == Variable::VAR ||
+               var->mode() == Variable::CONST);
+        const char* type = (var->mode() == Variable::VAR) ? "var" : "const";
+        Handle<String> type_string =
+            Factory::NewStringFromUtf8(CStrVector(type), TENURED);
+        Expression* expression =
+            NewThrowTypeError(Factory::redeclaration_symbol(),
+                              type_string, name);
+        // The error is thrown lazily, when the scope's code is entered.
+        top_scope_->SetIllegalRedeclaration(expression);
+      }
+    }
+  }
+
+  // We add a declaration node for every declaration. The compiler
+  // will only generate code if necessary. In particular, declarations
+  // for inner local variables that do not represent functions won't
+  // result in any generated code.
+  //
+  // Note that we always add an unresolved proxy even if it's not
+  // used, simply because we don't know in this method (w/o extra
+  // parameters) if the proxy is needed or not. The proxy will be
+  // bound during variable resolution time unless it was pre-bound
+  // below.
+  //
+  // WARNING: This will lead to multiple declaration nodes for the
+  // same variable if it is declared several times. This is not a
+  // semantic issue as long as we keep the source order, but it may be
+  // a performance issue since it may lead to repeated
+  // Runtime::DeclareContextSlot() calls.
+  VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
+  top_scope_->AddDeclaration(NEW(Declaration(proxy, mode, fun)));
+
+  // For global const variables we bind the proxy to a variable.
+  if (mode == Variable::CONST && top_scope_->is_global_scope()) {
+    ASSERT(resolve);  // should be set by all callers
+    var = NEW(Variable(top_scope_, name, Variable::CONST, true, false));
+  }
+
+  // If requested and we have a local variable, bind the proxy to the variable
+  // at parse-time. This is used for functions (and consts) declared inside
+  // statements: the corresponding function (or const) variable must be in the
+  // function scope and not a statement-local scope, e.g. as provided with a
+  // 'with' statement:
+  //
+  //   with (obj) {
+  //     function f() {}
+  //   }
+  //
+  // which is translated into:
+  //
+  //   with (obj) {
+  //     // in this case this is not: 'var f; f = function () {};'
+  //     var f = function () {};
+  //   }
+  //
+  // Note that if 'f' is accessed from inside the 'with' statement, it
+  // will be allocated in the context (because we must be able to look
+  // it up dynamically) but it will also be accessed statically, i.e.,
+  // with a context slot index and a context chain length for this
+  // initialization code. Thus, inside the 'with' statement, we need
+  // both access to the static and the dynamic context chain; the
+  // runtime needs to provide both.
+  if (resolve && var != NULL) proxy->BindTo(var);
+
+  return proxy;
+}
+
+
+// Language extension which is only enabled for source files loaded
+// through the API's extension mechanism. A native function
+// declaration is resolved by looking up the function through a
+// callback provided by the extension.
+// Language extension which is only enabled for source files loaded
+// through the API's extension mechanism. A native function
+// declaration is resolved by looking up the function through a
+// callback provided by the extension.  Grammar:
+//   'native' 'function' Identifier '(' (Identifier (',' Identifier)*)? ')' ';'
+Statement* Parser::ParseNativeDeclaration(bool* ok) {
+  // Without an extension the 'native' keyword is a plain syntax error.
+  if (extension_ == NULL) {
+    ReportUnexpectedToken(Token::NATIVE);
+    *ok = false;
+    return NULL;
+  }
+
+  Expect(Token::NATIVE, CHECK_OK);
+  Expect(Token::FUNCTION, CHECK_OK);
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  // Parameter names are parsed for syntax only; they are not recorded.
+  bool done = (peek() == Token::RPAREN);
+  while (!done) {
+    ParseIdentifier(CHECK_OK);
+    done = (peek() == Token::RPAREN);
+    if (!done) Expect(Token::COMMA, CHECK_OK);
+  }
+  Expect(Token::RPAREN, CHECK_OK);
+  Expect(Token::SEMICOLON, CHECK_OK);
+
+  if (is_pre_parsing_) return NULL;
+
+  // Make sure that the function containing the native declaration
+  // isn't lazily compiled. The extension structures are only
+  // accessible while parsing the first time not when reparsing
+  // because of lazy compilation.
+  top_scope_->ForceEagerCompilation();
+
+  // Compute the function template for the native function.
+  v8::Handle<v8::FunctionTemplate> fun_template =
+      extension_->GetNativeFunction(v8::Utils::ToLocal(name));
+  ASSERT(!fun_template.IsEmpty());
+
+  // Instantiate the function and create a boilerplate function from it.
+  Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
+  const int literals = fun->NumberOfLiterals();
+  Handle<Code> code = Handle<Code>(fun->shared()->code());
+  Handle<JSFunction> boilerplate =
+      Factory::NewFunctionBoilerplate(name, literals, code);
+
+  // Copy the function data to the boilerplate. Used by
+  // builtins.cc:HandleApiCall to perform argument type checks and to
+  // find the right native code to call.
+  boilerplate->shared()->set_function_data(fun->shared()->function_data());
+
+  // TODO(1240846): It's weird that native function declarations are
+  // introduced dynamically when we meet their declarations, whereas
+  // other functions are setup when entering the surrounding scope.
+  FunctionBoilerplateLiteral* lit =
+      NEW(FunctionBoilerplateLiteral(boilerplate));
+  VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
+  return NEW(ExpressionStatement(
+      new Assignment(Token::INIT_VAR, var, lit, kNoPosition)));
+}
+
+
+Statement* Parser::ParseFunctionDeclaration(bool* ok) {
+  // Parse a function literal. We may or may not have a function name.
+  // If we have a name we use it as the variable name for the function
+  // (a function declaration) and not as the function name of a function
+  // expression.
+
+  Expect(Token::FUNCTION, CHECK_OK);
+  int function_token_position = scanner().location().beg_pos;
+
+  Handle<String> name;
+  if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
+  FunctionLiteral* fun = ParseFunctionLiteral(name, function_token_position,
+                                              DECLARATION, CHECK_OK);
+
+  if (name.is_null()) {
+    // We don't have a name - it is always an anonymous function
+    // expression.
+    return NEW(ExpressionStatement(fun));
+  } else {
+    // We have a name so even if we're not at the top-level of the
+    // global or a function scope, we treat it as such and introduce
+    // the function with its initial value upon entering the
+    // corresponding scope.
+    Declare(name, Variable::VAR, fun, true, CHECK_OK);
+    // The declaration does all the work; no statement is needed.
+    return factory()->EmptyStatement();
+  }
+}
+
+
+Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
+  // Block ::
+  //   '{' Statement* '}'
+
+  // Note that a Block does not introduce a new execution scope!
+  // (ECMA-262, 3rd, 12.2)
+  //
+  // Construct block expecting 16 statements.
+  Block* result = NEW(Block(labels, 16, false));
+  // The block is a break target for any labels attached to it.
+  Target target(this, result);
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    Statement* stat = ParseStatement(NULL, CHECK_OK);
+    // 'result' is NULL when pre-parsing; skip AST construction then.
+    if (stat && !stat->IsEmpty()) result->AddStatement(stat);
+  }
+  Expect(Token::RBRACE, CHECK_OK);
+  return result;
+}
+
+
+Block* Parser::ParseVariableStatement(bool* ok) {
+  // VariableStatement ::
+  //   VariableDeclarations ';'
+
+  Expression* dummy;  // to satisfy the ParseVariableDeclarations() signature
+  Block* result = ParseVariableDeclarations(true, &dummy, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+  return result;
+}
+
+
+// If the variable declaration declares exactly one non-const
+// variable, then *var is set to that variable. In all other cases,
+// *var is untouched; in particular, it is the caller's responsibility
+// to initialize it properly. This mechanism is used for the parsing
+// of 'for-in' loops.
+// If the variable declaration declares exactly one non-const
+// variable, then *var is set to that variable. In all other cases,
+// *var is untouched; in particular, it is the caller's responsibility
+// to initialize it properly. This mechanism is used for the parsing
+// of 'for-in' loops.  *accept_IN* is false when parsing the init part
+// of a 'for' statement, where 'in' must not be treated as an operator.
+Block* Parser::ParseVariableDeclarations(bool accept_IN,
+                                         Expression** var,
+                                         bool* ok) {
+  // VariableDeclarations ::
+  //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
+
+  Variable::Mode mode = Variable::VAR;
+  bool is_const = false;
+  if (peek() == Token::VAR) {
+    Consume(Token::VAR);
+  } else if (peek() == Token::CONST) {
+    Consume(Token::CONST);
+    mode = Variable::CONST;
+    is_const = true;
+  } else {
+    UNREACHABLE();  // by current callers
+  }
+
+  // The scope of a variable/const declared anywhere inside a function
+  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
+  // transform a source-level variable/const declaration into a (Function)
+  // Scope declaration, and rewrite the source-level initialization into an
+  // assignment statement. We use a block to collect multiple assignments.
+  //
+  // We mark the block as initializer block because we don't want the
+  // rewriter to add a '.result' assignment to such a block (to get compliant
+  // behavior for code such as print(eval('var x = 7')), and for cosmetic
+  // reasons when pretty-printing. Also, unless an assignment (initialization)
+  // is inside an initializer block, it is ignored.
+  //
+  // Create new block with one expected declaration.
+  Block* block = NEW(Block(NULL, 1, true));
+  VariableProxy* last_var = NULL;  // the last variable declared
+  int nvars = 0;  // the number of variables declared
+  do {
+    // Parse variable name.
+    if (nvars > 0) Consume(Token::COMMA);
+    Handle<String> name = ParseIdentifier(CHECK_OK);
+
+    // Declare variable.
+    // Note that we *always* must treat the initial value via a separate init
+    // assignment for variables and constants because the value must be assigned
+    // when the variable is encountered in the source. But the variable/constant
+    // is declared (and set to 'undefined') upon entering the function within
+    // which the variable or constant is declared. Only function variables have
+    // an initial value in the declaration (because they are initialized upon
+    // entering the function).
+    //
+    // If we have a const declaration, in an inner scope, the proxy is always
+    // bound to the declared variable (independent of possibly surrounding with
+    // statements).
+    last_var = Declare(name, mode, NULL,
+                       is_const /* always bound for CONST! */,
+                       CHECK_OK);
+    nvars++;
+
+    // Parse initialization expression if present and/or needed. A
+    // declaration of the form:
+    //
+    //    var v = x;
+    //
+    // is syntactic sugar for:
+    //
+    //    var v; v = x;
+    //
+    // In particular, we need to re-lookup 'v' as it may be a
+    // different 'v' than the 'v' in the declaration (if we are inside
+    // a 'with' statement that makes a object property with name 'v'
+    // visible).
+    //
+    // However, note that const declarations are different! A const
+    // declaration of the form:
+    //
+    //   const c = x;
+    //
+    // is *not* syntactic sugar for:
+    //
+    //   const c; c = x;
+    //
+    // The "variable" c initialized to x is the same as the declared
+    // one - there is no re-lookup (see the last parameter of the
+    // Declare() call above).
+
+    Expression* value = NULL;
+    int position = -1;
+    if (peek() == Token::ASSIGN) {
+      Expect(Token::ASSIGN, CHECK_OK);
+      position = scanner().location().beg_pos;
+      value = ParseAssignmentExpression(accept_IN, CHECK_OK);
+    }
+
+    // Make sure that 'const c' actually initializes 'c' to undefined
+    // even though it seems like a stupid thing to do.
+    if (value == NULL && is_const) {
+      value = GetLiteralUndefined();
+    }
+
+    // Global variable declarations must be compiled in a specific
+    // way. When the script containing the global variable declaration
+    // is entered, the global variable must be declared, so that if it
+    // doesn't exist (not even in a prototype of the global object) it
+    // gets created with an initial undefined value. This is handled
+    // by the declarations part of the function representing the
+    // top-level global code; see Runtime::DeclareGlobalVariable. If
+    // it already exists (in the object or in a prototype), it is
+    // *not* touched until the variable declaration statement is
+    // executed.
+    //
+    // Executing the variable declaration statement will always
+    // guarantee to give the global object a "local" variable; a
+    // variable defined in the global object and not in any
+    // prototype. This way, global variable declarations can shadow
+    // properties in the prototype chain, but only after the variable
+    // declaration statement has been executed. This is important in
+    // browsers where the global object (window) has lots of
+    // properties defined in prototype objects.
+
+    if (!is_pre_parsing_ && top_scope_->is_global_scope()) {
+      // Compute the arguments for the runtime call.
+      ZoneList<Expression*>* arguments = new ZoneList<Expression*>(2);
+      // Be careful not to assign a value to the global variable if
+      // we're in a with. The initialization value should not
+      // necessarily be stored in the global object in that case,
+      // which is why we need to generate a separate assignment node.
+      arguments->Add(NEW(Literal(name)));  // we have at least 1 parameter
+      if (is_const || (value != NULL && !inside_with())) {
+        arguments->Add(value);
+        value = NULL;  // zap the value to avoid the unnecessary assignment
+      }
+      // Construct the call to Runtime::DeclareGlobal{Variable,Const}Locally
+      // and add it to the initialization statement block. Note that
+      // this function does different things depending on if we have
+      // 1 or 2 parameters.
+      CallRuntime* initialize;
+      if (is_const) {
+        initialize =
+            NEW(CallRuntime(
+                Factory::InitializeConstGlobal_symbol(),
+                Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+                arguments));
+      } else {
+        initialize =
+            NEW(CallRuntime(
+                Factory::InitializeVarGlobal_symbol(),
+                Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
+                arguments));
+      }
+      block->AddStatement(NEW(ExpressionStatement(initialize)));
+    }
+
+    // Add an assignment node to the initialization statement block if
+    // we still have a pending initialization value. We must distinguish
+    // between variables and constants: Variable initializations are simply
+    // assignments (with all the consequences if they are inside a 'with'
+    // statement - they may change a 'with' object property). Constant
+    // initializations always assign to the declared constant which is
+    // always at the function scope level. This is only relevant for
+    // dynamically looked-up variables and constants (the start context
+    // for constant lookups is always the function context, while it is
+    // the top context for variables). Sigh...
+    if (value != NULL) {
+      Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
+      Assignment* assignment = NEW(Assignment(op, last_var, value, position));
+      // 'block' is NULL when pre-parsing.
+      if (block) block->AddStatement(NEW(ExpressionStatement(assignment)));
+    }
+  } while (peek() == Token::COMMA);
+
+  if (!is_const && nvars == 1) {
+    // We have a single, non-const variable.
+    if (is_pre_parsing_) {
+      // If we're preparsing then we need to set the var to something
+      // in order for for-in loops to parse correctly.
+      *var = ValidLeftHandSideSentinel::instance();
+    } else {
+      ASSERT(last_var != NULL);
+      *var = last_var;
+    }
+  }
+
+  return block;
+}
+
+
+// Return true if *label* occurs in *labels* (identity comparison).
+// A NULL label list never contains anything.
+static bool ContainsLabel(ZoneStringList* labels, Handle<String> label) {
+  ASSERT(!label.is_null());
+  if (labels == NULL) return false;
+  for (int i = 0; i < labels->length(); i++) {
+    if (labels->at(i).is_identical_to(label)) return true;
+  }
+  return false;
+}
+
+
+Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
+                                                      bool* ok) {
+  // ExpressionStatement | LabelledStatement ::
+  //   Expression ';'
+  //   Identifier ':' Statement
+
+  Expression* expr = ParseExpression(true, CHECK_OK);
+  // A plain identifier followed by ':' is a label, not an expression.
+  if (peek() == Token::COLON && expr &&
+      expr->AsVariableProxy() != NULL &&
+      !expr->AsVariableProxy()->is_this()) {
+    VariableProxy* var = expr->AsVariableProxy();
+    Handle<String> label = var->name();
+    // TODO(1240780): We don't check for redeclaration of labels
+    // during preparsing since keeping track of the set of active
+    // labels requires nontrivial changes to the way scopes are
+    // structured. However, these are probably changes we want to
+    // make later anyway so we should go back and fix this then.
+    if (!is_pre_parsing_) {
+      // A label may not shadow one already attached to this statement
+      // or one on the enclosing target stack.
+      if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
+        SmartPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
+        const char* elms[2] = { "Label", *c_string };
+        Vector<const char*> args(elms, 2);
+        ReportMessage("redeclaration", args);
+        *ok = false;
+        return NULL;
+      }
+      if (labels == NULL) labels = new ZoneStringList(4);
+      labels->Add(label);
+      // Remove the "ghost" variable that turned out to be a label
+      // from the top scope. This way, we don't try to resolve it
+      // during the scope processing.
+      top_scope_->RemoveUnresolved(var);
+    }
+    Expect(Token::COLON, CHECK_OK);
+    // Parse the labelled statement, passing the accumulated labels on.
+    return ParseStatement(labels, ok);
+  }
+
+  // Parsed expression statement.
+  ExpectSemicolon(CHECK_OK);
+  return NEW(ExpressionStatement(expr));
+}
+
+
+IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
+  // IfStatement ::
+  //   'if' '(' Expression ')' Statement ('else' Statement)?
+
+  Expect(Token::IF, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  Expression* condition = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+  Statement* then_statement = ParseStatement(labels, CHECK_OK);
+  Statement* else_statement = NULL;
+  if (peek() == Token::ELSE) {
+    Next();
+    else_statement = ParseStatement(labels, CHECK_OK);
+  } else if (!is_pre_parsing_) {
+    // Normalize a missing else branch to an empty statement so the AST
+    // always has both branches; stays NULL when pre-parsing.
+    else_statement = factory()->EmptyStatement();
+  }
+  return NEW(IfStatement(condition, then_statement, else_statement));
+}
+
+
+Statement* Parser::ParseContinueStatement(bool* ok) {
+  // ContinueStatement ::
+  //   'continue' Identifier? ';'
+
+  Expect(Token::CONTINUE, CHECK_OK);
+  Handle<String> label(static_cast<String**>(NULL));
+  Token::Value tok = peek();
+  // Automatic semicolon insertion: a line terminator after 'continue'
+  // terminates the statement, so only read a label on the same line.
+  if (!scanner_.has_line_terminator_before_next() &&
+      tok != Token::SEMICOLON && tok != Token::RBRACE) {
+    label = ParseIdentifier(CHECK_OK);
+  }
+  IterationStatement* target = NULL;
+  if (!is_pre_parsing_) {
+    target = LookupContinueTarget(label, CHECK_OK);
+    if (target == NULL) {
+      // Illegal continue statement.  To be consistent with KJS we delay
+      // reporting of the syntax error until runtime.
+      Handle<String> error_type = Factory::illegal_continue_symbol();
+      if (!label.is_null()) error_type = Factory::unknown_label_symbol();
+      Expression* throw_error = NewThrowSyntaxError(error_type, label);
+      return NEW(ExpressionStatement(throw_error));
+    }
+  }
+  ExpectSemicolon(CHECK_OK);
+  return NEW(ContinueStatement(target));
+}
+
+
+Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
+  // BreakStatement ::
+  //   'break' Identifier? ';'
+
+  Expect(Token::BREAK, CHECK_OK);
+  Handle<String> label;
+  Token::Value tok = peek();
+  // Automatic semicolon insertion: a line terminator after 'break'
+  // terminates the statement, so only read a label on the same line.
+  if (!scanner_.has_line_terminator_before_next() &&
+      tok != Token::SEMICOLON && tok != Token::RBRACE) {
+    label = ParseIdentifier(CHECK_OK);
+  }
+  // Parse labelled break statements that target themselves into
+  // empty statements, e.g. 'l1: l2: l3: break l2;'
+  if (!label.is_null() && ContainsLabel(labels, label)) {
+    return factory()->EmptyStatement();
+  }
+  BreakableStatement* target = NULL;
+  if (!is_pre_parsing_) {
+    target = LookupBreakTarget(label, CHECK_OK);
+    if (target == NULL) {
+      // Illegal break statement.  To be consistent with KJS we delay
+      // reporting of the syntax error until runtime.
+      Handle<String> error_type = Factory::illegal_break_symbol();
+      if (!label.is_null()) error_type = Factory::unknown_label_symbol();
+      Expression* throw_error = NewThrowSyntaxError(error_type, label);
+      return NEW(ExpressionStatement(throw_error));
+    }
+  }
+  ExpectSemicolon(CHECK_OK);
+  return NEW(BreakStatement(target));
+}
+
+
+Statement* Parser::ParseReturnStatement(bool* ok) {
+ // ReturnStatement ::
+ // 'return' Expression? ';'
+
+ // Consume the return token. It is necessary to do this before
+ // reporting any errors on it, because of the way errors are
+ // reported (underlining).
+ Expect(Token::RETURN, CHECK_OK);
+
+ // An ECMAScript program is considered syntactically incorrect if it
+ // contains a return statement that is not within the body of a
+ // function. See ECMA-262, section 12.9, page 67.
+ //
+ // To be consistent with KJS we report the syntax error at runtime.
+ if (!is_pre_parsing_ && !top_scope_->is_function_scope()) {
+ Handle<String> type = Factory::illegal_return_symbol();
+ Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
+ return NEW(ExpressionStatement(throw_error));
+ }
+
+ // A line terminator, '}', ';' or end-of-source after 'return' means a
+ // return without a value (automatic semicolon insertion applies).
+ Token::Value tok = peek();
+ if (scanner_.has_line_terminator_before_next() ||
+ tok == Token::SEMICOLON ||
+ tok == Token::RBRACE ||
+ tok == Token::EOS) {
+ ExpectSemicolon(CHECK_OK);
+ return NEW(ReturnStatement(GetLiteralUndefined()));
+ }
+
+ Expression* expr = ParseExpression(true, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ return NEW(ReturnStatement(expr));
+}
+
+
+// Parses the body of a 'with' statement (also reused for the desugared
+// catch block) and wraps it in a block of the form:
+// { WithEnter(obj); try { <body> } finally { WithExit(); } }
+// so the context chain is restored on every exit path, including ones
+// that leave via escaping break/continue labels.
+Block* Parser::WithHelper(Expression* obj, ZoneStringList* labels, bool* ok) {
+ // Parse the statement and collect escaping labels.
+ ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
+ LabelCollector collector(label_list);
+ Statement* stat;
+ { Target target(this, &collector);
+ with_nesting_level_++;
+ top_scope_->RecordWithStatement();
+ stat = ParseStatement(labels, CHECK_OK);
+ with_nesting_level_--;
+ }
+ // Create resulting block with two statements.
+ // 1: Evaluate the with expression.
+ // 2: The try-finally block evaluating the body.
+ Block* result = NEW(Block(NULL, 2, false));
+
+ // During pre-parsing NEW returns NULL; skip AST construction then.
+ if (result) {
+ result->AddStatement(NEW(WithEnterStatement(obj)));
+
+ // Create body block.
+ Block* body = NEW(Block(NULL, 1, false));
+ body->AddStatement(stat);
+
+ // Create exit block.
+ Block* exit = NEW(Block(NULL, 1, false));
+ exit->AddStatement(NEW(WithExitStatement()));
+
+ // Return a try-finally statement.
+ TryFinally* wrapper = NEW(TryFinally(body, NULL, exit));
+ wrapper->set_escaping_labels(collector.labels());
+ result->AddStatement(wrapper);
+ return result;
+ } else {
+ return NULL;
+ }
+}
+
+
+Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
+ // WithStatement ::
+ // 'with' '(' Expression ')' Statement
+
+ // We do not allow the use of 'with' statements in the internal JS
+ // code. If 'with' statements were allowed, the simplified setup of
+ // the runtime context chain would allow access to properties in the
+ // global object from within a 'with' statement.
+ ASSERT(!Bootstrapper::IsActive());
+
+ Expect(Token::WITH, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ Expression* expr = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ // Desugar the body; see WithHelper for the resulting block structure.
+ return WithHelper(expr, labels, CHECK_OK);
+}
+
+
+// Parses one 'case' or 'default' clause of a switch statement.
+// *default_seen_ptr is shared across clauses of the same switch so a
+// second 'default' can be rejected.
+CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
+ // CaseClause ::
+ // 'case' Expression ':' Statement*
+ // 'default' ':' Statement*
+
+ Expression* label = NULL; // NULL expression indicates default case
+ if (peek() == Token::CASE) {
+ Expect(Token::CASE, CHECK_OK);
+ label = ParseExpression(true, CHECK_OK);
+ } else {
+ Expect(Token::DEFAULT, CHECK_OK);
+ if (*default_seen_ptr) {
+ ReportMessage("multiple_defaults_in_switch",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ *default_seen_ptr = true;
+ }
+ Expect(Token::COLON, CHECK_OK);
+
+ // Statements run until the next 'case', 'default' or the closing '}'.
+ ZoneListWrapper<Statement> statements = factory()->NewList<Statement>(5);
+ while (peek() != Token::CASE &&
+ peek() != Token::DEFAULT &&
+ peek() != Token::RBRACE) {
+ Statement* stat = ParseStatement(NULL, CHECK_OK);
+ statements.Add(stat);
+ }
+
+ return NEW(CaseClause(label, statements.elements()));
+}
+
+
+SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
+ bool* ok) {
+ // SwitchStatement ::
+ // 'switch' '(' Expression ')' '{' CaseClause* '}'
+
+ // NEW returns NULL while pre-parsing, hence the 'if (statement)'
+ // guard before Initialize below.
+ SwitchStatement* statement = NEW(SwitchStatement(labels));
+ Target target(this, statement);
+
+ Expect(Token::SWITCH, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ Expression* tag = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ bool default_seen = false;
+ ZoneListWrapper<CaseClause> cases = factory()->NewList<CaseClause>(4);
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
+ cases.Add(clause);
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ if (statement) statement->Initialize(tag, cases.elements());
+ return statement;
+}
+
+
+Statement* Parser::ParseThrowStatement(bool* ok) {
+ // ThrowStatement ::
+ // 'throw' Expression ';'
+
+ Expect(Token::THROW, CHECK_OK);
+ // Record the position of the 'throw' keyword for the AST node.
+ int pos = scanner().location().beg_pos;
+ // ECMA-262 forbids a line terminator between 'throw' and its
+ // expression (restricted production), so report it eagerly.
+ if (scanner_.has_line_terminator_before_next()) {
+ ReportMessage("newline_after_throw", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ Expression* exception = ParseExpression(true, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+
+ return NEW(ExpressionStatement(new Throw(exception, pos)));
+}
+
+
+// Builds the object literal { id: value } used as the scope object of
+// the with-block that a catch clause is desugared into (see
+// ParseTryStatement), binding the catch variable name to the caught
+// exception value. Returns NULL during pre-parsing.
+Expression* Parser::MakeCatchContext(Handle<String> id, VariableProxy* value) {
+ ZoneListWrapper<ObjectLiteral::Property> properties =
+ factory()->NewList<ObjectLiteral::Property>(1);
+ Literal* key = NEW(Literal(id));
+ ObjectLiteral::Property* property = NEW(ObjectLiteral::Property(key, value));
+ properties.Add(property);
+
+ // This must be called always, even during pre-parsing!
+ // (Computation of literal index must happen before pre-parse bailout.)
+ int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+ if (is_pre_parsing_) {
+ return NULL;
+ }
+
+ // Construct the expression for calling Runtime::CreateObjectLiteral
+ // with the literal array as argument.
+ Handle<FixedArray> constant_properties = Factory::empty_fixed_array();
+ ZoneList<Expression*>* arguments = new ZoneList<Expression*>(1);
+ arguments->Add(new Literal(constant_properties));
+ Expression* literal = new CallRuntime(
+ Factory::CreateObjectLiteralBoilerplate_symbol(),
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralBoilerplate),
+ arguments);
+
+ return new ObjectLiteral(constant_properties, literal,
+ properties.elements(), literal_index);
+}
+
+
+TryStatement* Parser::ParseTryStatement(bool* ok) {
+ // TryStatement ::
+ // 'try' Block Catch
+ // 'try' Block Finally
+ // 'try' Block Catch Finally
+ //
+ // Catch ::
+ // 'catch' '(' Identifier ')' Block
+ //
+ // Finally ::
+ // 'finally' Block
+
+ Expect(Token::TRY, CHECK_OK);
+
+ // Collect labels that escape the try block; they are attached to the
+ // resulting Try* node so break/continue can unwind correctly.
+ ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
+ LabelCollector collector(label_list);
+ Block* try_block;
+
+ { Target target(this, &collector);
+ try_block = ParseBlock(NULL, CHECK_OK);
+ }
+
+ Block* catch_block = NULL;
+ VariableProxy* catch_var = NULL;
+ Block* finally_block = NULL;
+
+ Token::Value tok = peek();
+ if (tok != Token::CATCH && tok != Token::FINALLY) {
+ ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
+ // If we can break out from the catch block and there is a finally block,
+ // then we will need to collect labels from the catch block. Since we don't
+ // know yet if there will be a finally block, we always collect the labels.
+ ZoneList<Label*>* catch_label_list = NEW(ZoneList<Label*>(0));
+ LabelCollector catch_collector(catch_label_list);
+ bool has_catch = false;
+ if (tok == Token::CATCH) {
+ has_catch = true;
+ Consume(Token::CATCH);
+
+ Expect(Token::LPAREN, CHECK_OK);
+ Handle<String> name = ParseIdentifier(CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ if (peek() == Token::LBRACE) {
+ // Allocate a temporary for holding the finally state while
+ // executing the finally block.
+ catch_var = top_scope_->NewTemporary(Factory::catch_var_symbol());
+ // The catch body is desugared into a with-block whose scope object
+ // binds the catch variable; see MakeCatchContext and WithHelper.
+ Expression* obj = MakeCatchContext(name, catch_var);
+ { Target target(this, &catch_collector);
+ catch_block = WithHelper(obj, NULL, CHECK_OK);
+ }
+ } else {
+ // Not '{': report the expected-'{' error via Expect.
+ Expect(Token::LBRACE, CHECK_OK);
+ }
+
+ tok = peek();
+ }
+
+ VariableProxy* finally_var = NULL;
+ // If there was no catch clause, the earlier check guarantees the next
+ // token is 'finally', so Consume below is safe in the '!has_catch' case.
+ if (tok == Token::FINALLY || !has_catch) {
+ Consume(Token::FINALLY);
+ // Declare a variable for holding the finally state while
+ // executing the finally block.
+ finally_var = top_scope_->NewTemporary(Factory::finally_state_symbol());
+ finally_block = ParseBlock(NULL, CHECK_OK);
+ }
+
+ // Simplify the AST nodes by converting:
+ // 'try { } catch { } finally { }'
+ // to:
+ // 'try { try { } catch { } } finally { }'
+
+ if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
+ TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
+ statement->set_escaping_labels(collector.labels());
+ try_block = NEW(Block(NULL, 1, false));
+ try_block->AddStatement(statement);
+ catch_block = NULL;
+ }
+
+ TryStatement* result = NULL;
+ if (!is_pre_parsing_) {
+ if (catch_block != NULL) {
+ ASSERT(finally_block == NULL);
+ result = NEW(TryCatch(try_block, catch_var, catch_block));
+ result->set_escaping_labels(collector.labels());
+ } else {
+ ASSERT(finally_block != NULL);
+ result = NEW(TryFinally(try_block, finally_var, finally_block));
+ // Add the labels of the try block and the catch block.
+ for (int i = 0; i < collector.labels()->length(); i++) {
+ catch_collector.labels()->Add(collector.labels()->at(i));
+ }
+ result->set_escaping_labels(catch_collector.labels());
+ }
+ }
+
+ return result;
+}
+
+
+LoopStatement* Parser::ParseDoStatement(ZoneStringList* labels, bool* ok) {
+ // DoStatement ::
+ // 'do' Statement 'while' '(' Expression ')' ';'
+
+ // NEW returns NULL while pre-parsing, hence the 'if (loop)' guard below.
+ LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::DO_LOOP));
+ Target target(this, loop);
+
+ Expect(Token::DO, CHECK_OK);
+ Statement* body = ParseStatement(NULL, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ Expression* cond = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ // Allow do-statements to be terminated with and without
+ // semi-colons. This allows code such as 'do;while(0)return' to
+ // parse, which would not be the case if we had used the
+ // ExpectSemicolon() functionality here.
+ if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
+
+ if (loop) loop->Initialize(NULL, cond, NULL, body);
+ return loop;
+}
+
+
+LoopStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
+ // WhileStatement ::
+ // 'while' '(' Expression ')' Statement
+
+ // NEW returns NULL while pre-parsing, hence the 'if (loop)' guard below.
+ LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::WHILE_LOOP));
+ Target target(this, loop);
+
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ Expression* cond = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ Statement* body = ParseStatement(NULL, CHECK_OK);
+
+ if (loop) loop->Initialize(NULL, cond, NULL, body);
+ return loop;
+}
+
+
+// Parses all four 'for' forms: the classic three-clause loop, and the
+// for-in loop with either a declaration ('for (var x in e)') or an
+// arbitrary left-hand-side expression ('for (x.y in e)').
+Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
+ // ForStatement ::
+ // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+
+ Statement* init = NULL;
+
+ Expect(Token::FOR, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ if (peek() != Token::SEMICOLON) {
+ if (peek() == Token::VAR || peek() == Token::CONST) {
+ Expression* each = NULL;
+ Block* variable_statement =
+ ParseVariableDeclarations(false, &each, CHECK_OK);
+ // 'each' is non-NULL only for a single declaration, which is the
+ // only form that may act as a for-in target.
+ if (peek() == Token::IN && each != NULL) {
+ ForInStatement* loop = NEW(ForInStatement(labels));
+ Target target(this, loop);
+
+ Expect(Token::IN, CHECK_OK);
+ Expression* enumerable = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ Statement* body = ParseStatement(NULL, CHECK_OK);
+ if (is_pre_parsing_) {
+ return NULL;
+ } else {
+ loop->Initialize(each, enumerable, body);
+ // Wrap declaration and loop in a block so the initializer runs
+ // before the first iteration.
+ Block* result = NEW(Block(NULL, 2, false));
+ result->AddStatement(variable_statement);
+ result->AddStatement(loop);
+ // Parsed for-in loop w/ variable/const declaration.
+ return result;
+ }
+
+ } else {
+ init = variable_statement;
+ }
+
+ } else {
+ Expression* expression = ParseExpression(false, CHECK_OK);
+ if (peek() == Token::IN) {
+ // Report syntax error if the expression is an invalid
+ // left-hand side expression.
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ if (expression != NULL && expression->AsCall() != NULL) {
+ // According to ECMA-262 host function calls are permitted to
+ // return references. This cannot happen in our system so we
+ // will always get an error. We could report this as a syntax
+ // error here but for compatibility with KJS and SpiderMonkey we
+ // choose to report the error at runtime.
+ Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+ expression = NewThrowReferenceError(type);
+ } else {
+ // Invalid left hand side expressions that are not function
+ // calls are reported as syntax errors at compile time.
+ ReportMessage("invalid_lhs_in_for_in",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ }
+ ForInStatement* loop = NEW(ForInStatement(labels));
+ Target target(this, loop);
+
+ Expect(Token::IN, CHECK_OK);
+ Expression* enumerable = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ Statement* body = ParseStatement(NULL, CHECK_OK);
+ if (loop) loop->Initialize(expression, enumerable, body);
+
+ // Parsed for-in loop.
+ return loop;
+
+ } else {
+ init = NEW(ExpressionStatement(expression));
+ }
+ }
+ }
+
+ // Standard 'for' loop
+ LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::FOR_LOOP));
+ Target target(this, loop);
+
+ // Parsed initializer at this point.
+ Expect(Token::SEMICOLON, CHECK_OK);
+
+ Expression* cond = NULL;
+ if (peek() != Token::SEMICOLON) {
+ cond = ParseExpression(true, CHECK_OK);
+ }
+ Expect(Token::SEMICOLON, CHECK_OK);
+
+ Statement* next = NULL;
+ if (peek() != Token::RPAREN) {
+ Expression* exp = ParseExpression(true, CHECK_OK);
+ next = NEW(ExpressionStatement(exp));
+ }
+ Expect(Token::RPAREN, CHECK_OK);
+
+ Statement* body = ParseStatement(NULL, CHECK_OK);
+
+ if (loop) loop->Initialize(init, cond, next, body);
+ return loop;
+}
+
+
+// Precedence = 1
+Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
+ // Expression ::
+ // AssignmentExpression
+ // Expression ',' AssignmentExpression
+
+ // The comma operator is left-associative, so fold left as we go.
+ Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ while (peek() == Token::COMMA) {
+ Expect(Token::COMMA, CHECK_OK);
+ Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ result = NEW(BinaryOperation(Token::COMMA, result, right));
+ }
+ return result;
+}
+
+
+// Precedence = 2
+Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
+ // AssignmentExpression ::
+ // ConditionalExpression
+ // LeftHandSideExpression AssignmentOperator AssignmentExpression
+
+ Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
+
+ if (!Token::IsAssignmentOp(peek())) {
+ // Parsed conditional expression only (no assignment).
+ return expression;
+ }
+
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ if (expression != NULL && expression->AsCall() != NULL) {
+ // According to ECMA-262 host function calls are permitted to
+ // return references. This cannot happen in our system so we
+ // will always get an error. We could report this as a syntax
+ // error here but for compatibility with KJS and SpiderMonkey we
+ // choose to report the error at runtime.
+ Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+ expression = NewThrowReferenceError(type);
+ } else {
+ // Invalid left hand side expressions that are not function
+ // calls are reported as syntax errors at compile time.
+ //
+ // NOTE: KJS sometimes delay the error reporting to runtime. If
+ // we want to be completely compatible we should do the same.
+ // For example: "(x++) = 42" gives a reference error at runtime
+ // with KJS whereas we report a syntax error at compile time.
+ ReportMessage("invalid_lhs_in_assignment", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ }
+
+
+ Token::Value op = Next(); // Get assignment operator.
+ int pos = scanner().location().beg_pos;
+ // Assignment is right-associative: recurse for the right-hand side.
+ Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+
+ // TODO(1231235): We try to estimate the set of properties set by
+ // constructors. We define a new property whenever there is an
+ // assignment to a property of 'this'. We should probably only add
+ // properties if we haven't seen them before. Otherwise we'll
+ // probably overestimate the number of properties.
+ Property* property = expression ? expression->AsProperty() : NULL;
+ if (op == Token::ASSIGN &&
+ property != NULL &&
+ property->obj()->AsVariableProxy() != NULL &&
+ property->obj()->AsVariableProxy()->is_this()) {
+ temp_scope_->AddProperty();
+ }
+
+ return NEW(Assignment(op, expression, right, pos));
+}
+
+
+// Precedence = 3
+Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
+ // ConditionalExpression ::
+ // LogicalOrExpression
+ // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+
+ // We start using the binary expression parser for prec >= 4 only!
+ Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
+ if (peek() != Token::CONDITIONAL) return expression;
+ Consume(Token::CONDITIONAL);
+ // In parsing the first assignment expression in conditional
+ // expressions we always accept the 'in' keyword; see ECMA-262,
+ // section 11.12, page 58.
+ Expression* left = ParseAssignmentExpression(true, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
+ Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ return NEW(Conditional(expression, left, right));
+}
+
+
+// Returns the binary-operator precedence of 'tok', except that 'in' is
+// demoted to precedence 0 when the caller does not accept it; a result
+// of 0 terminates binary expression parsing.
+static int Precedence(Token::Value tok, bool accept_IN) {
+ return (!accept_IN && tok == Token::IN) ? 0 : Token::Precedence(tok);
+}
+
+
+// Precedence >= 4
+// Precedence-climbing parser for binary operators, with constant
+// folding of operations whose operands are both number literals.
+Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
+ ASSERT(prec >= 4);
+ Expression* x = ParseUnaryExpression(CHECK_OK);
+ for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
+ // prec1 >= 4
+ while (Precedence(peek(), accept_IN) == prec1) {
+ Token::Value op = Next();
+ Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
+
+ // Compute some expressions involving only number literals.
+ if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber() &&
+ y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
+ double x_val = x->AsLiteral()->handle()->Number();
+ double y_val = y->AsLiteral()->handle()->Number();
+
+ switch (op) {
+ case Token::ADD:
+ x = NewNumberLiteral(x_val + y_val);
+ continue;
+ case Token::SUB:
+ x = NewNumberLiteral(x_val - y_val);
+ continue;
+ case Token::MUL:
+ x = NewNumberLiteral(x_val * y_val);
+ continue;
+ case Token::DIV:
+ x = NewNumberLiteral(x_val / y_val);
+ continue;
+ case Token::BIT_OR:
+ x = NewNumberLiteral(DoubleToInt32(x_val) | DoubleToInt32(y_val));
+ continue;
+ case Token::BIT_AND:
+ x = NewNumberLiteral(DoubleToInt32(x_val) & DoubleToInt32(y_val));
+ continue;
+ case Token::BIT_XOR:
+ x = NewNumberLiteral(DoubleToInt32(x_val) ^ DoubleToInt32(y_val));
+ continue;
+ case Token::SHL: {
+ // Shift counts are masked to 5 bits, as required by ECMA-262.
+ int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
+ x = NewNumberLiteral(value);
+ continue;
+ }
+ case Token::SHR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t value = DoubleToUint32(x_val) >> shift;
+ x = NewNumberLiteral(value);
+ continue;
+ }
+ case Token::SAR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
+ x = NewNumberLiteral(value);
+ continue;
+ }
+ default:
+ break;
+ }
+ }
+
+ // For now we distinguish between comparisons and other binary
+ // operations. (We could combine the two and get rid of this
+ // code and AST node eventually.)
+ if (Token::IsCompareOp(op)) {
+ // We have a comparison. '!=' and '!==' are expressed as a
+ // negated '==' / '===' comparison.
+ Token::Value cmp = op;
+ switch (op) {
+ case Token::NE: cmp = Token::EQ; break;
+ case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
+ default: break;
+ }
+ x = NEW(CompareOperation(cmp, x, y));
+ if (cmp != op) {
+ // The comparison was negated - add a NOT.
+ x = NEW(UnaryOperation(Token::NOT, x));
+ }
+
+ } else {
+ // We have a "normal" binary operation.
+ x = NEW(BinaryOperation(op, x, y));
+ }
+ }
+ }
+ return x;
+}
+
+
+Expression* Parser::ParseUnaryExpression(bool* ok) {
+ // UnaryExpression ::
+ // PostfixExpression
+ // 'delete' UnaryExpression
+ // 'void' UnaryExpression
+ // 'typeof' UnaryExpression
+ // '++' UnaryExpression
+ // '--' UnaryExpression
+ // '+' UnaryExpression
+ // '-' UnaryExpression
+ // '~' UnaryExpression
+ // '!' UnaryExpression
+
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) {
+ op = Next();
+ Expression* x = ParseUnaryExpression(CHECK_OK);
+
+ // Compute some expressions involving only number literals
+ // (constant folding for unary '-' and '~').
+ if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber()) {
+ double x_val = x->AsLiteral()->handle()->Number();
+ switch (op) {
+ case Token::SUB:
+ return NewNumberLiteral(-x_val);
+ case Token::BIT_NOT:
+ return NewNumberLiteral(~DoubleToInt32(x_val));
+ default: break;
+ }
+ }
+
+ return NEW(UnaryOperation(op, x));
+
+ } else if (Token::IsCountOp(op)) {
+ // Prefix '++' or '--': the operand must be a valid assignment target.
+ op = Next();
+ Expression* x = ParseUnaryExpression(CHECK_OK);
+ if (x == NULL || !x->IsValidLeftHandSide()) {
+ if (x != NULL && x->AsCall() != NULL) {
+ // According to ECMA-262 host function calls are permitted to
+ // return references. This cannot happen in our system so we
+ // will always get an error. We could report this as a syntax
+ // error here but for compatibility with KJS and SpiderMonkey we
+ // choose to report the error at runtime.
+ Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+ x = NewThrowReferenceError(type);
+ } else {
+ // Invalid left hand side expressions that are not function
+ // calls are reported as syntax errors at compile time.
+ ReportMessage("invalid_lhs_in_prefix_op", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ }
+ return NEW(CountOperation(true /* prefix */, op, x));
+
+ } else {
+ return ParsePostfixExpression(ok);
+ }
+}
+
+
+Expression* Parser::ParsePostfixExpression(bool* ok) {
+ // PostfixExpression ::
+ // LeftHandSideExpression ('++' | '--')?
+
+ Expression* result = ParseLeftHandSideExpression(CHECK_OK);
+ // A postfix operator may not be preceded by a line terminator
+ // (restricted production, ECMA-262 section 7.9).
+ if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
+ if (result == NULL || !result->IsValidLeftHandSide()) {
+ if (result != NULL && result->AsCall() != NULL) {
+ // According to ECMA-262 host function calls are permitted to
+ // return references. This cannot happen in our system so we
+ // will always get an error. We could report this as a syntax
+ // error here but for compatibility with KJS and SpiderMonkey we
+ // choose to report the error at runtime.
+ Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+ result = NewThrowReferenceError(type);
+ } else {
+ // Invalid left hand side expressions that are not function
+ // calls are reported as syntax errors at compile time.
+ ReportMessage("invalid_lhs_in_postfix_op",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ }
+ Token::Value next = Next();
+ result = NEW(CountOperation(false /* postfix */, next, result));
+ }
+ return result;
+}
+
+
+Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
+ // LeftHandSideExpression ::
+ // (NewExpression | MemberExpression) ...
+
+ Expression* result;
+ if (peek() == Token::NEW) {
+ result = ParseNewExpression(CHECK_OK);
+ } else {
+ result = ParseMemberExpression(CHECK_OK);
+ }
+
+ // Consume any number of trailing '[...]', '(...)' and '.name' parts.
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = scanner().location().beg_pos;
+ Expression* index = ParseExpression(true, CHECK_OK);
+ result = factory()->NewProperty(result, index, pos);
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
+
+ case Token::LPAREN: {
+ int pos = scanner().location().beg_pos;
+ ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations. We can ignore locally declared variables with
+ // name 'eval' since they override the global 'eval' function. We
+ // only need to look at unresolved variables (VariableProxies).
+
+ if (!is_pre_parsing_) {
+ // We assume that only a function called 'eval' can be used
+ // to invoke the global eval() implementation. This permits
+ // for massive optimizations.
+ VariableProxy* callee = result->AsVariableProxy();
+ if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
+ // We do not allow direct calls to 'eval' in our internal
+ // JS files. Use builtin functions instead.
+ ASSERT(!Bootstrapper::IsActive());
+ top_scope_->RecordEvalCall();
+ } else {
+ // This is rather convoluted code to check if we're calling
+ // a function named 'eval' through a property access. If so,
+ // we mark it as a possible eval call (we don't know if the
+ // receiver will resolve to the global object or not), but
+ // we do not treat the call as an eval() call - we let the
+ // call get through to the JavaScript eval code defined in
+ // v8natives.js.
+ Property* property = result->AsProperty();
+ if (property != NULL) {
+ Literal* key = property->key()->AsLiteral();
+ if (key != NULL &&
+ key->handle().is_identical_to(Factory::eval_symbol())) {
+ // We do not allow direct calls to 'eval' in our
+ // internal JS files. Use builtin functions instead.
+ ASSERT(!Bootstrapper::IsActive());
+ top_scope_->RecordEvalCall();
+ }
+ }
+ }
+ }
+
+ // Optimize the eval() case w/o arguments so we
+ // don't need to handle it every time at runtime.
+ //
+ // Note: For now we don't do static eval analysis
+ // as it appears that we need to be able to call
+ // eval() via alias names. We leave the code as
+ // is, in case we want to enable this again in the
+ // future.
+ const bool is_eval = false;
+ if (is_eval && args->length() == 0) {
+ result = NEW(Literal(Factory::undefined_value()));
+ } else {
+ result = factory()->NewCall(result, args, is_eval, pos);
+ }
+ break;
+ }
+
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = scanner().location().beg_pos;
+ Handle<String> name = ParseIdentifier(CHECK_OK);
+ result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+ break;
+ }
+
+ default:
+ return result;
+ }
+ }
+}
+
+
+Expression* Parser::ParseNewExpression(bool* ok) {
+ // NewExpression ::
+ // ('new')+ MemberExpression
+
+ // The grammar for new expressions is pretty warped. The keyword
+ // 'new' can either be a part of the new expression (where it isn't
+ // followed by an argument list) or a part of the member expression,
+ // where it must be followed by an argument list. To accommodate
+ // this, we parse the 'new' keywords greedily and keep track of how
+ // many we have parsed. This information is then passed on to the
+ // member expression parser, which is only allowed to match argument
+ // lists as long as it has 'new' prefixes left.
+ List<int> new_positions(4);
+ while (peek() == Token::NEW) {
+ Consume(Token::NEW);
+ new_positions.Add(scanner().location().beg_pos);
+ }
+ ASSERT(new_positions.length() > 0);
+
+ Expression* result =
+ ParseMemberWithNewPrefixesExpression(&new_positions, CHECK_OK);
+ // Any 'new' prefixes not consumed by argument lists become
+ // zero-argument CallNew nodes, innermost first.
+ while (!new_positions.is_empty()) {
+ int last = new_positions.RemoveLast();
+ result = NEW(CallNew(result, new ZoneList<Expression*>(0), last));
+ }
+ return result;
+}
+
+
+Expression* Parser::ParseMemberExpression(bool* ok) {
+ // A shared, always-empty 'new' prefix list: the callee only removes
+ // elements when the list is non-empty, so it is never mutated here.
+ // NOTE(review): being static, this relies on single-threaded parsing.
+ static List<int> new_positions(0);
+ return ParseMemberWithNewPrefixesExpression(&new_positions, ok);
+}
+
+
+Expression* Parser::ParseMemberWithNewPrefixesExpression(
+ List<int>* new_positions,
+ bool* ok) {
+ // MemberExpression ::
+ // (PrimaryExpression | FunctionLiteral)
+ // ('[' Expression ']' | '.' Identifier | Arguments)*
+
+ // Parse the initial primary or function expression.
+ Expression* result = NULL;
+ if (peek() == Token::FUNCTION) {
+ Expect(Token::FUNCTION, CHECK_OK);
+ int function_token_position = scanner().location().beg_pos;
+ Handle<String> name;
+ if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
+ result = ParseFunctionLiteral(name, function_token_position,
+ NESTED, CHECK_OK);
+ } else {
+ result = ParsePrimaryExpression(CHECK_OK);
+ }
+
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = scanner().location().beg_pos;
+ Expression* index = ParseExpression(true, CHECK_OK);
+ result = factory()->NewProperty(result, index, pos);
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = scanner().location().beg_pos;
+ Handle<String> name = ParseIdentifier(CHECK_OK);
+ result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+ break;
+ }
+ case Token::LPAREN: {
+ // Argument lists may only be matched while 'new' prefixes remain;
+ // otherwise the caller (ParseLeftHandSideExpression) handles calls.
+ if (new_positions->is_empty()) return result;
+ // Consume one of the new prefixes (already parsed).
+ ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+ int last = new_positions->RemoveLast();
+ result = NEW(CallNew(result, args, last));
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+
+DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
+ // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
+ // contexts this is used as a statement which invokes the debugger as if a
+ // break point is present.
+ // DebuggerStatement ::
+ // 'debugger' ';'
+
+ Expect(Token::DEBUGGER, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ return NEW(DebuggerStatement());
+}
+
+
+// Reports a syntax error for an unexpected token, choosing a specific
+// message for the token classes that have one.
+void Parser::ReportUnexpectedToken(Token::Value token) {
+ // We don't report stack overflows here, to avoid increasing the
+ // stack depth even further. Instead we report it after parsing is
+ // over, in ParseProgram.
+ if (token == Token::ILLEGAL && scanner().stack_overflow())
+ return;
+ // Four of the tokens are treated specially
+ switch (token) {
+ case Token::EOS:
+ return ReportMessage("unexpected_eos", Vector<const char*>::empty());
+ case Token::NUMBER:
+ return ReportMessage("unexpected_token_number",
+ Vector<const char*>::empty());
+ case Token::STRING:
+ return ReportMessage("unexpected_token_string",
+ Vector<const char*>::empty());
+ case Token::IDENTIFIER:
+ return ReportMessage("unexpected_token_identifier",
+ Vector<const char*>::empty());
+ default:
+ // Generic message carrying the token's printable name.
+ const char* name = Token::String(token);
+ ASSERT(name != NULL);
+ ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
+ }
+}
+
+
+Expression* Parser::ParsePrimaryExpression(bool* ok) {
+ // PrimaryExpression ::
+ // 'this'
+ // 'null'
+ // 'true'
+ // 'false'
+ // Identifier
+ // Number
+ // String
+ // ArrayLiteral
+ // ObjectLiteral
+ // RegExpLiteral
+ // '(' Expression ')'
+
+ Expression* result = NULL;
+ switch (peek()) {
+ case Token::THIS: {
+ Consume(Token::THIS);
+ if (is_pre_parsing_) {
+ result = VariableProxySentinel::this_proxy();
+ } else {
+ VariableProxy* recv = top_scope_->receiver();
+ recv->var_uses()->RecordRead(1);
+ result = recv;
+ }
+ break;
+ }
+
+ case Token::NULL_LITERAL:
+ Consume(Token::NULL_LITERAL);
+ result = NEW(Literal(Factory::null_value()));
+ break;
+
+ case Token::TRUE_LITERAL:
+ Consume(Token::TRUE_LITERAL);
+ result = NEW(Literal(Factory::true_value()));
+ break;
+
+ case Token::FALSE_LITERAL:
+ Consume(Token::FALSE_LITERAL);
+ result = NEW(Literal(Factory::false_value()));
+ break;
+
+ case Token::IDENTIFIER: {
+ Handle<String> name = ParseIdentifier(CHECK_OK);
+ if (is_pre_parsing_) {
+ result = VariableProxySentinel::identifier_proxy();
+ } else {
+ // Resolution of the variable happens later, in the scope chain.
+ result = top_scope_->NewUnresolved(name, inside_with());
+ }
+ break;
+ }
+
+ case Token::NUMBER: {
+ Consume(Token::NUMBER);
+ double value =
+ StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+ result = NewNumberLiteral(value);
+ break;
+ }
+
+ case Token::STRING: {
+ Consume(Token::STRING);
+ Handle<String> symbol =
+ factory()->LookupSymbol(scanner_.literal_string(),
+ scanner_.literal_length());
+ result = NEW(Literal(symbol));
+ break;
+ }
+
+ // A leading '/=' or '/' here can only start a regexp literal; the
+ // flag tells the scanner whether the '=' is part of the pattern.
+ case Token::ASSIGN_DIV:
+ result = ParseRegExpLiteral(true, CHECK_OK);
+ break;
+
+ case Token::DIV:
+ result = ParseRegExpLiteral(false, CHECK_OK);
+ break;
+
+ case Token::LBRACK:
+ result = ParseArrayLiteral(CHECK_OK);
+ break;
+
+ case Token::LBRACE:
+ result = ParseObjectLiteral(CHECK_OK);
+ break;
+
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ result = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ break;
+
+ case Token::MOD:
+ // '%name(...)' is the V8 runtime-call ("natives") syntax.
+ if (allow_natives_syntax_ || extension_ != NULL) {
+ result = ParseV8Intrinsic(CHECK_OK);
+ break;
+ }
+ // If we're not allowing special syntax we fall-through to the
+ // default case.
+
+ default: {
+ Token::Value tok = peek();
+ // Token::Peek returns the value of the next token but
+ // location() gives info about the current token.
+ // Therefore, we need to read ahead to the next token
+ Next();
+ ReportUnexpectedToken(tok);
+ *ok = false;
+ return NULL;
+ }
+ }
+
+ return result;
+}
+
+
+// Parse an array literal into an ArrayLiteral node whose elements are
+// pre-flattened into a fixed array of constants (holes for non-literals).
+Expression* Parser::ParseArrayLiteral(bool* ok) {
+  // ArrayLiteral ::
+  //   '[' Expression? (',' Expression?)* ']'
+
+  ZoneListWrapper<Expression> values = factory()->NewList<Expression>(4);
+  Expect(Token::LBRACK, CHECK_OK);
+  while (peek() != Token::RBRACK) {
+    Expression* elem;
+    if (peek() == Token::COMMA) {
+      // An elision ("[,1]") produces a hole element.
+      elem = GetLiteralTheHole();
+    } else {
+      elem = ParseAssignmentExpression(true, CHECK_OK);
+    }
+    values.Add(elem);
+    if (peek() != Token::RBRACK) {
+      Expect(Token::COMMA, CHECK_OK);
+    }
+  }
+  Expect(Token::RBRACK, CHECK_OK);
+  // No backing list means we are preparsing; no AST is built.
+  if (values.elements() == NULL) return NULL;
+
+  // Allocate a fixed array with all the literals.
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(values.length(), TENURED);
+
+  // Fill in the literals; non-literal expressions become holes and are
+  // evaluated at runtime instead.
+  for (int i = 0; i < values.length(); i++) {
+    Literal* literal = values.at(i)->AsLiteral();
+    if (literal == NULL) {
+      literals->set_the_hole(i);
+    } else {
+      literals->set(i, *literal->handle());
+    }
+  }
+
+  // Construct the expression for calling Runtime::CreateArray
+  // with the literal array as argument.
+  ZoneList<Expression*>* arguments = new ZoneList<Expression*>(1);
+  arguments->Add(NEW(Literal(literals)));
+  Expression* result =
+      NEW(CallRuntime(Factory::CreateArrayLiteral_symbol(),
+                      Runtime::FunctionForId(Runtime::kCreateArrayLiteral),
+                      arguments));
+
+  return NEW(ArrayLiteral(literals, result, values.elements()));
+}
+
+
+// Parse an object literal. Constant (literal key + literal value)
+// properties are collected into a flat [key, value, key, value, ...]
+// fixed array used to build the boilerplate object at runtime.
+Expression* Parser::ParseObjectLiteral(bool* ok) {
+  // ObjectLiteral ::
+  //   '{' (
+  //       ((Identifier | String | Number) ':' AssignmentExpression)
+  //     | (('get' | 'set') FunctionLiteral)
+  //    )*[','] '}'
+
+  ZoneListWrapper<ObjectLiteral::Property> properties =
+      factory()->NewList<ObjectLiteral::Property>(4);
+  int number_of_constant_properties = 0;
+
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    Literal* key = NULL;
+    switch (peek()) {
+      case Token::IDENTIFIER: {
+        // Store identifier keys as literal symbols to avoid
+        // resolving them when compiling code for the object
+        // literal.
+        bool is_getter = false;
+        bool is_setter = false;
+        Handle<String> id =
+            ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+        if (is_getter || is_setter) {
+          // Special handling of getter and setter syntax.
+          // Only 'get'/'set' followed by another identifier is accessor
+          // syntax; otherwise 'get'/'set' is an ordinary property name.
+          if (peek() == Token::IDENTIFIER) {
+            Handle<String> name = ParseIdentifier(CHECK_OK);
+            FunctionLiteral* value =
+                ParseFunctionLiteral(name, kNoPosition, DECLARATION, CHECK_OK);
+            ObjectLiteral::Property* property =
+                NEW(ObjectLiteral::Property(is_getter, value));
+            properties.Add(property);
+            if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+            continue;  // restart the while
+          }
+        }
+        key = NEW(Literal(id));
+        break;
+      }
+
+      case Token::STRING: {
+        Consume(Token::STRING);
+        Handle<String> string =
+            factory()->LookupSymbol(scanner_.literal_string(),
+                                    scanner_.literal_length());
+        uint32_t index;
+        // String keys that are array indices are canonicalized to numbers.
+        if (!string.is_null() && string->AsArrayIndex(&index)) {
+          key = NewNumberLiteral(index);
+        } else {
+          key = NEW(Literal(string));
+        }
+        break;
+      }
+
+      case Token::NUMBER: {
+        Consume(Token::NUMBER);
+        double value =
+            StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+        key = NewNumberLiteral(value);
+        break;
+      }
+
+      default:
+        // The next token cannot be RBRACE here (the loop guard excluded
+        // it), so this Expect always reports an unexpected-token error.
+        Expect(Token::RBRACE, CHECK_OK);
+        break;
+    }
+
+    Expect(Token::COLON, CHECK_OK);
+    Expression* value = ParseAssignmentExpression(true, CHECK_OK);
+
+    // NEW(...) yields NULL while preparsing, hence the NULL check below.
+    ObjectLiteral::Property* property =
+        NEW(ObjectLiteral::Property(key, value));
+    if ((property != NULL) &&
+        property->kind() == ObjectLiteral::Property::CONSTANT) {
+      number_of_constant_properties++;
+    }
+    properties.Add(property);
+
+    // TODO(1240767): Consider allowing trailing comma.
+    if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+  }
+  Expect(Token::RBRACE, CHECK_OK);
+  // Computation of literal_index must happen before pre parse bailout.
+  int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+  if (is_pre_parsing_) return NULL;
+
+  // Two slots per constant property: name followed by value.
+  Handle<FixedArray> constant_properties = (number_of_constant_properties == 0)
+      ? Factory::empty_fixed_array()
+      : Factory::NewFixedArray(number_of_constant_properties*2, TENURED);
+  int position = 0;
+  for (int i = 0; i < properties.length(); i++) {
+    ObjectLiteral::Property* property = properties.at(i);
+    if (property->kind() == ObjectLiteral::Property::CONSTANT) {
+      Handle<Object> key = property->key()->handle();
+      Literal* literal = property->value()->AsLiteral();
+      // Add name, value pair to the fixed array.
+      constant_properties->set(position++, *key);
+      constant_properties->set(position++, *literal->handle());
+    }
+  }
+
+  // Construct the expression for calling Runtime::CreateObjectLiteral
+  // with the literal array as argument.
+  ZoneList<Expression*>* arguments = new ZoneList<Expression*>(1);
+  arguments->Add(new Literal(constant_properties));
+  Expression* result =
+      new CallRuntime(
+          Factory::CreateObjectLiteralBoilerplate_symbol(),
+          Runtime::FunctionForId(Runtime::kCreateObjectLiteralBoilerplate),
+          arguments);
+  return new ObjectLiteral(constant_properties, result,
+                           properties.elements(), literal_index);
+}
+
+
+// Parse a regexp literal whose leading '/' (or '/=' when seen_equal is
+// true) has already been scanned. Returns NULL on error or when preparsing.
+Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
+  if (!scanner_.ScanRegExpPattern(seen_equal)) {
+    Next();
+    ReportMessage("unterminated_regexp", Vector<const char*>::empty());
+    *ok = false;
+    return NULL;
+  }
+
+  // Allocate the literal index before any bailout so that preparsing and
+  // full parsing assign the same indices.
+  int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+
+  if (is_pre_parsing_) {
+    // If we're preparsing we just do all the parsing stuff without
+    // building anything.
+    scanner_.ScanRegExpFlags();
+    Next();
+    return NULL;
+  }
+
+  Handle<String> js_pattern =
+      Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
+  scanner_.ScanRegExpFlags();
+  Handle<String> js_flags =
+      Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
+  Next();
+
+  return new RegExpLiteral(js_pattern, js_flags, literal_index);
+}
+
+
+ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
+  // Arguments ::
+  //   '(' (AssignmentExpression)*[','] ')'
+  //
+  // Returns the (possibly empty) argument list; while preparsing the
+  // wrapper has no backing store and elements() is NULL.
+
+  ZoneListWrapper<Expression> result = factory()->NewList<Expression>(4);
+  Expect(Token::LPAREN, CHECK_OK);
+  while (peek() != Token::RPAREN) {
+    Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
+    result.Add(argument);
+    if (peek() == Token::RPAREN) break;
+    Expect(Token::COMMA, CHECK_OK);
+  }
+  Expect(Token::RPAREN, CHECK_OK);
+  return result.elements();
+}
+
+
+// Parse a function literal starting at the parameter list. var_name is the
+// function's own name (expressions/declarations) or null; type determines
+// whether the name binds inside the function as a named expression.
+FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
+                                              int function_token_position,
+                                              FunctionLiteralType type,
+                                              bool* ok) {
+  // Function ::
+  //   '(' FormalParameterList? ')' '{' FunctionBody '}'
+
+  bool is_named = !var_name.is_null();
+
+  // The name associated with this function. If it's a function expression,
+  // this is the actual function name, otherwise this is the name of the
+  // variable declared and initialized with the function (expression). In
+  // that case, we don't have a function name (it's empty).
+  Handle<String> name = is_named ? var_name : factory()->EmptySymbol();
+  // The function name, if any.
+  Handle<String> function_name = factory()->EmptySymbol();
+  if (is_named && (type == EXPRESSION || type == NESTED)) {
+    function_name = name;
+  }
+
+  int num_parameters = 0;
+  // Parse function body.
+  // NOTE: this inner 'type' deliberately shadows the parameter.
+  { Scope::Type type = Scope::FUNCTION_SCOPE;
+    Scope* scope = factory()->NewScope(top_scope_, type, inside_with());
+    LexicalScope lexical_scope(this, scope);
+    TemporaryScope temp_scope(this);
+    top_scope_->SetScopeName(name);
+
+    //  FormalParameterList ::
+    //    '(' (Identifier)*[','] ')'
+    Expect(Token::LPAREN, CHECK_OK);
+    int start_pos = scanner_.location().beg_pos;
+    bool done = (peek() == Token::RPAREN);
+    while (!done) {
+      Handle<String> param_name = ParseIdentifier(CHECK_OK);
+      if (!is_pre_parsing_) {
+        top_scope_->AddParameter(top_scope_->Declare(param_name,
+                                                     Variable::VAR));
+        num_parameters++;
+      }
+      done = (peek() == Token::RPAREN);
+      if (!done) Expect(Token::COMMA, CHECK_OK);
+    }
+    Expect(Token::RPAREN, CHECK_OK);
+
+    Expect(Token::LBRACE, CHECK_OK);
+    ZoneListWrapper<Statement> body = factory()->NewList<Statement>(8);
+
+    // If we have a named function expression, we add a local variable
+    // declaration to the body of the function with the name of the
+    // function and let it refer to the function itself (closure).
+    // NOTE: We create a proxy and resolve it here so that in the
+    // future we can change the AST to only refer to VariableProxies
+    // instead of Variables and Proxies as is the case now.
+    if (!function_name.is_null() && function_name->length() > 0) {
+      Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
+      VariableProxy* fproxy =
+          top_scope_->NewUnresolved(function_name, inside_with());
+      fproxy->BindTo(fvar);
+      body.Add(new ExpressionStatement(
+                   new Assignment(Token::INIT_VAR, fproxy,
+                                  NEW(ThisFunction()), kNoPosition)));
+    }
+
+    // Determine if the function will be lazily compiled. The mode can
+    // only be PARSE_LAZILY if the --lazy flag is true.
+    bool is_lazily_compiled =
+        mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
+
+    int materialized_literal_count;
+    int expected_property_count;
+    if (is_lazily_compiled && pre_data() != NULL) {
+      // Skip the body using the extent recorded during preparsing, but keep
+      // the literal/property counts so later compilation can preallocate.
+      FunctionEntry entry = pre_data()->GetFunctionEnd(start_pos);
+      int end_pos = entry.end_pos();
+      Counters::total_preparse_skipped.Increment(end_pos - start_pos);
+      scanner_.SeekForward(end_pos);
+      materialized_literal_count = entry.literal_count();
+      expected_property_count = entry.property_count();
+    } else {
+      ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
+      materialized_literal_count = temp_scope.materialized_literal_count();
+      expected_property_count = temp_scope.expected_property_count();
+    }
+
+    Expect(Token::RBRACE, CHECK_OK);
+    int end_pos = scanner_.location().end_pos;
+
+    // Record this function in the preparse log (no-op when not logging).
+    FunctionEntry entry = log()->LogFunction(start_pos);
+    if (entry.is_valid()) {
+      entry.set_end_pos(end_pos);
+      entry.set_literal_count(materialized_literal_count);
+      entry.set_property_count(expected_property_count);
+    }
+
+    FunctionLiteral *function_literal =
+        NEW(FunctionLiteral(name, top_scope_,
+                            body.elements(), materialized_literal_count,
+                            expected_property_count,
+                            num_parameters, start_pos, end_pos,
+                            function_name->length() > 0));
+    if (!is_pre_parsing_) {
+      function_literal->set_function_token_position(function_token_position);
+    }
+    return function_literal;
+  }
+}
+
+
+// Parse a '%name(...)' natives-syntax call. Built-in macros (currently
+// %IS_VAR) are expanded at parse time; everything else becomes a runtime
+// call node.
+Expression* Parser::ParseV8Intrinsic(bool* ok) {
+  // CallRuntime ::
+  //   '%' Identifier Arguments
+
+  Expect(Token::MOD, CHECK_OK);
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+  Runtime::Function* function =
+      Runtime::FunctionForName(scanner_.literal_string());
+  ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+  if (function == NULL && extension_ != NULL) {
+    // The extension structures are only accessible while parsing the
+    // very first time not when reparsing because of lazy compilation.
+    top_scope_->ForceEagerCompilation();
+  }
+
+  // Check for built-in macros.
+  if (!is_pre_parsing_) {
+    if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
+      // %IS_VAR(x)
+      //   evaluates to x if x is a variable,
+      //   leads to a parse error otherwise
+      if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
+        return args->at(0);
+      }
+      *ok = false;
+    // Check here for other macros.
+    // } else if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
+    //   ...
+    }
+
+    if (!*ok) {
+      // We found a macro but it failed.
+      ReportMessage("unable_to_parse", Vector<const char*>::empty());
+      return NULL;
+    }
+  }
+
+  // Otherwise we have a runtime call.
+  return NEW(CallRuntime(name, function, args));
+}
+
+
+void Parser::Consume(Token::Value token) {
+  // Advance past the next token, asserting (debug builds only) that it is
+  // the token the caller expects.
+  Token::Value actual = Next();
+  USE(actual);
+  USE(token);
+  ASSERT(actual == token);
+}
+
+
+void Parser::Expect(Token::Value token, bool* ok) {
+  // Consume the next token; report a syntax error and clear *ok if it is
+  // not the expected one.
+  Token::Value next = Next();
+  if (next != token) {
+    ReportUnexpectedToken(next);
+    *ok = false;
+  }
+}
+
+
+void Parser::ExpectSemicolon(bool* ok) {
+  // Consume a ';', applying the automatic semicolon insertion rules of
+  // ECMA-262, section 7.9, page 21: a semicolon is inserted before a line
+  // terminator, a '}' or the end of input.
+  Token::Value tok = peek();
+  if (tok == Token::SEMICOLON) {
+    Next();
+  } else if (!scanner_.has_line_terminator_before_next() &&
+             tok != Token::RBRACE &&
+             tok != Token::EOS) {
+    // No insertion point: this reports the missing semicolon.
+    Expect(Token::SEMICOLON, ok);
+  }
+}
+
+
+// Return a fresh literal node for the undefined value (NULL if preparsing).
+Literal* Parser::GetLiteralUndefined() {
+  return NEW(Literal(Factory::undefined_value()));
+}
+
+
+// Return a fresh literal node for the hole value, used for array elisions
+// (NULL if preparsing).
+Literal* Parser::GetLiteralTheHole() {
+  return NEW(Literal(Factory::the_hole_value()));
+}
+
+
+// Return a fresh number literal node for 'value' (NULL if preparsing).
+Literal* Parser::GetLiteralNumber(double value) {
+  return NewNumberLiteral(value);
+}
+
+
+// Consume an identifier token and return it as an interned symbol; returns
+// an empty handle (with *ok false) if the next token is not an identifier.
+Handle<String> Parser::ParseIdentifier(bool* ok) {
+  Expect(Token::IDENTIFIER, ok);
+  if (!*ok) return Handle<String>();
+  return factory()->LookupSymbol(scanner_.literal_string(),
+                                 scanner_.literal_length());
+}
+
+// This function reads an identifier and determines whether or not it
+// is 'get' or 'set'. The reason for not using ParseIdentifier and
+// checking on the output is that this involves heap allocation which
+// we can't do during preparsing.
+// Consume an identifier, additionally reporting via *is_get / *is_set
+// whether it is exactly "get" or "set" (see the comment above: the raw
+// scanner buffer is compared so this also works while preparsing).
+Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
+                                                 bool* is_set,
+                                                 bool* ok) {
+  Expect(Token::IDENTIFIER, ok);
+  if (!*ok) return Handle<String>();
+  // Length check first: only three-character identifiers can match.
+  if (scanner_.literal_length() == 3) {
+    const char* token = scanner_.literal_string();
+    *is_get = strcmp(token, "get") == 0;
+    *is_set = !*is_get && strcmp(token, "set") == 0;
+  }
+  return factory()->LookupSymbol(scanner_.literal_string(),
+                                 scanner_.literal_length());
+}
+
+
+// ----------------------------------------------------------------------------
+// Parser support
+
+
+bool Parser::TargetStackContainsLabel(Handle<String> label) {
+  // Scan the target stack from the top down for a breakable statement
+  // carrying 'label'.
+  for (int i = target_stack_->length() - 1; i >= 0; i--) {
+    BreakableStatement* stat = target_stack_->at(i)->AsBreakableStatement();
+    if (stat != NULL && ContainsLabel(stat->labels(), label)) return true;
+  }
+  return false;
+}
+
+
+// Find the innermost break target for 'label' (or the innermost anonymous
+// target when the label handle is null); returns NULL if none exists.
+BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
+  bool anonymous = label.is_null();
+  for (int i = target_stack_->length(); i-- > 0;) {
+    BreakableStatement* stat = target_stack_->at(i)->AsBreakableStatement();
+    if (stat == NULL) continue;
+
+    if ((anonymous && stat->is_target_for_anonymous()) ||
+        (!anonymous && ContainsLabel(stat->labels(), label))) {
+      // Record the use so intervening label collectors see it.
+      RegisterLabelUse(stat->break_target(), i);
+      return stat;
+    }
+  }
+  return NULL;
+}
+
+
+// Find the innermost continue target for 'label' (or the innermost
+// iteration statement when the label handle is null); NULL if none exists.
+IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
+                                                 bool* ok) {
+  bool anonymous = label.is_null();
+  for (int i = target_stack_->length(); i-- > 0;) {
+    IterationStatement* stat = target_stack_->at(i)->AsIterationStatement();
+    if (stat == NULL) continue;
+
+    // Every iteration statement is a valid anonymous target.
+    ASSERT(stat->is_target_for_anonymous());
+    if (anonymous || ContainsLabel(stat->labels(), label)) {
+      RegisterLabelUse(stat->continue_target(), i);
+      return stat;
+    }
+  }
+  return NULL;
+}
+
+
+void Parser::RegisterLabelUse(Label* label, int index) {
+  // A label found at position 'index' in the target stack was used from
+  // the top of the stack; add it to every LabelCollector above it.
+  int top = target_stack_->length();
+  for (int i = top - 1; i >= index; i--) {
+    LabelCollector* collector = target_stack_->at(i)->AsLabelCollector();
+    if (collector != NULL) collector->AddLabel(label);
+  }
+}
+
+
+// Allocate a tenured heap number and wrap it in a literal node (NULL if
+// preparsing).
+Literal* Parser::NewNumberLiteral(double number) {
+  return NEW(Literal(Factory::NewNumber(number, TENURED)));
+}
+
+
+// Build an expression that throws a ReferenceError of the given type with
+// no arguments.
+Expression* Parser::NewThrowReferenceError(Handle<String> type) {
+  return NewThrowError(Factory::MakeReferenceError_symbol(),
+                       type, HandleVector<Object>(NULL, 0));
+}
+
+
+// Build an expression that throws a SyntaxError of the given type with at
+// most one argument ('first' may be null).
+Expression* Parser::NewThrowSyntaxError(Handle<String> type,
+                                        Handle<Object> first) {
+  int argc = first.is_null() ? 0 : 1;
+  Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
+  return NewThrowError(Factory::MakeSyntaxError_symbol(), type, arguments);
+}
+
+
+// Build an expression that throws a TypeError of the given type with
+// exactly two (non-null) arguments.
+Expression* Parser::NewThrowTypeError(Handle<String> type,
+                                      Handle<Object> first,
+                                      Handle<Object> second) {
+  ASSERT(!first.is_null() && !second.is_null());
+  Handle<Object> elements[] = { first, second };
+  Vector< Handle<Object> > arguments =
+      HandleVector<Object>(elements, ARRAY_SIZE(elements));
+  return NewThrowError(Factory::MakeTypeError_symbol(), type, arguments);
+}
+
+
+// Build 'throw constructor(type, [arguments...])' as an AST expression.
+// The arguments are copied into a tenured JSArray; null elements are
+// skipped. Returns NULL while preparsing.
+Expression* Parser::NewThrowError(Handle<String> constructor,
+                                  Handle<String> type,
+                                  Vector< Handle<Object> > arguments) {
+  if (is_pre_parsing_) return NULL;
+
+  int argc = arguments.length();
+  Handle<JSArray> array = Factory::NewJSArray(argc, TENURED);
+  ASSERT(array->IsJSArray() && array->HasFastElements());
+  for (int i = 0; i < argc; i++) {
+    Handle<Object> element = arguments[i];
+    if (!element.is_null()) {
+      array->SetFastElement(i, *element);
+    }
+  }
+  ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
+  args->Add(new Literal(type));
+  args->Add(new Literal(array));
+  // The throw is attributed to the current token's start position.
+  return new Throw(new CallRuntime(constructor, NULL, args),
+                   scanner().location().beg_pos);
+}
+
+
+// ----------------------------------------------------------------------------
+// The Parser interface.
+
+// MakeAST() is just a wrapper for the corresponding Parser calls
+// so we don't have to expose the entire Parser class in the .h file.
+
+static bool always_allow_natives_syntax = false;
+
+
+// The message owns its argument strings: free each one, then the array
+// that held them.
+ParserMessage::~ParserMessage() {
+  for (int i = 0; i < args().length(); i++)
+    DeleteArray(args()[i]);
+  DeleteArray(args().start());
+}
+
+
+// The preparse data owns its backing store.
+ScriptDataImpl::~ScriptDataImpl() {
+  store_.Dispose();
+}
+
+
+// Number of unsigned words in the preparse data (ScriptData interface).
+int ScriptDataImpl::Length() {
+  return store_.length();
+}
+
+
+// Raw pointer to the preparse data words (ScriptData interface).
+unsigned* ScriptDataImpl::Data() {
+  return store_.start();
+}
+
+
+// Preparse 'stream' and return freshly allocated preparse data, or NULL if
+// preparsing failed. The caller takes ownership of the result.
+ScriptDataImpl* PreParse(unibrow::CharacterStream* stream,
+                         v8::Extension* extension) {
+  Handle<Script> no_script;
+  bool allow_natives_syntax =
+      always_allow_natives_syntax ||
+      FLAG_allow_natives_syntax ||
+      Bootstrapper::IsActive();
+  PreParser parser(no_script, allow_natives_syntax, extension);
+  if (!parser.PreParseProgram(stream)) return NULL;
+  // The list owns the backing store so we need to clone the vector.
+  // That way, the result will be exactly the right size rather than
+  // the expected 50% too large.
+  Vector<unsigned> store = parser.recorder()->store()->ToVector().Clone();
+  return new ScriptDataImpl(store);
+}
+
+
+// Parse the source of 'script' into a FunctionLiteral AST. If the preparse
+// data already records an error, report it (freeing the message and its
+// arguments) and return NULL without parsing.
+FunctionLiteral* MakeAST(bool compile_in_global_context,
+                         Handle<Script> script,
+                         v8::Extension* extension,
+                         ScriptDataImpl* pre_data) {
+  bool allow_natives_syntax =
+      always_allow_natives_syntax ||
+      FLAG_allow_natives_syntax ||
+      Bootstrapper::IsActive();
+  AstBuildingParser parser(script, allow_natives_syntax, extension, pre_data);
+  if (pre_data != NULL && pre_data->has_error()) {
+    Scanner::Location loc = pre_data->MessageLocation();
+    const char* message = pre_data->BuildMessage();
+    Vector<const char*> args = pre_data->BuildArgs();
+    parser.ReportMessageAt(loc, message, args);
+    // BuildMessage/BuildArgs allocate copies; free them here.
+    DeleteArray(message);
+    for (int i = 0; i < args.length(); i++)
+      DeleteArray(args[i]);
+    DeleteArray(args.start());
+    return NULL;
+  }
+  Handle<String> source = Handle<String>(String::cast(script->source()));
+  SafeStringInputBuffer input(source.location());
+  FunctionLiteral* result = parser.ParseProgram(source,
+      &input, compile_in_global_context);
+  return result;
+}
+
+
+// Parse a single lazily-compiled function whose source occupies
+// [start_position, end_position) of the script source. Natives syntax is
+// forced on for the parser's construction (see the flag save/restore).
+FunctionLiteral* MakeLazyAST(Handle<Script> script,
+                             Handle<String> name,
+                             int start_position,
+                             int end_position,
+                             bool is_expression) {
+  bool allow_natives_syntax_before = always_allow_natives_syntax;
+  always_allow_natives_syntax = true;
+  AstBuildingParser parser(script, true, NULL, NULL);  // always allow
+  always_allow_natives_syntax = allow_natives_syntax_before;
+  // Parse the function by pulling the function source from the script source.
+  Handle<String> script_source(String::cast(script->source()));
+  FunctionLiteral* result =
+      parser.ParseLazy(SubString(script_source, start_position, end_position),
+                       name,
+                       start_position,
+                       is_expression);
+  return result;
+}
+
+
+#undef NEW
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PARSER_H_
+#define V8_PARSER_H_
+
+#include "scanner.h"
+
+namespace v8 { namespace internal {
+
+
+// An error message produced by the parser: a source location, a message
+// identifier, and its arguments. Owns the argument strings (freed in the
+// destructor, see parser.cc).
+class ParserMessage : public Malloced {
+ public:
+  ParserMessage(Scanner::Location loc, const char* message,
+                Vector<const char*> args)
+      : loc_(loc),
+        message_(message),
+        args_(args) { }
+  ~ParserMessage();
+  Scanner::Location location() { return loc_; }
+  const char* message() { return message_; }
+  Vector<const char*> args() { return args_; }
+ private:
+  Scanner::Location loc_;      // Source span the message refers to.
+  const char* message_;        // Message identifier.
+  Vector<const char*> args_;   // Owned argument strings.
+};
+
+
+// A view over kSize consecutive words of preparse data describing one
+// function: its source extent plus literal and property counts. A
+// default-constructed entry has an empty backing and is_valid() is false.
+class FunctionEntry BASE_EMBEDDED {
+ public:
+  explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
+  FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
+
+  int start_pos() { return backing_[kStartPosOffset]; }
+  void set_start_pos(int value) { backing_[kStartPosOffset] = value; }
+
+  int end_pos() { return backing_[kEndPosOffset]; }
+  void set_end_pos(int value) { backing_[kEndPosOffset] = value; }
+
+  int literal_count() { return backing_[kLiteralCountOffset]; }
+  void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
+
+  int property_count() { return backing_[kPropertyCountOffset]; }
+  void set_property_count(int value) { backing_[kPropertyCountOffset] = value; }
+
+  bool is_valid() { return backing_.length() > 0; }
+
+  // Number of words one entry occupies in the backing store.
+  static const int kSize = 4;
+
+ private:
+  Vector<unsigned> backing_;
+  static const int kStartPosOffset = 0;
+  static const int kEndPosOffset = 1;
+  static const int kLiteralCountOffset = 2;
+  static const int kPropertyCountOffset = 3;
+};
+
+
+// Concrete ScriptData produced by PreParse: a flat vector of unsigned
+// words, beginning with a kHeaderSize-word header (magic, version,
+// has-error flag, size) followed by the recorded data. Owns the store
+// (disposed in the destructor, see parser.cc).
+class ScriptDataImpl : public ScriptData {
+ public:
+  explicit ScriptDataImpl(Vector<unsigned> store)
+      : store_(store),
+        last_entry_(0) { }
+  virtual ~ScriptDataImpl();
+  virtual int Length();
+  virtual unsigned* Data();
+  FunctionEntry GetFunctionEnd(int start);
+  bool SanityCheck();
+
+  Scanner::Location MessageLocation();
+  const char* BuildMessage();
+  Vector<const char*> BuildArgs();
+
+  bool has_error() { return store_[kHasErrorOffset]; }
+  unsigned magic() { return store_[kMagicOffset]; }
+  unsigned version() { return store_[kVersionOffset]; }
+
+  static const unsigned kMagicNumber = 0xBadDead;
+  static const unsigned kCurrentVersion = 1;
+
+  // Word offsets of the header fields.
+  static const unsigned kMagicOffset = 0;
+  static const unsigned kVersionOffset = 1;
+  static const unsigned kHasErrorOffset = 2;
+  static const unsigned kSizeOffset = 3;
+  static const unsigned kHeaderSize = 4;
+
+ private:
+  unsigned Read(int position);
+  unsigned* ReadAddress(int position);
+  int EntryCount();
+  FunctionEntry nth(int n);
+
+  Vector<unsigned> store_;
+
+  // The last entry returned. This is used to make lookup faster:
+  // the next entry to return is typically the next entry so lookup
+  // will usually be much faster if we start from the last entry.
+  int last_entry_;
+};
+
+
+// The parser: Takes a script and context information, and builds a
+// FunctionLiteral AST node. Returns NULL and deallocates any allocated
+// AST nodes if parsing failed.
+FunctionLiteral* MakeAST(bool compile_in_global_context,
+ Handle<Script> script,
+ v8::Extension* extension,
+ ScriptDataImpl* pre_data);
+
+
+ScriptDataImpl* PreParse(unibrow::CharacterStream* stream,
+ v8::Extension* extension);
+
+
+// Support for doing lazy compilation. The script is the script containing full
+// source of the script where the function is declared. The start_position and
+// end_position specifies the part of the script source which has the source
+// for the function declaration in the form:
+//
+// (<formal parameters>) { <function body> }
+//
+// without any function keyword or name.
+//
+FunctionLiteral* MakeLazyAST(Handle<Script> script,
+ Handle<String> name,
+ int start_position,
+ int end_position,
+ bool is_expression);
+
+} } // namespace v8::internal
+
+#endif // V8_PARSER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for Linux goes here
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <stdlib.h>
+
+// Ubuntu Dapper requires memory pages to be marked as
+// executable. Otherwise, OS raises an exception when executing code
+// in that page.
+#include <sys/types.h> // mmap & munmap
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <sys/fcntl.h> // open
+#include <unistd.h> // getpagesize
+#include <execinfo.h> // backtrace, backtrace_symbols
+#include <errno.h>
+#include <stdarg.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+
+namespace v8 { namespace internal {
+
+// 0 is never a valid thread id on Linux since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+// Smallest integral value not less than x (thin wrapper over ceil).
+double ceiling(double x) {
+  return ceil(x);
+}
+
+
+// One-time platform initialization.
+void OS::Setup() {
+  // Seed the random number generator.
+  srandom(static_cast<unsigned int>(TimeCurrentMillis()));
+}
+
+
+// Store the process's accumulated user-mode CPU time in *secs/*usecs.
+// Returns 0 on success, -1 if getrusage fails.
+int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
+  struct rusage usage;
+
+  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
+  *secs = usage.ru_utime.tv_sec;
+  *usecs = usage.ru_utime.tv_usec;
+  return 0;
+}
+
+
+// Current wall-clock time in milliseconds since the epoch, or 0.0 if
+// gettimeofday fails.
+double OS::TimeCurrentMillis() {
+  struct timeval tv;
+  if (gettimeofday(&tv, NULL) < 0) return 0.0;
+  return (static_cast<double>(tv.tv_sec) * 1000) +
+         (static_cast<double>(tv.tv_usec) / 1000);
+}
+
+
+// Monotonic-ish tick counter in microseconds (0 on failure).
+int64_t OS::Ticks() {
+  // Linux's gettimeofday has microsecond resolution.
+  struct timeval tv;
+  if (gettimeofday(&tv, NULL) < 0)
+    return 0;
+  return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
+}
+
+
+// Name of the local time zone in effect at 'time' (milliseconds since the
+// epoch). NOTE(review): relies on the glibc/BSD tm_zone extension and on
+// localtime's static buffer — not thread-safe; confirm callers serialize.
+char* OS::LocalTimezone(double time) {
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  return const_cast<char*>(t->tm_zone);
+}
+
+
+// Daylight-saving offset in milliseconds at 'time' (milliseconds since the
+// epoch): one hour when DST is in effect locally, otherwise zero.
+double OS::DaylightSavingsOffset(double time) {
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  return t->tm_isdst ? 3600 * msPerSecond : 0;
+}
+
+
+// Standard (non-DST) local time offset from UTC in milliseconds.
+// NOTE(review): tm_gmtoff is a glibc/BSD extension — confirm portability.
+double OS::LocalTimeOffset() {
+  // 1199174400 = Jan 1 2008 (UTC).
+  // Random date where daylight savings time is not in effect.
+  static const int kJan1st2008 = 1199174400;
+  time_t tv = static_cast<time_t>(kJan1st2008);
+  struct tm* t = localtime(&tv);
+  ASSERT(t->tm_isdst <= 0);
+  return static_cast<double>(t->tm_gmtoff * msPerSecond);
+}
+
+
+// printf-style output to stdout.
+void OS::Print(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrint(format, args);
+  va_end(args);
+}
+
+
+// va_list variant of Print.
+void OS::VPrint(const char* format, va_list args) {
+  vprintf(format, args);
+}
+
+
+// printf-style output to stderr.
+void OS::PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+// va_list variant of PrintError.
+void OS::VPrintError(const char* format, va_list args) {
+  vfprintf(stderr, format, args);
+}
+
+
+// snprintf-style formatting into str; returns what vsnprintf returns.
+int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = VSNPrintF(str, size, format, args);
+  va_end(args);
+  return result;
+}
+
+
+int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
+ return vsnprintf(str, size, format, args); // forward to linux.
+}
+
+
// Canonical quiet NaN used for JavaScript NaN values.
double OS::nan_value() { return NAN; }

// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Grows the conservative [lowest, highest) envelope to cover a new block.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


// Conservative check: false positives are impossible, false negatives are
// possible (an address inside the envelope need not belong to our heap).
bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}
+
+
// Allocation granularity/alignment: one VM page, matching mmap below.
size_t OS::AllocateAlignment() {
  return getpagesize();
}


// Allocates `requested` bytes rounded up to whole pages with anonymous
// mmap. PROT_EXEC is included because generated code is run from these
// pages. On success *allocated receives the rounded size; returns NULL on
// failure (with a log event).
void* OS::Allocate(const size_t requested, size_t* allocated) {
  const size_t msize = RoundUp(requested, getpagesize());
  void* mbase = mmap(NULL, msize, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
+
+
// Releases memory obtained from OS::Allocate; `length` must be the size the
// allocation returned.
void OS::Free(void* buf, const size_t length) {
  // TODO(1240712): munmap has a return value which is ignored here.
  munmap(buf, length);
}


// Suspends the calling thread for (at least) `milliseconds`.
void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}
+
+
// Memory-mapped file backed by fopen()/mmap(); owns both the FILE* and the
// mapping and releases them in the destructor.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;    // Backing file; kept open for the mapping's lifetime.
  void* memory_;  // Start of the shared, writable mapping.
  int size_;      // Size of the mapping in bytes.
};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ fwrite(initial, size, 1, file);
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  // Unmap before closing the backing file; MAP_SHARED writes are flushed
  // back to the file by the kernel.
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}
+
#ifdef ENABLE_LOGGING_AND_PROFILING
// Parses a hexadecimal address string (e.g. "0x08048000"). Despite the
// name, the 64-bit strtoll result is narrowed to an unsigned int, so only
// 32-bit address spaces are handled correctly.
static unsigned StringToLongLong(char* buffer) {
  return static_cast<unsigned>(strtoll(buffer, NULL, 16));
}

#endif
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static const int MAP_LENGTH = 1024;
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return;
+ while (true) {
+ char addr_buffer[11];
+ addr_buffer[0] = '0';
+ addr_buffer[1] = 'x';
+ addr_buffer[10] = 0;
+ read(fd, addr_buffer + 2, 8);
+ unsigned start = StringToLongLong(addr_buffer);
+ read(fd, addr_buffer + 2, 1);
+ if (addr_buffer[2] != '-') return;
+ read(fd, addr_buffer + 2, 8);
+ unsigned end = StringToLongLong(addr_buffer);
+ char buffer[MAP_LENGTH];
+ int bytes_read = -1;
+ do {
+ bytes_read++;
+ if (bytes_read > MAP_LENGTH - 1)
+ break;
+ int result = read(fd, buffer + bytes_read, 1);
+ // A read error means that -1 is returned.
+ if (result < 1) return;
+ } while (buffer[bytes_read] != '\n');
+ buffer[bytes_read] = 0;
+ // There are 56 chars to ignore at this point in the line.
+ if (bytes_read < 56) continue;
+ // Ignore mappings that are not executable.
+ if (buffer[3] != 'x') continue;
+ buffer[bytes_read] = 0;
+ LOG(SharedLibraryEvent(buffer + 56, start, end));
+ }
+#endif
+}
+
+
// Captures the current call stack into `frames` (at most frames_size
// entries) using glibc's backtrace()/backtrace_symbols(). Returns the
// number of frames captured, or kStackWalkError if symbolization fails.
int OS::StackWalk(OS::StackFrame* frames, int frames_size) {
  void** addresses = NewArray<void*>(frames_size);

  int frames_count = backtrace(addresses, frames_size);

  char** symbols;
  symbols = backtrace_symbols(addresses, frames_count);
  if (symbols == NULL) {
    DeleteArray(addresses);
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(frames[i].text, kStackWalkMaxTextLen, "%s", symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  DeleteArray(addresses);
  // backtrace_symbols() returns a single malloc'ed block, so plain free()
  // (not DeleteArray) is the correct deallocation here.
  free(symbols);

  return frames_count;
}
+
+
// Constants used for mmap.
static const int kMmapFd = -1;      // Anonymous mappings use no file.
static const int kMmapFdOffset = 0;


// Reserves (but does not commit) a contiguous range of address space.
// PROT_NONE + MAP_NORESERVE keeps the range inaccessible and unbacked
// until Commit() is called. A failed reservation leaves address_ at
// MAP_FAILED (see IsReserved).
VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
  address_ = mmap(address_hint, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    // Only mark the handle unreserved if the unmap actually succeeded.
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}


// Commits pages inside the reservation as read/write/execute memory.
// MAP_FIXED overlays the existing PROT_NONE reservation in place.
bool VirtualMemory::Commit(void* address, size_t size) {
  if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
// Per-handle pthread state shared by ThreadHandle and Thread.
class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  // SELF binds the handle to the calling thread; INVALID marks it unbound.
  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }
  pthread_t thread_;  // Thread handle for pthread.
};
+
+
ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}


// Re-binds an existing handle without reallocating its platform data.
void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}


ThreadHandle::~ThreadHandle() {
  delete data_;
}


// True when the handle refers to the calling thread.
bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}


// True when the handle is bound to some thread (thread_ != kNoThread).
bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}


// Threads start out invalid; Start() binds them to a new pthread.
Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}


Thread::~Thread() {
}
+
+
// Trampoline passed to pthread_create: records the new thread's handle and
// invokes the Thread's virtual Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}


// NOTE(review): the pthread_create() result is unchecked; on failure the
// handle keeps whatever pthread_create left in it.
void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
  ASSERT(IsValid());
}


// Blocks until the thread's Run() has returned.
void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}
+
+
// Thread-local storage, thinly wrapping pthread TLS keys.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);  // `result` is only read by ASSERT in debug builds.
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


// Yields the remainder of this thread's timeslice to the scheduler.
void Thread::YieldCPU() {
  sched_yield();
}
+
+
+class LinuxMutex : public Mutex {
+ public:
+
+ LinuxMutex() {
+ pthread_mutexattr_t attrs;
+ int result = pthread_mutexattr_init(&attrs);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT(result == 0);
+ result = pthread_mutex_init(&mutex_, &attrs);
+ ASSERT(result == 0);
+ }
+
+ virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
+
+ virtual int Lock() {
+ int result = pthread_mutex_lock(&mutex_);
+ return result;
+ }
+
+ virtual int Unlock() {
+ int result = pthread_mutex_unlock(&mutex_);
+ return result;
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
+};
+
+
// Factory for the platform mutex implementation.
Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}


// Counting semaphore backed by a POSIX unnamed semaphore (pshared == 0, so
// it is shared between threads of this process only).
class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  // NOTE(review): sem_wait() can fail with EINTR; the result is ignored, so
  // a signal could cut a wait short — confirm this is acceptable here.
  virtual void Wait() { sem_wait(&sem_); }

  virtual void Signal() { sem_post(&sem_); }

 private:
  sem_t sem_;
};


// Factory for the platform semaphore implementation.
Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}
+
// TODO(1233584): Implement Linux support.
// Select (waiting on multiple semaphores) is not implemented on this
// platform; every member aborts via FATAL.
Select::Select(int len, Semaphore** sems) {
  FATAL("Not implemented");
}


Select::~Select() {
  FATAL("Not implemented");
}


int Select::WaitSingle() {
  FATAL("Not implemented");
  return 0;
}


void Select::WaitAll() {
  FATAL("Not implemented");
}
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
// The single active sampler, if any. POSIX allows only one because the
// SIGPROF handler and the profiling itimer are process-wide.
static ProfileSampler* active_sampler_ = NULL;

// SIGPROF handler: extracts pc/sp from the interrupted thread's context and
// forwards a TickSample to the active sampler.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;

  // Extracting the sample from the context is extremely machine dependent.
  TickSample sample;
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
#if defined (__arm__) || defined(__thumb__)
  sample.pc = mcontext.gregs[R15];  // ARM: R15 is the program counter.
  sample.sp = mcontext.gregs[R13];  // ARM: R13 is the stack pointer.
#else
  sample.pc = mcontext.gregs[REG_EIP];
  sample.sp = mcontext.gregs[REG_ESP];
#endif
  sample.state = Logger::state();

  // NOTE(review): the sample is extracted even when no sampler is active;
  // this NULL check could precede the work above.
  if (active_sampler_ == NULL) return;
  active_sampler_->Tick(&sample);
}
+
+
// Platform-specific sampler state: remembers the previously installed
// SIGPROF handler and profiling timer so Stop() can restore them.
class ProfileSampler::PlatformData : public Malloced {
 public:
  PlatformData() {
    signal_handler_installed_ = false;
  }

  bool signal_handler_installed_;        // True while our handler is active.
  struct sigaction old_signal_handler_;  // Handler to restore on Stop().
  struct itimerval old_timer_value_;     // Timer to restore on Stop().
};


// Creates an inactive sampler that, once started, ticks every `interval`
// milliseconds.
ProfileSampler::ProfileSampler(int interval) {
  data_ = new PlatformData();
  interval_ = interval;
  active_ = false;
}


ProfileSampler::~ProfileSampler() {
  delete data_;
}
+
+
void ProfileSampler::Start() {
  // There can only be one active sampler at the time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;  // Deliver siginfo_t/context to the handler.
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Set the itimer to generate a tick for each interval.
  itimerval itimer;
  itimer.it_interval.tv_sec = interval_ / 1000;
  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);

  // Set this sampler as the active sampler.
  active_sampler_ = this;
  active_ = true;
}


void ProfileSampler::Stop() {
  // Restore the previous signal handler and profiling timer saved by
  // Start(), in the reverse order of installation.
  if (data_->signal_handler_installed_) {
    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
  active_ = false;
}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for MacOS goes here
+
+#include <ucontext.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <mach/mach_init.h>
+
+#include <AvailabilityMacros.h>
+
+#ifdef MAC_OS_X_VERSION_10_5
+# include <execinfo.h> // backtrace, backtrace_symbols
+#endif
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
// 0 is never a valid thread id on MacOSX since a pthread_t is
// a pointer.
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
// ceil() with a correction for Mac OS X Leopard: inputs in (-1, 0) must
// round up to negative zero, which Leopard's ceil() gets wrong.
double ceiling(double x) {
  return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
}
+
+
// One-time platform initialization.
void OS::Setup() {
  // Seed the random number generator.
  srandom(static_cast<unsigned int>(TimeCurrentMillis()));
}
+
+
// User-mode CPU time of this process via getrusage(); returns 0 on success,
// -1 on failure (outputs untouched).
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
  *secs = usage.ru_utime.tv_sec;
  *usecs = usage.ru_utime.tv_usec;
  return 0;
}


// Wall-clock time in ms since the Unix epoch (JavaScript time); 0.0 on error.
double OS::TimeCurrentMillis() {
  struct timeval tv;
  if (gettimeofday(&tv, NULL) < 0) return 0.0;
  return (static_cast<double>(tv.tv_sec) * 1000) +
         (static_cast<double>(tv.tv_usec) / 1000);
}


// Wall-clock time in whole microseconds since the epoch; 0 on error.
int64_t OS::Ticks() {
  // Mac OS's gettimeofday has microsecond resolution.
  struct timeval tv;
  if (gettimeofday(&tv, NULL) < 0)
    return 0;
  return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
}


// Timezone abbreviation in effect at `time` (ms since epoch). Points into
// localtime()'s static buffer: do not free; not thread-safe.
char* OS::LocalTimezone(double time) {
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  return const_cast<char*>(t->tm_zone);
}


// One hour (in ms) if DST is in effect at `time`, else 0.
// NOTE(review): localtime() may return NULL for out-of-range inputs; `t` is
// dereferenced unchecked.
double OS::DaylightSavingsOffset(double time) {
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  return t->tm_isdst ? 3600 * msPerSecond : 0;
}


// Local standard-time offset from UTC in ms, probed on a DST-free date.
double OS::LocalTimeOffset() {
  // 1199174400 = Jan 1 2008 (UTC).
  // Random date where daylight savings time is not in effect.
  static const int kJan1st2008 = 1199174400;
  time_t tv = static_cast<time_t>(kJan1st2008);
  struct tm* t = localtime(&tv);
  ASSERT(t->tm_isdst <= 0);
  return static_cast<double>(t->tm_gmtoff * msPerSecond);
}
+
+
// printf-style output to stdout.
void OS::Print(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrint(format, args);
  va_end(args);
}


void OS::VPrint(const char* format, va_list args) {
  vprintf(format, args);
}


// printf-style output to stderr.
void OS::PrintError(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrintError(format, args);
  va_end(args);
}


void OS::VPrintError(const char* format, va_list args) {
  vfprintf(stderr, format, args);
}


// Bounded formatted print into `str`; returns vsnprintf's result.
int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
  va_list args;
  va_start(args, format);
  int result = VSNPrintF(str, size, format, args);
  va_end(args);
  return result;
}


int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
  return vsnprintf(str, size, format, args);  // forward to Mac OS X.
}
+
+
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Grows the conservative [lowest, highest) envelope to cover a new block.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


// On this port allocation is plain malloc (see OS::Allocate below), so only
// pointer alignment is guaranteed — unlike the page-aligned mmap ports.
size_t OS::AllocateAlignment() {
  return kPointerSize;
}
+
+
+void* OS::Allocate(const size_t requested, size_t* allocated) {
+ *allocated = requested;
+ void* mbase = malloc(requested);
+ UpdateAllocatedSpaceLimits(mbase, requested);
+ return mbase;
+}
+
+
// Frees memory obtained from OS::Allocate (malloc-backed on this port);
// `length` is unused but kept for interface symmetry with the mmap ports.
void OS::Free(void* buf, const size_t length) {
  free(buf);
  USE(length);
}
+
+
+void OS::Sleep(int miliseconds) {
+ usleep(1000 * miliseconds);
+}
+
+
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination
  abort();
}
+
+
// Memory-mapped file backed by fopen()/mmap(); owns both the FILE* and the
// mapping and releases them in the destructor.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;    // Backing file; kept open for the mapping's lifetime.
  void* memory_;  // Start of the shared, writable mapping.
  int size_;      // Size of the mapping in bytes.
};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ fwrite(initial, size, 1, file);
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  // Unmap before closing the backing file; MAP_SHARED writes are flushed
  // back to the file by the kernel.
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


void OS::LogSharedLibraryAddresses() {
  // TODO(1233579): Implement.
}


// Canonical quiet NaN used for JavaScript NaN values.
double OS::nan_value() { return NAN; }
+
+
// Captures the current call stack into `frames` (at most frames_size
// entries) using backtrace()/backtrace_symbols(). Returns the number of
// frames captured, or kStackWalkError if symbolization fails.
int OS::StackWalk(StackFrame* frames, int frames_size) {
#ifndef MAC_OS_X_VERSION_10_5
  // execinfo.h (backtrace) is only available from the 10.5 SDK onwards;
  // report zero captured frames on older SDKs (parameters unused).
  return 0;
#else
  void** addresses = NewArray<void*>(frames_size);
  int frames_count = backtrace(addresses, frames_size);

  char** symbols;
  symbols = backtrace_symbols(addresses, frames_count);
  if (symbols == NULL) {
    DeleteArray(addresses);
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(frames[i].text, kStackWalkMaxTextLen, "%s", symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  DeleteArray(addresses);
  // backtrace_symbols() returns a single malloc'ed block, so plain free()
  // (not DeleteArray) is the correct deallocation here.
  free(symbols);

  return frames_count;
#endif
}
+
+
// Constants used for mmap.
static const int kMmapFd = -1;      // Anonymous mappings use no file.
static const int kMmapFdOffset = 0;


// Reserves (but does not commit) a contiguous range of address space.
// PROT_NONE + MAP_NORESERVE keeps the range inaccessible and unbacked
// until Commit() is called (MAP_ANON is Darwin's spelling of
// MAP_ANONYMOUS). A failed reservation leaves address_ at MAP_FAILED.
VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
  address_ = mmap(address_hint, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    // Only mark the handle unreserved if the unmap actually succeeded.
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}


// Commits pages inside the reservation as read/write/execute memory.
// MAP_FIXED overlays the existing PROT_NONE reservation in place.
bool VirtualMemory::Commit(void* address, size_t size) {
  if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
// Per-handle pthread state shared by ThreadHandle and Thread.
class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  // SELF binds the handle to the calling thread; INVALID marks it unbound.
  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }
  pthread_t thread_;  // Thread handle for pthread.
};
+
+
+
ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}


// Re-binds an existing handle without reallocating its platform data.
void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}


ThreadHandle::~ThreadHandle() {
  delete data_;
}


// True when the handle refers to the calling thread.
bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}


// True when the handle is bound to some thread (thread_ != kNoThread).
bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}


// Threads start out invalid; Start() binds them to a new pthread.
Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}


Thread::~Thread() {
}
+
+
// Trampoline passed to pthread_create: records the new thread's handle and
// invokes the Thread's virtual Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}


// NOTE(review): the pthread_create() result is unchecked, and unlike the
// other ports there is no post-condition assertion here.
void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
}


// Blocks until the thread's Run() has returned.
void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}
+
+
// Thread-local storage, thinly wrapping pthread TLS keys.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);  // `result` is only read by ASSERT in debug builds.
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


// Yields the remainder of this thread's timeslice to the scheduler.
void Thread::YieldCPU() {
  sched_yield();
}
+
+
+class MacOSMutex : public Mutex {
+ public:
+
+ MacOSMutex() {
+ // For some reason the compiler doesn't allow you to write
+ // "this->mutex_ = PTHREAD_..." directly on mac.
+ pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(&m, &attr);
+ mutex_ = m;
+ }
+
+ ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
+
+ int Lock() { return pthread_mutex_lock(&mutex_); }
+
+ int Unlock() { return pthread_mutex_unlock(&mutex_); }
+
+ private:
+ pthread_mutex_t mutex_;
+};
+
+
// Factory for the platform mutex implementation.
Mutex* OS::CreateMutex() {
  return new MacOSMutex();
}


// Counting semaphore backed by a Mach semaphore with FIFO wakeup policy.
class MacOSSemaphore : public Semaphore {
 public:
  explicit MacOSSemaphore(int count) {
    semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
  }

  ~MacOSSemaphore() {
    semaphore_destroy(mach_task_self(), semaphore_);
  }

  void Wait() { semaphore_wait(semaphore_); }

  void Signal() { semaphore_signal(semaphore_); }

 private:
  semaphore_t semaphore_;
};


// Factory for the platform semaphore implementation.
Semaphore* OS::CreateSemaphore(int count) {
  return new MacOSSemaphore(count);
}
+
+
// TODO(1233584): Implement MacOS support.
// Select (waiting on multiple semaphores) is not implemented on this
// platform; every member aborts via FATAL.
Select::Select(int len, Semaphore** sems) {
  FATAL("Not implemented");
}


Select::~Select() {
  FATAL("Not implemented");
}


int Select::WaitSingle() {
  FATAL("Not implemented");
  return 0;
}


void Select::WaitAll() {
  FATAL("Not implemented");
}
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
// The single active sampler, if any. POSIX allows only one because the
// SIGPROF handler and the profiling itimer are process-wide.
static ProfileSampler* active_sampler_ = NULL;

// SIGPROF handler: extracts pc/sp from the interrupted thread's context and
// forwards a TickSample to the active sampler.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;

  // Extracting the sample from the context is extremely machine dependent.
  TickSample sample;
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
#if __DARWIN_UNIX03
  // UNIX03-conformant (10.5+) register field naming.
  sample.pc = mcontext->__ss.__eip;
  sample.sp = mcontext->__ss.__esp;
#else  // !__DARWIN_UNIX03
  sample.pc = mcontext->ss.eip;
  sample.sp = mcontext->ss.esp;
#endif  // __DARWIN_UNIX03
  sample.state = Logger::state();

  if (active_sampler_ == NULL) return;
  active_sampler_->Tick(&sample);
}
+
+
// Platform-specific sampler state: remembers the previously installed
// SIGPROF handler and profiling timer so Stop() can restore them.
class ProfileSampler::PlatformData : public Malloced {
 public:
  PlatformData() {
    signal_handler_installed_ = false;
  }

  bool signal_handler_installed_;        // True while our handler is active.
  struct sigaction old_signal_handler_;  // Handler to restore on Stop().
  struct itimerval old_timer_value_;     // Timer to restore on Stop().
};


// Creates an inactive sampler that, once started, ticks every `interval`
// milliseconds.
ProfileSampler::ProfileSampler(int interval) {
  data_ = new PlatformData();
  interval_ = interval;
  active_ = false;
}


ProfileSampler::~ProfileSampler() {
  delete data_;
}
+
+
void ProfileSampler::Start() {
  // There can only be one active sampler at the time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;  // Deliver siginfo_t/context to the handler.
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Set the itimer to generate a tick for each interval.
  itimerval itimer;
  itimer.it_interval.tv_sec = interval_ / 1000;
  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);

  // Set this sampler as the active sampler.
  active_sampler_ = this;
  active_ = true;
}


void ProfileSampler::Stop() {
  // Restore the previous signal handler and profiling timer saved by
  // Start(), in the reverse order of installation.
  if (data_->signal_handler_installed_) {
    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
  active_ = false;
}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for Win32.
+#ifndef WIN32_LEAN_AND_MEAN
+// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#ifndef NOKERNEL
+#define NOKERNEL
+#endif
+#ifndef NOUSER
+#define NOUSER
+#endif
+#ifndef NOSERVICE
+#define NOSERVICE
+#endif
+#ifndef NOSOUND
+#define NOSOUND
+#endif
+#ifndef NOMCX
+#define NOMCX
+#endif
+
+#include <windows.h>
+
+#include <mmsystem.h> // For timeGetTime().
+#include <dbghelp.h> // For SymLoadModule64 and al.
+#include <tlhelp32.h> // For Module32First and al.
+
// These additional WIN32 includes have to be right here as the #undef's below
// make it impossible to have them elsewhere.
+#include <winsock2.h>
+#include <process.h> // for _beginthreadex()
+#include <stdlib.h>
+
+#pragma comment(lib, "winmm.lib") // force linkage with winmm.
+
+#undef VOID
+#undef DELETE
+#undef IN
+#undef THIS
+#undef CONST
+#undef NAN
+#undef GetObject
+#undef CreateMutex
+#undef CreateSemaphore
+
+#include "v8.h"
+
+#include "platform.h"
+
+// Extra POSIX/ANSI routines for Win32. Please refer to The Open Group Base
+// Specification for specification of the correct semantics for these
+// functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+
+// Test for finite value - usually defined in math.h
namespace v8 {
namespace internal {

// Returns a nonzero value when x is neither an infinity nor a NaN,
// implemented via the MSVC-specific _finite().
int isfinite(double x) {
  return _finite(x);
}

}  // namespace internal
}  // namespace v8
+
// Test for a NaN (not a number) value - usually defined in math.h
int isnan(double x) {
  return _isnan(x);
}


// Test for infinity - usually defined in math.h
int isinf(double x) {
  return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
}


// Test if x is less than y and both nominal - usually defined in math.h
// Unlike a raw '<', this returns 0 when either argument is a NaN.
int isless(double x, double y) {
  return isnan(x) || isnan(y) ? 0 : x < y;
}


// Test if x is greater than y and both nominal - usually defined in math.h
// Unlike a raw '>', this returns 0 when either argument is a NaN.
int isgreater(double x, double y) {
  return isnan(x) || isnan(y) ? 0 : x > y;
}
+
+
// Classify floating point number - usually defined in math.h
// Maps the MS _FPCLASS_* flag bits onto the standard FP_* categories.
int fpclassify(double x) {
  // Use the MS-specific _fpclass() for classification.
  int flags = _fpclass(x);

  // Determine class. We cannot use a switch statement because
  // the _FPCLASS_ constants are defined as flags.
  if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
  if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
  if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
  if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;

  // All cases should be covered by the code above.
  ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
  return FP_NAN;
}
+
+
// Test sign - usually defined in math.h
// Returns nonzero when x is negative, including negative zero. Note the
// nonzero value is not necessarily 1: for -0 it is the _FPCLASS_NZ flag.
int signbit(double x) {
  // We need to take care of the special case of both positive
  // and negative versions of zero.
  if (x == 0)
    return _fpclass(x) & _FPCLASS_NZ;
  else
    return x < 0;
}
+
+
+// Generate a pseudo-random number in the range 0-2^31-1. Usually
+// defined in stdlib.h
+int random() {
+ return rand();
+}
+
+
// Case-insensitive string comparisons. Use stricmp() on Win32. Usually defined
// in strings.h. Returns a negative, zero or positive value with the same
// ordering convention as strcmp().
int strcasecmp(const char* s1, const char* s2) {
  return stricmp(s1, s2);
}
+
+
// Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually
// defined in strings.h. Compares at most n characters.
int strncasecmp(const char* s1, const char* s2, int n) {
  return strnicmp(s1, s2, n);
}
+
+namespace v8 { namespace internal {
+
// Round x upward to the nearest integral value; thin wrapper around the
// standard ceil() so the rest of V8 can use a portable name.
double ceiling(double x) {
  const double rounded_up = ceil(x);
  return rounded_up;
}
+
// ----------------------------------------------------------------------------
// The Time class represents time on win32. A timestamp is represented as
// a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
// timestamps are represented as a doubles in milliseconds since 00:00:00 UTC,
// January 1, 1970.

class Time {
 public:
  // Constructors.
  Time();
  explicit Time(double jstime);
  Time(int year, int mon, int day, int hour, int min, int sec);

  // Convert timestamp to JavaScript representation.
  double ToJSTime();

  // Set timestamp to current time.
  void SetToCurrentTime();

  // Returns the local timezone offset in milliseconds east of UTC. This is
  // the number of milliseconds you must add to UTC to get local time, i.e.
  // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
  // routine also takes into account whether daylight saving is in effect
  // at the time.
  int64_t LocalOffset();

  // Returns the daylight savings time offset for the time in milliseconds.
  int64_t DaylightSavingsOffset();

  // Returns a string identifying the current timezone for the
  // timestamp taking into account daylight saving.
  char* LocalTimezone();

 private:
  // Constants for time conversion.
  // Offset of the Unix epoch (1970) from the Windows FILETIME epoch (1601)
  // in 100ns units.
  static const int64_t kTimeEpoc = 116444736000000000;
  // Number of 100ns units per millisecond.
  static const int64_t kTimeScaler = 10000;
  static const int64_t kMsPerMinute = 60000;

  // Constants for timezone information.
  static const int kTzNameSize = 128;
  static const bool kShortTzNames = false;

  // Timezone information. We need to have static buffers for the
  // timezone names because we return pointers to these in
  // LocalTimezone().
  static bool tz_initialized_;
  static TIME_ZONE_INFORMATION tzinfo_;
  static char std_tz_name_[kTzNameSize];
  static char dst_tz_name_[kTzNameSize];

  // Initialize the timezone information (if not already done).
  static void TzSet();

  // Guess the name of the timezone from the bias.
  static const char* GuessTimezoneNameFromBias(int bias);

  // Return whether or not daylight savings time is in effect at this time.
  bool InDST();

  // Return the difference (in milliseconds) between this timestamp and
  // another timestamp.
  int64_t Diff(Time* other);

  // Accessor for FILETIME representation.
  FILETIME& ft() { return time_.ft_; }

  // Accessor for integer representation.
  int64_t& t() { return time_.t_; }

  // Although win32 uses 64-bit integers for representing timestamps,
  // these are packed into a FILETIME structure. The FILETIME structure
  // is just a struct representing a 64-bit integer. The TimeStamp union
  // allows access to both a FILETIME and an integer representation of
  // the timestamp.
  union TimeStamp {
    FILETIME ft_;
    int64_t t_;
  };

  TimeStamp time_;
};
+
// Static variables. The name buffers are filled lazily by TzSet() and
// returned by pointer from LocalTimezone(), hence static storage.
bool Time::tz_initialized_ = false;
TIME_ZONE_INFORMATION Time::tzinfo_;
char Time::std_tz_name_[kTzNameSize];
char Time::dst_tz_name_[kTzNameSize];
+
+
+// Initialize timestamp to start of epoc.
+Time::Time() {
+ t() = 0;
+}
+
+
+// Initialize timestamp from a JavaScript timestamp.
+Time::Time(double jstime) {
+ t() = static_cast<uint64_t>(jstime) * kTimeScaler + kTimeEpoc;
+}
+
+
// Initialize timestamp from date/time components.
Time::Time(int year, int mon, int day, int hour, int min, int sec) {
  SYSTEMTIME st;
  st.wYear = year;
  st.wMonth = mon;
  st.wDay = day;
  st.wHour = hour;
  st.wMinute = min;
  st.wSecond = sec;
  st.wMilliseconds = 0;
  // st.wDayOfWeek is deliberately left unset; per the Win32 API docs
  // SystemTimeToFileTime ignores it.
  SystemTimeToFileTime(&st, &ft());
}
+
+
+// Convert timestamp to JavaScript timestamp.
+double Time::ToJSTime() {
+ return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
+}
+
+
+// Guess the name of the timezone from the bias.
+// The guess is very biased towards the northern hemisphere.
+const char* Time::GuessTimezoneNameFromBias(int bias) {
+ static const int kHour = 60;
+ switch (-bias) {
+ case -9*kHour: return "Alaska";
+ case -8*kHour: return "Pacific";
+ case -7*kHour: return "Mountain";
+ case -6*kHour: return "Central";
+ case -5*kHour: return "Eastern";
+ case -4*kHour: return "Atlantic";
+ case 0*kHour: return "GMT";
+ case +1*kHour: return "Central Europe";
+ case +2*kHour: return "Eastern Europe";
+ case +3*kHour: return "Russia";
+ case +5*kHour + 30: return "India";
+ case +8*kHour: return "China";
+ case +9*kHour: return "Japan";
+ case +12*kHour: return "New Zealand";
+ default: return "Local";
+ }
+}
+
+
// Initialize timezone information. The timezone information is obtained from
// windows. If we cannot get the timezone information we fall back to CET.
// Please notice that this code is not thread-safe.
void Time::TzSet() {
  // Just return if timezone information has already been initialized.
  if (tz_initialized_) return;

  // Obtain timezone information from operating system.
  memset(&tzinfo_, 0, sizeof(tzinfo_));
  if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
    // If we cannot get timezone information we fall back to CET.
    // UTC+1 with DST; wDay == 5 in a SYSTEMTIME transition date means
    // "the last occurrence of that weekday in the month".
    tzinfo_.Bias = -60;
    tzinfo_.StandardDate.wMonth = 10;
    tzinfo_.StandardDate.wDay = 5;
    tzinfo_.StandardDate.wHour = 3;
    tzinfo_.StandardBias = 0;
    tzinfo_.DaylightDate.wMonth = 3;
    tzinfo_.DaylightDate.wDay = 5;
    tzinfo_.DaylightDate.wHour = 2;
    tzinfo_.DaylightBias = -60;
  }

  // Make standard and DST timezone names. The %S format converts the
  // wide-character names from TIME_ZONE_INFORMATION to narrow strings.
  _snprintf(std_tz_name_, kTzNameSize, "%S", tzinfo_.StandardName);
  std_tz_name_[kTzNameSize - 1] = '\0';
  _snprintf(dst_tz_name_, kTzNameSize, "%S", tzinfo_.DaylightName);
  dst_tz_name_[kTzNameSize - 1] = '\0';

  // If OS returned empty string or resource id (like "@tzres.dll,-211")
  // simply guess the name from the UTC bias of the timezone.
  // To properly resolve the resource identifier requires a library load,
  // which is not possible in a sandbox.
  if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
    _snprintf(std_tz_name_, kTzNameSize - 1, "%s Standard Time",
              GuessTimezoneNameFromBias(tzinfo_.Bias));
  }
  if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
    _snprintf(dst_tz_name_, kTzNameSize - 1, "%s Daylight Time",
              GuessTimezoneNameFromBias(tzinfo_.Bias));
  }

  // Timezone information initialized.
  tz_initialized_ = true;
}
+
+
+// Return the difference in milliseconds between this and another timestamp.
+int64_t Time::Diff(Time* other) {
+ return (t() - other->t()) / kTimeScaler;
+}
+
+
+// Set timestamp to current time.
+void Time::SetToCurrentTime() {
+ // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+ // Because we're fast, we like fast timers which have at least a
+ // 1ms resolution.
+ //
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for v8 wants fast
+ // timers, it can use timeBeginPeriod to increase the resolution.
+ //
+ // Using timeGetTime() has a drawback because it is a 32bit value
+ // and hence rolls-over every ~49days.
+ //
+ // To use the clock, we use GetSystemTimeAsFileTime as our base;
+ // and then use timeGetTime to extrapolate current time from the
+ // start time. To deal with rollovers, we resync the clock
+ // any time when more than kMaxClockElapsedTime has passed or
+ // whenever timeGetTime creates a rollover.
+
+ static bool initialized = false;
+ static TimeStamp init_time;
+ static DWORD init_ticks;
+ static const int kHundredNanosecondsPerSecond = 10000;
+ static const int kMaxClockElapsedTime =
+ 60*60*24*kHundredNanosecondsPerSecond; // 1 day
+
+ // If we are uninitialized, we need to resync the clock.
+ bool needs_resync = !initialized;
+
+ // Get the current time.
+ TimeStamp time_now;
+ GetSystemTimeAsFileTime(&time_now.ft_);
+ DWORD ticks_now = timeGetTime();
+
+ // Check if we need to resync due to clock rollover.
+ needs_resync |= ticks_now < init_ticks;
+
+ // Check if we need to resync due to elapsed time.
+ needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+ // Resync the clock if necessary.
+ if (needs_resync) {
+ GetSystemTimeAsFileTime(&init_time.ft_);
+ init_ticks = ticks_now = timeGetTime();
+ initialized = true;
+ }
+
+ // Finally, compute the actual time. Why is this so hard.
+ DWORD elapsed = ticks_now - init_ticks;
+ this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
+
+
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
int64_t Time::LocalOffset() {
  // Initialize timezone information, if needed.
  TzSet();

  // Convert timestamp to date/time components. These are now in UTC
  // format. NB: Please do not replace the following three calls with one
  // call to FileTimeToLocalFileTime(), because it does not handle
  // daylight saving correctly.
  SYSTEMTIME utc;
  FileTimeToSystemTime(&ft(), &utc);

  // Convert to local time, using timezone information.
  SYSTEMTIME local;
  SystemTimeToTzSpecificLocalTime(&tzinfo_, &utc, &local);

  // Convert local time back to a timestamp. This timestamp now
  // has a bias similar to the local timezone bias in effect
  // at the time of the original timestamp.
  Time localtime;
  SystemTimeToFileTime(&local, &localtime.ft());

  // The difference between the new local timestamp and the original
  // timestamp is the local timezone offset.
  return localtime.Diff(this);
}
+
+
// Return whether or not daylight savings time is in effect at this time.
bool Time::InDST() {
  // Initialize timezone information, if needed.
  TzSet();

  // Determine if DST is in effect at the specified time. A zero wMonth in
  // both transition dates means the timezone has no DST at all.
  bool in_dst = false;
  if (tzinfo_.StandardDate.wMonth != 0 || tzinfo_.DaylightDate.wMonth != 0) {
    // Get the local timezone offset for the timestamp in milliseconds.
    int64_t offset = LocalOffset();

    // Compute the offset for DST. The bias parameters in the timezone info
    // are specified in minutes. These must be converted to milliseconds.
    int64_t dstofs = -(tzinfo_.Bias + tzinfo_.DaylightBias) * kMsPerMinute;

    // If the local time offset equals the timezone bias plus the daylight
    // bias then DST is in effect.
    in_dst = offset == dstofs;
  }

  return in_dst;
}
+
+
+// Return the dalight savings time offset for this time.
+int64_t Time::DaylightSavingsOffset() {
+ return InDST() ? 60 * kMsPerMinute : 0;
+}
+
+
+// Returns a string identifying the current timezone for the
+// timestamp taking into account daylight saving.
+char* Time::LocalTimezone() {
+ // Return the standard or DST time zone name based on whether daylight
+ // saving is in effect at the given time.
+ return InDST() ? dst_tz_name_ : std_tz_name_;
+}
+
+
// One-time platform initialization.
void OS::Setup() {
  // Seed the random number generator with the current time so different
  // runs produce different sequences.
  srand(static_cast<unsigned int>(TimeCurrentMillis()));
}
+
+
// Returns the accumulated user time for thread. On success, writes the
// seconds/microseconds split into *secs and *usecs and returns 0;
// returns -1 on failure.
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  FILETIME dummy;
  uint64_t usertime;

  // Get the amount of time that the thread has executed in user mode.
  // The same |dummy| receives the creation/exit/kernel times we do not
  // care about; GetThreadTimes just overwrites it each time.
  if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
                      reinterpret_cast<FILETIME*>(&usertime))) return -1;

  // Adjust the resolution from 100ns FILETIME units to micro-seconds.
  usertime /= 10;

  // Convert to seconds and microseconds
  *secs = static_cast<uint32_t>(usertime / 1000000);
  *usecs = static_cast<uint32_t>(usertime % 1000000);
  return 0;
}
+
+
+// Returns current time as the number of milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+double OS::TimeCurrentMillis() {
+ Time t;
+ t.SetToCurrentTime();
+ return t.ToJSTime();
+}
+
+// Returns the tickcounter based on timeGetTime.
+int64_t OS::Ticks() {
+ return timeGetTime() * 1000; // Convert to microseconds.
+}
+
+
+// Returns a string identifying the current timezone taking into
+// account daylight saving.
+char* OS::LocalTimezone(double time) {
+ return Time(time).LocalTimezone();
+}
+
+
+// Returns the local time offset in milliseconds east of UTC.
+double OS::LocalTimeOffset() {
+ // 1199174400 = Jan 1 2008 (UTC).
+ // Random date where daylight savings time is not in effect.
+ int64_t offset = Time(1199174400).LocalOffset();
+ return static_cast<double>(offset);
+}
+
+
+// Returns the daylight savings offset in milliseconds for the given
+// time.
+double OS::DaylightSavingsOffset(double time) {
+ int64_t offset = Time(time).DaylightSavingsOffset();
+ return static_cast<double>(offset);
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 console output.
+//
+// If a Win32 application is linked as a console application it has a normal
+// standard output and standard error. In this case normal printf works fine
+// for output. However, if the application is linked as a GUI application,
+// the process doesn't have a console, and therefore (debugging) output is lost.
+// This is the case if we are embedded in a windows program (like a browser).
// In order to be able to get debug output in this case we use the debugging
// facility OutputDebugString. This output goes to the active debugger
// for the process (if any). Else the output can be monitored using DBMON.EXE.
+
// How debug/trace output is routed; determined lazily by HasConsole().
enum OutputMode {
  UNKNOWN,  // Output method has not yet been determined.
  CONSOLE,  // Output is written to stdout.
  ODS       // Output is written to debug facility.
};

static OutputMode output_mode = UNKNOWN;  // Current output mode.
+
+
+// Determine if the process has a console for output.
+static bool HasConsole() {
+ // Only check the first time. Eventual race conditions are not a problem,
+ // because all threads will eventually determine the same mode.
+ if (output_mode == UNKNOWN) {
+ // We cannot just check that the standard output is attached to a console
+ // because this would fail if output is redirected to a file. Therefore we
+ // say that a process does not have an output console if either the
+ // standard output handle is invalid or its file type is unknown.
+ if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
+ GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
+ output_mode = CONSOLE;
+ else
+ output_mode = ODS;
+ }
+ return output_mode == CONSOLE;
+}
+
+
+static void VPrintHelper(FILE* stream, const char* format, va_list args) {
+ if (HasConsole()) {
+ vfprintf(stream, format, args);
+ } else {
+ // It is important to use safe print here in order to avoid
+ // overflowing the buffer. We might truncate the output, but this
+ // does not crash.
+ static const int kBufferSize = 4096;
+ char buffer[kBufferSize];
+ OS::VSNPrintF(buffer, kBufferSize, format, args);
+ OutputDebugStringA(buffer);
+ }
+}
+
+
+// Print (debug) message to console.
+void OS::Print(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VPrint(format, args);
+ va_end(args);
+}
+
+
// va_list variant of Print: writes to stdout (or the debugger facility).
void OS::VPrint(const char* format, va_list args) {
  VPrintHelper(stdout, format, args);
}
+
+
+// Print error message to console.
+void OS::PrintError(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VPrintError(format, args);
+ va_end(args);
+}
+
+
// va_list variant of PrintError: writes to stderr (or the debugger facility).
void OS::VPrintError(const char* format, va_list args) {
  VPrintHelper(stderr, format, args);
}
+
+
+int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = VSNPrintF(str, size, format, args);
+ va_end(args);
+ return result;
+}
+
+
+int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
+ // Print formated output to string. The _vsnprintf function has been
+ // deprecated in MSVC. We need to define _CRT_NONSTDC_NO_DEPRECATE
+ // during compilation to use it anyway. Usually defined in stdio.h.
+ int n = _vsnprintf(str, size, format, args);
+ // Make sure to zero-terminate the string if the output was
+ // truncated or if there was an error.
+ if (n < 0 || static_cast<size_t>(n) >= size) str[size - 1] = '\0';
+ return n;
+}
+
+
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
// Widen the [lowest, highest) watermark range to cover the given
// newly-allocated region of |size| bytes starting at |address|.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
+
+
+bool OS::IsOutsideAllocatedSpace(void* pointer) {
+ if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
+ return true;
+ // Ask the Windows API
+ if (IsBadWritePtr(pointer, 1))
+ return true;
+ return false;
+}
+
+
+// Get the system's page size used by VirtualAlloc().
+static size_t GetPageSize() {
+ static size_t page_size = 0;
+ if (page_size == 0) {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ page_size = info.dwPageSize;
+ }
+ return page_size;
+}
+
+
+size_t OS::AllocateAlignment() {
+ // See also http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx
+ const size_t kWindowsVirtualAllocAlignment = 64*1024;
+ return kWindowsVirtualAllocAlignment;
+}
+
+
+void* OS::Allocate(const size_t requested, size_t* allocated) {
+ // VirtualAlloc rounds allocated size to page size automatically.
+ size_t msize = RoundUp(requested, GetPageSize());
+
+ // Windows XP SP2 allows Data Excution Prevention (DEP).
+ LPVOID mbase = VirtualAlloc(NULL, requested, MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE);
+ if (mbase == NULL) {
+ LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
+ return NULL;
+ }
+
+ ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
+
+ *allocated = msize;
+ UpdateAllocatedSpaceLimits(mbase, msize);
+ return mbase;
+}
+
+
// Release memory obtained from OS::Allocate. MEM_RELEASE requires a size
// of 0 and frees the whole reservation, so |length| is unused.
void OS::Free(void* buf, const size_t length) {
  // TODO(1240712): VirtualFree has a return value which is ignored here.
  VirtualFree(buf, 0, MEM_RELEASE);
  USE(length);
}
+
+
// Suspend the calling thread for (at least) the given number of
// milliseconds; the :: selects the global Win32 Sleep().
void OS::Sleep(int milliseconds) {
  ::Sleep(milliseconds);
}
+
+
// Terminate abnormally by raising a breakpoint exception.
void OS::Abort() {
  // Redirect to windows specific abort to ensure
  // collaboration with sandboxing.
  __debugbreak();
}
+
+
// Win32 implementation of OS::MemoryMappedFile. Owns the file handle, the
// mapping handle and the mapped view; all three are released by the
// destructor.
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
 public:
  Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory)
      : file_(file), file_mapping_(file_mapping), memory_(memory) { }
  virtual ~Win32MemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  HANDLE file_;          // Handle to the underlying physical file.
  HANDLE file_mapping_;  // Handle to the file mapping object.
  void* memory_;         // Base address of the mapped view (may be NULL).
};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ // Open a physical file
+ HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
+ if (file == NULL) return NULL;
+ // Create a file mapping for the physical file
+ HANDLE file_mapping = CreateFileMapping(file, NULL,
+ PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+ if (file_mapping == NULL) return NULL;
+ // Map a view of the file into memory
+ void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+ if (memory) memmove(memory, initial, size);
+ return new Win32MemoryMappedFile(file, file_mapping, memory);
+}
+
+
Win32MemoryMappedFile::~Win32MemoryMappedFile() {
  // Unmap the view first; it depends on the mapping handle closed below.
  if (memory_ != NULL)
    UnmapViewOfFile(memory_);
  CloseHandle(file_mapping_);
  CloseHandle(file_);
}
+
+
// The following code loads functions defined in DbgHelp.h and TlHelp32.h
// dynamically. This is to avoid being dependent on dbghelp.dll and
// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to
// kernel32.dll at some point so loading functions defined in TlHelp32.h
// dynamically might not be necessary any more - for some versions of Windows?).
+
+// Function pointers to functions dynamically loaded from dbghelp.dll.
+#define DBGHELP_FUNCTION_LIST(V) \
+ V(SymInitialize) \
+ V(SymGetOptions) \
+ V(SymSetOptions) \
+ V(SymGetSearchPath) \
+ V(SymLoadModule64) \
+ V(StackWalk64) \
+ V(SymGetSymFromAddr64) \
+ V(SymGetLineFromAddr64) \
+ V(SymFunctionTableAccess64) \
+ V(SymGetModuleBase64)
+
+// Function pointers to functions dynamically loaded from dbghelp.dll.
+#define TLHELP32_FUNCTION_LIST(V) \
+ V(CreateToolhelp32Snapshot) \
+ V(Module32FirstW) \
+ V(Module32NextW)
+
// Define the decoration to use for the type and variable name used for
// dynamically loaded DLL functions.
+#define DLL_FUNC_TYPE(name) _##name##_
+#define DLL_FUNC_VAR(name) _##name
+
+// Define the type for each dynamically loaded DLL function. The function
+// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
+// from the Windows include files are redefined here to have the function
+// definitions to be as close to the ones in the original .h files as possible.
+#ifndef IN
+#define IN
+#endif
+#ifndef VOID
+#define VOID void
+#endif
+
+// DbgHelp.h functions.
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
+ IN PSTR UserSearchPath,
+ IN BOOL fInvadeProcess);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
+ IN HANDLE hProcess,
+ OUT PSTR SearchPath,
+ IN DWORD SearchPathLength);
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
+ IN HANDLE hProcess,
+ IN HANDLE hFile,
+ IN PSTR ImageName,
+ IN PSTR ModuleName,
+ IN DWORD64 BaseOfDll,
+ IN DWORD SizeOfDll);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
+ DWORD MachineType,
+ HANDLE hProcess,
+ HANDLE hThread,
+ LPSTACKFRAME64 StackFrame,
+ PVOID ContextRecord,
+ PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+ PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+ PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+ PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
+ IN HANDLE hProcess,
+ IN DWORD64 qwAddr,
+ OUT PDWORD64 pdwDisplacement,
+ OUT PIMAGEHLP_SYMBOL64 Symbol);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
+ IN HANDLE hProcess,
+ IN DWORD64 qwAddr,
+ OUT PDWORD pdwDisplacement,
+ OUT PIMAGEHLP_LINE64 Line64);
+// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
+typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
+ HANDLE hProcess,
+ DWORD64 AddrBase); // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
+ HANDLE hProcess,
+ DWORD64 AddrBase); // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
+
+// TlHelp32.h functions.
+typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
+ DWORD dwFlags,
+ DWORD th32ProcessID);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
+ LPMODULEENTRY32W lpme);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
+ LPMODULEENTRY32W lpme);
+
+#undef IN
+#undef VOID
+
+// Declare a variable for each dynamically loaded DLL function.
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
+TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
+#undef DEF_DLL_FUNCTION
+
// Load the functions. This function has a lot of "ugly" macros in order to
// keep down code duplication.

static bool LoadDbgHelpAndTlHelp32() {
  static bool dbghelp_loaded = false;

  if (dbghelp_loaded) return true;

  HMODULE module;

  // Load functions from the dbghelp.dll module.
  module = LoadLibrary(TEXT("dbghelp.dll"));
  if (module == NULL) {
    return false;
  }

#define LOAD_DLL_FUNC(name) \
  DLL_FUNC_VAR(name) = \
      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));

DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)

#undef LOAD_DLL_FUNC

  // Load functions from the kernel32.dll module (the TlHelp32.h functions
  // used to be in tlhelp32.dll but are now moved to kernel32.dll).
  module = LoadLibrary(TEXT("kernel32.dll"));
  if (module == NULL) {
    return false;
  }

#define LOAD_DLL_FUNC(name) \
  DLL_FUNC_VAR(name) = \
      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));

TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)

#undef LOAD_DLL_FUNC

  // Check that all functions were loaded. The macro expands to a chain of
  // "(ptr != NULL) &&" terms, terminated by the literal "true" below.
  bool result =
#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&

DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)

#undef DLL_FUNC_LOADED
      true;

  dbghelp_loaded = result;
  return result;
  // NOTE: The modules are never unloaded and will stay around until the
  // application is closed.
}
+
+
// Load the symbols for generating stack traces. Initializes the dbghelp
// symbol engine for the process and registers every currently loaded
// module with it. Returns false on any failure; only runs once.
static bool LoadSymbols(HANDLE process_handle) {
  static bool symbols_loaded = false;

  if (symbols_loaded) return true;

  BOOL ok;

  // Initialize the symbol engine.
  ok = _SymInitialize(process_handle,  // hProcess
                      NULL,            // UserSearchPath
                      FALSE);          // fInvadeProcess
  if (!ok) return false;

  DWORD options = _SymGetOptions();
  options |= SYMOPT_LOAD_LINES;
  options |= SYMOPT_FAIL_CRITICAL_ERRORS;
  options = _SymSetOptions(options);

  char buf[OS::kStackWalkMaxNameLen] = {0};
  ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
  if (!ok) {
    int err = GetLastError();
    PrintF("%d\n", err);
    return false;
  }

  // Enumerate every module loaded in this process and hand each one to
  // the symbol engine.
  HANDLE snapshot = _CreateToolhelp32Snapshot(
      TH32CS_SNAPMODULE,       // dwFlags
      GetCurrentProcessId());  // th32ProcessId
  if (snapshot == INVALID_HANDLE_VALUE) return false;
  MODULEENTRY32W module_entry;
  module_entry.dwSize = sizeof(module_entry);  // Set the size of the structure.
  BOOL cont = _Module32FirstW(snapshot, &module_entry);
  while (cont) {
    DWORD64 base;
    // NOTE the SymLoadModule64 function has the peculiarity of accepting
    // both Unicode and ASCII strings even though the parameter is PSTR.
    base = _SymLoadModule64(
        process_handle,                                       // hProcess
        0,                                                    // hFile
        reinterpret_cast<PSTR>(module_entry.szExePath),       // ImageName
        reinterpret_cast<PSTR>(module_entry.szModule),        // ModuleName
        reinterpret_cast<DWORD64>(module_entry.modBaseAddr),  // BaseOfDll
        module_entry.modBaseSize);                            // SizeOfDll
    if (base == 0) {
      int err = GetLastError();
      if (err != ERROR_MOD_NOT_FOUND &&
          err != ERROR_INVALID_HANDLE) return false;
    }
    LOG(SharedLibraryEvent(
        module_entry.szExePath,
        reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
        reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
                                       module_entry.modBaseSize)));
    cont = _Module32NextW(snapshot, &module_entry);
  }
  CloseHandle(snapshot);

  symbols_loaded = true;
  return true;
}
+
+
void OS::LogSharedLibraryAddresses() {
  // SharedLibraryEvents are logged when loading symbol information.
  // Only the shared libraries loaded at the time of the call to
  // LogSharedLibraryAddresses are logged. DLLs loaded after
  // initialization are not accounted for.
  if (!LoadDbgHelpAndTlHelp32()) return;
  HANDLE process_handle = GetCurrentProcess();
  // Best effort: a failure here simply means fewer events are logged.
  LoadSymbols(process_handle);
}
+
+
// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll

// Switch off warning 4748 (/GS can not protect parameters and local variables
// from local buffer overrun because optimizations are disabled in function) as
// it is triggered by the use of inline assembler.
#pragma warning(push)
#pragma warning(disable : 4748)
// Fill |frames| with up to |frames_size| stack frames of the calling
// thread, resolving symbol and source-line names where available.
// Returns the number of frames captured, or kStackWalkError on failure.
int OS::StackWalk(OS::StackFrame* frames, int frames_size) {
  BOOL ok;

  // Load the required functions from DLL's.
  if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;

  // Get the process and thread handles.
  HANDLE process_handle = GetCurrentProcess();
  HANDLE thread_handle = GetCurrentThread();

  // Read the symbols.
  if (!LoadSymbols(process_handle)) return kStackWalkError;

  // Capture current context.
  CONTEXT context;
  memset(&context, 0, sizeof(context));
  context.ContextFlags = CONTEXT_CONTROL;
  // NOTE(review): the assignment below duplicates the one above; it is
  // harmless but redundant.
  context.ContextFlags = CONTEXT_CONTROL;
  // Capture Eip/Ebp/Esp via inline assembly: the call/pop pair loads the
  // address of label x (i.e. the current instruction pointer) into eax.
  __asm call x
  __asm x: pop eax
  __asm mov context.Eip, eax
  __asm mov context.Ebp, ebp
  __asm mov context.Esp, esp
  // NOTE: At some point, we could use RtlCaptureContext(&context) to
  // capture the context instead of inline assembler. However it is
  // only available on XP, Vista, Server 2003 and Server 2008 which
  // might not be sufficient.

  // Initialize the stack walking
  STACKFRAME64 stack_frame;
  memset(&stack_frame, 0, sizeof(stack_frame));
  stack_frame.AddrPC.Offset = context.Eip;
  stack_frame.AddrPC.Mode = AddrModeFlat;
  stack_frame.AddrFrame.Offset = context.Ebp;
  stack_frame.AddrFrame.Mode = AddrModeFlat;
  stack_frame.AddrStack.Offset = context.Esp;
  stack_frame.AddrStack.Mode = AddrModeFlat;
  int frames_count = 0;

  // Collect stack frames.
  while (frames_count < frames_size) {
    ok = _StackWalk64(
        IMAGE_FILE_MACHINE_I386,    // MachineType
        process_handle,             // hProcess
        thread_handle,              // hThread
        &stack_frame,               // StackFrame
        &context,                   // ContextRecord
        NULL,                       // ReadMemoryRoutine
        _SymFunctionTableAccess64,  // FunctionTableAccessRoutine
        _SymGetModuleBase64,        // GetModuleBaseRoutine
        NULL);                      // TranslateAddress
    if (!ok) break;

    // Store the address.
    ASSERT((stack_frame.AddrPC.Offset >> 32) == 0);  // 32-bit address.
    frames[frames_count].address =
        reinterpret_cast<void*>(stack_frame.AddrPC.Offset);

    // Try to locate a symbol for this frame. The IMAGEHLP_SYMBOL64 struct
    // has a trailing variable-length name, hence the over-allocation.
    DWORD64 symbol_displacement;
    IMAGEHLP_SYMBOL64* symbol = NULL;
    symbol = NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen);
    if (!symbol) return kStackWalkError;  // Out of memory.
    memset(symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
    symbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
    symbol->MaxNameLength = kStackWalkMaxNameLen;
    ok = _SymGetSymFromAddr64(process_handle,             // hProcess
                              stack_frame.AddrPC.Offset,  // Address
                              &symbol_displacement,       // Displacement
                              symbol);                    // Symbol
    if (ok) {
      // Try to locate more source information for the symbol.
      IMAGEHLP_LINE64 Line;
      memset(&Line, 0, sizeof(Line));
      Line.SizeOfStruct = sizeof(Line);
      DWORD line_displacement;
      ok = _SymGetLineFromAddr64(
          process_handle,             // hProcess
          stack_frame.AddrPC.Offset,  // dwAddr
          &line_displacement,         // pdwDisplacement
          &Line);                     // Line
      // Format a text representation of the frame based on the information
      // available.
      if (ok) {
        SNPrintF(frames[frames_count].text, kStackWalkMaxTextLen, "%s %s:%d:%d",
                 symbol->Name, Line.FileName, Line.LineNumber,
                 line_displacement);
      } else {
        SNPrintF(frames[frames_count].text, kStackWalkMaxTextLen, "%s",
                 symbol->Name);
      }
      // Make sure line termination is in place.
      frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
    } else {
      // No text representation of this frame
      frames[frames_count].text[0] = '\0';

      // Continue if we are just missing a module (for non C/C++ frames a
      // module will never be found).
      int err = GetLastError();
      if (err != ERROR_MOD_NOT_FOUND) {
        DeleteArray(symbol);
        break;
      }
    }
    DeleteArray(symbol);

    frames_count++;
  }

  // Return the number of frames filled in.
  return frames_count;
}

// Restore warnings to previous settings.
#pragma warning(pop)
+
+
// Return a canonical quiet NaN (sign bit set, all exponent bits set, top
// mantissa bit set).
double OS::nan_value() {
  static const __int64 nanval = 0xfff8000000000000;
  // NOTE(review): type-punning through reinterpret_cast violates strict
  // aliasing in ISO C++, but is well-defined on MSVC, the only compiler
  // this file targets.
  return *reinterpret_cast<const double*>(&nanval);
}
+
// A VirtualMemory object holds a reservation iff the constructor's
// VirtualAlloc(MEM_RESERVE) succeeded and the region was not released.
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}
+
+
+VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
+ address_ =
+ VirtualAlloc(address_hint, size, MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+ size_ = size;
+}
+
+
// Release the whole reservation (with MEM_RELEASE the size argument must
// be 0 and the entire range is freed).
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    // NOTE(review): address_ is cleared only when VirtualFree FAILS
    // (returns 0), which looks inverted; harmless in a destructor, but
    // confirm the intent before copying this pattern elsewhere.
    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
  }
}
+
+
+bool VirtualMemory::Commit(void* address, size_t size) {
+ if (NULL == VirtualAlloc(address, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE)) {
+ return false;
+ }
+
+ UpdateAllocatedSpaceLimits(address, size);
+ return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return VirtualFree(address, size, MEM_DECOMMIT) != NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 thread support.
+
// Definition of invalid thread handle and id. These sentinels mark a Thread
// that has not been started and a ThreadHandle of kind INVALID.
static const HANDLE kNoThread = INVALID_HANDLE_VALUE;  // No OS thread handle.
static const DWORD kNoThreadId = 0;  // Sentinel id used for invalid handles.
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(ThreadHandle::Kind kind) {
+ Initialize(kind);
+ }
+
+ void Initialize(ThreadHandle::Kind kind) {
+ switch (kind) {
+ case ThreadHandle::SELF: tid_ = GetCurrentThreadId(); break;
+ case ThreadHandle::INVALID: tid_ = kNoThreadId; break;
+ }
+ }
+ DWORD tid_; // Win32 thread identifier.
+};
+
+
+// Entry point for threads. The supplied argument is a pointer to the thread
+// object. The entry function dispatches to the run method in the thread
+// object. It is important that this function has __stdcall calling
+// convention.
+static unsigned int __stdcall ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the last parameter to _beginthreadex() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->thread_handle_data()->tid_ = GetCurrentThreadId();
+ thread->Run();
+ return 0;
+}
+
+
// Construct a thread handle of the given kind: SELF captures the calling
// thread's id, INVALID yields a handle that IsValid() rejects.
ThreadHandle::ThreadHandle(ThreadHandle::Kind kind) {
  data_ = new PlatformData(kind);
}
+
+
// Free the platform-specific data; no OS handle is owned, so nothing else
// needs to be released.
ThreadHandle::~ThreadHandle() {
  delete data_;
}
+
+
+// The thread is running if it has the same id as the current thread.
+bool ThreadHandle::IsSelf() const {
+ return GetCurrentThreadId() == data_->tid_;
+}
+
+
+// Test for invalid thread handle.
+bool ThreadHandle::IsValid() const {
+ return data_->tid_ != kNoThreadId;
+}
+
+
// Re-initialize the handle to the given kind (see PlatformData::Initialize).
void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}
+
+
// Win32-specific state of a Thread: the OS thread handle.
class Thread::PlatformData : public Malloced {
 public:
  explicit PlatformData(HANDLE thread) : thread_(thread) {}
  HANDLE thread_;  // Win32 thread handle; kNoThread until Start() is called.
};
+
+
// Initialize a Win32 thread object. The thread has an invalid thread
// handle until it is started, so the base ThreadHandle is constructed
// with kind INVALID.

Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
  data_ = new PlatformData(kNoThread);
}
+
+
// Close our own handle for the thread. Closing the handle does not
// terminate the thread; it keeps running if still active.
Thread::~Thread() {
  if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
  delete data_;
}
+
+
+// Create a new thread. It is important to use _beginthreadex() instead of
+// the Win32 function CreateThread(), because the CreateThread() does not
+// initialize thread specific structures in the C runtime library.
+void Thread::Start() {
+ data_->thread_ = reinterpret_cast<HANDLE>(
+ _beginthreadex(NULL,
+ 0,
+ ThreadEntry,
+ this,
+ 0,
+ reinterpret_cast<unsigned int*>(
+ &thread_handle_data()->tid_)));
+ ASSERT(IsValid());
+}
+
+
// Wait for thread to terminate. Blocks without timeout (INFINITE).
void Thread::Join() {
  WaitForSingleObject(data_->thread_, INFINITE);
}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ DWORD result = TlsAlloc();
+ ASSERT(result != TLS_OUT_OF_INDEXES);
+ return static_cast<LocalStorageKey>(result);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ BOOL result = TlsFree(static_cast<DWORD>(key));
+ USE(result);
+ ASSERT(result);
+}
+
+
// Fetch the calling thread's value stored under the given TLS key.
void* Thread::GetThreadLocal(LocalStorageKey key) {
  return TlsGetValue(static_cast<DWORD>(key));
}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
+ USE(result);
+ ASSERT(result);
+}
+
+
+
// Give up the remainder of the time slice; Sleep(0) lets another ready
// thread run.
void Thread::YieldCPU() {
  Sleep(0);
}
+
+
+// ----------------------------------------------------------------------------
+// Win32 mutex support.
+//
+// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
+// faster than Win32 Mutex objects because they are implemented using user mode
+// atomic instructions. Therefore we only do ring transitions if there is lock
+// contention.
+
+class Win32Mutex : public Mutex {
+ public:
+
+ Win32Mutex() { InitializeCriticalSection(&cs_); }
+
+ ~Win32Mutex() { DeleteCriticalSection(&cs_); }
+
+ int Lock() {
+ EnterCriticalSection(&cs_);
+ return 0;
+ }
+
+ int Unlock() {
+ LeaveCriticalSection(&cs_);
+ return 0;
+ }
+
+ private:
+ CRITICAL_SECTION cs_; // Critical section used for mutex
+};
+
+
// Factory for the Win32 mutex implementation; the caller owns the result
// and must delete it.
Mutex* OS::CreateMutex() {
  return new Win32Mutex();
}
+
+
+// ----------------------------------------------------------------------------
+// Win32 select support.
+//
+// On Win32 the function WaitForMultipleObjects can be used to wait
+// for all kind of synchronization handles. Currently the
+// implementation only suports the fixed Select::MaxSelectSize maximum
+// number of handles
+
+
// Win32-specific state of a Select: the semaphore handles passed to
// WaitForMultipleObjects. The constructor is defined below Win32Semaphore,
// whose private handle it reads.
class Select::PlatformData : public Malloced {
 public:
  PlatformData(int len, Semaphore** sems);
  int len_;  // Number of valid entries in objs_.
  HANDLE objs_[Select::MaxSelectSize];  // The underlying semaphore handles.
};
+
+
// Build a selector over the first len semaphores of sems.
Select::Select(int len, Semaphore** sems) {
  data_ = new PlatformData(len, sems);
}
+
+
// Free the platform data; the semaphores themselves are not owned.
Select::~Select() {
  delete data_;
}
+
+
+int Select::WaitSingle() {
+ return WaitForMultipleObjects(data_->len_,
+ data_->objs_,
+ FALSE,
+ INFINITE) - WAIT_OBJECT_0;
+}
+
+
// Block until all of the semaphores in the set are signaled.
void Select::WaitAll() {
  WaitForMultipleObjects(data_->len_, data_->objs_, TRUE, INFINITE);
}
+
+
+// ----------------------------------------------------------------------------
+// Win32 semaphore support.
+//
+// On Win32 semaphores are implemented using Win32 Semaphore objects. The
+// semaphores are anonymous. Also, the semaphores are initialized to have
+// no upper limit on count.
+
+
// Semaphore implementation backed by an anonymous Win32 semaphore object
// with an effectively unbounded (0x7fffffff) maximum count.
class Win32Semaphore : public Semaphore {
 public:
  explicit Win32Semaphore(int count) {
    sem = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
  }

  ~Win32Semaphore() {
    CloseHandle(sem);
  }

  // Block until the count is positive, then decrement it.
  void Wait() {
    WaitForSingleObject(sem, INFINITE);
  }

  // Increment the count, releasing one waiter if any.
  void Signal() {
    LONG dummy;
    ReleaseSemaphore(sem, 1, &dummy);
  }

 private:
  HANDLE sem;  // Underlying semaphore handle; read by Select::PlatformData.
  friend class Select::PlatformData;
};
+
+
// Factory for the Win32 semaphore implementation with the given initial
// count; the caller owns the result and must delete it.
Semaphore* OS::CreateSemaphore(int count) {
  return new Win32Semaphore(count);
}
+
+
+
+Select::PlatformData::PlatformData(int len, Semaphore** sems) : len_(len) {
+ ASSERT(len_ < Select::MaxSelectSize);
+ for (int i = 0; i < len_; i++) {
+ objs_[i] = reinterpret_cast<Win32Semaphore*>(sems[i])->sem;
+ }
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// ----------------------------------------------------------------------------
+// Win32 profiler support.
+//
+// On win32 we use a sampler thread with high priority to sample the program
+// counter for the profiled thread.
+
// Win32-specific state of a ProfileSampler: the sampler thread, the thread
// being profiled, and the sampling loop executed on the sampler thread.
class ProfileSampler::PlatformData : public Malloced {
 public:
  explicit PlatformData(ProfileSampler* sampler) {
    sampler_ = sampler;
    sampler_thread_ = INVALID_HANDLE_VALUE;
    profiled_thread_ = INVALID_HANDLE_VALUE;
  }

  ProfileSampler* sampler_;  // The sampler that owns this data.
  HANDLE sampler_thread_;    // Thread running Runner(); set in Start().
  HANDLE profiled_thread_;   // Thread being sampled; set in Start().

  // Sampler thread handler. Loops until the sampler is deactivated,
  // taking one register sample of the profiled thread per interval.
  void Runner() {
    // Context used for sampling the register state of the profiled thread.
    CONTEXT context;
    memset(&context, 0, sizeof(context));
    // Loop until the sampler is disengaged.
    while (sampler_->IsActive()) {
      // Pause the profiled thread and get its context; the thread must be
      // suspended while GetThreadContext reads its registers.
      SuspendThread(profiled_thread_);
      context.ContextFlags = CONTEXT_FULL;
      GetThreadContext(profiled_thread_, &context);
      ResumeThread(profiled_thread_);

      // Invoke tick handler with program counter and stack pointer.
      // NOTE(review): Eip/Esp are IA32 register names, so this sampling
      // code is x86-32 specific.
      TickSample sample;
      sample.pc = context.Eip;
      sample.sp = context.Esp;
      sample.state = Logger::state();
      sampler_->Tick(&sample);
      // Wait until next sampling.
      Sleep(sampler_->interval_);
    }
  }
};
+
+
+// Entry point for sampler thread.
+static unsigned int __stdcall ProfileSamplerEntry(void* arg) {
+ ProfileSampler::PlatformData* data =
+ reinterpret_cast<ProfileSampler::PlatformData*>(arg);
+ data->Runner();
+ return 0;
+}
+
+
// Initialize a profile sampler with the given sampling interval in
// milliseconds; sampling does not begin until Start() is called.
ProfileSampler::ProfileSampler(int interval) {
  data_ = new PlatformData(this);
  interval_ = interval;
  active_ = false;
}
+
+
// Free the platform data. NOTE(review): the thread handles opened in
// Start() are closed in Stop(), not here — confirm Stop() is always
// called before destruction.
ProfileSampler::~ProfileSampler() {
  delete data_;
}
+
+
// Start profiling: capture a handle to the calling thread and spawn the
// sampler thread that periodically samples it.
void ProfileSampler::Start() {
  // Get a handle to the calling thread. This is the thread that we are
  // going to profile. We need to duplicate the handle because we are
  // going to use it in the sampler thread. Using GetThreadHandle() will
  // not work in this case.
  BOOL ok = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                            GetCurrentProcess(), &data_->profiled_thread_,
                            THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME |
                            THREAD_QUERY_INFORMATION, FALSE, 0);
  if (!ok) return;

  // Start sampler thread. active_ must be set before the thread starts so
  // the sampling loop does not exit immediately.
  unsigned int tid;
  active_ = true;
  data_->sampler_thread_ = reinterpret_cast<HANDLE>(
      _beginthreadex(NULL, 0, ProfileSamplerEntry, data_, 0, &tid));
  // Set thread to high priority to increase sampling accuracy.
  SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
}
+
+
// Stop profiling and release the handles acquired in Start().
void ProfileSampler::Stop() {
  // Setting active to false triggers termination of the sampler
  // thread (its loop checks IsActive()).
  active_ = false;

  // Wait for sampler thread to terminate.
  WaitForSingleObject(data_->sampler_thread_, INFINITE);

  // Release the thread handles
  CloseHandle(data_->sampler_thread_);
  CloseHandle(data_->profiled_thread_);
}
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
// This module contains the platform-specific code. This makes the rest of the
// code less dependent on operating system, compilers and runtime libraries.
// This module specifically does not deal with differences between different
// processor architectures.
// The platform classes have the same definition for all platforms. The
// implementation for a particular platform is put in platform_<os>.cc.
// The build system then uses the implementation for the target platform.
//
// This design has been chosen because it is simple and fast. Alternatively,
// the platform dependent classes could have been implemented using abstract
// superclasses with virtual methods and having specializations for each
// platform. This design was rejected because it was more complicated and
// slower. It would require factory methods for selecting the right
// implementation and the overhead of virtual methods for performance
// sensitive operations like mutex locking/unlocking.
+
+#ifndef V8_PLATFORM_H_
+#define V8_PLATFORM_H_
+
+#ifdef WIN32
+
// Floating-point classification constants for fpclassify(), mirroring the
// C99 <math.h> macros that MSVC does not provide.
enum {
  FP_NAN,
  FP_INFINITE,
  FP_ZERO,
  FP_SUBNORMAL,
  FP_NORMAL
};

// MSVC has no INFINITY macro; HUGE_VAL is the closest available constant.
#define INFINITY HUGE_VAL

namespace v8 { namespace internal {
int isfinite(double x);
} }
// C99 floating-point helpers, declared at global scope for Win32.
int isnan(double x);
int isinf(double x);
int isless(double x, double y);
int isgreater(double x, double y);
int fpclassify(double x);
int signbit(double x);

// POSIX-style random number generator, missing from the MSVC runtime.
int random();

// POSIX-style case-insensitive string comparisons, missing from MSVC.
int strcasecmp(const char* s1, const char* s2);
int strncasecmp(const char* s1, const char* s2, int n);
+
+#else
+
+// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
+// warning flag and certain versions of GCC due to a bug:
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
+// For now, we use the more involved template-based version from <limits>, but
+// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
+// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
+#if defined(__GNUC__)
+#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
+#endif
+
+#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
+#include <limits>
+#undef INFINITY
+#define INFINITY std::numeric_limits<double>::infinity()
+#endif
+
+#endif // WIN32
+
+namespace v8 { namespace internal {
+
// Platform-specific ceiling function; implemented in platform_<os>.cc.
double ceiling(double x);
+
+// ----------------------------------------------------------------------------
+// OS
+//
+// This class has static methods for the different platform specific
+// functions. Add methods here to cope with differences between the
+// supported platforms.
+
class OS {
 public:
  // Initializes the platform OS support. Called once at VM startup.
  static void Setup();

  // Returns the accumulated user time for thread. This routine
  // can be used for profiling. The implementation should
  // strive for high-precision timer resolution, preferably
  // micro-second resolution.
  static int GetUserTime(uint32_t* secs,  uint32_t* usecs);

  // Get a tick counter normalized to one tick per microsecond.
  // Used for calculating time intervals.
  static int64_t Ticks();

  // Returns current time as the number of milliseconds since
  // 00:00:00 UTC, January 1, 1970.
  static double TimeCurrentMillis();

  // Returns a string identifying the current time zone. The
  // timestamp is used for determining if DST is in effect.
  static char* LocalTimezone(double time);

  // Returns the local time offset in milliseconds east of UTC without
  // taking daylight savings time into account.
  static double LocalTimeOffset();

  // Returns the daylight savings offset for the given time.
  static double DaylightSavingsOffset(double time);

  // Print output to console. This is mostly used for debugging output.
  // On platforms that have standard terminal output, the output
  // should go to stdout.
  static void Print(const char* format, ...);
  static void VPrint(const char* format, va_list args);

  // Print error output to console. This is mostly used for error message
  // output. On platforms that have standard terminal output, the output
  // should go to stderr.
  static void PrintError(const char* format, ...);
  static void VPrintError(const char* format, va_list args);

  // Allocate/Free memory used by JS heap.
  // Pages are readable/writeable/executable by default.
  // Returns the address of allocated memory, or NULL if failed.
  static void* Allocate(const size_t requested, size_t* allocated);
  static void Free(void* buf, const size_t length);
  // Get the Alignment guaranteed by Allocate().
  static size_t AllocateAlignment();

  // Returns an indication of whether a pointer is in a space that
  // has been allocated by Allocate(). This method may conservatively
  // always return false, but giving more accurate information may
  // improve the robustness of the stack dump code in the presence of
  // heap corruption.
  static bool IsOutsideAllocatedSpace(void* pointer);

  // Sleep for a number of milliseconds.
  static void Sleep(const int miliseconds);

  // Abort the current process.
  static void Abort();

  // Walk the stack. A stack walk resolves up to frames_size frames into
  // address/text pairs; see StackWalk() below.
  static const int kStackWalkError = -1;
  static const int kStackWalkMaxNameLen = 256;
  static const int kStackWalkMaxTextLen = 256;
  struct StackFrame {
    void* address;
    char text[kStackWalkMaxTextLen];
  };

  // Fills frames with the current stack; returns the number of frames
  // filled in, or kStackWalkError on failure.
  static int StackWalk(StackFrame* frames, int frames_size);

  // Factory method for creating platform dependent Mutex.
  // Please use delete to reclaim the storage for the returned Mutex.
  static Mutex* CreateMutex();

  // Factory method for creating platform dependent Semaphore.
  // Please use delete to reclaim the storage for the returned Semaphore.
  static Semaphore* CreateSemaphore(int count);

  // Abstraction of a memory mapped file with the given name and size.
  class MemoryMappedFile {
   public:
    static MemoryMappedFile* create(const char* name, int size, void* initial);
    virtual ~MemoryMappedFile() { }
    virtual void* memory() = 0;
  };

  // Safe formatting print.
  static int SNPrintF(char* str, size_t size, const char* format, ...);
  static int VSNPrintF(char* str,
                       size_t size,
                       const char* format,
                       va_list args);

  // Support for profiler. Can do nothing, in which case ticks
  // occurring in shared libraries will not be properly accounted
  // for.
  static void LogSharedLibraryAddresses();

  // Returns the double constant NAN
  static double nan_value();

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);

  // Number of milliseconds in one second, for time conversions.
  static const int msPerSecond = 1000;
};
+
+
+class VirtualMemory {
+ public:
+ // Reserves virtual memory with size.
+ VirtualMemory(size_t size, void* address_hint = 0);
+ ~VirtualMemory();
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved();
+
+ // Returns the start address of the reserved memory.
+ void* address() {
+ ASSERT(IsReserved());
+ return address_;
+ };
+
+ // Returns the size of the reserved memory.
+ size_t size() { return size_; }
+
+ // Commits real memory. Returns whether the operation succeeded.
+ bool Commit(void* address, size_t size);
+
+ // Uncommit real memory. Returns whether the operation succeeded.
+ bool Uncommit(void* address, size_t size);
+
+ private:
+ void* address_; // Start address of the virtual memory.
+ size_t size_; // Size of the virtual memory.
+};
+
+
+// ----------------------------------------------------------------------------
+// ThreadHandle
+//
+// A ThreadHandle represents a thread identifier for a thread. The ThreadHandle
+// does not own the underlying os handle. Thread handles can be used for
// referring to threads and testing equality.
+
class ThreadHandle {
 public:
  // SELF refers to the calling thread; INVALID refers to no thread.
  enum Kind { SELF, INVALID };
  explicit ThreadHandle(Kind kind);

  // Destructor.
  ~ThreadHandle();

  // Test whether the handle refers to the calling thread.
  bool IsSelf() const;

  // Test for valid thread handle.
  bool IsValid() const;

  // Get platform-specific data.
  class PlatformData;
  PlatformData* thread_handle_data() { return data_; }

  // Initialize the handle to kind
  void Initialize(Kind kind);

 private:
  PlatformData* data_;  // Captures platform dependent data.
};
+
+
+// ----------------------------------------------------------------------------
+// Thread
+//
+// Thread objects are used for creating and running threads. When the start()
+// method is called the new thread starts running the run() method in the new
+// thread. The Thread object should not be deallocated before the thread has
+// terminated.
+
class Thread: public ThreadHandle {
 public:
  // Opaque data type for thread-local storage keys. The empty enum gives
  // a distinct integral type so keys cannot be mixed with plain ints.
  enum LocalStorageKey {};

  // Create new thread.
  Thread();
  virtual ~Thread();

  // Start new thread by calling the Run() method in the new thread.
  void Start();

  // Wait until thread terminates.
  void Join();

  // Abstract method for run handler.
  virtual void Run() = 0;

  // Thread-local storage.
  static LocalStorageKey CreateThreadLocalKey();
  static void DeleteThreadLocalKey(LocalStorageKey key);
  static void* GetThreadLocal(LocalStorageKey key);
  static void SetThreadLocal(LocalStorageKey key, void* value);

  // A hint to the scheduler to let another thread run.
  static void YieldCPU();

 private:
  class PlatformData;
  PlatformData* data_;  // Captures platform dependent data.
  DISALLOW_EVIL_CONSTRUCTORS(Thread);
};
+
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// Mutexes are used for serializing access to non-reentrant sections of code.
+// The implementations of mutex should allow for nested/recursive locking.
+
class Mutex {
 public:
  virtual ~Mutex() {}

  // Locks the given mutex. If the mutex is currently unlocked, it becomes
  // locked and owned by the calling thread, and the call returns
  // immediately. If the mutex is already locked by another thread,
  // suspends the calling thread until the mutex is unlocked.
  virtual int Lock() = 0;

  // Unlocks the given mutex. The mutex is assumed to be locked and owned by
  // the calling thread on entrance.
  virtual int Unlock() = 0;
};
+
+
+// ----------------------------------------------------------------------------
+// Guard
+//
+// Stack-allocated Guards provide block-scoped locking and unlocking
+// of a mutex.
+
+class Guard {
+ public:
+ explicit Guard(Mutex* mux): mux_(mux) { mux_->Lock(); }
+ ~Guard() { mux_->Unlock(); }
+
+ private:
+ Mutex* mux_;
+ DISALLOW_EVIL_CONSTRUCTORS(Guard);
+};
+
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
// count reaches zero, threads waiting for the semaphore block until the
// count becomes non-zero.
+
class Semaphore {
 public:
  virtual ~Semaphore() {}

  // Suspends the calling thread until the counter is non zero
  // and then decrements the semaphore counter.
  virtual void Wait() = 0;

  // Increments the semaphore counter, waking a waiting thread if any.
  virtual void Signal() = 0;
};
+
+
+// ----------------------------------------------------------------------------
+// Select
+//
+// A selector makes it possible to wait for several synchronization objects
+
// Waits on a fixed set of up to MaxSelectSize semaphores.
class Select {
 public:
  // len gives the number of semaphores in sems to wait on.
  Select(int len, Semaphore** sems);
  ~Select();
  // Waits until any one semaphore is signaled; returns its index.
  int WaitSingle();
  // Waits until all semaphores are signaled.
  void WaitAll();
  static const int MaxSelectSize = 32;

  class PlatformData;
 private:
  PlatformData* data_;  // Platform specific data.
  DISALLOW_EVIL_CONSTRUCTORS(Select);
};
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// ----------------------------------------------------------------------------
+// ProfileSampler
+//
+// A profile sampler periodically samples the program counter and stack pointer
+// for the thread that created it.
+
// TickSample captures the information collected
// for each profiling sample.
// NOTE(review): pc and sp are 32-bit here, matching the IA32-only
// sampling code in platform-win32.cc; revisit for 64-bit targets.
struct TickSample {
  unsigned int pc;  // Instruction pointer.
  unsigned int sp;  // Stack pointer.
  StateTag state;   // The state of the VM.
};
+
class ProfileSampler {
 public:
  // Initialize sampler with the sampling interval in milliseconds.
  explicit ProfileSampler(int interval);
  virtual ~ProfileSampler();

  // This method is called for each sampling period with the current program
  // counter. NOTE(review): on Win32 it is invoked from the sampler thread,
  // not the profiled thread.
  virtual void Tick(TickSample* sample) = 0;

  // Start and stop sampler.
  void Start();
  void Stop();

  class PlatformData;
 protected:
  // Whether the sampler is currently engaged; the platform sampling loop
  // polls this to decide when to terminate.
  inline bool IsActive() { return active_; }

 private:
  int interval_;  // Sampling interval in milliseconds.
  bool active_;   // True between Start() and Stop().
  PlatformData* data_;  // Platform specific data.
  DISALLOW_IMPLICIT_CONSTRUCTORS(ProfileSampler);
};
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "prettyprinter.h"
+#include "scopes.h"
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+
+PrettyPrinter::PrettyPrinter() {
+ output_ = NULL;
+ size_ = 0;
+ pos_ = 0;
+}
+
+
// Release the output buffer.
PrettyPrinter::~PrettyPrinter() {
  DeleteArray(output_);
}
+
+
// Prints a statement block; initializer blocks print without braces.
void PrettyPrinter::VisitBlock(Block* node) {
  if (!node->is_initializer_block()) Print("{ ");
  PrintStatements(node->statements());
  if (node->statements()->length() > 0) Print(" ");
  if (!node->is_initializer_block()) Print("}");
}
+
+
// Prints "var <name>;" or, for function declarations, "var <name> = <fn>;".
void PrettyPrinter::VisitDeclaration(Declaration* node) {
  Print("var ");
  PrintLiteral(node->proxy()->name(), false);
  if (node->fun() != NULL) {
    Print(" = ");
    PrintFunctionLiteral(node->fun());
  }
  Print(";");
}
+
+
// Prints the expression followed by ';'.
void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
  Visit(node->expression());
  Print(";");
}
+
+
// An empty statement prints as a bare ';'.
void PrettyPrinter::VisitEmptyStatement(EmptyStatement* node) {
  Print(";");
}
+
+
// Prints "if (<cond>) <then>" with an optional " else <else>" part.
void PrettyPrinter::VisitIfStatement(IfStatement* node) {
  Print("if (");
  Visit(node->condition());
  Print(") ");
  Visit(node->then_statement());
  if (node->HasElseStatement()) {
    Print(" else ");
    Visit(node->else_statement());
  }
}
+
+
+void PrettyPrinter::VisitContinueStatement(ContinueStatement* node) {
+ Print("continue");
+ ZoneStringList* labels = node->target()->labels();
+ if (labels != NULL) {
+ Print(" ");
+ ASSERT(labels->length() > 0); // guaranteed to have at least one entry
+ PrintLiteral(labels->at(0), false); // any label from the list is fine
+ }
+ Print(";");
+}
+
+
+void PrettyPrinter::VisitBreakStatement(BreakStatement* node) {
+ Print("break");
+ ZoneStringList* labels = node->target()->labels();
+ if (labels != NULL) {
+ Print(" ");
+ ASSERT(labels->length() > 0); // guaranteed to have at least one entry
+ PrintLiteral(labels->at(0), false); // any label from the list is fine
+ }
+ Print(";");
+}
+
+
// Prints "return <expr>;".
void PrettyPrinter::VisitReturnStatement(ReturnStatement* node) {
  Print("return ");
  Visit(node->expression());
  Print(";");
}
+
+
// Prints the synthetic with-scope entry marker.
void PrettyPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
  Print("<enter with> (");
  Visit(node->expression());
  Print(") ");
}
+
+
// Prints the synthetic with-scope exit marker.
void PrettyPrinter::VisitWithExitStatement(WithExitStatement* node) {
  Print("<exit with>");
}
+
+
+void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
+ PrintLabels(node->labels());
+ Print("switch (");
+ Visit(node->tag());
+ Print(") { ");
+ ZoneList<CaseClause*>* cases = node->cases();
+ for (int i = 0; i < cases->length(); i++)
+ PrintCaseClause(cases->at(i));
+ Print("}");
+}
+
+
// Prints do/while, for, and while loops; labels print first. Only for
// loops may carry init/next parts (asserted for the other kinds).
void PrettyPrinter::VisitLoopStatement(LoopStatement* node) {
  PrintLabels(node->labels());
  switch (node->type()) {
    case LoopStatement::DO_LOOP:
      ASSERT(node->init() == NULL);
      ASSERT(node->next() == NULL);
      Print("do ");
      Visit(node->body());
      Print(" while (");
      Visit(node->cond());
      Print(");");
      break;

    case LoopStatement::FOR_LOOP:
      Print("for (");
      if (node->init() != NULL) {
        // The init statement prints its own trailing ';'.
        Visit(node->init());
        Print(" ");
      } else {
        Print("; ");
      }
      if (node->cond() != NULL)
        Visit(node->cond());
      Print("; ");
      if (node->next() != NULL)
        Visit(node->next());  // prints extra ';', unfortunately
                              // to fix: should use Expression for next
      Print(") ");
      Visit(node->body());
      break;

    case LoopStatement::WHILE_LOOP:
      ASSERT(node->init() == NULL);
      ASSERT(node->next() == NULL);
      Print("while (");
      Visit(node->cond());
      Print(") ");
      Visit(node->body());
      break;
  }
}
+
+
// Prints "for (<each> in <enumerable>) <body>" with any labels in front.
void PrettyPrinter::VisitForInStatement(ForInStatement* node) {
  PrintLabels(node->labels());
  Print("for (");
  Visit(node->each());
  Print(" in ");
  Visit(node->enumerable());
  Print(") ");
  Visit(node->body());
}
+
+
// Prints "try <block> catch (<var>) <block>".
void PrettyPrinter::VisitTryCatch(TryCatch* node) {
  Print("try ");
  Visit(node->try_block());
  Print(" catch (");
  Visit(node->catch_var());
  Print(") ");
  Visit(node->catch_block());
}
+
+
// Prints "try <block> finally <block>".
void PrettyPrinter::VisitTryFinally(TryFinally* node) {
  Print("try ");
  Visit(node->try_block());
  Print(" finally ");
  Visit(node->finally_block());
}
+
+
// Prints the debugger statement keyword.
void PrettyPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
  Print("debugger ");
}
+
+
// Function literals print parenthesized, via PrintFunctionLiteral.
void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
  Print("(");
  PrintFunctionLiteral(node);
  Print(")");
}
+
+
// Prints the boilerplate value of a boilerplate function literal, quoted.
void PrettyPrinter::VisitFunctionBoilerplateLiteral(
    FunctionBoilerplateLiteral* node) {
  Print("(");
  PrintLiteral(node->boilerplate(), true);
  Print(")");
}
+
+
// Prints "<cond> ? <then> : <else>".
void PrettyPrinter::VisitConditional(Conditional* node) {
  Visit(node->condition());
  Print(" ? ");
  Visit(node->then_expression());
  Print(" : ");
  Visit(node->else_expression());
}
+
+
// Prints the literal's value, quoting string literals.
void PrettyPrinter::VisitLiteral(Literal* node) {
  PrintLiteral(node->handle(), true);
}
+
+
// Prints a regexp literal as " RegExp(<pattern>,<flags>) ".
void PrettyPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
  Print(" RegExp(");
  PrintLiteral(node->pattern(), false);
  Print(",");
  PrintLiteral(node->flags(), false);
  Print(") ");
}
+
+
+void PrettyPrinter::VisitObjectLiteral(ObjectLiteral* node) {
+ Print("{ ");
+ Visit(node->result());
+ Print(" <- ");
+ for (int i = 0; i < node->properties()->length(); i++) {
+ if (i != 0) Print(",");
+ ObjectLiteral::Property* property = node->properties()->at(i);
+ Print(" ");
+ Visit(property->key());
+ Print(": ");
+ Visit(property->value());
+ }
+ Print(" }");
+}
+
+
+void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
+ Print("[ ");
+ Visit(node->result());
+ Print(" <- ");
+ for (int i = 0; i < node->values()->length(); i++) {
+ if (i != 0) Print(",");
+ Visit(node->values()->at(i));
+ }
+ Print(" ]");
+}
+
+
// Prints a variable slot by its storage kind: parameter, stack frame
// local, indexed context slot, or name-based context lookup.
void PrettyPrinter::VisitSlot(Slot* node) {
  switch (node->type()) {
    case Slot::PARAMETER:
      Print("parameter[%d]", node->index());
      break;
    case Slot::LOCAL:
      Print("frame[%d]", node->index());
      break;
    case Slot::CONTEXT:
      Print(".context[%d]", node->index());
      break;
    case Slot::LOOKUP:
      Print(".context[");
      PrintLiteral(node->var()->name(), false);
      Print("]");
      break;
    default:
      UNREACHABLE();
  }
}
+
+
// Prints the variable's name, unquoted.
void PrettyPrinter::VisitVariableProxy(VariableProxy* node) {
  PrintLiteral(node->name(), false);
}
+
+
// Prints "<target> <op> <value>" using the token's textual form.
void PrettyPrinter::VisitAssignment(Assignment* node) {
  Visit(node->target());
  Print(" %s ", Token::String(node->op()));
  Visit(node->value());
}
+
+
+void PrettyPrinter::VisitThrow(Throw* node) {
+ Print("throw ");
+ Visit(node->exception());
+}
+
+
+void PrettyPrinter::VisitProperty(Property* node) {
+ Expression* key = node->key();
+ Literal* literal = key->AsLiteral();
+ if (literal != NULL && literal->handle()->IsSymbol()) {
+ Print("(");
+ Visit(node->obj());
+ Print(").");
+ PrintLiteral(literal->handle(), false);
+ } else {
+ Visit(node->obj());
+ Print("[");
+ Visit(key);
+ Print("]");
+ }
+}
+
+
+void PrettyPrinter::VisitCall(Call* node) {
+ Visit(node->expression());
+ PrintArguments(node->arguments());
+}
+
+
+void PrettyPrinter::VisitCallNew(CallNew* node) {
+ Print("new (");
+ Visit(node->expression());
+ Print(")");
+ PrintArguments(node->arguments());
+}
+
+
+void PrettyPrinter::VisitCallRuntime(CallRuntime* node) {
+ Print("%%");
+ PrintLiteral(node->name(), false);
+ PrintArguments(node->arguments());
+}
+
+
+void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
+ Print("(%s", Token::String(node->op()));
+ Visit(node->expression());
+ Print(")");
+}
+
+
+void PrettyPrinter::VisitCountOperation(CountOperation* node) {
+ Print("(");
+ if (node->is_prefix()) Print("%s", Token::String(node->op()));
+ Visit(node->expression());
+ if (node->is_postfix()) Print("%s", Token::String(node->op()));
+ Print(")");
+}
+
+
+void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
+ Print("(");
+ Visit(node->left());
+ Print("%s", Token::String(node->op()));
+ Visit(node->right());
+ Print(")");
+}
+
+
+void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
+ Print("(");
+ Visit(node->left());
+ Print("%s", Token::String(node->op()));
+ Visit(node->right());
+ Print(")");
+}
+
+
+void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
+ Print("<this-function>");
+}
+
+
+// Prints a single node; the returned string is owned by this printer and
++// is valid until the next Print*/Init call or destruction.
+const char* PrettyPrinter::Print(Node* node) {
+  Init();
+  Visit(node);
+  return output_;
+}
+
+
+// Prints the expression of the program body's first statement.
+// Assumes that statement is an ExpressionStatement.
+const char* PrettyPrinter::PrintExpression(FunctionLiteral* program) {
+  Init();
+  ExpressionStatement* statement =
+      program->body()->at(0)->AsExpressionStatement();
+  Visit(statement->expression());
+  return output_;
+}
+
+
+// Prints all top-level statements of the program, newline-terminated.
+const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
+  Init();
+  PrintStatements(program->body());
+  Print("\n");
+  return output_;
+}
+
+
+// Convenience: prints a node to stdout using a temporary printer.
+void PrettyPrinter::PrintOut(Node* node) {
+  PrettyPrinter printer;
+  PrintF("%s", printer.Print(node));
+}
+
+
+// Lazily allocates the output buffer on first use, then resets the
+// buffer to the empty string so each Print* call starts fresh.
+void PrettyPrinter::Init() {
+  if (size_ == 0) {
+    ASSERT(output_ == NULL);
+    const int initial_size = 256;
+    output_ = NewArray<char>(initial_size);
+    size_ = initial_size;
+  }
+  output_[0] = '\0';
+  pos_ = 0;
+}
+
+
+// printf-style append into the growable output buffer.  Retries after
+// enlarging the buffer whenever the formatted text does not fit.
+void PrettyPrinter::Print(const char* format, ...) {
+  for (;;) {
+    // va_start/va_end must bracket each vsnprintf attempt, since a
+    // va_list cannot be reused after being consumed.
+    va_list arguments;
+    va_start(arguments, format);
+    int available = size_ - pos_;
+    int n = OS::VSNPrintF(output_ + pos_, available, format, arguments);
+    va_end(arguments);
+
+    // Return value from VSNPrintF is not portable. On linux the return value
+    // is the number of characters which would have been written to the string
+    // if enough space had been available. On windows it returns -1 if the
+    // result is truncated but does not indicate the required buffer size.
+#ifdef WIN32
+    // Treat any failure as "did not fit" so the grow-and-retry path runs.
+    if (n <= 0) n = available;
+#else
+    CHECK_GE(n, 0);  // no errors
+#endif
+
+    if (n < available) {
+      // there was enough space - we are done
+      pos_ += n;
+      return;
+    } else {
+      // there was not enough space - allocate more and try again.
+      // Grow by ~1.5x plus slack, but at least enough to hold n chars.
+      const int slack = 32;
+      int new_size = size_ + (size_ >> 1) + slack;
+      if (new_size < size_ + n)
+        new_size = size_ + n + slack;
+      char* new_output = NewArray<char>(new_size);
+      // Only the committed pos_ bytes are valid; truncated output from the
+      // failed attempt is intentionally discarded and rewritten on retry.
+      memcpy(new_output, output_, pos_);
+      DeleteArray(output_);
+      output_ = new_output;
+      size_ = new_size;
+    }
+  }
+}
+
+
+// Prints statements separated by a single space.
+void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    if (i != 0) Print(" ");
+    Visit(statements->at(i));
+  }
+}
+
+
+// Prints each label followed by ": "; tolerates a NULL label list.
+void PrettyPrinter::PrintLabels(ZoneStringList* labels) {
+  if (labels != NULL) {
+    for (int i = 0; i < labels->length(); i++) {
+      PrintLiteral(labels->at(i), false);
+      Print(": ");
+    }
+  }
+}
+
+
+// Prints a parenthesized, comma-separated argument list.
+void PrettyPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
+  Print("(");
+  for (int i = 0; i < arguments->length(); i++) {
+    if (i != 0) Print(", ");
+    Visit(arguments->at(i));
+  }
+  Print(")");
+}
+
+
+// Prints a literal value.  Strings are optionally quoted; oddballs print
+// their JavaScript spelling; JS objects print a short type tag.
+void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
+  Object* object = *value;
+  if (object->IsString()) {
+    String* string = String::cast(object);
+    if (quote) Print("\"");
+    for (int i = 0; i < string->length(); i++) {
+      Print("%c", string->Get(i));
+    }
+    if (quote) Print("\"");
+  } else if (object == Heap::null_value()) {
+    Print("null");
+  } else if (object == Heap::true_value()) {
+    Print("true");
+  } else if (object == Heap::false_value()) {
+    Print("false");
+  } else if (object == Heap::undefined_value()) {
+    Print("undefined");
+  } else if (object->IsNumber()) {
+    Print("%g", object->Number());
+  } else if (object->IsJSObject()) {
+    // JS objects (including functions and arrays) print a type tag only.
+    if (object->IsJSFunction()) {
+      Print("JS-Function");
+    } else if (object->IsJSArray()) {
+      // NOTE(review): length() is passed to %u directly; confirm it yields
+      // the numeric length and not an encoded heap value.
+      Print("JS-array[%u]", JSArray::cast(object)->length());
+    } else {
+      // The outer IsJSObject() check already holds here, so a plain else
+      // suffices; the previous "?UNKNOWN?" branch was unreachable dead code.
+      Print("JS-Object");
+    }
+  } else if (object->IsFixedArray()) {
+    Print("FixedArray");
+  } else {
+    Print("<unknown literal %p>", object);
+  }
+}
+
+
+// Prints the parameter list of a scope as "(a, b, c)".
+void PrettyPrinter::PrintParameters(Scope* scope) {
+  Print("(");
+  for (int i = 0; i < scope->num_parameters(); i++) {
+    if (i > 0) Print(", ");
+    PrintLiteral(scope->parameter(i)->name(), false);
+  }
+  Print(")");
+}
+
+
+// Prints declarations separated by a single space.
+void PrettyPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
+  for (int i = 0; i < declarations->length(); i++) {
+    if (i > 0) Print(" ");
+    Visit(declarations->at(i));
+  }
+}
+
+
+// Prints a full function literal: header, declarations, and body.
+void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
+  Print("function ");
+  PrintLiteral(function->name(), false);
+  PrintParameters(function->scope());
+  Print(" { ");
+  PrintDeclarations(function->scope()->declarations());
+  PrintStatements(function->body());
+  Print(" }");
+}
+
+
+// Prints one switch clause ("case <label>:" or "default:") and its body.
+void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
+  if (clause->is_default()) {
+    Print("default");
+  } else {
+    Print("case ");
+    Visit(clause->label());
+  }
+  Print(": ");
+  PrintStatements(clause->statements());
+  // Trailing separator only when the clause actually printed statements.
+  if (clause->statements()->length() > 0)
+    Print(" ");
+}
+
+
+//-----------------------------------------------------------------------------
+
+// RAII helper: increments the shared AstPrinter's indentation on
+// construction and decrements it on destruction.  The printer is held in
+// a static member, so only one AstPrinter can be active at a time.
+class IndentedScope BASE_EMBEDDED {
+ public:
+  IndentedScope() {
+    ast_printer_->inc_indent();
+  }
+
+  // Prints the given heading on its own indented line, then indents.
+  explicit IndentedScope(const char* txt) {
+    ast_printer_->PrintIndented(txt);
+    ast_printer_->Print("\n");
+    ast_printer_->inc_indent();
+  }
+
+  virtual ~IndentedScope() {
+    ast_printer_->dec_indent();
+  }
+
+  // Installed/cleared by the AstPrinter constructor/destructor.
+  static void SetAstPrinter(AstPrinter* a) { ast_printer_ = a; }
+
+ private:
+  static AstPrinter* ast_printer_;
+};
+
+
+AstPrinter* IndentedScope::ast_printer_ = NULL;
+
+
+//-----------------------------------------------------------------------------
+
+// Current indentation depth shared by all AstPrinter output.
+int AstPrinter::indent_ = 0;
+
+
+// Registers this printer as the one IndentedScope talks to.
+AstPrinter::AstPrinter() {
+  ASSERT(indent_ == 0);
+  IndentedScope::SetAstPrinter(this);
+}
+
+
+// Verifies indentation is balanced and unregisters the printer.
+AstPrinter::~AstPrinter() {
+  ASSERT(indent_ == 0);
+  IndentedScope::SetAstPrinter(NULL);
+}
+
+
+// Prints ". " once per indentation level, then the given text.
+void AstPrinter::PrintIndented(const char* txt) {
+  for (int i = 0; i < indent_; i++) {
+    Print(". ");
+  }
+  Print(txt);
+}
+
+
+// Prints "<info> <literal>\n" at the current indentation.
+void AstPrinter::PrintLiteralIndented(const char* info,
+                                      Handle<Object> value,
+                                      bool quote) {
+  PrintIndented(info);
+  Print(" ");
+  PrintLiteral(value, quote);
+  Print("\n");
+}
+
+
+// Like PrintLiteralIndented, but appends the variable's mode to the
+// info string when a variable is available.
+void AstPrinter::PrintLiteralWithModeIndented(const char* info,
+                                              Variable* var,
+                                              Handle<Object> value) {
+  if (var == NULL) {
+    PrintLiteralIndented(info, value, true);
+  } else {
+    char buf[256];
+    OS::SNPrintF(buf, sizeof(buf), "%s (mode = %s)", info,
+                 Variable::Mode2String(var->mode()));
+    PrintLiteralIndented(buf, value, true);
+  }
+}
+
+
+// Prints labels (if any) under an optional heading; a non-NULL info is
+// printed even when there are no labels.
+void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
+  if (labels != NULL && labels->length() > 0) {
+    if (info == NULL) {
+      PrintIndented("LABELS ");
+    } else {
+      PrintIndented(info);
+      Print(" ");
+    }
+    PrintLabels(labels);
+    Print("\n");
+  } else if (info != NULL) {
+    PrintIndented(info);
+  }
+}
+
+
+// Prints a heading and visits the node one indentation level deeper.
+void AstPrinter::PrintIndentedVisit(const char* s, Node* node) {
+  IndentedScope indent(s);
+  Visit(node);
+}
+
+
+// Prints the whole program as an indented AST tree rooted at "FUNC".
+const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
+  Init();
+  { IndentedScope indent("FUNC");
+    PrintLiteralIndented("NAME", program->name(), true);
+    PrintParameters(program->scope());
+    PrintDeclarations(program->scope()->declarations());
+    PrintStatements(program->body());
+  }
+  return Output();
+}
+
+
+// Prints declarations under a "DECLS" heading; nothing when empty.
+void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
+  if (declarations->length() > 0) {
+    IndentedScope indent("DECLS");
+    for (int i = 0; i < declarations->length(); i++) {
+      Visit(declarations->at(i));
+    }
+  }
+}
+
+
+// Prints parameters under a "PARAMS" heading; nothing when empty.
+void AstPrinter::PrintParameters(Scope* scope) {
+  if (scope->num_parameters() > 0) {
+    IndentedScope indent("PARAMS");
+    for (int i = 0; i < scope->num_parameters(); i++) {
+      PrintLiteralWithModeIndented("VAR ", scope->parameter(i),
+                                   scope->parameter(i)->name());
+    }
+  }
+}
+
+
+// Visits each statement; unlike the base class, no separators are
+// needed because every node prints on its own indented line.
+void AstPrinter::PrintStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    Visit(statements->at(i));
+  }
+}
+
+
+// Visits each argument; one indented line per argument.
+void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
+  for (int i = 0; i < arguments->length(); i++) {
+    Visit(arguments->at(i));
+  }
+}
+
+
+// Prints one switch clause as a "CASE"/"DEFAULT" subtree.
+void AstPrinter::PrintCaseClause(CaseClause* clause) {
+  if (clause->is_default()) {
+    IndentedScope indent("DEFAULT");
+    PrintStatements(clause->statements());
+  } else {
+    IndentedScope indent("CASE");
+    Visit(clause->label());
+    PrintStatements(clause->statements());
+  }
+}
+
+
+// Statement visitors: each prints its node as a labeled, indented subtree.
+void AstPrinter::VisitBlock(Block* node) {
+  const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
+  IndentedScope indent(block_txt);
+  PrintStatements(node->statements());
+}
+
+
+void AstPrinter::VisitDeclaration(Declaration* node) {
+  if (node->fun() == NULL) {
+    // var or const declarations
+    PrintLiteralWithModeIndented(
+        Variable::Mode2String(node->mode()),
+        node->proxy()->AsVariable(), node->proxy()->name());
+  } else {
+    // function declarations
+    PrintIndented("FUNCTION ");
+    PrintLiteral(node->proxy()->name(), true);
+    Print(" = function ");
+    PrintLiteral(node->fun()->name(), false);
+    Print("\n");
+  }
+}
+
+
+void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
+  Visit(node->expression());
+}
+
+
+void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
+  PrintIndented("EMPTY\n");
+}
+
+
+void AstPrinter::VisitIfStatement(IfStatement* node) {
+  PrintIndentedVisit("IF", node->condition());
+  PrintIndentedVisit("THEN", node->then_statement());
+  if (node->HasElseStatement()) {
+    PrintIndentedVisit("ELSE", node->else_statement());
+  }
+}
+
+
+void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
+  PrintLabelsIndented("CONTINUE", node->target()->labels());
+}
+
+
+void AstPrinter::VisitBreakStatement(BreakStatement* node) {
+  PrintLabelsIndented("BREAK", node->target()->labels());
+}
+
+
+void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
+  PrintIndentedVisit("RETURN", node->expression());
+}
+
+
+void AstPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
+  PrintIndentedVisit("WITH ENTER", node->expression());
+}
+
+
+void AstPrinter::VisitWithExitStatement(WithExitStatement* node) {
+  PrintIndented("WITH EXIT\n");
+}
+
+
+void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
+  IndentedScope indent("SWITCH");
+  PrintLabelsIndented(NULL, node->labels());
+  PrintIndentedVisit("TAG", node->tag());
+  for (int i = 0; i < node->cases()->length(); i++) {
+    PrintCaseClause(node->cases()->at(i));
+  }
+}
+
+
+void AstPrinter::VisitLoopStatement(LoopStatement* node) {
+  // The heading reflects the loop kind (for/while/do-while).
+  IndentedScope indent(node->OperatorString());
+  PrintLabelsIndented(NULL, node->labels());
+  // All four parts are optional; only present ones are printed.
+  if (node->init()) PrintIndentedVisit("INIT", node->init());
+  if (node->cond()) PrintIndentedVisit("COND", node->cond());
+  if (node->body()) PrintIndentedVisit("BODY", node->body());
+  if (node->next()) PrintIndentedVisit("NEXT", node->next());
+}
+
+
+void AstPrinter::VisitForInStatement(ForInStatement* node) {
+  IndentedScope indent("FOR IN");
+  PrintIndentedVisit("FOR", node->each());
+  PrintIndentedVisit("IN", node->enumerable());
+  PrintIndentedVisit("BODY", node->body());
+}
+
+
+void AstPrinter::VisitTryCatch(TryCatch* node) {
+  IndentedScope indent("TRY CATCH");
+  PrintIndentedVisit("TRY", node->try_block());
+  PrintIndentedVisit("CATCHVAR", node->catch_var());
+  PrintIndentedVisit("CATCH", node->catch_block());
+}
+
+
+void AstPrinter::VisitTryFinally(TryFinally* node) {
+  IndentedScope indent("TRY FINALLY");
+  PrintIndentedVisit("TRY", node->try_block());
+  PrintIndentedVisit("FINALLY", node->finally_block());
+}
+
+
+void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
+  IndentedScope indent("DEBUGGER");
+}
+
+
+void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
+  IndentedScope indent("FUNC LITERAL");
+  PrintLiteralIndented("NAME", node->name(), false);
+  PrintParameters(node->scope());
+  // We don't want to see the function literal in this case: it
+  // will be printed via PrintProgram when the code for it is
+  // generated.
+  // PrintStatements(node->body());
+}
+
+
+void AstPrinter::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  // NOTE(review): uses the same "FUNC LITERAL" heading as
+  // VisitFunctionLiteral — presumably intentional, but the two node kinds
+  // are indistinguishable in the output; confirm.
+  IndentedScope indent("FUNC LITERAL");
+  PrintLiteralIndented("BOILERPLATE", node->boilerplate(), true);
+}
+
+
+void AstPrinter::VisitConditional(Conditional* node) {
+  IndentedScope indent("CONDITIONAL");
+  PrintIndentedVisit("?", node->condition());
+  PrintIndentedVisit("THEN", node->then_expression());
+  PrintIndentedVisit("ELSE", node->else_expression());
+}
+
+
+void AstPrinter::VisitLiteral(Literal* node) {
+  PrintLiteralIndented("LITERAL", node->handle(), true);
+}
+
+
+void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
+  IndentedScope indent("REGEXP LITERAL");
+  PrintLiteral(node->pattern(), false);
+  Print(",");
+  PrintLiteral(node->flags(), false);
+}
+
+
+void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
+  IndentedScope indent("OBJ LITERAL");
+  Visit(node->result());
+  for (int i = 0; i < node->properties()->length(); i++) {
+    // Map the property kind to a heading for the per-property subtree.
+    const char* prop_kind = NULL;
+    switch (node->properties()->at(i)->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        prop_kind = "PROPERTY - CONSTANT";
+        break;
+      case ObjectLiteral::Property::COMPUTED:
+        prop_kind = "PROPERTY - COMPUTED";
+        break;
+      case ObjectLiteral::Property::PROTOTYPE:
+        prop_kind = "PROPERTY - PROTOTYPE";
+        break;
+      case ObjectLiteral::Property::GETTER:
+        prop_kind = "PROPERTY - GETTER";
+        break;
+      case ObjectLiteral::Property::SETTER:
+        prop_kind = "PROPERTY - SETTER";
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    IndentedScope prop(prop_kind);
+    PrintIndentedVisit("KEY", node->properties()->at(i)->key());
+    PrintIndentedVisit("VALUE", node->properties()->at(i)->value());
+  }
+}
+
+
+void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
+  IndentedScope indent("ARRAY LITERAL");
+  Visit(node->result());
+  if (node->values()->length() > 0) {
+    IndentedScope indent("VALUES");
+    for (int i = 0; i < node->values()->length(); i++) {
+      Visit(node->values()->at(i));
+    }
+  }
+}
+
+
+void AstPrinter::VisitSlot(Slot* node) {
+  PrintIndented("SLOT ");
+  switch (node->type()) {
+    case Slot::PARAMETER:
+      Print("parameter[%d]", node->index());
+      break;
+    case Slot::LOCAL:
+      Print("frame[%d]", node->index());
+      break;
+    case Slot::CONTEXT:
+      Print(".context[%d]", node->index());
+      break;
+    case Slot::LOOKUP:
+      // LOOKUP slots are resolved by name, not by index.
+      Print(".context[");
+      PrintLiteral(node->var()->name(), false);
+      Print("]");
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Print("\n");
+}
+
+
+void AstPrinter::VisitVariableProxy(VariableProxy* node) {
+  PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name());
+  // If the proxy has been rewritten (e.g. to a slot), show the rewrite
+  // one level deeper.
+  Variable* var = node->var();
+  if (var != NULL && var->rewrite() != NULL) {
+    IndentedScope indent;
+    Visit(var->rewrite());
+  }
+}
+
+
+void AstPrinter::VisitAssignment(Assignment* node) {
+  // The assignment operator's token name is the subtree heading.
+  IndentedScope indent(Token::Name(node->op()));
+  Visit(node->target());
+  Visit(node->value());
+}
+
+
+void AstPrinter::VisitThrow(Throw* node) {
+  PrintIndentedVisit("THROW", node->exception());
+}
+
+
+void AstPrinter::VisitProperty(Property* node) {
+  IndentedScope indent("PROPERTY");
+  Visit(node->obj());
+  // Symbol keys print as literals; computed keys as a KEY subtree.
+  Literal* literal = node->key()->AsLiteral();
+  if (literal != NULL && literal->handle()->IsSymbol()) {
+    PrintLiteralIndented("LITERAL", literal->handle(), false);
+  } else {
+    PrintIndentedVisit("KEY", node->key());
+  }
+}
+
+
+void AstPrinter::VisitCall(Call* node) {
+  IndentedScope indent("CALL");
+  Visit(node->expression());
+  PrintArguments(node->arguments());
+}
+
+
+void AstPrinter::VisitCallNew(CallNew* node) {
+  IndentedScope indent("CALL NEW");
+  Visit(node->expression());
+  PrintArguments(node->arguments());
+}
+
+
+void AstPrinter::VisitCallRuntime(CallRuntime* node) {
+  PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
+  IndentedScope indent;
+  PrintArguments(node->arguments());
+}
+
+
+void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
+  PrintIndentedVisit(Token::Name(node->op()), node->expression());
+}
+
+
+void AstPrinter::VisitCountOperation(CountOperation* node) {
+  // Heading is e.g. "PRE INC" / "POST DEC".
+  char buf[128];
+  OS::SNPrintF(buf, sizeof(buf), "%s %s", (node->is_prefix() ? "PRE" : "POST"),
+               Token::Name(node->op()));
+  PrintIndentedVisit(buf, node->expression());
+}
+
+
+void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
+  IndentedScope indent(Token::Name(node->op()));
+  Visit(node->left());
+  Visit(node->right());
+}
+
+
+void AstPrinter::VisitCompareOperation(CompareOperation* node) {
+  IndentedScope indent(Token::Name(node->op()));
+  Visit(node->left());
+  Visit(node->right());
+}
+
+
+void AstPrinter::VisitThisFunction(ThisFunction* node) {
+  IndentedScope indent("THIS-FUNCTION");
+}
+
+
+
+#endif // DEBUG
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PRETTYPRINTER_H_
+#define V8_PRETTYPRINTER_H_
+
+#include "ast.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+
+// Prints an AST as compact JavaScript-like source text into an internal
+// growable buffer.  Subclasses (AstPrinter) override the virtual Print*
+// helpers to change the output layout.
+class PrettyPrinter: public Visitor {
+ public:
+  PrettyPrinter();
+  virtual ~PrettyPrinter();
+
+  // The following routines print a node into a string.
+  // The result string is alive as long as the PrettyPrinter is alive.
+  const char* Print(Node* node);
+  const char* PrintExpression(FunctionLiteral* program);
+  const char* PrintProgram(FunctionLiteral* program);
+
+  // Print a node to stdout.
+  static void PrintOut(Node* node);
+
+  // Individual nodes
+#define DEF_VISIT(type)                         \
+  virtual void Visit##type(type* node);
+  NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+  char* output_;  // output string buffer
+  int size_;  // output_ size
+  int pos_;  // current printing position
+
+ protected:
+  // Resets the output buffer; printf-style append; current output.
+  void Init();
+  void Print(const char* format, ...);
+  const char* Output() const { return output_; }
+
+  virtual void PrintStatements(ZoneList<Statement*>* statements);
+  void PrintLabels(ZoneStringList* labels);
+  virtual void PrintArguments(ZoneList<Expression*>* arguments);
+  void PrintLiteral(Handle<Object> value, bool quote);
+  void PrintParameters(Scope* scope);
+  void PrintDeclarations(ZoneList<Declaration*>* declarations);
+  void PrintFunctionLiteral(FunctionLiteral* function);
+  void PrintCaseClause(CaseClause* clause);
+};
+
+
+// Prints the AST structure
+class AstPrinter: public PrettyPrinter {
+ public:
+ AstPrinter();
+ virtual ~AstPrinter();
+
+ const char* PrintProgram(FunctionLiteral* program);
+
+ // Individual nodes
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node);
+ NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+ private:
+ friend class IndentedScope;
+ void PrintIndented(const char* txt);
+ void PrintIndentedVisit(const char* s, Node* node);
+
+ void PrintStatements(ZoneList<Statement*>* statements);
+ void PrintDeclarations(ZoneList<Declaration*>* declarations);
+ void PrintParameters(Scope* scope);
+ void PrintArguments(ZoneList<Expression*>* arguments);
+ void PrintCaseClause(CaseClause* clause);
+ void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
+ void PrintLiteralWithModeIndented(const char* info,
+ Variable* var,
+ Handle<Object> value);
+ void PrintLabelsIndented(const char* info, ZoneStringList* labels);
+
+ void inc_indent() { indent_++; }
+ void dec_indent() { indent_--; }
+
+ static int indent_;
+};
+
+#endif // DEBUG
+
+} } // namespace v8::internal
+
+#endif // V8_PRETTYPRINTER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+namespace v8 { namespace internal {
+
+
+// Writes a descriptor at the current position and advances.  Keys must
+// already be symbols.
+void DescriptorWriter::Write(Descriptor* desc) {
+  ASSERT(desc->key_->IsSymbol());
+  descriptors_->Set(pos_, desc);
+  advance();
+}
+
+
+// Copies the reader's current descriptor into this writer.
+void DescriptorWriter::WriteFrom(DescriptorReader* reader) {
+  Descriptor desc;
+  reader->Get(&desc);
+  Write(&desc);
+}
+
+
+#ifdef DEBUG
+// Debug-prints this lookup result: validity, cacheability, attributes,
+// and the type-specific payload.
+void LookupResult::Print() {
+  if (!IsValid()) {
+    PrintF("Not Found\n");
+    return;
+  }
+
+  PrintF("LookupResult:\n");
+  PrintF(" -cacheable = %s\n", IsCacheable() ? "true" : "false");
+  PrintF(" -attributes = %x\n", GetAttributes());
+  switch (type()) {
+    case NORMAL:
+      PrintF(" -type = normal\n");
+      // Fix: terminate the line, consistent with every other case.
+      PrintF(" -entry = %d\n", GetDictionaryEntry());
+      break;
+    case MAP_TRANSITION:
+      PrintF(" -type = map transition\n");
+      PrintF(" -map:\n");
+      GetTransitionMap()->Print();
+      PrintF("\n");
+      break;
+    case CONSTANT_FUNCTION:
+      PrintF(" -type = constant function\n");
+      PrintF(" -function:\n");
+      GetConstantFunction()->Print();
+      PrintF("\n");
+      break;
+    case FIELD:
+      PrintF(" -type = field\n");
+      PrintF(" -index = %d", GetFieldIndex());
+      PrintF("\n");
+      break;
+    case CALLBACKS:
+      PrintF(" -type = call backs\n");
+      PrintF(" -callback object:\n");
+      GetCallbackObject()->Print();
+      break;
+    case INTERCEPTOR:
+      PrintF(" -type = lookup interceptor\n");
+      break;
+    case CONSTANT_TRANSITION:
+      PrintF(" -type = constant property transition\n");
+      break;
+  }
+}
+
+
+// Debug-prints this descriptor as "Descriptor <key> @ <value> <index>".
+void Descriptor::Print() {
+  PrintF("Descriptor ");
+  GetKey()->ShortPrint();
+  PrintF(" @ ");
+  GetValue()->ShortPrint();
+  PrintF(" %d\n", GetDetails().index());
+}
+
+
+#endif
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_H_
+#define V8_PROPERTY_H_
+
+namespace v8 { namespace internal {
+
+
+// Abstraction for elements in instance-descriptor arrays.
+//
+// Each descriptor has a key, property attributes, property type,
+// property index (in the actual instance-descriptor array) and
+// optionally a piece of data.
+//
+
+class Descriptor BASE_EMBEDDED {
+ public:
+  // Decodes a descriptor value known to be a Smi-encoded index.
+  static int IndexFromValue(Object* value) {
+    return Smi::cast(value)->value();
+  }
+
+  String* key() { return key_; }
+
+  // Interns the key as a symbol if it is not one already.  Returns the
+  // (possibly new) symbol, or a Failure from the symbol table lookup.
+  Object* KeyToSymbol() {
+    if (!key_->IsSymbol()) {
+      Object* result = Heap::LookupSymbol(key_);
+      if (result->IsFailure()) return result;
+      key_ = String::cast(result);
+    }
+    return key_;
+  }
+
+  String* GetKey() { return key_; }
+  Object* GetValue() { return value_; }
+  PropertyDetails GetDetails() { return details_; }
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  // Rebuilds details_ with the given enumeration index, keeping the
+  // existing attributes and type.
+  void SetEnumerationIndex(int index) {
+    ASSERT(PropertyDetails::IsValidIndex(index));
+    details_ = PropertyDetails(details_.attributes(), details_.type(), index);
+  }
+
+ private:
+  String* key_;
+  Object* value_;
+  PropertyDetails details_;
+
+ protected:
+  // Default-constructed descriptors leave key_/value_ unset; callers must
+  // Init() them before use.
+  Descriptor() : details_(Smi::FromInt(0)) {}
+
+  void Init(String* key, Object* value, PropertyDetails details) {
+    key_ = key;
+    value_ = value;
+    details_ = details;
+  }
+
+  Descriptor(String* key, Object* value, PropertyDetails details)
+      : key_(key),
+        value_(value),
+        details_(details) { }
+
+  Descriptor(String* key,
+             Object* value,
+             PropertyAttributes attributes,
+             PropertyType type,
+             int index = 0)
+      : key_(key),
+        value_(value),
+        details_(attributes, type, index) { }
+
+  friend class DescriptorWriter;
+  friend class DescriptorReader;
+  friend class DescriptorArray;
+};
+
+
+// The subclasses below are thin convenience constructors that pair a key
+// and value with the matching PropertyType.
+
+// A transition to a new map, keyed by the property name that causes it.
+class MapTransitionDescriptor: public Descriptor {
+ public:
+  MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)
+      : Descriptor(key, map, attributes, MAP_TRANSITION) { }
+};
+
+
+// A constant-property transition; the value is an unused placeholder Smi.
+class ConstTransitionDescriptor: public Descriptor {
+ public:
+  explicit ConstTransitionDescriptor(String* key)
+      : Descriptor(key, Smi::FromInt(0), NONE, CONSTANT_TRANSITION) { }
+};
+
+
+// An in-object field; the value stores the Smi-encoded field index.
+class FieldDescriptor: public Descriptor {
+ public:
+  FieldDescriptor(String* key,
+                  int field_index,
+                  PropertyAttributes attributes,
+                  int index = 0)
+      : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
+};
+
+
+// A property whose value is a known constant function.
+class ConstantFunctionDescriptor: public Descriptor {
+ public:
+  ConstantFunctionDescriptor(String* key,
+                             JSFunction* function,
+                             PropertyAttributes attributes,
+                             int index = 0)
+      : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
+};
+
+
+// A property accessed through a callbacks proxy object.
+class CallbacksDescriptor: public Descriptor {
+ public:
+  CallbacksDescriptor(String* key,
+                      Object* proxy,
+                      PropertyAttributes attributes,
+                      int index = 0)
+      : Descriptor(key, proxy, attributes, CALLBACKS, index) {}
+};
+
+
+// Result of a property lookup: records where the property was found
+// (descriptor array, dictionary, interceptor, or built-in constant) plus
+// the holder object and property details.
+class LookupResult BASE_EMBEDDED {
+ public:
+  // Where did we find the result?
+  enum {
+    NOT_FOUND,
+    DESCRIPTOR_TYPE,
+    DICTIONARY_TYPE,
+    INTERCEPTOR_TYPE,
+    CONSTANT_TYPE
+  } lookup_type_;
+
+  // Starts out as "not found"; holder_ and number_ stay uninitialized
+  // until one of the *Result setters below is called.
+  LookupResult()
+      : lookup_type_(NOT_FOUND),
+        cacheable_(true),
+        details_(NONE, NORMAL) {}
+
+  void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
+    lookup_type_ = DESCRIPTOR_TYPE;
+    holder_ = holder;
+    details_ = details;
+    number_ = number;
+  }
+
+  void ConstantResult(JSObject* holder) {
+    lookup_type_ = CONSTANT_TYPE;
+    holder_ = holder;
+    details_ =
+        PropertyDetails(static_cast<PropertyAttributes>(DONT_ENUM |
+                                                        DONT_DELETE),
+                        CALLBACKS);
+    number_ = -1;
+  }
+
+  void DictionaryResult(JSObject* holder, int entry) {
+    lookup_type_ = DICTIONARY_TYPE;
+    holder_ = holder;
+    details_ = holder->property_dictionary()->DetailsAt(entry);
+    number_ = entry;
+  }
+
+  void InterceptorResult(JSObject* holder) {
+    lookup_type_ = INTERCEPTOR_TYPE;
+    holder_ = holder;
+    details_ = PropertyDetails(NONE, INTERCEPTOR);
+  }
+
+  void NotFound() {
+    lookup_type_ = NOT_FOUND;
+  }
+
+
+  JSObject* holder() {
+    ASSERT(IsValid());
+    return holder_;
+  }
+
+  PropertyType type() {
+    ASSERT(IsValid());
+    return details_.type();
+  }
+
+  // True for map or constant transitions (not real properties).
+  bool IsTransitionType() {
+    PropertyType t = type();
+    if (t == MAP_TRANSITION || t == CONSTANT_TRANSITION) return true;
+    return false;
+  }
+
+  PropertyAttributes GetAttributes() {
+    ASSERT(IsValid());
+    return details_.attributes();
+  }
+
+  PropertyDetails GetPropertyDetails() {
+    return details_;
+  }
+
+  bool IsReadOnly() { return details_.IsReadOnly(); }
+  bool IsDontDelete() { return details_.IsDontDelete(); }
+  bool IsDontEnum() { return details_.IsDontEnum(); }
+
+  bool IsValid() { return lookup_type_ != NOT_FOUND; }
+
+  // Tells whether the result is a property.
+  // Excluding transitions.
+  bool IsProperty() {
+    return IsValid() && !IsTransitionType();
+  }
+
+  bool IsCacheable() { return cacheable_; }
+  void DisallowCaching() { cacheable_ = false; }
+
+  // Tells whether the value needs to be loaded.
+  // Only lazily-compiled function values can be "not loaded".
+  bool IsLoaded() {
+    if (lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE) {
+      Object* value = GetValue();
+      if (value->IsJSFunction()) {
+        return JSFunction::cast(value)->IsLoaded();
+      }
+    }
+    return true;
+  }
+
+  Map* GetTransitionMap() {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == MAP_TRANSITION);
+    return Map::cast(GetValue());
+  }
+
+  int GetFieldIndex() {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == FIELD);
+    return Descriptor::IndexFromValue(GetValue());
+  }
+
+  int GetDictionaryEntry() {
+    ASSERT(lookup_type_ == DICTIONARY_TYPE);
+    return number_;
+  }
+
+  JSFunction* GetConstantFunction() {
+    ASSERT(type() == CONSTANT_FUNCTION);
+    return JSFunction::cast(GetValue());
+  }
+
+  Object* GetCallbackObject() {
+    if (lookup_type_ == CONSTANT_TYPE) {
+      // For now we only have the __proto__ as constant type.
+      return Heap::prototype_accessors();
+    }
+    return GetValue();
+  }
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  Object* GetValue() {
+    if (lookup_type_ == DESCRIPTOR_TYPE) {
+      DescriptorArray* descriptors = holder()->map()->instance_descriptors();
+      return descriptors->GetValue(number_);
+    }
+    // In the dictionary case, the data is held in the value field.
+    ASSERT(lookup_type_ == DICTIONARY_TYPE);
+    return holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
+  }
+
+ private:
+  JSObject* holder_;
+  int number_;
+  bool cacheable_;
+  PropertyDetails details_;
+};
+
+
+// The DescriptorStream is an abstraction for iterating over a map's
+// instance descriptors.
+class DescriptorStream BASE_EMBEDDED {
+ public:
+ explicit DescriptorStream(DescriptorArray* descriptors, int pos) {
+ descriptors_ = descriptors;
+ pos_ = pos;
+ limit_ = descriptors_->number_of_descriptors();
+ }
+
+ // Tells whether we have reached the end of the steam.
+ bool eos() { return pos_ >= limit_; }
+
+ int next_position() { return pos_ + 1; }
+ void advance() { pos_ = next_position(); }
+
+ protected:
+ DescriptorArray* descriptors_;
+ int pos_; // Current position.
+ int limit_; // Limit for posistion.
+};
+
+
+class DescriptorReader: public DescriptorStream {
+ public:
+ explicit DescriptorReader(DescriptorArray* descriptors, int pos = 0)
+ : DescriptorStream(descriptors, pos) {}
+
+ String* GetKey() { return descriptors_->GetKey(pos_); }
+ Object* GetValue() { return descriptors_->GetValue(pos_); }
+ PropertyDetails GetDetails() {
+ return PropertyDetails(descriptors_->GetDetails(pos_));
+ }
+
+ int GetFieldIndex() { return Descriptor::IndexFromValue(GetValue()); }
+
+ bool IsDontEnum() { return GetDetails().IsDontEnum(); }
+
+ PropertyType type() { return GetDetails().type(); }
+
+ // Tells whether the type is a transition.
+ bool IsTransition() {
+ PropertyType t = type();
+ ASSERT(t != INTERCEPTOR);
+ if (t == MAP_TRANSITION || t == CONSTANT_TRANSITION) return true;
+ return false;
+ }
+
+ JSFunction* GetConstantFunction() { return JSFunction::cast(GetValue()); }
+
+ AccessorDescriptor* GetCallbacks() {
+ ASSERT(type() == CALLBACKS);
+ Proxy* p = Proxy::cast(GetCallbacksObject());
+ return reinterpret_cast<AccessorDescriptor*>(p->proxy());
+ }
+
+ Object* GetCallbacksObject() {
+ ASSERT(type() == CALLBACKS);
+ return GetValue();
+ }
+
+ bool Equals(String* name) { return name->Equals(GetKey()); }
+
+ void ReplaceConstantFunction(JSFunction* value) {
+ descriptors_->ReplaceConstantFunction(pos_, value);
+ }
+
+ void Get(Descriptor* desc) {
+ descriptors_->Get(pos_, desc);
+ }
+};
+
+class DescriptorWriter: public DescriptorStream {
+ public:
+ explicit DescriptorWriter(DescriptorArray* descriptors)
+ : DescriptorStream(descriptors, 0) {}
+
+ // Append a descriptor to this stream.
+ void Write(Descriptor* desc);
+ // Read a descriptor from the reader and append it to this stream.
+ void WriteFrom(DescriptorReader* reader);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PROPERTY_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Expect $Object = global.Object;
+// Expect $Array = global.Array;
+
+const $RegExp = global.RegExp;
+
+// A recursive descent parser for Patterns according to the grammar of
+// ECMA-262 15.10.1, with deviations noted below.
+function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
+ // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
+ if (IS_REGEXP(pattern)) {
+ if (!IS_UNDEFINED(flags)) {
+ throw MakeTypeError('regexp_flags', []);
+ }
+ flags = (pattern.global ? 'g' : '')
+ + (pattern.ignoreCase ? 'i' : '')
+ + (pattern.multiline ? 'm' : '');
+ pattern = pattern.source;
+ }
+
+ pattern = IS_UNDEFINED(pattern) ? '' : ToString(pattern);
+ flags = IS_UNDEFINED(flags) ? '' : ToString(flags);
+
+ var global = false;
+ var ignoreCase = false;
+ var multiline = false;
+
+ for (var i = 0; i < flags.length; i++) {
+ var c = flags.charAt(i);
+ switch (c) {
+ case 'g':
+ if (global) throw MakeSyntaxError('duplicate_regexp_flag', ['g']);
+ global = true;
+ break;
+ case 'i':
+ if (ignoreCase) throw MakeSyntaxError('duplicate_regexp_flag', ['i']);
+ ignoreCase = true;
+ break;
+ case 'm':
+ if (multiline) throw MakeSyntaxError('duplicate_regexp_flag', ['m']);
+ multiline = true;
+ break;
+ default:
+ // Ignore flags that have no meaning to be consistent with
+ // KJS.
+ break;
+ }
+ }
+
+ if (isConstructorCall) {
+ // ECMA-262, section 15.10.7.1.
+ %SetProperty(object, 'source', pattern,
+ DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+ // ECMA-262, section 15.10.7.2.
+ %SetProperty(object, 'global', global, DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+ // ECMA-262, section 15.10.7.3.
+ %SetProperty(object, 'ignoreCase', ignoreCase,
+ DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+ // ECMA-262, section 15.10.7.4.
+ %SetProperty(object, 'multiline', multiline,
+ DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+ // ECMA-262, section 15.10.7.5.
+ %SetProperty(object, 'lastIndex', 0, DONT_DELETE | DONT_ENUM);
+ } else { // RegExp is being recompiled via RegExp.prototype.compile.
+ %IgnoreAttributesAndSetProperty(object, 'source', pattern);
+ %IgnoreAttributesAndSetProperty(object, 'global', global);
+ %IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
+ %IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
+ %IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
+ }
+
+ // Call internal function to compile the pattern.
+ %RegExpCompile(object, pattern, flags);
+};
+
+
+function RegExpConstructor(pattern, flags) {
+ if (%IsConstructCall(this)) {
+ DoConstructRegExp(this, pattern, flags, true);
+ } else {
+ // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
+ if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
+ return pattern;
+ }
+ return new $RegExp(pattern, flags);
+ }
+};
+
+
+// Deprecated RegExp.prototype.compile method. We behave like the constructor
+// were called again. In SpiderMonkey, this method returns the regexp object.
+// In KJS, it returns undefined. For compatibility with KJS, we match their
+// behavior.
+function CompileRegExp(pattern, flags) {
+ // Both KJS and SpiderMonkey treat a missing pattern argument as the
+ // empty subject string, and an actual undefined value passed as the
+ // patter as the string 'undefined'. Note that KJS is inconsistent
+ // here, treating undefined values differently in
+ // RegExp.prototype.compile and in the constructor, where they are
+ // the empty string. For compatibility with KJS, we match their
+ // behavior.
+ if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
+ DoConstructRegExp(this, 'undefined', flags, false);
+ } else {
+ DoConstructRegExp(this, pattern, flags, false);
+ }
+}
+
+
+// DoRegExpExec and DoRegExpExecGlobal are wrappers around the runtime
+// %RegExp and %RegExpGlobal functions that ensure that the static
+// properties of the RegExp constructor are set.
+function DoRegExpExec(regexp, string, index) {
+ var matchIndices = %RegExpExec(regexp, string, index);
+ if (!IS_NULL(matchIndices)) {
+ regExpCaptures = matchIndices;
+ regExpSubject = regexp_input = string;
+ }
+ return matchIndices;
+};
+
+function DoRegExpExecGlobal(regexp, string) {
+ // Here, matchIndices is an array of arrays of substring indices.
+ var matchIndices = %RegExpExecGlobal(regexp, string);
+ if (matchIndices.length != 0) {
+ regExpCaptures = matchIndices[matchIndices.length - 1];
+ regExpSubject = regexp_input = string;
+ }
+ return matchIndices;
+};
+
+
+function RegExpExec(string) {
+ if (%_ArgumentsLength() == 0) {
+ string = regexp_input;
+ }
+ var s = ToString(string);
+ var length = s.length;
+ var lastIndex = this.lastIndex;
+ var i = this.global ? TO_INTEGER(lastIndex) : 0;
+
+ if (i < 0 || i > s.length) {
+ this.lastIndex = 0;
+ return null;
+ }
+
+ // matchIndices is an array of integers with length of captures*2,
+ // each pair of integers specified the start and the end of index
+ // in the string.
+ var matchIndices = DoRegExpExec(this, s, i);
+
+ if (matchIndices == null) {
+ if (this.global) this.lastIndex = 0;
+ return matchIndices; // no match
+ }
+
+ var numResults = matchIndices.length >> 1;
+ var result = new $Array(numResults);
+ for (var i = 0; i < numResults; i++) {
+ var matchStart = matchIndices[2*i];
+ var matchEnd = matchIndices[2*i + 1];
+ if (matchStart != -1 && matchEnd != -1) {
+ result[i] = s.slice(matchStart, matchEnd);
+ } else {
+ // Make sure the element is present. Avoid reading the undefined
+ // property from the global object since this may change.
+ result[i] = void 0;
+ }
+ }
+
+ if (this.global)
+ this.lastIndex = matchIndices[1];
+ result.index = matchIndices[0];
+ result.input = s;
+ return result;
+};
+
+
+function RegExpTest(string) {
+ var result = (%_ArgumentsLength() == 0) ? this.exec() : this.exec(string);
+ return result != null;
+};
+
+
function RegExpToString() {
  // An empty source is printed as /(?:)/ so that the result re-parses
  // as a regexp literal.
  // http://bugzilla.mozilla.org/show_bug.cgi?id=225550
  // ecma_2/RegExp/properties-001.js.
  var src = this.source ? this.source : '(?:)';
  var flags = (this.global ? 'g' : '') +
              (this.ignoreCase ? 'i' : '') +
              (this.multiline ? 'm' : '');
  return '/' + src + '/' + flags;
}
+
+
+// Getters for the static properties lastMatch, lastParen, leftContext, and
+// rightContext of the RegExp constructor. The properties are computed based
+// on the captures array of the last successful match and the subject string
+// of the last successful match.
+function RegExpGetLastMatch() {
+ return regExpSubject.slice(regExpCaptures[0], regExpCaptures[1]);
+};
+
+function RegExpGetLastParen() {
+ var length = regExpCaptures.length;
+ if (length <= 2) return ''; // There were no captures.
+ // We match the SpiderMonkey behavior: return the substring defined by the
+ // last pair (after the first pair) of elements of the capture array even if
+ // it is empty.
+ return regExpSubject.slice(regExpCaptures[length - 2],
+ regExpCaptures[length - 1]);
+};
+
+function RegExpGetLeftContext() {
+ return regExpSubject.slice(0, regExpCaptures[0]);
+};
+
+function RegExpGetRightContext() {
+ return regExpSubject.slice(regExpCaptures[1], regExpSubject.length);
+};
+
+
// Produces the getter for one of the static capture properties $1..$9 of
// the RegExp constructor.  The getter returns capture n of the last
// successful match, or '' when the capture does not exist or did not
// participate in the match.  It will be called with n >= 1 but actually
// works for any non-negative index.
function RegExpMakeCaptureGetter(n) {
  return function() {
    var index = n * 2;
    if (index >= regExpCaptures.length) return '';
    var start = regExpCaptures[index];
    var end = regExpCaptures[index + 1];
    return (start == -1 || end == -1) ? '' : regExpSubject.slice(start, end);
  };
};
+
+
+// Properties of the builtins object for recording the result of the last
+// regexp match. The property regExpCaptures is the matchIndices array of the
+// last successful regexp match (an array of start/end index pairs for the
+// match and all the captured substrings), the invariant is that there is at
+// least two elements. The property regExpSubject is the subject string for
+// the last successful match.
+var regExpCaptures = [0, 0];
+var regExpSubject = '';
+
+
+%FunctionSetInstanceClassName($RegExp, 'RegExp');
+%FunctionSetPrototype($RegExp, new $Object());
+%AddProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
+%SetCode($RegExp, RegExpConstructor);
+
+%AddProperty($RegExp.prototype, 'exec', RegExpExec, DONT_ENUM);
+%AddProperty($RegExp.prototype, 'test', RegExpTest, DONT_ENUM);
+%AddProperty($RegExp.prototype, 'toString', RegExpToString, DONT_ENUM);
+%AddProperty($RegExp.prototype, 'compile', CompileRegExp, DONT_ENUM);
+
+// The spec says nothing about the length of exec and test, but
+// SpiderMonkey and KJS have length equal to 0.
+%FunctionSetLength($RegExp.prototype.exec, 0);
+%FunctionSetLength($RegExp.prototype.test, 0);
+// The length of compile is 1 in SpiderMonkey.
+%FunctionSetLength($RegExp.prototype.compile, 1);
+
+// The properties input, $input, and $_ are aliases for each other. When this
+// value is set in SpiderMonkey, the value it is set to is coerced to a
+// string. We mimic that behavior with a slight difference: in SpiderMonkey
+// the value of the expression 'RegExp.input = null' (for instance) is the
+// string "null" (ie, the value after coercion), while in V8 it is the value
+// null (ie, the value before coercion).
+// Getter and setter for the input.
+var regexp_input = "";
+function RegExpGetInput() { return regexp_input; };
+function RegExpSetInput(string) { regexp_input = ToString(string); };
+
+%DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
+%DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
+%DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+
+// The properties multiline and $* are aliases for each other. When this
+// value is set in SpiderMonkey, the value it is set to is coerced to a
+// boolean. We mimic that behavior with a slight difference: in SpiderMonkey
+// the value of the expression 'RegExp.multiline = null' (for instance) is the
+// boolean false (ie, the value after coercion), while in V8 it is the value
+// null (ie, the value before coercion).
+(function () {
+ // Getter and setter for multiline.
+ var multiline = false;
+ function RegExpGetMultiline() { return multiline; };
+ function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
+
+ %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
+ %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
+ %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
+})();
+
+
+function NoOpSetter(ignored) {};
+
+
+// Static properties set by a successful match.
+%DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
+%DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
+%DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
+%DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
+%DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
+%DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
+%DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
+%DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
+%DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
+%DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+
+// A local scope to hide the loop index i.
+(function() {
+ for (var i = 1; i < 10; ++i) {
+ %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
+ %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
+ }
+})();
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+#include "rewriter.h"
+
+namespace v8 { namespace internal {
+
+
+class Processor: public Visitor {
+ public:
+ explicit Processor(VariableProxy* result)
+ : result_(result),
+ result_assigned_(false),
+ is_set_(false),
+ in_try_(false) {
+ }
+
+ void Process(ZoneList<Statement*>* statements);
+ bool result_assigned() const { return result_assigned_; }
+
+ private:
+ VariableProxy* result_;
+
+ // We are not tracking result usage via the result_'s use
+ // counts (we leave the accurate computation to the
+ // usage analyzer). Instead we simple remember if
+ // there was ever an assignment to result_.
+ bool result_assigned_;
+
+ // To avoid storing to .result all the time, we eliminate some of
+ // the stores by keeping track of whether or not we're sure .result
+ // will be overwritten anyway. This is a bit more tricky than what I
+ // was hoping for
+ bool is_set_;
+ bool in_try_;
+
+ Expression* SetResult(Expression* value) {
+ result_assigned_ = true;
+ return new Assignment(Token::ASSIGN, result_, value, kNoPosition);
+ }
+
+ // Node visitors.
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node);
+ NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
+
+
+void Processor::Process(ZoneList<Statement*>* statements) {
+ for (int i = statements->length() - 1; i >= 0; --i) {
+ Visit(statements->at(i));
+ }
+}
+
+
+void Processor::VisitBlock(Block* node) {
+ // An initializer block is the rewritten form of a variable declaration
+ // with initialization expressions. The initializer block contains the
+ // list of assignments corresponding to the initialization expressions.
+ // While unclear from the spec (ECMA-262, 3rd., 12.2), the value of
+ // a variable declaration with initialization expression is 'undefined'
+ // with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
+ // returns 'undefined'. To obtain the same behavior with v8, we need
+ // to prevent rewriting in that case.
+ if (!node->is_initializer_block()) Process(node->statements());
+}
+
+
+void Processor::VisitExpressionStatement(ExpressionStatement* node) {
+ // Rewrite : <x>; -> .result = <x>;
+ if (!is_set_) {
+ node->set_expression(SetResult(node->expression()));
+ if (!in_try_) is_set_ = true;
+ }
+}
+
+
+void Processor::VisitIfStatement(IfStatement* node) {
+ // Rewrite both then and else parts (reversed).
+ bool save = is_set_;
+ Visit(node->else_statement());
+ bool set_after_then = is_set_;
+ is_set_ = save;
+ Visit(node->then_statement());
+ is_set_ = is_set_ && set_after_then;
+}
+
+
+
+
+void Processor::VisitLoopStatement(LoopStatement* node) {
+ // Rewrite loop body statement.
+ bool set_after_loop = is_set_;
+ Visit(node->body());
+ is_set_ = is_set_ && set_after_loop;
+}
+
+
+void Processor::VisitForInStatement(ForInStatement* node) {
+ // Rewrite for-in body statement.
+ bool set_after_for = is_set_;
+ Visit(node->body());
+ is_set_ = is_set_ && set_after_for;
+}
+
+
+void Processor::VisitTryCatch(TryCatch* node) {
+ // Rewrite both try and catch blocks (reversed order).
+ bool set_after_catch = is_set_;
+ Visit(node->catch_block());
+ is_set_ = is_set_ && set_after_catch;
+ bool save = in_try_;
+ in_try_ = true;
+ Visit(node->try_block());
+ in_try_ = save;
+}
+
+
+void Processor::VisitTryFinally(TryFinally* node) {
+ // Rewrite both try and finally block (reversed order).
+ Visit(node->finally_block());
+ bool save = in_try_;
+ in_try_ = true;
+ Visit(node->try_block());
+ in_try_ = save;
+}
+
+
+void Processor::VisitSwitchStatement(SwitchStatement* node) {
+ // Rewrite statements in all case clauses in reversed order.
+ ZoneList<CaseClause*>* clauses = node->cases();
+ bool set_after_switch = is_set_;
+ for (int i = clauses->length() - 1; i >= 0; --i) {
+ CaseClause* clause = clauses->at(i);
+ Process(clause->statements());
+ }
+ is_set_ = is_set_ && set_after_switch;
+}
+
+
+void Processor::VisitContinueStatement(ContinueStatement* node) {
+ is_set_ = false;
+}
+
+
+void Processor::VisitBreakStatement(BreakStatement* node) {
+ is_set_ = false;
+}
+
+
+// Do nothing:
+void Processor::VisitDeclaration(Declaration* node) {}
+void Processor::VisitEmptyStatement(EmptyStatement* node) {}
+void Processor::VisitReturnStatement(ReturnStatement* node) {}
+void Processor::VisitWithEnterStatement(WithEnterStatement* node) {}
+void Processor::VisitWithExitStatement(WithExitStatement* node) {}
+void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
+
+
+// Expressions are never visited yet.
+void Processor::VisitFunctionLiteral(FunctionLiteral* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitConditional(Conditional* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitSlot(Slot* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitVariableProxy(VariableProxy* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitLiteral(Literal* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitRegExpLiteral(RegExpLiteral* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitArrayLiteral(ArrayLiteral* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitObjectLiteral(ObjectLiteral* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitAssignment(Assignment* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitThrow(Throw* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitProperty(Property* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitCall(Call* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitCallNew(CallNew* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitCallRuntime(CallRuntime* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitUnaryOperation(UnaryOperation* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitCountOperation(CountOperation* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitBinaryOperation(BinaryOperation* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitCompareOperation(CompareOperation* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+void Processor::VisitThisFunction(ThisFunction* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
+bool Rewriter::Process(FunctionLiteral* function) {
+ Scope* scope = function->scope();
+ if (scope->is_function_scope()) return true;
+
+ ZoneList<Statement*>* body = function->body();
+ if (body->is_empty()) return true;
+
+ VariableProxy* result = scope->NewTemporary(Factory::result_symbol());
+ Processor processor(result);
+ processor.Process(body);
+ if (processor.HasStackOverflow()) return false;
+
+ if (processor.result_assigned()) body->Add(new ReturnStatement(result));
+ return true;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REWRITER_H_
+#define V8_REWRITER_H_
+
+namespace v8 { namespace internal {
+
+
+// Currently, the rewriter takes function literals (only top-level)
+// and rewrites them to return the value of the last expression in
+// them.
+//
+// The rewriter adds a (hidden) variable, called .result, to the
+// activation, and tries to figure out where it needs to store into
+// this variable. If the variable is ever used, we conclude by adding
+// a return statement that returns the variable to the body of the
+// given function.
+
+class Rewriter {
+ public:
+ static bool Process(FunctionLiteral* function);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_REWRITER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "arguments.h"
+#include "compiler.h"
+#include "cpu.h"
+#include "dateparser.h"
+#include "debug.h"
+#include "execution.h"
+#include "jsregexp.h"
+#include "platform.h"
+#include "runtime.h"
+#include "scopeinfo.h"
+#include "v8threads.h"
+
+namespace v8 { namespace internal {
+
+
+#define RUNTIME_ASSERT(value) do { \
+ if (!(value)) return IllegalOperation(); \
+} while (false)
+
+// Cast the given object to a value of the specified type and store
+// it in a variable with the given name. If the object is not of the
+// expected type call IllegalOperation and return.
+#define CONVERT_CHECKED(Type, name, obj) \
+ RUNTIME_ASSERT(obj->Is##Type()); \
+ Type* name = Type::cast(obj);
+
+#define CONVERT_ARG_CHECKED(Type, name, index) \
+ RUNTIME_ASSERT(args[index]->Is##Type()); \
+ Handle<Type> name = args.at<Type>(index);
+
+// Cast the given object to a double and store it in a variable with
+// the given name. If the object is not a number (as opposed to
+// the number not-a-number) call IllegalOperation and return.
+#define CONVERT_DOUBLE_CHECKED(name, obj) \
+ RUNTIME_ASSERT(obj->IsNumber()); \
+ double name = (obj)->Number();
+
+// Call the specified converter on the object *comand store the result in
+// a variable of the specified type with the given name. If the
+// object is not a Number call IllegalOperation and return.
+#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
+ RUNTIME_ASSERT(obj->IsNumber()); \
+ type name = NumberTo##Type(obj);
+
+// Non-reentrant string buffer for efficient general use in this file.
+static StaticResource<StringInputBuffer> string_input_buffer;
+
+
+// Signal an illegal runtime-call operation by scheduling an
+// illegal-access exception and returning the resulting failure value.
+static Object* IllegalOperation() {
+  Object* failure = Top::Throw(Heap::illegal_access_symbol());
+  return failure;
+}
+
+
+// Clone the cached boilerplate object of an object literal. The clone
+// shares the boilerplate's map and constant properties; the caller
+// fills in any non-constant properties afterwards.
+static Object* Runtime_CloneObjectLiteralBoilerplate(Arguments args) {
+  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
+
+#ifdef DEBUG
+  // Verify the constructor of the boilerplate is equal to the
+  // object function in the CURRENT global_context.
+  CHECK(boilerplate->map()->constructor()
+        == Top::context()->global_context()->object_function());
+#endif
+  return boilerplate->Copy();
+}
+
+
+// Build the boilerplate object for an object literal from its constant
+// properties, cache it in the literals array at literals_index, and
+// return it. args: the literals FixedArray, the literal index (Smi),
+// and a FixedArray of alternating constant keys and values.
+static Object* Runtime_CreateObjectLiteralBoilerplate(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  // Copy the arguments.
+  Handle<FixedArray> literals = args.at<FixedArray>(0);
+  int literals_index = Smi::cast(args[1])->value();
+  Handle<FixedArray> constant_properties = args.at<FixedArray>(2);
+
+  // Create the boilerplate object for the function literal
+  Handle<JSObject> boilerplate =
+      Factory::NewJSObject(Top::object_function(), TENURED);
+
+  { // Add the constant properties to the boilerplate.
+    int length = constant_properties->length();
+    OptimizedObjectForAddingMultipleProperties opt(boilerplate, true);
+    // Keys and values alternate in the constant_properties array.
+    for (int index = 0; index < length; index +=2) {
+      Handle<Object> key(constant_properties->get(index+0));
+      Handle<Object> value(constant_properties->get(index+1));
+      uint32_t element_index = 0;
+      if (key->IsSymbol()) {
+        // If key is a symbol it is not an array element.
+        Handle<String> name(String::cast(*key));
+        ASSERT(!name->AsArrayIndex(&element_index));
+        SetProperty(boilerplate, name, value, NONE);
+      } else if (Array::IndexFromObject(*key, &element_index)) {
+        // Array index (uint32).
+        SetElement(boilerplate, element_index, value);
+      } else {
+        // Non-uint32 number: convert the key to its string
+        // representation and add it as a named property.
+        ASSERT(key->IsNumber());
+        double num = key->Number();
+        char arr[100];
+        Vector<char> buffer(arr, ARRAY_SIZE(arr));
+        const char* str = DoubleToCString(num, buffer);
+        Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+        SetProperty(boilerplate, name, value, NONE);
+      }
+    }
+  }
+
+  // Update the functions literal and return the boilerplate.
+  literals->set(literals_index, *boilerplate);
+
+  return *boilerplate;
+}
+
+
+// Create a JSArray whose elements are a copy of the given literals
+// FixedArray. Allocation failures are propagated to the caller as
+// failure objects so the call can be retried after a GC.
+static Object* Runtime_CreateArrayLiteral(Arguments args) {
+  // Takes a FixedArray containing literals and produces
+  // JSArray with the elements matching the literals.
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(FixedArray, literals, args[0]);
+
+  // Retrieve the array constructor from the global context.
+  JSFunction* constructor =
+      JSFunction::cast(Top::context()->global_context()->array_function());
+
+  // Create the JSArray.
+  Object* object = Heap::AllocateJSObject(constructor);
+  if (object->IsFailure()) return object;
+
+  // Copy the literals.
+  Object* elements = literals->Copy();
+  if (elements->IsFailure()) return elements;
+
+  // Set the elements.
+  JSArray::cast(object)->SetContent(FixedArray::cast(elements));
+  return object;
+}
+
+
+// Return the class name of a JS object; non-JS objects map to null.
+static Object* Runtime_ClassOf(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  Object* instance = args[0];
+  if (instance->IsJSObject()) {
+    return JSObject::cast(instance)->class_name();
+  }
+  return Heap::null_value();
+}
+
+
+// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8): walk V's
+// prototype chain and return true iff O occurs on it.
+static Object* Runtime_IsInPrototypeChain(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  Object* O = args[0];
+  Object* V = args[1];
+  for (Object* proto = V->GetPrototype();
+       !proto->IsNull();
+       proto = proto->GetPrototype()) {
+    if (proto == O) return Heap::true_value();
+  }
+  return Heap::false_value();
+}
+
+
+// Return true when the top-most JavaScript frame is a construct call.
+static Object* Runtime_IsConstructCall(Arguments args) {
+  NoHandleAllocation ha;
+  JavaScriptFrameIterator it;
+  bool is_construct = it.frame()->IsConstructor();
+  return Heap::ToBoolean(is_construct);
+}
+
+
+// Return the builtins object of the current context.
+static Object* Runtime_GetBuiltins(Arguments args) {
+  NoHandleAllocation ha;
+  Context* current = Top::context();
+  return current->builtins();
+}
+
+
+// Compile a regular expression. args: the JSValue regexp wrapper, the
+// pattern string and the flags string; delegates to the JSCRE backend.
+static Object* Runtime_RegExpCompile(Arguments args) {
+  HandleScope scope;  // handles created below die with this scope
+  ASSERT(args.length() == 3);
+  // Type-check all arguments first (no allocation happens here), then
+  // wrap them in handles.
+  CONVERT_CHECKED(JSValue, raw_re, args[0]);
+  CONVERT_CHECKED(String, raw_pattern, args[1]);
+  CONVERT_CHECKED(String, raw_flags, args[2]);
+  Handle<JSValue> re(raw_re);
+  Handle<String> pattern(raw_pattern);
+  Handle<String> flags(raw_flags);
+  return *RegExpImpl::JsreCompile(re, pattern, flags);
+}
+
+
+// Instantiate a JSFunction from an API-supplied FunctionTemplateInfo.
+static Object* Runtime_CreateApiFunction(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(FunctionTemplateInfo, raw_data, args[0]);
+  Handle<FunctionTemplateInfo> info(raw_data);
+  return *Factory::CreateApiFunction(info);
+}
+
+
+// True iff the argument is an object template or a function template.
+static Object* Runtime_IsTemplate(Arguments args) {
+  ASSERT(args.length() == 1);
+  Object* candidate = args[0];
+  bool is_template =
+      candidate->IsObjectTemplateInfo() || candidate->IsFunctionTemplateInfo();
+  return Heap::ToBoolean(is_template);
+}
+
+
+// Read a heap-object field out of a template struct at a given index.
+static Object* Runtime_GetTemplateField(Arguments args) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(HeapObject, templ, args[0]);
+  CONVERT_CHECKED(Smi, field, args[1]);
+  RUNTIME_ASSERT(templ->IsStruct());
+  return HeapObject::GetHeapObjectField(templ, field->value());
+}
+
+
+// Throw a "redeclaration" TypeError for the given declaration kind
+// ("var" or "const") and variable name.
+static Object* ThrowRedeclarationError(const char* type, Handle<String> name) {
+  HandleScope scope;
+  Handle<Object> type_handle = Factory::NewStringFromAscii(CStrVector(type));
+  Handle<Object> message_args[2] = { type_handle, name };
+  Handle<Object> error =
+      Factory::NewTypeError("redeclaration", HandleVector(message_args, 2));
+  return Top::Throw(*error);
+}
+
+
+// Declare a batch of global properties. args: 'pairs' (FixedArray of
+// alternating names and values), the current context, and an is_eval
+// flag (Smi 0/1). A value of undefined means a variable declaration,
+// the hole means a const declaration, and a boilerplate function means
+// a function declaration. Returns undefined, or throws a
+// redeclaration error on conflicting declarations.
+static Object* Runtime_DeclareGlobals(Arguments args) {
+  HandleScope scope;
+  Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global());
+
+  CONVERT_ARG_CHECKED(FixedArray, pairs, 0);
+  Handle<Context> context = args.at<Context>(1);
+  bool is_eval = Smi::cast(args[2])->value() == 1;
+
+  // Compute the property attributes. According to ECMA-262, section
+  // 13, page 71, the property must be read-only and
+  // non-deletable. However, neither SpiderMonkey nor KJS creates the
+  // property as read-only, so we don't either.
+  PropertyAttributes base = is_eval ? NONE : DONT_DELETE;
+
+  // Only optimize the object if we intend to add more than 5 properties.
+  OptimizedObjectForAddingMultipleProperties ba(global, pairs->length()/2 > 5);
+
+  // Traverse the name/value pairs and set the properties.
+  int length = pairs->length();
+  for (int i = 0; i < length; i += 2) {
+    HandleScope scope;
+    Handle<String> name(String::cast(pairs->get(i)));
+    Handle<Object> value(pairs->get(i + 1));
+
+    // A hole value marks a "const x = <expr>" declaration: the
+    // constant is declared here, but only assigned later when the
+    // assignment itself is evaluated.
+    bool is_const_property = value->IsTheHole();
+
+    if (value->IsUndefined() || is_const_property) {
+      // Lookup the property in the global object, and don't set the
+      // value of the variable if the property is already there.
+      LookupResult lookup;
+      global->Lookup(*name, &lookup);
+      if (lookup.IsProperty()) {
+        // Determine if the property is local by comparing the holder
+        // against the global object. The information will be used to
+        // avoid throwing re-declaration errors when declaring
+        // variables or constants that exist in the prototype chain.
+        bool is_local = (*global == lookup.holder());
+        // Get the property attributes and determine if the property is
+        // read-only.
+        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
+        bool is_read_only = (attributes & READ_ONLY) != 0;
+        if (lookup.type() == INTERCEPTOR) {
+          // If the interceptor says the property is there, we
+          // just return undefined without overwriting the property.
+          // Otherwise, we continue to setting the property.
+          if (attributes != ABSENT) {
+            // Check if the existing property conflicts with regards to const.
+            if (is_local && (is_read_only || is_const_property)) {
+              const char* type = (is_read_only) ? "const" : "var";
+              return ThrowRedeclarationError(type, name);
+            };
+            // The property already exists without conflicting: Go to
+            // the next declaration.
+            continue;
+          }
+          // Fall-through and introduce the absent property by using
+          // SetProperty.
+        } else {
+          if (is_local && (is_read_only || is_const_property)) {
+            const char* type = (is_read_only) ? "const" : "var";
+            return ThrowRedeclarationError(type, name);
+          }
+          // The property already exists without conflicting: Go to
+          // the next declaration.
+          continue;
+        }
+      }
+    } else {
+      // Copy the function and update its context. Use it as value.
+      Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
+      Handle<JSFunction> function =
+          Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      value = function;
+    }
+
+    LookupResult lookup;
+    global->LocalLookup(*name, &lookup);
+
+    // Const declarations are additionally marked read-only.
+    PropertyAttributes attributes = is_const_property
+        ? static_cast<PropertyAttributes>(base | READ_ONLY)
+        : base;
+
+    if (lookup.IsProperty()) {
+      // There's a local property that we need to overwrite because
+      // we're either declaring a function or there's an interceptor
+      // that claims the property is absent.
+
+      // Check for conflicting re-declarations. We cannot have
+      // conflicting types in case of intercepted properties because
+      // they are absent.
+      if (lookup.type() != INTERCEPTOR &&
+          (lookup.IsReadOnly() || is_const_property)) {
+        const char* type = (lookup.IsReadOnly()) ? "const" : "var";
+        return ThrowRedeclarationError(type, name);
+      }
+      SetProperty(global, name, value, attributes);
+    } else {
+      // If a property with this name does not already exist on the
+      // global object add the property locally. We take special
+      // precautions to always add it as a local property even in case
+      // of callbacks in the prototype chain (this rules out using
+      // SetProperty). Also, we must use the handle-based version to
+      // avoid GC issues.
+      AddProperty(global, name, value, attributes);
+    }
+  }
+  // Done.
+  return Heap::undefined_value();
+}
+
+
+// Declare a variable or constant in a function context (or its
+// extension object). args: the TOS result, the context, the name, the
+// property attributes (READ_ONLY for const, NONE for var), and the
+// initial value (NULL when there is nothing to initialize). Returns
+// args[0] so the caller's top-of-stack is preserved, or throws a
+// redeclaration error.
+static Object* Runtime_DeclareContextSlot(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 5);
+
+  // args[0] is result (TOS)
+  CONVERT_ARG_CHECKED(Context, context, 1);
+  Handle<String> name(String::cast(args[2]));
+  PropertyAttributes mode =
+      static_cast<PropertyAttributes>(Smi::cast(args[3])->value());
+  ASSERT(mode == READ_ONLY || mode == NONE);
+  Handle<Object> initial_value(args[4]);
+
+  // Declarations are always done in the function context.
+  context = Handle<Context>(context->fcontext());
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
+  Handle<Object> context_obj =
+      context->Lookup(name, flags, &index, &attributes);
+
+  if (attributes != ABSENT) {
+    // The name was declared before; check for conflicting
+    // re-declarations: This is similar to the code in parser.cc in
+    // the AstBuildingParser::Declare function.
+    if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
+      // Functions are not read-only.
+      ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
+      const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var";
+      return ThrowRedeclarationError(type, name);
+    }
+
+    // Initialize it if necessary.
+    if (*initial_value != NULL) {
+      if (index >= 0) {
+        // The variable or constant context slot should always be in
+        // the function context; not in any outer context nor in the
+        // arguments object.
+        ASSERT(context_obj.is_identical_to(context));
+        // Only overwrite a non-const slot or an uninitialized (hole)
+        // const slot.
+        if (((attributes & READ_ONLY) == 0) ||
+            context->get(index)->IsTheHole()) {
+          context->set(index, *initial_value);
+        }
+      } else {
+        // Slow case: The property is not in the FixedArray part of the context.
+        Handle<JSObject> context_ext = Handle<JSObject>::cast(context_obj);
+        SetProperty(context_ext, name, initial_value, mode);
+      }
+    }
+    return args[0];  // return TOS
+  }
+
+  // The property is not in the function context. It needs to be "declared"
+  // in the function context's extension context, or in the global context.
+  Handle<JSObject> context_ext;
+  if (context->extension() != NULL) {
+    // The function context's extension context exists - use it.
+    context_ext = Handle<JSObject>(context->extension());
+  } else {
+    // The function context's extension context does not exist - allocate it.
+    context_ext = Factory::NewJSObject(Top::context_extension_function());
+    // And store it in the extension slot.
+    context->set_extension(*context_ext);
+  }
+  ASSERT(*context_ext != NULL);
+
+  // Declare the property by setting it to the initial value if provided,
+  // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
+  // constant declarations).
+  ASSERT(!context_ext->HasLocalProperty(*name));
+  Handle<Object> value(Heap::undefined_value());
+  if (*initial_value != NULL) value = initial_value;
+  SetProperty(context_ext, name, value, mode);
+  ASSERT(context_ext->GetLocalPropertyAttribute(*name) == mode);
+  return args[0];  // return TOS
+}
+
+
+// Declare (and optionally assign) a global "var". args: the name and,
+// optionally, the value to assign. Returns undefined or the result of
+// the property store; throws a redeclaration error when the name
+// clashes with an existing read-only property.
+static Object* Runtime_InitializeVarGlobal(Arguments args) {
+  NoHandleAllocation nha;
+
+  // Determine if we need to assign to the variable if it already
+  // exists (based on the number of arguments).
+  RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+  bool assign = args.length() == 2;
+
+  CONVERT_ARG_CHECKED(String, name, 0);
+  GlobalObject* global = Top::context()->global();
+
+  // According to ECMA-262, section 12.2, page 62, the property must
+  // not be deletable.
+  PropertyAttributes attributes = DONT_DELETE;
+
+  // Lookup the property locally in the global object. If it isn't
+  // there, we add the property and take special precautions to always
+  // add it as a local property even in case of callbacks in the
+  // prototype chain (this rules out using SetProperty).
+  LookupResult lookup;
+  global->LocalLookup(*name, &lookup);
+  if (!lookup.IsProperty()) {
+    Object* value = (assign) ? args[1] : Heap::undefined_value();
+    return global->AddProperty(*name, value, attributes);
+  }
+
+  // Determine if this is a redeclaration of something read-only.
+  if (lookup.IsReadOnly()) {
+    return ThrowRedeclarationError("const", name);
+  }
+
+  // Determine if this is a redeclaration of an intercepted read-only
+  // property and figure out if the property exists at all.
+  bool found = true;
+  PropertyType type = lookup.type();
+  if (type == INTERCEPTOR) {
+    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
+    if (intercepted == ABSENT) {
+      // The interceptor claims the property isn't there. We need to
+      // make sure to introduce it.
+      found = false;
+    } else if ((intercepted & READ_ONLY) != 0) {
+      // The property is present, but read-only. Since we're trying to
+      // overwrite it with a variable declaration we must throw a
+      // re-declaration error.
+      return ThrowRedeclarationError("const", name);
+    }
+    // Restore global object from context (in case of GC).
+    global = Top::context()->global();
+  }
+
+  if (found && !assign) {
+    // The global property is there and we're not assigning any value
+    // to it. Just return.
+    return Heap::undefined_value();
+  }
+
+  // Assign the value (or undefined) to the property.
+  Object* value = (assign) ? args[1] : Heap::undefined_value();
+  return global->SetProperty(&lookup, *name, value, attributes);
+}
+
+
+// Declare and initialize a global "const". args: the name and initial
+// value. Only an uninitialized (hole-valued) constant is assigned;
+// re-initialization is silently ignored. Throws a redeclaration error
+// when the name clashes with an existing writable property.
+static Object* Runtime_InitializeConstGlobal(Arguments args) {
+  // All constants are declared with an initial value. The name
+  // of the constant is the first argument and the initial value
+  // is the second.
+  RUNTIME_ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(String, name, 0);
+  Handle<Object> value = args.at<Object>(1);
+
+  // Get the current global object from top.
+  GlobalObject* global = Top::context()->global();
+
+  // According to ECMA-262, section 12.2, page 62, the property must
+  // not be deletable. Since it's a const, it must be READ_ONLY too.
+  PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+
+  // Lookup the property locally in the global object. If it isn't
+  // there, we add the property and take special precautions to always
+  // add it as a local property even in case of callbacks in the
+  // prototype chain (this rules out using SetProperty).
+  LookupResult lookup;
+  global->LocalLookup(*name, &lookup);
+  if (!lookup.IsProperty()) {
+    return global->AddProperty(*name, *value, attributes);
+  }
+
+  // Determine if this is a redeclaration of something not
+  // read-only. In case the result is hidden behind an interceptor we
+  // need to ask it for the property attributes.
+  if (!lookup.IsReadOnly()) {
+    if (lookup.type() != INTERCEPTOR) {
+      return ThrowRedeclarationError("var", name);
+    }
+
+    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
+
+    // Throw re-declaration error if the intercepted property is present
+    // but not read-only.
+    if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+      return ThrowRedeclarationError("var", name);
+    }
+
+    // Restore global object from context (in case of GC) and continue
+    // with setting the value because the property is either absent or
+    // read-only. We also have to redo the lookup.
+    global = Top::context()->global();
+
+    // BUG 1213579: Handle the case where we have to set a read-only
+    // property through an interceptor and only do it if it's
+    // uninitialized, e.g. the hole. Nirk...
+    global->SetProperty(*name, *value, attributes);
+    return *value;
+  }
+
+  // Set the value, but only if we're assigning the initial value to a
+  // constant. For now, we determine this by checking if the
+  // current value is the hole.
+  PropertyType type = lookup.type();
+  if (type == FIELD) {
+    FixedArray* properties = global->properties();
+    int index = lookup.GetFieldIndex();
+    if (properties->get(index)->IsTheHole()) {
+      properties->set(index, *value);
+    }
+  } else if (type == NORMAL) {
+    Dictionary* dictionary = global->property_dictionary();
+    int entry = lookup.GetDictionaryEntry();
+    if (dictionary->ValueAt(entry)->IsTheHole()) {
+      dictionary->ValueAtPut(entry, *value);
+    }
+  } else {
+    // Ignore re-initialization of constants that have already been
+    // assigned a function value.
+    ASSERT(lookup.IsReadOnly() && type == CONSTANT_FUNCTION);
+  }
+
+  // Use the set value as the result of the operation.
+  return *value;
+}
+
+
+// Assign the initial value to a previously declared const context
+// slot, but only if it is still uninitialized (the hole). args: the
+// value, the context, and the name. Returns the value.
+static Object* Runtime_InitializeConstContextSlot(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+
+  Handle<Object> value(args[0]);
+  ASSERT(!value->IsTheHole());
+  CONVERT_ARG_CHECKED(Context, context, 1);
+  Handle<String> name(String::cast(args[2]));
+
+  // Initializations are always done in the function context.
+  context = Handle<Context>(context->fcontext());
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
+  Handle<Object> context_obj =
+      context->Lookup(name, flags, &index, &attributes);
+
+  // The property should always be present. It is always declared
+  // before being initialized through DeclareContextSlot.
+  ASSERT(attributes != ABSENT && (attributes & READ_ONLY) != 0);
+
+  // If the slot is in the context, we set it but only if it hasn't
+  // been set before.
+  if (index >= 0) {
+    // The constant context slot should always be in the function
+    // context; not in any outer context nor in the arguments object.
+    ASSERT(context_obj.is_identical_to(context));
+    if (context->get(index)->IsTheHole()) {
+      context->set(index, *value);
+    }
+    return *value;
+  }
+
+  // Otherwise, the slot must be in a JS object extension.
+  Handle<JSObject> context_ext(JSObject::cast(*context_obj));
+
+  // We must initialize the value only if it wasn't initialized
+  // before, e.g. for const declarations in a loop. The property has
+  // the hole value if it wasn't initialized yet. NOTE: We cannot use
+  // GetProperty() to get the current value as it 'unholes' the value.
+  LookupResult lookup;
+  context_ext->LocalLookupRealNamedProperty(*name, &lookup);
+  ASSERT(lookup.IsProperty());  // the property was declared
+  ASSERT(lookup.IsReadOnly());  // and it was declared as read-only
+
+  PropertyType type = lookup.type();
+  if (type == FIELD) {
+    FixedArray* properties = context_ext->properties();
+    // Note: shadows the (negative) context slot index above.
+    int index = lookup.GetFieldIndex();
+    if (properties->get(index)->IsTheHole()) {
+      properties->set(index, *value);
+    }
+  } else if (type == NORMAL) {
+    Dictionary* dictionary = context_ext->property_dictionary();
+    int entry = lookup.GetDictionaryEntry();
+    if (dictionary->ValueAt(entry)->IsTheHole()) {
+      dictionary->ValueAtPut(entry, *value);
+    }
+  } else {
+    // We should not reach here. Any real, named property should be
+    // either a field or a dictionary slot.
+    UNREACHABLE();
+  }
+  return *value;
+}
+
+
+// Run a regexp match of 'subject' starting at 'index' via JSCRE.
+static Object* Runtime_RegExpExec(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_CHECKED(JSValue, raw_regexp, args[0]);
+  CONVERT_CHECKED(String, raw_subject, args[1]);
+  Handle<JSValue> regexp(raw_regexp);
+  Handle<String> subject(raw_subject);
+  Handle<Object> index(args[2]);
+  ASSERT(index->IsNumber());
+  return *RegExpImpl::JsreExec(regexp, subject, index);
+}
+
+
+// Run a global regexp match over the whole subject string via JSCRE.
+static Object* Runtime_RegExpExecGlobal(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSValue, raw_regexp, args[0]);
+  CONVERT_CHECKED(String, raw_subject, args[1]);
+  Handle<JSValue> regexp(raw_regexp);
+  Handle<String> subject(raw_subject);
+  return *RegExpImpl::JsreExecGlobal(regexp, subject);
+}
+
+
+// Materialize a regexp literal: build the RegExp object from its
+// pattern and flags, cache it in the literals array at 'index', and
+// return it. Exceptions raised during regexp creation propagate.
+static Object* Runtime_MaterializeRegExpLiteral(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 4);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  int index = Smi::cast(args[1])->value();
+  Handle<String> pattern = args.at<String>(2);
+  Handle<String> flags = args.at<String>(3);
+
+  // Compute the regular expression literal.
+  bool has_pending_exception;
+  Handle<Object> regexp =
+      RegExpImpl::CreateRegExpLiteral(pattern, flags, &has_pending_exception);
+  if (has_pending_exception) {
+    ASSERT(Top::has_pending_exception());
+    return Failure::Exception();
+  }
+  literals->set(index, *regexp);
+  return *regexp;
+}
+
+
+// Return the name stored in the function's shared function info.
+static Object* Runtime_FunctionGetName(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  SharedFunctionInfo* shared = f->shared();
+  return shared->name();
+}
+
+
+// Return the script wrapper for the script a function came from, or
+// undefined when the function has no associated script.
+static Object* Runtime_FunctionGetScript(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  Handle<Object> script(fun->shared()->script());
+  if (script->IsScript()) {
+    return *GetScriptWrapper(Handle<Script>::cast(script));
+  }
+  return Heap::undefined_value();
+}
+
+
+// Return the source code of the given function.
+static Object* Runtime_FunctionGetSourceCode(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  SharedFunctionInfo* shared = f->shared();
+  return shared->GetSourceCode();
+}
+
+
+// Return the function's start position within its script source as a Smi.
+static Object* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  return Smi::FromInt(fun->shared()->start_position());
+}
+
+
+// Set the class name reported for instances of the given constructor;
+// returns undefined.
+static Object* Runtime_FunctionSetInstanceClassName(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  fun->SetInstanceClassName(name);
+  return Heap::undefined_value();
+}
+
+
+// Store a new 'length' in the function's shared info; returns the
+// length Smi that was passed in.
+static Object* Runtime_FunctionSetLength(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(Smi, length, args[1]);
+  SharedFunctionInfo* shared = fun->shared();
+  shared->set_length(length->value());
+  return length;
+}
+
+
+// Set the 'prototype' property of the given function.
+static Object* Runtime_FunctionSetPrototype(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  Accessors::FunctionSetPrototype(fun, args[1], NULL);
+  // Leave the function itself as the result (top of stack).
+  return args[0];
+}
+
+
+// Replace the code, length and formal parameter count of 'target'
+// with those of the JSFunction passed as second argument, and switch
+// the target to that function's context. A null second argument only
+// resets the target's context. Returns the target function.
+static Object* Runtime_SetCode(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSFunction, raw_target, args[0]);
+  Handle<JSFunction> target(raw_target);
+  Handle<Object> code = args.at<Object>(1);
+
+  Handle<Context> context(target->context());
+
+  if (!code->IsNull()) {
+    RUNTIME_ASSERT(code->IsJSFunction());
+    Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
+    SetExpectedNofProperties(target, fun->shared()->expected_nof_properties());
+    // The donor function must be compiled before its code can be copied.
+    if (!fun->is_compiled() && !CompileLazy(fun, KEEP_EXCEPTION)) {
+      return Failure::Exception();
+    }
+    // Set the code, formal parameter count, and the length of the target
+    // function.
+    target->set_code(fun->code());
+    target->shared()->set_length(fun->shared()->length());
+    target->shared()->set_formal_parameter_count(
+        fun->shared()->formal_parameter_count());
+    context = Handle<Context>(fun->context());
+
+    // Make sure we get a fresh copy of the literal vector to avoid
+    // cross context contamination.
+    int number_of_literals = fun->literals()->length();
+    if (number_of_literals > 0) {
+      Handle<FixedArray> literals =
+          Factory::NewFixedArray(number_of_literals, TENURED);
+      target->set_literals(*literals);
+    }
+  }
+
+  target->set_context(*context);
+  return *target;
+}
+
+
+// Return the character code of subject[index] as a Smi, or NaN when
+// the index is not a valid array index or is out of range.
+static Object* CharCodeAt(String* subject, Object* index) {
+  uint32_t i = 0;
+  if (!Array::IndexFromObject(index, &i)) return Heap::nan_value();
+  // Flatten first: an indexed access into a cons string usually means
+  // more indexed accesses will follow.
+  subject->TryFlatten();
+  uint32_t length = static_cast<uint32_t>(subject->length());
+  if (i >= length) return Heap::nan_value();
+  return Smi::FromInt(subject->Get(i));
+}
+
+
+// %StringCharCodeAt(subject, index): character code as Smi, or NaN.
+static Object* Runtime_StringCharCodeAt(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(String, subject, args[0]);
+  return CharCodeAt(subject, args[1]);
+}
+
+
+// Build a one-character string from a character code. Codes outside
+// the 16-bit range and non-index arguments yield the empty string.
+static Object* Runtime_CharFromCode(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  uint32_t code;
+  if (!Array::IndexFromObject(args[0], &code) || code > 0xffff) {
+    return Heap::empty_string();
+  }
+  return Heap::LookupSingleCharacterStringFromCode(code);
+}
+
+
+// Compute the Knuth-Morris-Pratt failure ("next") table for the
+// pattern: next_table[i] is the pattern position to resume matching
+// at after a mismatch at position i, or -1 to advance the subject
+// instead. next_table must have room for pattern->length() entries;
+// the only caller in this file uses it for patterns of length >= 2.
+static inline void ComputeKMPNextTable(String* pattern, int next_table[]) {
+  int i = 0;
+  int j = -1;
+  next_table[0] = -1;
+
+  // Uses the shared, non-reentrant buffer declared at the top of this file.
+  Access<StringInputBuffer> buffer(&string_input_buffer);
+  buffer->Reset(pattern);
+  int length = pattern->length();
+  uint16_t p = buffer->GetNext();
+  while (i < length - 1) {
+    // Follow failure links until a prefix border matching p is found.
+    while (j > -1 && p != pattern->Get(j)) {
+      j = next_table[j];
+    }
+    i++;
+    j++;
+    p = buffer->GetNext();
+    if (p == pattern->Get(j)) {
+      // Identical characters fail identically; share the failure entry.
+      next_table[i] = next_table[j];
+    } else {
+      next_table[i] = j;
+    }
+  }
+}
+
+
+// %StringIndexOf(sub, pat, index): index of the first occurrence of
+// 'pat' in 'sub' at or after 'index', or -1. Single-character
+// patterns use a plain linear scan; longer patterns use
+// Knuth-Morris-Pratt.
+static Object* Runtime_StringIndexOf(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(String, sub, args[0]);
+  CONVERT_CHECKED(String, pat, args[1]);
+  Object* index = args[2];
+
+  int subject_length = sub->length();
+  int pattern_length = pat->length();
+
+  // Flatten both strings so Get() below is cheap.
+  sub->TryFlatten();
+  pat->TryFlatten();
+
+  uint32_t start_index;
+  if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+  // The empty pattern matches immediately at the start index.
+  if (pattern_length == 0) return Smi::FromInt(start_index);
+
+  // Searching for one specific character is common. For one
+  // character patterns the KMP algorithm is guaranteed to slow down
+  // the search, so we just run through the subject string.
+  if (pattern_length == 1) {
+    uint16_t pattern_char = pat->Get(0);
+    for (int i = start_index; i < subject_length; i++) {
+      if (sub->Get(i) == pattern_char) {
+        return Smi::FromInt(i);
+      }
+    }
+    return Smi::FromInt(-1);
+  }
+
+  // For patterns with a length larger than one character we use the KMP
+  // algorithm.
+  //
+  // Compute the 'next' table.
+  int* next_table = NewArray<int>(pattern_length);
+  ComputeKMPNextTable(pat, next_table);
+  // Search using the 'next' table.
+  int pattern_index = 0;
+  // We would like to use StringInputBuffer here, but it does not have
+  // the ability to start anywhere but the first character of a
+  // string. It would be nice to have efficient forward-seeking
+  // support on StringInputBuffers.
+  int subject_index = start_index;
+  while (subject_index < subject_length) {
+    uint16_t subject_char = sub->Get(subject_index);
+    // On mismatch, fall back along the failure links instead of
+    // rescanning the subject.
+    while (pattern_index > -1 && pat->Get(pattern_index) != subject_char) {
+      pattern_index = next_table[pattern_index];
+    }
+    pattern_index++;
+    subject_index++;
+    if (pattern_index >= pattern_length) {
+      DeleteArray(next_table);
+      return Smi::FromInt(subject_index - pattern_index);
+    }
+  }
+  DeleteArray(next_table);
+  return Smi::FromInt(-1);
+}
+
+
+// %StringLastIndexOf(sub, pat, index): index of the last occurrence
+// of 'pat' in 'sub' starting at or before 'index', or -1 when there
+// is none. Non-index 'index' arguments yield -1.
+static Object* Runtime_StringLastIndexOf(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(String, sub, args[0]);
+  CONVERT_CHECKED(String, pat, args[1]);
+  Object* index = args[2];
+
+  // Flatten both strings so Get() below is cheap.
+  sub->TryFlatten();
+  pat->TryFlatten();
+
+  uint32_t start_index;
+  if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+
+  uint32_t pattern_length = pat->length();
+  uint32_t sub_length = sub->length();
+
+  // A pattern longer than the subject cannot occur. Checking this
+  // first also keeps the unsigned subtraction below from wrapping
+  // around, which previously could turn start_index into a huge value
+  // and read past the end of the subject.
+  if (pattern_length > sub_length) return Smi::FromInt(-1);
+
+  // Clamp the start index so the pattern fits within the subject.
+  // (Written as a comparison against the difference to avoid overflow
+  // in start_index + pattern_length.)
+  if (start_index > sub_length - pattern_length)
+    start_index = sub_length - pattern_length;
+
+  // Scan backwards for a complete match of the pattern.
+  for (int i = start_index; i >= 0; i--) {
+    bool found = true;
+    for (uint32_t j = 0; j < pattern_length; j++) {
+      if (sub->Get(i + j) != pat->Get(j)) {
+        found = false;
+        break;
+      }
+    }
+    if (found) return Smi::FromInt(i);
+  }
+
+  return Smi::FromInt(-1);
+}
+
+
+// %StringLocaleCompare(str1, str2): negative/zero/positive ordering
+// value. NOTE(review): despite the name this compares raw character
+// codes only; no locale data is consulted here.
+static Object* Runtime_StringLocaleCompare(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(String, str1, args[0]);
+  CONVERT_CHECKED(String, str2, args[1]);
+
+  if (str1 == str2) return Smi::FromInt(0);  // Equal.
+  int str1_length = str1->length();
+  int str2_length = str2->length();
+
+  // Decide trivial cases without flattening.
+  if (str1_length == 0) {
+    if (str2_length == 0) return Smi::FromInt(0);  // Equal.
+    return Smi::FromInt(-str2_length);
+  } else {
+    if (str2_length == 0) return Smi::FromInt(str1_length);
+  }
+
+  int end = str1_length < str2_length ? str1_length : str2_length;
+
+  // No need to flatten if we are going to find the answer on the first
+  // character. At this point we know there is at least one character
+  // in each string, due to the trivial case handling above.
+  int d = str1->Get(0) - str2->Get(0);
+  if (d != 0) return Smi::FromInt(d);
+
+  str1->TryFlatten();
+  str2->TryFlatten();
+
+  // Non-reentrant: these buffers are shared across all invocations.
+  static StringInputBuffer buf1;
+  static StringInputBuffer buf2;
+
+  buf1.Reset(str1);
+  buf2.Reset(str2);
+
+  // Compare character by character; the first difference decides.
+  for (int i = 0; i < end; i++) {
+    uint16_t char1 = buf1.GetNext();
+    uint16_t char2 = buf2.GetNext();
+    if (char1 != char2) return Smi::FromInt(char1 - char2);
+  }
+
+  // One string is a prefix of the other; the shorter one sorts first.
+  return Smi::FromInt(str1_length - str2_length);
+}
+
+
+// %StringSlice(value, from, to): return the substring [from, to).
+// The bounds must already satisfy 0 <= from <= to <= length; anything
+// else fails the runtime asserts below.
+static Object* Runtime_StringSlice(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(String, value, args[0]);
+  CONVERT_DOUBLE_CHECKED(from_number, args[1]);
+  CONVERT_DOUBLE_CHECKED(to_number, args[2]);
+
+  int start = FastD2I(from_number);
+  int end = FastD2I(to_number);
+
+  RUNTIME_ASSERT(end >= start);
+  RUNTIME_ASSERT(start >= 0);
+  RUNTIME_ASSERT(end <= value->length());
+  return value->Slice(start, end);
+}
+
+
+// %NumberToRadixString(value, radix): format a number in the given
+// radix (2..36). NaN and the infinities map to their literal string
+// forms.
+static Object* Runtime_NumberToRadixString(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(radix_number, args[1]);
+  int radix = FastD2I(radix_number);
+  RUNTIME_ASSERT(2 <= radix && radix <= 36);
+  // DoubleToRadixCString allocates; free the C string after copying
+  // it into a heap string.
+  char* str = DoubleToRadixCString(value, radix);
+  Object* result = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return result;
+}
+
+
+// Number.prototype.toFixed(f) support: formats args[0] with f digits
+// after the decimal point.  NaN/Infinity short-circuit to their
+// canonical string forms.
+static Object* Runtime_NumberToFixed(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  int f = FastD2I(f_number);
+  RUNTIME_ASSERT(f >= 0);
+  char* str = DoubleToFixedCString(value, f);
+  Object* res = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return res;
+}
+
+
+// Number.prototype.toExponential(f) support: formats args[0] in
+// exponential notation with f fractional digits; f == -1 requests
+// as many digits as needed to represent the value uniquely.
+static Object* Runtime_NumberToExponential(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  int f = FastD2I(f_number);
+  RUNTIME_ASSERT(f >= -1 && f <= 20);
+  char* str = DoubleToExponentialCString(value, f);
+  Object* res = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return res;
+}
+
+
+// Number.prototype.toPrecision(f) support: formats args[0] with f
+// significant digits (1..21).  NaN/Infinity short-circuit as usual.
+static Object* Runtime_NumberToPrecision(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  int f = FastD2I(f_number);
+  RUNTIME_ASSERT(f >= 1 && f <= 21);
+  char* str = DoubleToPrecisionCString(value, f);
+  Object* res = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return res;
+}
+
+
+// Returns a single character string where first character equals
+// string->Get(index).
+// Out-of-range indices are delegated to Execution::CharAt, which may
+// call back into JavaScript (and can return undefined).
+static Object* GetCharAt(String* string, uint32_t index) {
+  if (index < static_cast<uint32_t>(string->length())) {
+    // Flattening makes the subsequent Get a direct character load.
+    string->TryFlatten();
+    return Heap::LookupSingleCharacterStringFromCode(string->Get(index));
+  }
+  return *Execution::CharAt(Handle<String>(string), index);
+}
+
+
+// Indexed load that also supports [] indexing on string values and
+// String wrapper objects; primitives fall back to their prototype's
+// elements.
+Object* Runtime::GetElementOrCharAt(Handle<Object> object, uint32_t index) {
+  // Handle [] indexing on Strings
+  if (object->IsString()) {
+    Object* result = GetCharAt(String::cast(*object), index);
+    // Undefined means "not a character of this string"; keep looking.
+    if (!result->IsUndefined()) return result;
+  }
+
+  // Handle [] indexing on String objects
+  if (object->IsStringObjectWithCharacterAt(index)) {
+    JSValue* js_value = JSValue::cast(*object);
+    Object* result = GetCharAt(String::cast(js_value->value()), index);
+    if (!result->IsUndefined()) return result;
+  }
+
+  // Primitives have no own elements; consult the prototype chain.
+  if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
+    Object* prototype = object->GetPrototype();
+    return prototype->GetElement(index);
+  }
+
+  return object->GetElement(index);
+}
+
+
+// Generic property load: throws a TypeError for undefined/null
+// receivers, treats numeric keys (or numeric-looking string keys) as
+// element accesses, and otherwise converts the key to a string name.
+Object* Runtime::GetObjectProperty(Handle<Object> object, Object* key) {
+  if (object->IsUndefined() || object->IsNull()) {
+    HandleScope scope;
+    Handle<Object> key_handle(key);
+    Handle<Object> args[2] = { key_handle, object };
+    Handle<Object> error =
+        Factory::NewTypeError("non_object_property_load",
+                              HandleVector(args, 2));
+    return Top::Throw(*error);
+  }
+
+  // Check if the given key is an array index.
+  uint32_t index;
+  if (Array::IndexFromObject(key, &index)) {
+    HandleScope scope;
+    return GetElementOrCharAt(object, index);
+  }
+
+  // Convert the key to a string - possibly by calling back into JavaScript.
+  String* name;
+  if (key->IsString()) {
+    name = String::cast(key);
+  } else {
+    HandleScope scope;
+    bool has_pending_exception = false;
+    Handle<Object> converted =
+        Execution::ToString(Handle<Object>(key), &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+    name = String::cast(*converted);
+  }
+
+  // Check if the name is trivially convertable to an index and get
+  // the element if so.
+  if (name->AsArrayIndex(&index)) {
+    HandleScope scope;
+    return GetElementOrCharAt(object, index);
+  } else {
+    PropertyAttributes attr;
+    return object->GetProperty(name, &attr);
+  }
+}
+
+
+// Thin runtime entry point wrapping Runtime::GetObjectProperty:
+// args[0] is the receiver, args[1] the key.
+static Object* Runtime_GetProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  Handle<Object> object = args.at<Object>(0);
+  Object* key = args[1];
+
+  return Runtime::GetObjectProperty(object, key);
+}
+
+
+// Generic property store.  Throws for undefined/null receivers,
+// silently ignores stores to non-JSObject receivers, routes numeric
+// keys to SetElement, and converts other keys to string names
+// (possibly calling back into JavaScript).  Returns *value on
+// success so the caller can use the store as an expression.
+Object* Runtime::SetObjectProperty(Handle<Object> object,
+                                   Handle<Object> key,
+                                   Handle<Object> value,
+                                   PropertyAttributes attr) {
+  if (object->IsUndefined() || object->IsNull()) {
+    HandleScope scope;
+    Handle<Object> obj(object);
+    Handle<Object> args[2] = { key, obj };
+    Handle<Object> error =
+        Factory::NewTypeError("non_object_property_store",
+                              HandleVector(args, 2));
+    return Top::Throw(*error);
+  }
+
+  // If the object isn't a JavaScript object, we ignore the store.
+  if (!object->IsJSObject()) return *value;
+
+  // Check if the given key is an array index.
+  uint32_t index;
+  if (Array::IndexFromObject(*key, &index)) {
+    // Attributes are only supported for named properties.
+    ASSERT(attr == NONE);
+
+    // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
+    // of a string using [] notation. We need to support this too in
+    // JavaScript.
+    // In the case of a String object we just need to redirect the assignment to
+    // the underlying string if the index is in range. Since the underlying
+    // string does nothing with the assignment then we can ignore such
+    // assignments.
+    if (object->IsStringObjectWithCharacterAt(index))
+      return *value;
+
+    Object* result = JSObject::cast(*object)->SetElement(index, *value);
+    if (result->IsFailure()) return result;
+    return *value;
+  }
+
+  if (key->IsString()) {
+    Object* result;
+    if (String::cast(*key)->AsArrayIndex(&index)) {
+      ASSERT(attr == NONE);
+      result = JSObject::cast(*object)->SetElement(index, *value);
+    } else {
+      // Flatten the key so later lookups on it are cheap.
+      String::cast(*key)->TryFlatten();
+      result =
+          JSObject::cast(*object)->SetProperty(String::cast(*key), *value,
+                                               attr);
+    }
+    if (result->IsFailure()) return result;
+    return *value;
+  }
+
+  // Handlify object and value before calling into JavaScript again.
+  Handle<JSObject> object_handle = Handle<JSObject>::cast(object);
+  Handle<Object> value_handle = value;
+
+  // Call-back into JavaScript to convert the key to a string.
+  HandleScope scope;
+  bool has_pending_exception = false;
+  Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+  if (has_pending_exception) return Failure::Exception();
+  Handle<String> name = Handle<String>::cast(converted);
+
+  if (name->AsArrayIndex(&index)) {
+    ASSERT(attr == NONE);
+    return object_handle->SetElement(index, *value_handle);
+  } else {
+    return object_handle->SetProperty(*name, *value_handle, attr);
+  }
+}
+
+
+// Adds a brand-new named property (args[1]) with value args[2] and
+// attribute bits args[3] to args[0].  The property must not already
+// exist locally.
+static Object* Runtime_AddProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 4);
+
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  RUNTIME_ASSERT(!object->HasLocalProperty(name));
+  CONVERT_CHECKED(Smi, attr_obj, args[3]);
+
+  int attr = attr_obj->value();
+  // Only the three attribute bits may be set.
+  RUNTIME_ASSERT((attr & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+  PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
+
+  return object->AddProperty(name, args[2], attributes);
+}
+
+
+// Stores args[2] at key args[1] on args[0]; an optional fourth
+// argument carries property attribute bits (defaults to NONE).
+static Object* Runtime_SetProperty(Arguments args) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
+
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> value = args.at<Object>(2);
+
+  // Compute attributes.
+  PropertyAttributes attributes = NONE;
+  if (args.length() == 4) {
+    CONVERT_CHECKED(Smi, value_obj, args[3]);
+    int value = value_obj->value();
+    // Only attribute bits should be set.
+    ASSERT((value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+    attributes = static_cast<PropertyAttributes>(value);
+  }
+  return Runtime::SetObjectProperty(object, key, value, attributes);
+}
+
+
+// Set a local property, even if it is READ_ONLY. If the property does not
+// exist, it will be added with attributes NONE.
+static Object* Runtime_IgnoreAttributesAndSetProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+
+  return object->IgnoreAttributesAndSetLocalProperty(name, args[2]);
+}
+
+
+// Deletes the named property args[1] from args[0]; the result of
+// JSObject::DeleteProperty is returned unchanged to the caller.
+static Object* Runtime_DeleteProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
+  return object->DeleteProperty(key);
+}
+
+
+// Object.prototype.hasOwnProperty support: true when args[0] has the
+// named own property args[1].  String values also report true for
+// in-range character indices.
+static Object* Runtime_HasLocalProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(String, key, args[1]);
+
+  // Only JS objects can have properties.
+  if (args[0]->IsJSObject()) {
+    JSObject* object = JSObject::cast(args[0]);
+    if (object->HasLocalProperty(key)) return Heap::true_value();
+  } else if (args[0]->IsString()) {
+    // Well, there is one exception: Handle [] on strings.
+    uint32_t index;
+    if (key->AsArrayIndex(&index)) {
+      String* string = String::cast(args[0]);
+      if (index < static_cast<uint32_t>(string->length()))
+        return Heap::true_value();
+    }
+  }
+  return Heap::false_value();
+}
+
+
+// 'in' operator support for named keys: true when args[0] is a JS
+// object with property args[1] anywhere on its prototype chain.
+static Object* Runtime_HasProperty(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+
+  // Only JS objects can have properties.
+  if (args[0]->IsJSObject()) {
+    JSObject* object = JSObject::cast(args[0]);
+    CONVERT_CHECKED(String, key, args[1]);
+    if (object->HasProperty(key)) return Heap::true_value();
+  }
+  return Heap::false_value();
+}
+
+
+// 'in' operator support for Smi keys: true when args[0] is a JS
+// object that has an element at index args[1].
+static Object* Runtime_HasElement(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+
+  // Only JS objects can have elements.
+  if (args[0]->IsJSObject()) {
+    JSObject* object = JSObject::cast(args[0]);
+    CONVERT_CHECKED(Smi, index_obj, args[1]);
+    // NOTE(review): a negative Smi wraps to a huge uint32 here;
+    // presumably callers only pass non-negative indices - confirm.
+    uint32_t index = index_obj->value();
+    if (object->HasElement(index)) return Heap::true_value();
+  }
+  return Heap::false_value();
+}
+
+
+// Object.prototype.propertyIsEnumerable support: numeric-looking keys
+// are treated as element queries; named keys check the local property
+// and its DONT_ENUM bit.
+static Object* Runtime_IsPropertyEnumerable(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
+
+  uint32_t index;
+  if (key->AsArrayIndex(&index)) {
+    return Heap::ToBoolean(object->HasElement(index));
+  }
+
+  LookupResult result;
+  object->LocalLookup(key, &result);
+  if (!result.IsProperty()) return Heap::false_value();
+  return Heap::ToBoolean(!result.IsDontEnum());
+}
+
+
+// Returns the enumerable property names of args[0] (for-in support);
+// delegates the actual collection to GetKeysFor.
+static Object* Runtime_GetPropertyNames(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+  Handle<JSObject> object(raw_object);
+  return *GetKeysFor(object);
+}
+
+
+// Returns either a FixedArray as Runtime_GetPropertyNames,
+// or, if the given object has an enum cache that contains
+// all enumerable properties of the object and its prototypes
+// have none, the map of the object. This is used to speed up
+// the check for deletions during a for-in.
+static Object* Runtime_GetPropertyNamesFast(Arguments args) {
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+
+  if (raw_object->IsSimpleEnum()) return raw_object->map();
+
+  HandleScope scope;
+  Handle<JSObject> object(raw_object);
+  Handle<FixedArray> content = GetKeysInFixedArrayFor(object);
+
+  // Test again, since cache may have been built by preceding call.
+  if (object->IsSimpleEnum()) return object->map();
+
+  return *content;
+}
+
+
+// Property load on the 'arguments' object of the current activation:
+// indexed keys read the actual frame parameters, 'length' and
+// 'callee' are synthesized, and anything else falls through to the
+// initial Object.prototype.
+static Object* Runtime_GetArgumentsProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  // Compute the frame holding the arguments.
+  JavaScriptFrameIterator it;
+  it.AdvanceToArgumentsFrame();
+  JavaScriptFrame* frame = it.frame();
+
+  // Get the actual number of provided arguments.
+  const uint32_t n = frame->GetProvidedParametersCount();
+
+  // Try to convert the key to an index. If successful and within
+  // index return the the argument from the frame.
+  uint32_t index;
+  if (Array::IndexFromObject(args[0], &index) && index < n) {
+    return frame->GetParameter(index);
+  }
+
+  // Convert the key to a string.
+  HandleScope scope;
+  bool exception = false;
+  Handle<Object> converted =
+      Execution::ToString(args.at<Object>(0), &exception);
+  if (exception) return Failure::Exception();
+  Handle<String> key = Handle<String>::cast(converted);
+
+  // Try to convert the string key into an array index.
+  if (key->AsArrayIndex(&index)) {
+    if (index < n) {
+      return frame->GetParameter(index);
+    } else {
+      // Out-of-range indices still consult Object.prototype.
+      return Top::initial_object_prototype()->GetElement(index);
+    }
+  }
+
+  // Handle special arguments properties.
+  if (key->Equals(Heap::length_symbol())) return Smi::FromInt(n);
+  if (key->Equals(Heap::callee_symbol())) return frame->function();
+
+  // Lookup in the initial Object.prototype object.
+  return Top::initial_object_prototype()->GetProperty(*key);
+}
+
+
+// ToBoolean conversion: delegates to the object's own ToBoolean.
+static Object* Runtime_ToBool(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  return args[0]->ToBoolean();
+}
+
+
+// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
+// Possible optimizations: put the type string into the oddballs.
+static Object* Runtime_Typeof(Arguments args) {
+  NoHandleAllocation ha;
+
+  Object* obj = args[0];
+  if (obj->IsNumber()) return Heap::number_symbol();
+  HeapObject* heap_obj = HeapObject::cast(obj);
+
+  // typeof an undetectable object is 'undefined'
+  if (heap_obj->map()->is_undetectable())
+    return Heap::undefined_symbol();
+
+  InstanceType instance_type = heap_obj->map()->instance_type();
+  if (instance_type < FIRST_NONSTRING_TYPE) {
+    return Heap::string_symbol();
+  }
+
+  switch (instance_type) {
+    case ODDBALL_TYPE:
+      // Oddballs are true, false, null and undefined; dispatch on
+      // identity since they are singletons.
+      if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
+        return Heap::boolean_symbol();
+      }
+      if (heap_obj->IsNull()) {
+        return Heap::object_symbol();
+      }
+      ASSERT(heap_obj->IsUndefined());
+      return Heap::undefined_symbol();
+    case JS_FUNCTION_TYPE:
+      return Heap::function_symbol();
+    default:
+      // For any kind of object not handled above, the spec rule for
+      // host objects gives that it is okay to return "object"
+      return Heap::object_symbol();
+  }
+}
+
+
+// ToNumber conversion for strings; hex literals ('0x..') are allowed.
+static Object* Runtime_StringToNumber(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(String, subject, args[0]);
+  return Heap::NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
+}
+
+
+// String.fromCharCode support: builds a string from an array of char
+// codes, in two passes - first to decide between an ASCII and a
+// two-byte representation, then to fill in the characters.
+static Object* Runtime_StringFromCharCodeArray(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSArray, codes, args[0]);
+  int length = Smi::cast(codes->length())->value();
+
+  // Check if the string can be ASCII.
+  int i;
+  for (i = 0; i < length; i++) {
+    Object* element = codes->GetElement(i);
+    CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
+    // Only the low 16 bits of each code are significant.
+    if ((chr & 0xffff) > String::kMaxAsciiCharCode)
+      break;
+  }
+
+  Object* object = NULL;
+  if (i == length) {  // The string is ASCII.
+    object = Heap::AllocateRawAsciiString(length);
+  } else {  // The string is not ASCII.
+    object = Heap::AllocateRawTwoByteString(length);
+  }
+
+  if (object->IsFailure()) return object;
+  String* result = String::cast(object);
+  for (int i = 0; i < length; i++) {
+    Object* element = codes->GetElement(i);
+    CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
+    result->Set(i, chr & 0xffff);
+  }
+  return result;
+}
+
+
+// kNotEscaped is generated by the following:
+//
+// #!/bin/perl
+// for (my $i = 0; $i < 256; $i++) {
+//   print "\n" if $i % 16 == 0;
+//   my $c = chr($i);
+//   my $escaped = 1;
+//   $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
+//   print $escaped ? "0, " : "1, ";
+// }
+
+
+// Returns true when |character| passes through escape() unchanged,
+// i.e. it is one of [A-Za-z0-9@*_+./-].
+static bool IsNotEscaped(uint16_t character) {
+  // Only for 8 bit characters, the rest are always escaped (in a different way)
+  ASSERT(character < 256);
+  static const char kNotEscaped[256] = {
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  };
+  return kNotEscaped[character] != 0;
+}
+
+
+// Global escape() support (ECMA-262 annex B): characters >= 256 become
+// %uXXXX, escapable 8-bit characters become %XX, the rest pass through.
+// Two passes: first to size the (always ASCII) result, then to fill it.
+static Object* Runtime_URIEscape(Arguments args) {
+  const char hex_chars[] = "0123456789ABCDEF";
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(String, source, args[0]);
+
+  source->TryFlatten();
+
+  int escaped_length = 0;
+  int length = source->length();
+  {
+    Access<StringInputBuffer> buffer(&string_input_buffer);
+    buffer->Reset(source);
+    while (buffer->has_more()) {
+      uint16_t character = buffer->GetNext();
+      if (character >= 256) {
+        escaped_length += 6;
+      } else if (IsNotEscaped(character)) {
+        escaped_length++;
+      } else {
+        escaped_length += 3;
+      }
+      // We don't allow strings that are longer than Smi range.
+      if (!Smi::IsValid(escaped_length)) {
+        Top::context()->mark_out_of_memory();
+        return Failure::OutOfMemoryException();
+      }
+    }
+  }
+  // No length change implies no change. Return original string if no change.
+  if (escaped_length == length) {
+    return source;
+  }
+  Object* o = Heap::AllocateRawAsciiString(escaped_length);
+  if (o->IsFailure()) return o;
+  String* destination = String::cast(o);
+  int dest_position = 0;
+
+  // Second pass: rewind the shared input buffer and emit the result.
+  Access<StringInputBuffer> buffer(&string_input_buffer);
+  buffer->Rewind();
+  while (buffer->has_more()) {
+    uint16_t character = buffer->GetNext();
+    if (character >= 256) {
+      destination->Set(dest_position, '%');
+      destination->Set(dest_position+1, 'u');
+      destination->Set(dest_position+2, hex_chars[character >> 12]);
+      destination->Set(dest_position+3, hex_chars[(character >> 8) & 0xf]);
+      destination->Set(dest_position+4, hex_chars[(character >> 4) & 0xf]);
+      destination->Set(dest_position+5, hex_chars[character & 0xf]);
+      dest_position += 6;
+    } else if (IsNotEscaped(character)) {
+      destination->Set(dest_position, character);
+      dest_position++;
+    } else {
+      destination->Set(dest_position, '%');
+      destination->Set(dest_position+1, hex_chars[character >> 4]);
+      destination->Set(dest_position+2, hex_chars[character & 0xf]);
+      dest_position += 3;
+    }
+  }
+  return destination;
+}
+
+
+// Decodes a two-hex-digit pair into its byte value, or -1 when either
+// character is not a hex digit.  kHexValue is sized 'g' (103) so it
+// covers every index up to 'f'; the explicit > 'f' guards keep the
+// lookups in bounds.
+static inline int TwoDigitHex(uint16_t character1, uint16_t character2) {
+  static const signed char kHexValue['g'] = {
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, -1, -1, -1, -1, -1, -1,
+    -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, 10, 11, 12, 13, 14, 15 };
+
+  if (character1 > 'f') return -1;
+  int hi = kHexValue[character1];
+  if (hi == -1) return -1;
+  if (character2 > 'f') return -1;
+  int lo = kHexValue[character2];
+  if (lo == -1) return -1;
+  return (hi << 4) + lo;
+}
+
+
+// Decodes one unescape() unit starting at position i of |source|:
+// %uXXXX (6 chars), %XX (3 chars) or a literal character (1 char).
+// The number of source characters consumed is stored in *step.
+static inline int Unescape(String* source, int i, int length, int* step) {
+  uint16_t character = source->Get(i);
+  int32_t hi, lo;
+  if (character == '%' &&
+      i <= length - 6 &&
+      source->Get(i + 1) == 'u' &&
+      (hi = TwoDigitHex(source->Get(i + 2), source->Get(i + 3))) != -1 &&
+      (lo = TwoDigitHex(source->Get(i + 4), source->Get(i + 5))) != -1) {
+    *step = 6;
+    return (hi << 8) + lo;
+  } else if (character == '%' &&
+             i <= length - 3 &&
+             (lo = TwoDigitHex(source->Get(i + 1), source->Get(i + 2))) != -1) {
+    *step = 3;
+    return lo;
+  } else {
+    // Not a valid escape sequence; the '%' (or other char) is literal.
+    *step = 1;
+    return character;
+  }
+}
+
+
+// Global unescape() support (ECMA-262 annex B).  Two passes: first to
+// count the decoded length and detect non-ASCII output, then to fill
+// in a string of the right representation.
+static Object* Runtime_URIUnescape(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(String, source, args[0]);
+
+  source->TryFlatten();
+
+  bool ascii = true;
+  int length = source->length();
+
+  int unescaped_length = 0;
+  for (int i = 0; i < length; unescaped_length++) {
+    int step;
+    if (Unescape(source, i, length, &step) > String::kMaxAsciiCharCode)
+      ascii = false;
+    i += step;
+  }
+
+  // No length change implies no change. Return original string if no change.
+  if (unescaped_length == length)
+    return source;
+
+  Object* o = ascii ?
+              Heap::AllocateRawAsciiString(unescaped_length) :
+              Heap::AllocateRawTwoByteString(unescaped_length);
+  if (o->IsFailure()) return o;
+  String* destination = String::cast(o);
+
+  int dest_position = 0;
+  for (int i = 0; i < length; dest_position++) {
+    int step;
+    destination->Set(dest_position, Unescape(source, i, length, &step));
+    i += step;
+  }
+  return destination;
+}
+
+
+// parseInt(string, radix) support (ECMA-262 15.1.2.2): skips leading
+// whitespace, honours an optional sign, infers the radix from '0'/'0x'
+// prefixes when radix is 0, and returns NaN when no digits are found.
+static Object* Runtime_StringParseInt(Arguments args) {
+  NoHandleAllocation ha;
+
+  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_DOUBLE_CHECKED(n, args[1]);
+  int radix = FastD2I(n);
+
+  s->TryFlatten();
+
+  int len = s->length();
+  int i;
+
+  // Skip leading white space.
+  for (i = 0; i < len && Scanner::kIsWhiteSpace.get(s->Get(i)); i++) ;
+  if (i == len) return Heap::nan_value();
+
+  // Compute the sign (default to +).
+  int sign = 1;
+  if (s->Get(i) == '-') {
+    sign = -1;
+    i++;
+  } else if (s->Get(i) == '+') {
+    i++;
+  }
+
+  // Compute the radix if 0.
+  if (radix == 0) {
+    radix = 10;
+    if (i < len && s->Get(i) == '0') {
+      // A leading '0' selects octal unless followed by 'x'/'X' (hex).
+      radix = 8;
+      if (i + 1 < len) {
+        int c = s->Get(i + 1);
+        if (c == 'x' || c == 'X') {
+          radix = 16;
+          i += 2;
+        }
+      }
+    }
+  } else if (radix == 16) {
+    // Allow 0x or 0X prefix if radix is 16.
+    if (i + 1 < len && s->Get(i) == '0') {
+      int c = s->Get(i + 1);
+      if (c == 'x' || c == 'X') i += 2;
+    }
+  }
+
+  RUNTIME_ASSERT(2 <= radix && radix <= 36);
+  double value;
+  int end_index = StringToInt(s, i, radix, &value);
+  // end_index == i means not a single digit was consumed.
+  if (end_index != i) {
+    return Heap::NumberFromDouble(sign * value);
+  }
+  return Heap::nan_value();
+}
+
+
+// parseFloat(string) support: trailing junk is allowed and ignored.
+static Object* Runtime_StringParseFloat(Arguments args) {
+  NoHandleAllocation ha;
+  CONVERT_CHECKED(String, str, args[0]);
+
+  // ECMA-262 section 15.1.2.3, empty string is NaN
+  double value = StringToDouble(str, ALLOW_TRAILING_JUNK, OS::nan_value());
+
+  // Create a number object from the value.
+  return Heap::NumberFromDouble(value);
+}
+
+
+// Shared (per-process) Unicode case-conversion mappings used by
+// ConvertCase below; the 128-entry parameter is the mapping cache size.
+static unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping;
+static unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping;
+
+
+// Shared implementation of String.prototype.toLowerCase/toUpperCase.
+// |mapping| performs the per-character case conversion; some
+// characters convert to more than one character (e.g. sharp-s), which
+// triggers the exact-length retry described below.
+template <class Converter>
+static Object* ConvertCase(Arguments args,
+                           unibrow::Mapping<Converter, 128> *mapping) {
+  NoHandleAllocation ha;
+
+  CONVERT_CHECKED(String, s, args[0]);
+  int raw_string_length = s->length();
+  // Assume that the string is not empty; we need this assumption later
+  if (raw_string_length == 0) return s;
+  int length = raw_string_length;
+
+  s->TryFlatten();
+
+  // We try this twice, once with the assumption that the result is
+  // no longer than the input and, if that assumption breaks, again
+  // with the exact length. This is implemented using a goto back
+  // to this label if we discover that the assumption doesn't hold.
+  // I apologize sincerely for this and will give a vaffel-is to
+  // anyone who can implement it in a nicer way.
+ try_convert:
+
+  // Allocate the resulting string.
+  //
+  // NOTE: This assumes that the upper/lower case of an ascii
+  // character is also ascii.  This is currently the case, but it
+  // might break in the future if we implement more context and locale
+  // dependent upper/lower conversions.
+  Object* o = s->IsAscii()
+      ? Heap::AllocateRawAsciiString(length)
+      : Heap::AllocateRawTwoByteString(length);
+  if (o->IsFailure())
+    return o;
+  String* result = String::cast(o);
+  bool has_changed_character = false;
+
+
+  // Convert all characters to upper case, assuming that they will fit
+  // in the buffer
+  Access<StringInputBuffer> buffer(&string_input_buffer);
+  buffer->Reset(s);
+  unibrow::uchar chars[unibrow::kMaxCaseConvertedSize];
+  int i = 0;
+  // We can assume that the string is not empty
+  uc32 current = buffer->GetNext();
+  while (i < length) {
+    // The mapping may depend on the following character (lookahead).
+    uc32 next = buffer->has_more() ? buffer->GetNext() : 0;
+    int char_length = mapping->get(current, next, chars);
+    if (char_length == 0) {
+      // The case conversion of this character is the character itself.
+      result->Set(i, current);
+      i++;
+    } else if (char_length == 1) {
+      // Common case: converting the letter resulted in one character.
+      ASSERT(static_cast<uc32>(chars[0]) != current);
+      result->Set(i, chars[0]);
+      has_changed_character = true;
+      i++;
+    } else if (length == raw_string_length) {
+      // We've assumed that the result would be as long as the
+      // input but here is a character that converts to several
+      // characters.  No matter, we calculate the exact length
+      // of the result and try the whole thing again.
+      //
+      // Note that this leaves room for optimization.  We could just
+      // memcpy what we already have to the result string.  Also,
+      // the result string is the last object allocated we could
+      // "realloc" it and probably, in the vast majority of cases,
+      // extend the existing string to be able to hold the full
+      // result.
+      int current_length = i + char_length + mapping->get(next, 0, chars);
+      while (buffer->has_more()) {
+        current = buffer->GetNext();
+        int char_length = mapping->get(current, 0, chars);
+        // A conversion length of zero means the character converts to
+        // itself, so it still occupies exactly one position.
+        if (char_length == 0) char_length = 1;
+        // BUG FIX: this used to read 'current += char_length', which
+        // accumulated the count into the character variable and left
+        // current_length stuck at its initial value, so the retry
+        // pass allocated a too-short string and truncated the result.
+        current_length += char_length;
+      }
+      length = current_length;
+      goto try_convert;
+    } else {
+      for (int j = 0; j < char_length; j++) {
+        result->Set(i, chars[j]);
+        i++;
+      }
+      has_changed_character = true;
+    }
+    current = next;
+  }
+  if (has_changed_character) {
+    return result;
+  } else {
+    // If we didn't actually change anything in doing the conversion
+    // we simple return the result and let the converted string
+    // become garbage; there is no reason to keep two identical strings
+    // alive.
+    return s;
+  }
+}
+
+
+// String.prototype.toLowerCase: thin wrapper around ConvertCase.
+static Object* Runtime_StringToLowerCase(Arguments args) {
+  return ConvertCase<unibrow::ToLowercase>(args, &to_lower_mapping);
+}
+
+
+// String.prototype.toUpperCase: thin wrapper around ConvertCase.
+static Object* Runtime_StringToUpperCase(Arguments args) {
+  return ConvertCase<unibrow::ToUppercase>(args, &to_upper_mapping);
+}
+
+
+// Returns the first (left) component of a cons string.
+static Object* Runtime_ConsStringFst(Arguments args) {
+  NoHandleAllocation ha;
+
+  CONVERT_CHECKED(ConsString, str, args[0]);
+  return str->first();
+}
+
+
+// Returns the second (right) component of a cons string.
+static Object* Runtime_ConsStringSnd(Arguments args) {
+  NoHandleAllocation ha;
+
+  CONVERT_CHECKED(ConsString, str, args[0]);
+  return str->second();
+}
+
+
+// ToString conversion for numbers, with a heap-level cache of recent
+// number-to-string results.
+static Object* Runtime_NumberToString(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* number = args[0];
+  RUNTIME_ASSERT(number->IsNumber());
+
+  Object* cached = Heap::GetNumberStringCache(number);
+  if (cached != Heap::undefined_value()) {
+    return cached;
+  }
+
+  // 100 characters is ample for any int or double representation.
+  char arr[100];
+  Vector<char> buffer(arr, ARRAY_SIZE(arr));
+  const char* str;
+  if (number->IsSmi()) {
+    int num = Smi::cast(number)->value();
+    str = IntToCString(num, buffer);
+  } else {
+    double num = HeapNumber::cast(number)->value();
+    str = DoubleToCString(num, buffer);
+  }
+  Object* result = Heap::AllocateStringFromAscii(CStrVector(str));
+
+  // Only cache on success; allocation failures propagate to the caller.
+  if (!result->IsFailure()) {
+    Heap::SetNumberStringCache(number, String::cast(result));
+  }
+  return result;
+}
+
+
+// ToInteger conversion (truncate toward zero).  Smis are already
+// integers and pass through untouched.
+static Object* Runtime_NumberToInteger(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* obj = args[0];
+
+  if (obj->IsSmi())
+    return obj;
+
+  CONVERT_DOUBLE_CHECKED(number, obj);
+  return Heap::NumberFromDouble(DoubleToInteger(number));
+}
+
+
+// ToUint32 conversion.  A non-negative Smi is already a valid uint32
+// and passes through untouched.
+static Object* Runtime_NumberToJSUint32(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* obj = args[0];
+  if (obj->IsSmi() && Smi::cast(obj)->value() >= 0) return obj;
+  CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, obj);
+  return Heap::NumberFromUint32(number);
+}
+
+
+// ToInt32 conversion.  Smis are already in int32 range.
+static Object* Runtime_NumberToJSInt32(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* obj = args[0];
+  if (obj->IsSmi()) return obj;
+  CONVERT_DOUBLE_CHECKED(number, obj);
+  return Heap::NumberFromInt32(DoubleToInt32(number));
+}
+
+
+// Numeric addition; always allocates a heap number for the result.
+static Object* Runtime_NumberAdd(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::AllocateHeapNumber(x + y);
+}
+
+
+// Numeric subtraction; always allocates a heap number for the result.
+static Object* Runtime_NumberSub(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::AllocateHeapNumber(x - y);
+}
+
+
+// Numeric multiplication; always allocates a heap number.
+static Object* Runtime_NumberMul(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::AllocateHeapNumber(x * y);
+}
+
+
+// Allocates a fresh heap number initialized to 0; the single argument
+// is unused (required by the runtime calling convention).
+static Object* Runtime_NumberAlloc(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  return Heap::AllocateHeapNumber(0);
+}
+
+
+// Unary minus; always allocates a heap number (so -0 is representable).
+static Object* Runtime_NumberUnaryMinus(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return Heap::AllocateHeapNumber(-x);
+}
+
+
+// Numeric division; NewNumberFromDouble may return a Smi when the
+// quotient is a small integer.
+static Object* Runtime_NumberDiv(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::NewNumberFromDouble(x / y);
+}
+
+
+// Numeric remainder (ECMA-262 %), with a workaround for MSVC's
+// non-conforming fmod on the cases where the result must equal the
+// dividend.
+static Object* Runtime_NumberMod(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+
+#ifdef WIN32
+  // Workaround MS fmod bugs. ECMA-262 says:
+  // dividend is finite and divisor is an infinity => result equals dividend
+  // dividend is a zero and divisor is nonzero finite => result equals dividend
+  if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
+      !(x == 0 && (y != 0 && isfinite(y))))
+#endif
+  x = fmod(x, y);
+  // NewNumberFromDouble may return a Smi instead of a Number object
+  return Heap::NewNumberFromDouble(x);
+}
+
+
+// String concatenation: returns the non-empty operand when one side
+// is empty, otherwise builds a cons string.  Lengths whose sum leaves
+// Smi range raise an out-of-memory exception.
+static Object* Runtime_StringAdd(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(String, str1, args[0]);
+  CONVERT_CHECKED(String, str2, args[1]);
+  int len1 = str1->length();
+  int len2 = str2->length();
+  if (len1 == 0) return str2;
+  if (len2 == 0) return str1;
+  int length_sum = len1 + len2;
+  // Make sure that an out of memory exception is thrown if the length
+  // of the new cons string is too large to fit in a Smi.
+  if (length_sum > Smi::kMaxValue || length_sum < 0) {
+    Top::context()->mark_out_of_memory();
+    return Failure::OutOfMemoryException();
+  }
+  return Heap::AllocateConsString(str1, str2);
+}
+
+
+// String-builder join: args[0] is a fast-elements JSArray whose
+// entries are either strings or Smis encoding (position << 11 | length)
+// slices of the "special" string args[1].  Two passes: first to
+// compute total length and ASCII-ness, then to flatten the pieces
+// into a freshly allocated result.
+static Object* Runtime_StringBuilderConcat(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_CHECKED(String, special, args[1]);
+  int special_length = special->length();
+  Object* smi_array_length = array->length();
+  if (!smi_array_length->IsSmi()) {
+    Top::context()->mark_out_of_memory();
+    return Failure::OutOfMemoryException();
+  }
+  int array_length = Smi::cast(smi_array_length)->value();
+  if (!array->HasFastElements()) {
+    return Top::Throw(Heap::illegal_argument_symbol());
+  }
+  FixedArray* fixed_array = FixedArray::cast(array->elements());
+  // The JSArray length may exceed the backing store; clamp it.
+  if (fixed_array->length() < array_length)
+    array_length = fixed_array->length();
+
+  if (array_length == 0) {
+    return Heap::empty_string();
+  } else if (array_length == 1) {
+    Object* first = fixed_array->get(0);
+    if (first->IsString()) return first;
+  }
+
+  bool ascii = special->IsAscii();
+  int position = 0;
+  for (int i = 0; i < array_length; i++) {
+    Object* elt = fixed_array->get(i);
+    if (elt->IsSmi()) {
+      // Smi entries encode a slice of 'special': high bits are the
+      // start position, low 11 bits the length.
+      int len = Smi::cast(elt)->value();
+      int pos = len >> 11;
+      len &= 0x7ff;
+      if (pos + len > special_length) {
+        return Top::Throw(Heap::illegal_argument_symbol());
+      }
+      position += len;
+    } else if (elt->IsString()) {
+      String* element = String::cast(elt);
+      int element_length = element->length();
+      if (!Smi::IsValid(element_length + position)) {
+        Top::context()->mark_out_of_memory();
+        return Failure::OutOfMemoryException();
+      }
+      position += element_length;
+      if (ascii && !element->IsAscii())
+        ascii = false;
+    } else {
+      return Top::Throw(Heap::illegal_argument_symbol());
+    }
+  }
+
+  int length = position;
+  position = 0;
+  Object* object;
+  if (ascii) {
+    object = Heap::AllocateRawAsciiString(length);
+  } else {
+    object = Heap::AllocateRawTwoByteString(length);
+  }
+  if (object->IsFailure()) return object;
+
+  String* answer = String::cast(object);
+  for (int i = 0; i < array_length; i++) {
+    Object* element = fixed_array->get(i);
+    if (element->IsSmi()) {
+      int len = Smi::cast(element)->value();
+      int pos = len >> 11;
+      len &= 0x7ff;
+      String::Flatten(special, answer, pos, pos + len, position);
+      position += len;
+    } else {
+      String* string = String::cast(element);
+      int element_length = string->length();
+      String::Flatten(string, answer, 0, element_length, position);
+      position += element_length;
+    }
+  }
+  return answer;
+}
+
+
+// %NumberOr: bitwise OR on operands converted to int32.
+static Object* Runtime_NumberOr(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return Heap::NumberFromInt32(x | y);
+}
+
+
+// %NumberAnd: bitwise AND on operands converted to int32.
+static Object* Runtime_NumberAnd(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return Heap::NumberFromInt32(x & y);
+}
+
+
+// %NumberXor: bitwise XOR on operands converted to int32.
+static Object* Runtime_NumberXor(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return Heap::NumberFromInt32(x ^ y);
+}
+
+
+// %NumberNot: bitwise complement on a single int32 operand.
+static Object* Runtime_NumberNot(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ return Heap::NumberFromInt32(~x);
+}
+
+
+// %NumberShl: left shift; shift count is masked to 5 bits per ECMA-262.
+static Object* Runtime_NumberShl(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return Heap::NumberFromInt32(x << (y & 0x1f));
+}
+
+
+// %NumberShr: logical (zero-fill) right shift; operand converted to uint32.
+static Object* Runtime_NumberShr(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return Heap::NumberFromUint32(x >> (y & 0x1f));
+}
+
+
+// %NumberSar: arithmetic (sign-propagating) right shift on an int32.
+static Object* Runtime_NumberSar(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return Heap::NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
+}
+
+
+// %ObjectEquals: identity comparison of two tagged values.
+static Object* Runtime_ObjectEquals(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ return Smi::FromInt(args[0] == args[1] ? EQUAL : NOT_EQUAL);
+}
+
+
+// %NumberEquals: numeric equality per ECMA-262 — NaN is equal to nothing.
+static Object* Runtime_NumberEquals(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_CHECKED(y, args[1]);
+ if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
+ if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
+ if (x == y) return Smi::FromInt(EQUAL);
+ Object* result;
+ // NOTE(review): x == y above already yields EQUAL for +0 vs -0 (IEEE
+ // equality), so this FP_ZERO branch looks unreachable — confirm.
+ if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
+ result = Smi::FromInt(EQUAL);
+ } else {
+ result = Smi::FromInt(NOT_EQUAL);
+ }
+ return result;
+}
+
+
+// %StringEquals: string content equality with several fast paths (identity,
+// distinct symbols, length mismatch, spot-check of first/middle/last chars)
+// before a full character-by-character comparison.
+static Object* Runtime_StringEquals(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_CHECKED(String, x, args[0]);
+ CONVERT_CHECKED(String, y, args[1]);
+
+ // This is very similar to String::Equals(String*) but that version
+ // requires flattened strings as input, whereas we flatten the
+ // strings only if the fast cases fail. Note that this may fail,
+ // requiring a GC. String::Equals(String*) returns a bool and has
+ // no way to signal a failure.
+ if (y == x) return Smi::FromInt(EQUAL);
+ // Two distinct symbols can never have equal contents.
+ if (x->IsSymbol() && y->IsSymbol()) return Smi::FromInt(NOT_EQUAL);
+ // Compare contents
+ int len = x->length();
+ if (len != y->length()) return Smi::FromInt(NOT_EQUAL);
+ if (len == 0) return Smi::FromInt(EQUAL);
+ // Fast case: First, middle and last characters.
+ if (x->Get(0) != y->Get(0)) return Smi::FromInt(NOT_EQUAL);
+ if (x->Get(len>>1) != y->Get(len>>1)) return Smi::FromInt(NOT_EQUAL);
+ if (x->Get(len - 1) != y->Get(len - 1)) return Smi::FromInt(NOT_EQUAL);
+
+ x->TryFlatten();
+ y->TryFlatten();
+
+ // Static buffers are shared across calls to this function.
+ static StringInputBuffer buf1;
+ static StringInputBuffer buf2;
+ buf1.Reset(x);
+ buf2.Reset(y);
+ // Lengths are equal, so exhausting buf1 exhausts buf2 as well.
+ while (buf1.has_more()) {
+ if (buf1.GetNext() != buf2.GetNext())
+ return Smi::FromInt(NOT_EQUAL);
+ }
+ return Smi::FromInt(EQUAL);
+}
+
+
+// %NumberCompare: three-way numeric comparison. args[2] is the value the
+// caller wants back when the operands are uncomparable (either is NaN).
+static Object* Runtime_NumberCompare(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 3);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_CHECKED(y, args[1]);
+ if (isnan(x) || isnan(y)) return args[2];
+ if (x == y) return Smi::FromInt(EQUAL);
+ if (isless(x, y)) return Smi::FromInt(LESS);
+ return Smi::FromInt(GREATER);
+}
+
+
+// %StringCompare: three-way lexicographic comparison of two strings, with
+// fast paths for identity, empty operands and a differing first character.
+static Object* Runtime_StringCompare(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_CHECKED(String, x, args[0]);
+ CONVERT_CHECKED(String, y, args[1]);
+
+ // A few fast case tests before we flatten.
+ if (x == y) return Smi::FromInt(EQUAL);
+ if (y->length() == 0) {
+ if (x->length() == 0)
+ return Smi::FromInt(EQUAL);
+ return Smi::FromInt(GREATER);
+ } else if (x->length() == 0) {
+ return Smi::FromInt(LESS);
+ }
+ {
+ int d = x->Get(0) - y->Get(0);
+ if (d < 0) return Smi::FromInt(LESS);
+ else if (d > 0) return Smi::FromInt(GREATER);
+ }
+
+ x->TryFlatten();
+ y->TryFlatten();
+
+ // Static buffers are shared across calls to this function.
+ static StringInputBuffer bufx;
+ static StringInputBuffer bufy;
+ bufx.Reset(x);
+ bufy.Reset(y);
+ while (bufx.has_more() && bufy.has_more()) {
+ int d = bufx.GetNext() - bufy.GetNext();
+ if (d < 0) return Smi::FromInt(LESS);
+ else if (d > 0) return Smi::FromInt(GREATER);
+ }
+
+ // x is (non-trivial) prefix of y:
+ if (bufy.has_more()) return Smi::FromInt(LESS);
+ // y is prefix of x:
+ return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+}
+
+
+// %Math_abs: thin wrapper over C fabs, result boxed as a HeapNumber.
+static Object* Runtime_Math_abs(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(fabs(x));
+}
+
+
+// %Math_acos: thin wrapper over C acos.
+static Object* Runtime_Math_acos(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(acos(x));
+}
+
+
+// %Math_asin: thin wrapper over C asin.
+static Object* Runtime_Math_asin(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(asin(x));
+}
+
+
+// %Math_atan: thin wrapper over C atan.
+static Object* Runtime_Math_atan(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(atan(x));
+}
+
+
+// %Math_atan2: wrapper over C atan2 with an explicit fix-up for the
+// two-infinite-arguments case so the result is an exact multiple of Pi/4.
+static Object* Runtime_Math_atan2(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_CHECKED(y, args[1]);
+ double result;
+ if (isinf(x) && isinf(y)) {
+ // Make sure that the result in case of two infinite arguments
+ // is a multiple of Pi / 4. The sign of the result is determined
+ // by the first argument (x) and the sign of the second argument
+ // determines the multiplier: one or three.
+ static double kPiDividedBy4 = 0.78539816339744830962;
+ int multiplier = (x < 0) ? -1 : 1;
+ if (y < 0) multiplier *= 3;
+ result = multiplier * kPiDividedBy4;
+ } else {
+ result = atan2(x, y);
+ }
+ return Heap::AllocateHeapNumber(result);
+}
+
+
+// %Math_ceil: uses the project's ceiling() helper; NumberFromDouble may
+// return a Smi for integral results.
+static Object* Runtime_Math_ceil(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::NumberFromDouble(ceiling(x));
+}
+
+
+// %Math_cos: thin wrapper over C cos.
+static Object* Runtime_Math_cos(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(cos(x));
+}
+
+
+// %Math_exp: thin wrapper over C exp.
+static Object* Runtime_Math_exp(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(exp(x));
+}
+
+
+// %Math_floor: thin wrapper over C floor.
+static Object* Runtime_Math_floor(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::NumberFromDouble(floor(x));
+}
+
+
+// %Math_log: thin wrapper over C log (natural logarithm).
+static Object* Runtime_Math_log(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(log(x));
+}
+
+
+// %Math_pow: wrapper over C pow with the ECMA-262 special cases handled
+// explicitly: NaN exponent or (+/-1)^(+/-Infinity) yields NaN, and any
+// base to the power zero yields 1.
+static Object* Runtime_Math_pow(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_CHECKED(y, args[1]);
+ if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+ return Heap::nan_value();
+ } else if (y == 0) {
+ return Smi::FromInt(1);
+ } else {
+ return Heap::AllocateHeapNumber(pow(x, y));
+ }
+}
+
+// Returns a number value with positive sign, greater than or equal to
+// 0 but less than 1, chosen randomly.
+static Object* Runtime_Math_random(Arguments) {
+ NoHandleAllocation ha;
+
+ // To get much better precision, we combine the results of two
+ // invocations of random(). The result is computed by normalizing a
+ // double in the range [0, RAND_MAX + 1) obtained by adding the
+ // high-order bits in the range [0, RAND_MAX] with the low-order
+ // bits in the range [0, 1).
+ double lo = static_cast<double>(random()) / (RAND_MAX + 1.0);
+ double hi = static_cast<double>(random());
+ double result = (hi + lo) / (RAND_MAX + 1.0);
+ ASSERT(result >= 0 && result < 1);
+ return Heap::AllocateHeapNumber(result);
+}
+
+
+// %Math_round: rounds half up via floor(x + 0.5). Inputs in [-0.5, -0]
+// (negative sign bit but >= -0.5) must produce -0, which floor(x + 0.5)
+// would lose, hence the explicit signbit check.
+static Object* Runtime_Math_round(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ if (signbit(x) && x >= -0.5) return Heap::minus_zero_value();
+ return Heap::NumberFromDouble(floor(x + 0.5));
+}
+
+
+// %Math_sin: thin wrapper over C sin.
+static Object* Runtime_Math_sin(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(sin(x));
+}
+
+
+// %Math_sqrt: thin wrapper over C sqrt.
+static Object* Runtime_Math_sqrt(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(sqrt(x));
+}
+
+
+// %Math_tan: thin wrapper over C tan.
+static Object* Runtime_Math_tan(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::AllocateHeapNumber(tan(x));
+}
+
+
+// %NewArguments: materializes the 'arguments' object for |callee| by
+// copying the actually provided parameters out of the calling frame.
+static Object* Runtime_NewArguments(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ // ECMA-262, 3rd., 10.1.8, p.39
+ CONVERT_CHECKED(JSFunction, callee, args[0]);
+
+ // Compute the frame holding the arguments.
+ JavaScriptFrameIterator it;
+ it.AdvanceToArgumentsFrame();
+ JavaScriptFrame* frame = it.frame();
+
+ const int length = frame->GetProvidedParametersCount();
+ Object* result = Heap::AllocateArgumentsObject(callee, length);
+ if (result->IsFailure()) return result;
+ FixedArray* array = FixedArray::cast(JSObject::cast(result)->elements());
+ ASSERT(array->length() == length);
+ for (int i = 0; i < length; i++) {
+ array->set(i, frame->GetParameter(i));
+ }
+ return result;
+}
+
+
+// %NewClosure: instantiates a function from its compiled boilerplate,
+// binding it to the given context.
+static Object* Runtime_NewClosure(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSFunction, boilerplate, 0);
+ CONVERT_ARG_CHECKED(Context, context, 1);
+
+ Handle<JSFunction> result =
+ Factory::NewFunctionFromBoilerplate(boilerplate, context);
+ return *result;
+}
+
+
+// %NewObject: allocates the receiver for a 'new' expression. Throws a
+// TypeError when the constructor is not a function.
+static Object* Runtime_NewObject(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ Object* constructor = args[0];
+ if (constructor->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(constructor);
+
+ // Handle stepping into constructors if step into is active.
+ if (Debug::StepInActive()) {
+ StackFrameIterator it;
+ it.Advance();
+ ASSERT(InternalFrame::cast(it.frame())->is_construct_trampoline());
+ it.Advance();
+ if (it.frame()->fp() == Debug::step_in_fp()) {
+ HandleScope scope;
+ Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ }
+ }
+
+ if (function->has_initial_map() &&
+ function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
+ // The 'Function' function ignores the receiver object when
+ // called using 'new' and creates a new JSFunction object that
+ // is returned. The receiver object is only used for error
+ // reporting if an error occurs when constructing the new
+ // JSFunction. AllocateJSObject should not be used to allocate
+ // JSFunctions since it does not properly initialize the shared
+ // part of the function. Since the receiver is ignored anyway,
+ // we use the global object as the receiver instead of a new
+ // JSFunction object. This way, errors are reported the same
+ // way whether or not 'Function' is called using 'new'.
+ return Top::context()->global();
+ }
+ return Heap::AllocateJSObject(function);
+ }
+
+ HandleScope scope;
+ Handle<Object> cons(constructor);
+ // The constructor is not a function; throw a type error.
+ Handle<Object> type_error =
+ Factory::NewTypeError("not_constructor", HandleVector(&cons, 1));
+ return Top::Throw(*type_error);
+}
+
+
+#ifdef DEBUG
+DEFINE_bool(trace_lazy, false, "trace lazy compilation");
+#endif
+
+
+// %LazyCompile: compiles a not-yet-compiled function on first call and
+// returns its code object; propagates the pending exception on failure.
+static Object* Runtime_LazyCompile(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+#ifdef DEBUG
+ if (FLAG_trace_lazy) {
+ PrintF("[lazy: ");
+ function->shared()->name()->Print();
+ PrintF("]\n");
+ }
+#endif
+
+ // Compile the target function.
+ ASSERT(!function->is_compiled());
+ if (!CompileLazy(function, KEEP_EXCEPTION)) {
+ return Failure::Exception();
+ }
+
+ return function->code();
+}
+
+
+// %GetCalledFunction: walks past the exit frame and the CALL_NON_FUNCTION
+// activation (plus any arguments adaptor frame) to fetch the callee object
+// from the top of the calling frame's expression stack.
+static Object* Runtime_GetCalledFunction(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ StackFrameIterator it;
+ // Get past the JS-to-C exit frame.
+ ASSERT(it.frame()->is_exit());
+ it.Advance();
+ // Get past the CALL_NON_FUNCTION activation frame.
+ ASSERT(it.frame()->is_java_script());
+ it.Advance();
+ // Argument adaptor frames do not copy the function; we have to skip
+ // past them to get to the real calling frame.
+ if (it.frame()->is_arguments_adaptor()) it.Advance();
+ // Get the function from the top of the expression stack of the
+ // calling frame.
+ StandardFrame* frame = StandardFrame::cast(it.frame());
+ int index = frame->ComputeExpressionsCount() - 1;
+ Object* result = frame->GetExpression(index);
+ return result;
+}
+
+
+// %GetFunctionDelegate: returns the callable delegate for a non-function
+// value (the argument is asserted not to be a JSFunction).
+static Object* Runtime_GetFunctionDelegate(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ RUNTIME_ASSERT(!args[0]->IsJSFunction());
+ return *Execution::GetFunctionDelegate(args.at<Object>(0));
+}
+
+
+// %NewContext: allocates a function context sized from the function's
+// scope info, installs it as the current context, and returns the TOS
+// value passed in args[0] unchanged.
+static Object* Runtime_NewContext(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_CHECKED(JSFunction, function, args[1]);
+ int length = ScopeInfo<>::NumberOfContextSlots(function->code());
+ Object* result = Heap::AllocateFunctionContext(length, function);
+ if (result->IsFailure()) return result;
+
+ Top::set_context(Context::cast(result));
+
+ return args[0]; // return TOS
+}
+
+
+// %PushContext: enters a 'with' context for the given object (after
+// converting it to a JSObject, throwing a TypeError if that fails) and
+// returns the TOS value passed in args[0] unchanged.
+static Object* Runtime_PushContext(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ // Convert the object to a proper JavaScript object.
+ Object* object = args[1];
+ if (!object->IsJSObject()) {
+ object = object->ToObject();
+ if (object->IsFailure()) {
+ if (!Failure::cast(object)->IsInternalError()) return object;
+ HandleScope scope;
+ Handle<Object> handle(args[1]);
+ Handle<Object> result =
+ Factory::NewTypeError("with_expression", HandleVector(&handle, 1));
+ return Top::Throw(*result);
+ }
+ }
+
+ Object* result =
+ Heap::AllocateWithContext(Top::context(), JSObject::cast(object));
+ if (result->IsFailure()) return result;
+
+ Top::set_context(Context::cast(result));
+
+ return args[0]; // return TOS
+}
+
+
+// %LookupContext: resolves |name| along the context chain and returns the
+// holder JSObject for an extension-object hit; falls back to the global
+// object when no intermediate context holds the name.
+static Object* Runtime_LookupContext(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_CHECKED(Context, context, 0);
+ CONVERT_ARG_CHECKED(String, name, 1);
+
+ int index;
+ PropertyAttributes attributes;
+ ContextLookupFlags flags = FOLLOW_CHAINS;
+ Handle<Object> context_obj =
+ context->Lookup(name, flags, &index, &attributes);
+
+ // index < 0 with a non-null holder means the name lives on an
+ // extension JSObject rather than in a context slot.
+ if (index < 0 && *context_obj != NULL) {
+ ASSERT(context_obj->IsJSObject());
+ return *context_obj;
+ }
+
+ // No intermediate context found. Use global object by default.
+ return Top::context()->global();
+}
+
+
+
+
+// A mechanism to return pairs of Object*'s. This is somewhat
+// compiler-dependent as it assumes that a 64-bit value (a long long)
+// is returned via two registers (edx:eax on ia32). Both the ia32 and
+// arm platform support this; it is mostly an issue of "coaxing" the
+// compiler to do the right thing.
+//
+// TODO(1236026): This is a non-portable hack that should be removed.
+typedef uint64_t ObjPair;
+// Pack x into the low 32 bits and y into the high 32 bits.
+ObjPair MakePair(Object* x, Object* y) {
+ return reinterpret_cast<uint32_t>(x) |
+ (reinterpret_cast<ObjPair>(y) << 32);
+}
+
+
+// Replace the hole value with undefined; asserts that a hole can only
+// occur for READ_ONLY slots (uninitialized constants).
+static Object* Unhole(Object* x, PropertyAttributes attributes) {
+ ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
+ USE(attributes);
+ return x->IsTheHole() ? Heap::undefined_value() : x;
+}
+
+
+// Shared implementation for the LoadContextSlot runtimes: resolves the
+// name along the context chain and returns a (value, receiver) pair.
+// When the name is absent, either throws a ReferenceError or returns
+// undefined depending on |throw_error|.
+static ObjPair LoadContextSlotHelper(Arguments args, bool throw_error) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+
+ if (!args[0]->IsContext()) return MakePair(IllegalOperation(), NULL);
+ Handle<Context> context = args.at<Context>(0);
+ Handle<String> name(String::cast(args[1]));
+
+ int index;
+ PropertyAttributes attributes;
+ ContextLookupFlags flags = FOLLOW_CHAINS;
+ Handle<Object> context_obj =
+ context->Lookup(name, flags, &index, &attributes);
+
+ if (index >= 0) {
+ if (context_obj->IsContext()) {
+ // The context is an Execution context, and the "property" we were looking
+ // for is a local variable in that context. According to ECMA-262, 3rd.,
+ // 10.1.6 and 10.2.3, the receiver is the global object.
+ return MakePair(
+ Unhole(Handle<Context>::cast(context_obj)->get(index), attributes),
+ Top::context()->global());
+ } else {
+ return MakePair(
+ Unhole(Handle<JSObject>::cast(context_obj)->GetElement(index),
+ attributes),
+ *context_obj);
+ }
+ }
+
+ if (*context_obj != NULL) {
+ ASSERT(Handle<JSObject>::cast(context_obj)->HasProperty(*name));
+ // Note: As of 5/29/2008, GetProperty does the "unholing" and so this call
+ // here is redundant. We left it anyway, to be explicit; also it's not clear
+ // why GetProperty should do the unholing in the first place.
+ return MakePair(
+ Unhole(Handle<JSObject>::cast(context_obj)->GetProperty(*name),
+ attributes),
+ *context_obj);
+ }
+
+ if (throw_error) {
+ // The property doesn't exist - throw exception.
+ Handle<Object> reference_error =
+ Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
+ return MakePair(Top::Throw(*reference_error), NULL);
+ } else {
+ // The property doesn't exist - return undefined
+ return MakePair(Heap::undefined_value(), Heap::undefined_value());
+ }
+}
+
+
+// %LoadContextSlot: context-slot load that throws on an undefined name.
+static ObjPair Runtime_LoadContextSlot(Arguments args) {
+ return LoadContextSlotHelper(args, true);
+}
+
+
+// %LoadContextSlotNoReferenceError: same load, but yields undefined
+// instead of throwing when the name is not found (e.g. typeof).
+static ObjPair Runtime_LoadContextSlotNoReferenceError(Arguments args) {
+ return LoadContextSlotHelper(args, false);
+}
+
+
+// %StoreContextSlot: assigns |value| to |name| resolved along the context
+// chain. Writes to READ_ONLY slots are silently ignored; an unresolved
+// name is created on the global object. Returns the stored value.
+static Object* Runtime_StoreContextSlot(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+
+ Handle<Object> value(args[0]);
+ CONVERT_ARG_CHECKED(Context, context, 1);
+ Handle<String> name(String::cast(args[2]));
+
+ int index;
+ PropertyAttributes attributes;
+ ContextLookupFlags flags = FOLLOW_CHAINS;
+ Handle<Object> context_obj =
+ context->Lookup(name, flags, &index, &attributes);
+
+ if (index >= 0) {
+ if (context_obj->IsContext()) {
+ // Ignore if read_only variable.
+ if ((attributes & READ_ONLY) == 0) {
+ Handle<Context>::cast(context_obj)->set(index, *value);
+ }
+ } else {
+ ASSERT((attributes & READ_ONLY) == 0);
+ Object* result =
+ Handle<JSObject>::cast(context_obj)->SetElement(index, *value);
+ USE(result);
+ ASSERT(!result->IsFailure());
+ }
+ return *value;
+ }
+
+ // Slow case: The property is not in a FixedArray context.
+ // It is either in an JSObject extension context or it was not found.
+ Handle<JSObject> context_ext;
+
+ if (*context_obj != NULL) {
+ // The property exists in the extension context.
+ context_ext = Handle<JSObject>::cast(context_obj);
+ } else {
+ // The property was not found. It needs to be stored in the global context.
+ ASSERT(attributes == ABSENT);
+ attributes = NONE;
+ context_ext = Handle<JSObject>(Top::context()->global());
+ }
+
+ // Set the property, but ignore if read_only variable.
+ if ((attributes & READ_ONLY) == 0) {
+ Handle<Object> set = SetProperty(context_ext, name, value, attributes);
+ if (set.is_null()) {
+ // Failure::Exception is converted to a null handle in the
+ // handle-based methods such as SetProperty. We therefore need
+ // to convert null handles back to exceptions.
+ ASSERT(Top::has_pending_exception());
+ return Failure::Exception();
+ }
+ }
+ return *value;
+}
+
+
+// %Throw: raises args[0] as a JavaScript exception.
+static Object* Runtime_Throw(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ return Top::Throw(args[0]);
+}
+
+
+// %ReThrow: re-raises an exception without re-entering catch handling.
+static Object* Runtime_ReThrow(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ return Top::ReThrow(args[0]);
+}
+
+
+// %ThrowReferenceError: throws a 'not_defined' ReferenceError for the
+// given variable name.
+static Object* Runtime_ThrowReferenceError(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ Handle<Object> name(args[0]);
+ Handle<Object> reference_error =
+ Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
+ return Top::Throw(*reference_error);
+}
+
+
+// %StackOverflow: signals a stack overflow exception to the VM.
+static Object* Runtime_StackOverflow(Arguments args) {
+ NoHandleAllocation na;
+ return Top::StackOverflow();
+}
+
+
+// Handles a preemption request: acknowledges it, then releases the V8
+// lock and yields the CPU so another thread can run.
+static Object* RuntimePreempt(Arguments args) {
+ // Clear the preempt request flag.
+ StackGuard::Continue(PREEMPT);
+
+ ContextSwitcher::PreemptionReceived();
+
+ {
+ v8::Unlocker unlocker;
+ Thread::YieldCPU();
+ }
+
+ return Heap::undefined_value();
+}
+
+
+// Handles a debug-break request raised through the stack guard. Returns
+// args[0] (the TOS value) so execution can continue transparently.
+static Object* Runtime_DebugBreak(Arguments args) {
+ // Don't break in system functions. If the current function is either in the
+ // builtins object of some context or is in the debug context just return with
+ // the debug break stack guard active.
+ JavaScriptFrameIterator it;
+ JavaScriptFrame* frame = it.frame();
+ Object* fun = frame->function();
+ if (fun->IsJSFunction()) {
+ GlobalObject* global = JSFunction::cast(fun)->context()->global();
+ if (global->IsJSBuiltinsObject() || Debug::IsDebugGlobal(global)) {
+ return args[0];
+ }
+ }
+
+ // Clear the debug request flag.
+ StackGuard::Continue(DEBUGBREAK);
+
+ // Make sure debugger is loaded.
+ if (!Debug::Load()) {
+ return args[0];
+ }
+
+ HandleScope scope;
+ SaveBreakFrame save;
+ EnterDebuggerContext enter;
+
+ // Process debug requests. Returns true if break request.
+ bool break_request = Debugger::ProcessPendingRequests();
+
+ // Notify the debug event listeners if break request.
+ if (break_request) {
+ Debugger::OnDebugBreak(Factory::undefined_value());
+ }
+
+ // Return to continue execution.
+ return args[0];
+}
+
+
+// %StackGuard: dispatches on the reason the stack guard fired — real
+// stack overflow, debug break, preemption, or an interrupt request.
+static Object* Runtime_StackGuard(Arguments args) {
+ ASSERT(args.length() == 1);
+
+ // First check if this is a real stack overflow.
+ if (StackGuard::IsStackOverflow()) return Runtime_StackOverflow(args);
+
+ // If not real stack overflow the stack guard was used to interrupt
+ // execution for another purpose.
+ if (StackGuard::IsDebugBreak()) Runtime_DebugBreak(args);
+ if (StackGuard::IsPreempted()) RuntimePreempt(args);
+ if (StackGuard::IsInterrupted()) {
+ // interrupt
+ StackGuard::Continue(INTERRUPT);
+ return Top::StackOverflow();
+ }
+ return Heap::undefined_value();
+}
+
+
+// NOTE: These PrintXXX functions are defined for all builds (not just
+// DEBUG builds) because we may want to be able to trace function
+// calls in all modes.
+static void PrintString(String* str) {
+ // not uncommon to have empty strings
+ if (str->length() > 0) {
+ SmartPointer<char> s =
+ str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ PrintF("%s", *s);
+ }
+}
+
+
+// Prints a one-line human-readable rendering of any tagged value; falls
+// back to the raw pointer for object types not handled explicitly.
+static void PrintObject(Object* obj) {
+ if (obj->IsSmi()) {
+ PrintF("%d", Smi::cast(obj)->value());
+ } else if (obj->IsString() || obj->IsSymbol()) {
+ PrintString(String::cast(obj));
+ } else if (obj->IsNumber()) {
+ PrintF("%g", obj->Number());
+ } else if (obj->IsFailure()) {
+ PrintF("<failure>");
+ } else if (obj->IsUndefined()) {
+ PrintF("<undefined>");
+ } else if (obj->IsNull()) {
+ PrintF("<null>");
+ } else if (obj->IsTrue()) {
+ PrintF("<true>");
+ } else if (obj->IsFalse()) {
+ PrintF("<false>");
+ } else {
+ PrintF("%p", obj);
+ }
+}
+
+
+// Returns the number of JavaScript frames currently on the stack.
+static int StackSize() {
+ int count = 0;
+ JavaScriptFrameIterator it;
+ while (!it.done()) {
+ count++;
+ it.Advance();
+ }
+ return count;
+}
+
+
+// Prints one line of the call trace: an entry line (result == NULL) with
+// the callee name and actual arguments, or an exit line with the result.
+// Indentation reflects the current JavaScript stack depth.
+static void PrintTransition(Object* result) {
+ // indentation
+ { const int nmax = 80;
+ int n = StackSize();
+ if (n <= nmax)
+ PrintF("%4d:%*s", n, n, "");
+ else
+ PrintF("%4d:%*s", n, nmax, "...");
+ }
+
+ if (result == NULL) {
+ // constructor calls
+ JavaScriptFrameIterator it;
+ JavaScriptFrame* frame = it.frame();
+ if (frame->IsConstructor()) PrintF("new ");
+ // function name
+ Object* fun = frame->function();
+ if (fun->IsJSFunction()) {
+ PrintObject(JSFunction::cast(fun)->shared()->name());
+ } else {
+ PrintObject(fun);
+ }
+ // function arguments
+ // (we are intentionally only printing the actually
+ // supplied parameters, not all parameters required)
+ PrintF("(this=");
+ PrintObject(frame->receiver());
+ const int length = frame->GetProvidedParametersCount();
+ for (int i = 0; i < length; i++) {
+ PrintF(", ");
+ PrintObject(frame->GetParameter(i));
+ }
+ PrintF(") {\n");
+
+ } else {
+ // function result
+ PrintF("} -> ");
+ PrintObject(result);
+ PrintF("\n");
+ }
+}
+
+
+// %TraceEnter: logs a function-entry transition (NULL marks entry).
+static Object* Runtime_TraceEnter(Arguments args) {
+ NoHandleAllocation ha;
+ PrintTransition(NULL);
+ return args[0]; // return TOS
+}
+
+
+// %TraceExit: logs a function-exit transition with the result value.
+static Object* Runtime_TraceExit(Arguments args) {
+ NoHandleAllocation ha;
+ PrintTransition(args[0]);
+ return args[0]; // return TOS
+}
+
+
+// %DebugPrint: prints a diagnostic rendering of args[0]. In DEBUG builds a
+// string argument is treated as a code marker and frame registers are
+// dumped instead; release builds only print the raw pointer.
+static Object* Runtime_DebugPrint(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+#ifdef DEBUG
+ if (args[0]->IsString()) {
+ // If we have a string, assume it's a code "marker"
+ // and print some interesting cpu debugging info.
+ JavaScriptFrameIterator it;
+ JavaScriptFrame* frame = it.frame();
+ PrintF("fp = %p, sp = %p, pp = %p: ",
+ frame->fp(), frame->sp(), frame->pp());
+ } else {
+ PrintF("DebugPrint: ");
+ }
+ args[0]->Print();
+#else
+ PrintF("DebugPrint: %p", args[0]);
+#endif
+ PrintF("\n");
+
+ return args[0]; // return TOS
+}
+
+
+// %DebugTrace: prints the current JavaScript stack.
+static Object* Runtime_DebugTrace(Arguments args) {
+ ASSERT(args.length() == 1);
+ NoHandleAllocation ha;
+ Top::PrintStack();
+ return args[0]; // return TOS
+}
+
+
+// %DateCurrentTime: current time in milliseconds since the epoch.
+static Object* Runtime_DateCurrentTime(Arguments) {
+ NoHandleAllocation ha;
+
+ // According to ECMA-262, section 15.9.1, page 117, the precision of
+ // the number in a Date object representing a particular instant in
+ // time is milliseconds. Therefore, we floor the result of getting
+ // the OS time.
+ double millis = floor(OS::TimeCurrentMillis());
+ return Heap::NumberFromDouble(millis);
+}
+
+
+// %DateParseString: parses a date string into a fixed-size component
+// array; returns null when the string does not parse.
+static Object* Runtime_DateParseString(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(String, string_object, args[0]);
+
+ Handle<String> str(string_object);
+ Handle<FixedArray> output = Factory::NewFixedArray(DateParser::OUTPUT_SIZE);
+ if (DateParser::Parse(*str, *output)) {
+ return *Factory::NewJSArrayWithElements(output);
+ } else {
+ return *Factory::null_value();
+ }
+}
+
+
+// %DateLocalTimezone: name of the local timezone at the given time value.
+static Object* Runtime_DateLocalTimezone(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ char* zone = OS::LocalTimezone(x);
+ return Heap::AllocateStringFromUtf8(CStrVector(zone));
+}
+
+
+// %DateLocalTimeOffset: offset of local time from UTC, from the OS.
+static Object* Runtime_DateLocalTimeOffset(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ return Heap::NumberFromDouble(OS::LocalTimeOffset());
+}
+
+
+// %DateDaylightSavingsOffset: DST offset at the given time value.
+static Object* Runtime_DateDaylightSavingsOffset(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(x, args[0]);
+ return Heap::NumberFromDouble(OS::DaylightSavingsOffset(x));
+}
+
+
+
+// %NumberIsNaN: true iff the (already numeric) argument is NaN.
+static Object* Runtime_NumberIsNaN(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(value, args[0]);
+ return isnan(value) ? Heap::true_value() : Heap::false_value();
+}
+
+
+// %NumberIsFinite: true iff the argument is neither NaN nor infinite.
+static Object* Runtime_NumberIsFinite(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(value, args[0]);
+ if (isnan(value) || (fpclassify(value) == FP_INFINITE)) {
+ return Heap::false_value();
+ }
+ return Heap::true_value();
+}
+
+
+// %NumberMaxValue: the preallocated Number.MAX_VALUE heap number.
+static Object* Runtime_NumberMaxValue(Arguments) {
+ NoHandleAllocation ha;
+
+ return Heap::number_max_value();
+}
+
+
+// %NumberMinValue: the preallocated Number.MIN_VALUE heap number.
+static Object* Runtime_NumberMinValue(Arguments) {
+ NoHandleAllocation ha;
+
+ return Heap::number_min_value();
+}
+
+
+// %NumberNaN: the preallocated NaN heap number.
+static Object* Runtime_NumberNaN(Arguments) {
+ NoHandleAllocation ha;
+
+ return Heap::nan_value();
+}
+
+
+// %NumberNegativeInfinity: the preallocated -Infinity heap number.
+static Object* Runtime_NumberNegativeInfinity(Arguments) {
+ NoHandleAllocation ha;
+
+ return Heap::negative_infinity_value();
+}
+
+
+// %NumberPositiveInfinity: the preallocated +Infinity heap number.
+static Object* Runtime_NumberPositiveInfinity(Arguments) {
+ NoHandleAllocation ha;
+
+ return Heap::infinity_value();
+}
+
+
+// Computes the context in which an eval() call should run: the caller's
+// own context for a same-environment eval, or a fresh adaptor context when
+// the call crosses environments. Throws an EvalError when the caller does
+// not support eval (eval called through an alias).
+static Object* EvalContext() {
+ // The topmost JS frame belongs to the eval function which called
+ // the CompileString runtime function. We need to unwind one level
+ // to get to the caller of eval.
+ StackFrameLocator locator;
+ JavaScriptFrame* frame = locator.FindJavaScriptFrame(1);
+
+ // Check if the caller of eval() supports eval. If not, eval is
+ // called through an alias in which case we throw an EvalError.
+ HandleScope scope;
+ if (!ScopeInfo<>::SupportsEval(frame->FindCode())) {
+ Handle<Object> error =
+ Factory::NewEvalError("illegal_eval", HandleVector<Object>(NULL, 0));
+ return Top::Throw(*error);
+ }
+
+ // Fetch the caller context from the frame.
+ Handle<Context> caller(Context::cast(frame->context()));
+
+ // Check for eval() invocations that cross environments. Use the
+ // context from the stack if evaluating in current environment.
+ Handle<Context> target = Top::global_context();
+ if (caller->global_context() == *target) return *caller;
+
+ // Compute a function closure that captures the calling context. We
+ // need a function that has trivial scope info, since it is only
+ // used to hold the context chain together.
+ Handle<JSFunction> closure = Factory::NewFunction(Factory::empty_symbol(),
+ Factory::undefined_value());
+ closure->set_context(*caller);
+
+ // Create a new adaptor context that has the target environment as
+ // the extension object. This enables the evaluated code to see both
+ // the current context with locals and everything and to see global
+ // variables declared in the target global object. Furthermore, any
+ // properties introduced with 'var' will be added to the target
+ // global object because it is the extension object.
+ Handle<Context> adaptor =
+ Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, closure);
+ adaptor->set_extension(target->global());
+ return *adaptor;
+}
+
+
+// Returns the receiver of the JavaScript frame one level below the
+// topmost frame, i.e. the receiver of the caller of eval.
+static Object* Runtime_EvalReceiver(Arguments args) {
+ StackFrameLocator locator;
+ return locator.FindJavaScriptFrame(1)->receiver();
+}
+
+
+// Compiles eval() source code and returns the resulting function.
+// args[0]: string: the source to compile
+// args[1]: boolean: true for a contextual (direct) eval
+static Object* Runtime_CompileString(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ bool contextual = args[1]->IsTrue();
+ RUNTIME_ASSERT(contextual || args[1]->IsFalse());
+
+ // Compute the eval context.
+ Handle<Context> context;
+ if (contextual) {
+ // Get eval context. May not be available if we are calling eval
+ // through an alias, and the corresponding frame doesn't have a
+ // proper eval context set up.
+ Object* eval_context = EvalContext();
+ if (eval_context->IsFailure()) return eval_context;
+ context = Handle<Context>(Context::cast(eval_context));
+ } else {
+ // Indirect eval: always evaluate in the current global context.
+ context = Handle<Context>(Top::context()->global_context());
+ }
+
+ // Compile eval() source.
+ Handle<String> source(String::cast(args[0]));
+ Handle<JSFunction> boilerplate =
+ Compiler::CompileEval(context->IsGlobalContext(), source);
+ if (boilerplate.is_null()) return Failure::Exception();
+ Handle<JSFunction> fun =
+ Factory::NewFunctionFromBoilerplate(boilerplate, context);
+ return *fun;
+}
+
+
+// Compiles a script and returns the resulting function.
+// args[0]: string: the source
+// args[1]: string: the script name
+// args[2]: smi: line offset
+// args[3]: smi: column offset
+static Object* Runtime_CompileScript(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 4);
+
+ CONVERT_ARG_CHECKED(String, source, 0);
+ CONVERT_ARG_CHECKED(String, script, 1);
+ CONVERT_CHECKED(Smi, line_attrs, args[2]);
+ int line = line_attrs->value();
+ CONVERT_CHECKED(Smi, col_attrs, args[3]);
+ int col = col_attrs->value();
+ Handle<JSFunction> boilerplate =
+ Compiler::Compile(source, script, line, col, NULL, NULL);
+ if (boilerplate.is_null()) return Failure::Exception();
+ Handle<JSFunction> fun =
+ Factory::NewFunctionFromBoilerplate(boilerplate,
+ Handle<Context>(Top::context()));
+ return *fun;
+}
+
+
+// args[0]: the newly created function. Returns the same function.
+static Object* Runtime_SetNewFunctionAttributes(Arguments args) {
+ // This utility adjusts the property attributes for newly created Function
+ // object ("new Function(...)") by changing the map.
+ // All it does is changing the prototype property to enumerable
+ // as specified in ECMA262, 15.3.5.2.
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, func, 0);
+ // The map swap is only valid if the layout is unchanged; assert that.
+ ASSERT(func->map()->instance_type() ==
+ Top::function_instance_map()->instance_type());
+ ASSERT(func->map()->instance_size() ==
+ Top::function_instance_map()->instance_size());
+ func->set_map(*Top::function_instance_map());
+ return *func;
+}
+
+
+// This will not allocate (flatten the string), but it may run
+// very slowly for very deeply nested ConsStrings. For debugging use only.
+// args[0]: string to print. Returns the input string.
+static Object* Runtime_GlobalPrint(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(String, string, args[0]);
+ StringInputBuffer buffer(string);
+ while (buffer.has_more()) {
+ uint16_t character = buffer.GetNext();
+ // NOTE(review): characters above 0xFF are truncated by the %c format.
+ PrintF("%c", character);
+ }
+ return string;
+}
+
+
+// Removes holes from a JSArray; non-array arguments pass through unchanged.
+// args[0]: the array (or any other object, which is returned as-is)
+static Object* Runtime_RemoveArrayHoles(Arguments args) {
+ ASSERT(args.length() == 1);
+ // Ignore the case if this is not a JSArray.
+ if (!args[0]->IsJSArray()) return args[0];
+ return JSArray::cast(args[0])->RemoveHoles();
+}
+
+
+// Move contents of argument 0 (an array) to argument 1 (an array).
+// The source array is left empty with length 0. Returns the target array.
+static Object* Runtime_MoveArrayContents(Arguments args) {
+ ASSERT(args.length() == 2);
+ CONVERT_CHECKED(JSArray, from, args[0]);
+ CONVERT_CHECKED(JSArray, to, args[1]);
+ to->SetContent(FixedArray::cast(from->elements()));
+ to->set_length(from->length());
+ // Detach the elements from the source so they are not shared.
+ from->SetContent(Heap::empty_fixed_array());
+ from->set_length(0);
+ return to;
+}
+
+
+// How many elements does this array have?
+// args[0]: the array. For dictionary-backed arrays the actual element
+// count is returned; otherwise the array length is used as the estimate.
+static Object* Runtime_EstimateNumberOfElements(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSArray, array, args[0]);
+ HeapObject* elements = array->elements();
+ if (elements->IsDictionary()) {
+ return Smi::FromInt(Dictionary::cast(elements)->NumberOfElements());
+ } else {
+ return array->length();
+ }
+}
+
+
+// Returns an array that tells you where in the [0, length) interval an array
+// might have elements. Can either return keys or intervals. Keys can have
+// gaps in (undefined). Intervals can also span over some undefined keys.
+// args[0]: the array
+// args[1]: number: upper bound (exclusive) for interesting indices
+static Object* Runtime_GetArrayKeys(Arguments args) {
+ ASSERT(args.length() == 2);
+ HandleScope scope;
+ CONVERT_CHECKED(JSArray, raw_array, args[0]);
+ Handle<JSArray> array(raw_array);
+ CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
+ HeapObject* elements = array->elements();
+ if (elements->IsDictionary()) {
+ // Create an array and get all the keys into it, then remove all the
+ // keys that are not integers in the range 0 to length-1.
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(array);
+ int keys_length = keys->length();
+ for (int i = 0; i < keys_length; i++) {
+ Object* key = keys->get(i);
+ uint32_t index;
+ if (!Array::IndexFromObject(key, &index) || index >= length) {
+ // Zap invalid keys.
+ keys->set_undefined(i);
+ }
+ }
+ return *Factory::NewJSArrayWithElements(keys);
+ } else {
+ // Fast case: report a single interval covering the whole range.
+ Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
+ // -1 means start of array.
+ single_interval->set(0, Smi::FromInt(-1));
+ Handle<Object> length_object =
+ Factory::NewNumber(static_cast<double>(length));
+ single_interval->set(1, *length_object);
+ return *Factory::NewJSArrayWithElements(single_interval);
+ }
+}
+
+
+// DefineAccessor takes an optional final argument which is the
+// property attributes (eg, DONT_ENUM, DONT_DELETE). IMPORTANT: due
+// to the way accessors are implemented, it is set for both the getter
+// and setter on the first call to DefineAccessor and ignored on
+// subsequent calls.
+// args[0]: object
+// args[1]: property name
+// args[2]: smi: 0 for a getter, non-zero for a setter
+// args[3]: accessor function
+// args[4]: (optional) smi: property attribute bits
+static Object* Runtime_DefineAccessor(Arguments args) {
+ RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
+ // Compute attributes.
+ PropertyAttributes attributes = NONE;
+ if (args.length() == 5) {
+ CONVERT_CHECKED(Smi, attrs, args[4]);
+ int value = attrs->value();
+ // Only attribute bits should be set.
+ ASSERT((value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+ attributes = static_cast<PropertyAttributes>(value);
+ }
+
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ CONVERT_CHECKED(String, name, args[1]);
+ CONVERT_CHECKED(Smi, flag, args[2]);
+ CONVERT_CHECKED(JSFunction, fun, args[3]);
+ return obj->DefineAccessor(name, flag->value() == 0, fun, attributes);
+}
+
+
+// Looks up an accessor on an object.
+// args[0]: object
+// args[1]: property name
+// args[2]: smi: 0 to look up the getter, non-zero for the setter
+static Object* Runtime_LookupAccessor(Arguments args) {
+ ASSERT(args.length() == 3);
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ CONVERT_CHECKED(String, name, args[1]);
+ CONVERT_CHECKED(Smi, flag, args[2]);
+ return obj->LookupAccessor(name, flag->value() == 0);
+}
+
+
+// Helper functions for wrapping and unwrapping stack frame ids.
+// A frame id is 4-byte aligned, so the low two bits are dropped to make
+// the value fit in a Smi.
+static Smi* WrapFrameId(StackFrame::Id id) {
+ ASSERT(IsAligned(OffsetFrom(id), 4));
+ return Smi::FromInt(id >> 2);
+}
+
+
+// Inverse of WrapFrameId: restores the dropped alignment bits.
+static StackFrame::Id UnwrapFrameId(Smi* wrapped) {
+ return static_cast<StackFrame::Id>(wrapped->value() << 2);
+}
+
+
+// Adds a JavaScript function as a debug event listener.
+// args[0]: debug event listener function
+// args[1]: object supplied during callback
+static Object* Runtime_AddDebugEventListener(Arguments args) {
+ ASSERT(args.length() == 2);
+ // Convert the parameters to API objects to call the API function for adding
+ // a JavaScript function as debug event listener.
+ CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
+ v8::Handle<v8::Function> fun(ToApi<v8::Function>(raw_fun));
+ // BUG FIX: the callback data is args[1]; previously args[0] (the listener
+ // function itself) was passed as the data argument.
+ v8::Handle<v8::Value> data(ToApi<v8::Value>(args.at<Object>(1)));
+ v8::Debug::AddDebugEventListener(fun, data);
+
+ return Heap::undefined_value();
+}
+
+
+// Removes a JavaScript function debug event listener.
+// args[0]: debug event listener function
+// Returns undefined.
+static Object* Runtime_RemoveDebugEventListener(Arguments args) {
+ ASSERT(args.length() == 1);
+ // Convert the parameter to an API object to call the API function for
+ // removing a JavaScript function debug event listener.
+ CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
+ v8::Handle<v8::Function> fun(ToApi<v8::Function>(raw_fun));
+ v8::Debug::RemoveDebugEventListener(fun);
+
+ return Heap::undefined_value();
+}
+
+
+// Schedules a debug break at the next stack guard check.
+// The single argument is unused by this function.
+static Object* Runtime_Break(Arguments args) {
+ ASSERT(args.length() == 1);
+ StackGuard::DebugBreak();
+ return Heap::undefined_value();
+}
+
+
+// Extracts the value of a property lookup result for use by the debugger.
+// Hole values and property types without a directly readable value
+// (callbacks, map transitions, interceptors) yield undefined.
+static Object* DebugLookupResultValue(LookupResult* result) {
+ Object* value;
+ switch (result->type()) {
+ case NORMAL: {
+ // Slow-mode property: read it out of the property dictionary.
+ Dictionary* dict =
+ JSObject::cast(result->holder())->property_dictionary();
+ value = dict->ValueAt(result->GetDictionaryEntry());
+ if (value->IsTheHole()) {
+ return Heap::undefined_value();
+ }
+ return value;
+ }
+ case FIELD:
+ // Fast-mode property: read the in-object/backing-store field.
+ value =
+ JSObject::cast(
+ result->holder())->properties()->get(result->GetFieldIndex());
+ if (value->IsTheHole()) {
+ return Heap::undefined_value();
+ }
+ return value;
+ case CONSTANT_FUNCTION:
+ return result->GetConstantFunction();
+ case CALLBACKS:
+ return Heap::undefined_value();
+ case MAP_TRANSITION:
+ return Heap::undefined_value();
+ case INTERCEPTOR:
+ return Heap::undefined_value();
+ default:
+ UNREACHABLE();
+ }
+ return Heap::undefined_value();
+}
+
+
+// Returns a two-element array with [value, property details] for a local
+// property, or undefined if the object has no such local property.
+// args[0]: object
+// args[1]: property name
+static Object* Runtime_DebugGetLocalPropertyDetails(Arguments args) {
+ HandleScope scope;
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_CHECKED(String, name, 1);
+
+ // Check if the name is trivially convertible to an index and get the element
+ // if so.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ Handle<FixedArray> details = Factory::NewFixedArray(2);
+ details->set(0, Runtime::GetElementOrCharAt(obj, index));
+ // Elements have no stored details; report NORMAL with no attributes.
+ details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
+ return *Factory::NewJSArrayWithElements(details);
+ }
+
+ // Perform standard local lookup on the object.
+ LookupResult result;
+ obj->LocalLookup(*name, &result);
+ if (result.IsProperty()) {
+ Handle<Object> value(DebugLookupResultValue(&result));
+ Handle<FixedArray> details = Factory::NewFixedArray(2);
+ details->set(0, *value);
+ details->set(1, result.GetPropertyDetails().AsSmi());
+ return *Factory::NewJSArrayWithElements(details);
+ }
+ return Heap::undefined_value();
+}
+
+
+// Looks up a property (including the prototype chain) and returns its
+// value for debugger use, or undefined when not found.
+// args[0]: object
+// args[1]: property name
+static Object* Runtime_DebugGetProperty(Arguments args) {
+ HandleScope scope;
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_CHECKED(String, name, 1);
+
+ LookupResult result;
+ obj->Lookup(*name, &result);
+ if (result.IsProperty()) {
+ return DebugLookupResultValue(&result);
+ }
+ return Heap::undefined_value();
+}
+
+
+// Return the names of the local named properties.
+// args[0]: object (non-objects yield undefined)
+static Object* Runtime_DebugLocalPropertyNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Heap::undefined_value();
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ // NONE means no attribute filtering: include every local property.
+ int n = obj->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
+ Handle<FixedArray> names = Factory::NewFixedArray(n);
+ obj->GetLocalPropertyNames(*names);
+ return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return the names of the local indexed properties.
+// args[0]: object (non-objects yield undefined)
+static Object* Runtime_DebugLocalElementNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Heap::undefined_value();
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ // NONE means no attribute filtering: include every local element.
+ int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
+ Handle<FixedArray> names = Factory::NewFixedArray(n);
+ obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
+ return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return the property type calculated from the property details.
+// args[0]: smi with property details.
+static Object* Runtime_DebugPropertyTypeFromDetails(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(Smi, details, args[0]);
+ PropertyType type = PropertyDetails(details).type();
+ return Smi::FromInt(static_cast<int>(type));
+}
+
+
+// Return the property attribute calculated from the property details.
+// args[0]: smi with property details.
+static Object* Runtime_DebugPropertyAttributesFromDetails(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(Smi, details, args[0]);
+ PropertyAttributes attributes = PropertyDetails(details).attributes();
+ return Smi::FromInt(static_cast<int>(attributes));
+}
+
+
+// Return the property insertion index calculated from the property details.
+// args[0]: smi with property details.
+static Object* Runtime_DebugPropertyIndexFromDetails(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(Smi, details, args[0]);
+ int index = PropertyDetails(details).index();
+ return Smi::FromInt(index);
+}
+
+
+// Return information on whether an object has a named or indexed interceptor.
+// args[0]: object
+// Returns a bit mask: bit 1 (value 2) = named interceptor,
+// bit 0 (value 1) = indexed interceptor. Non-objects yield 0.
+static Object* Runtime_DebugInterceptorInfo(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Smi::FromInt(0);
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ int result = 0;
+ if (obj->HasNamedInterceptor()) result |= 2;
+ if (obj->HasIndexedInterceptor()) result |= 1;
+
+ return Smi::FromInt(result);
+}
+
+
+// Return property names from named interceptor.
+// args[0]: object (must have a named interceptor)
+// Returns undefined if the interceptor reports no names.
+static Object* Runtime_DebugNamedInterceptorPropertyNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ RUNTIME_ASSERT(obj->HasNamedInterceptor());
+
+ v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
+ if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ return Heap::undefined_value();
+}
+
+
+// Return element names from indexed interceptor.
+// args[0]: object (must have an indexed interceptor)
+// Returns undefined if the interceptor reports no names.
+static Object* Runtime_DebugIndexedInterceptorElementNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ RUNTIME_ASSERT(obj->HasIndexedInterceptor());
+
+ v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
+ if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ return Heap::undefined_value();
+}
+
+
+// Return property value from named interceptor.
+// args[0]: object (must have a named interceptor)
+// args[1]: property name
+static Object* Runtime_DebugNamedInterceptorPropertyValue(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ RUNTIME_ASSERT(obj->HasNamedInterceptor());
+ CONVERT_ARG_CHECKED(String, name, 1);
+
+ // The attributes out-parameter is required by the API but unused here.
+ PropertyAttributes attributes;
+ Object* result = obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
+ return result;
+}
+
+
+// Return element value from indexed interceptor.
+// args[0]: object (must have an indexed interceptor)
+// args[1]: index
+static Object* Runtime_DebugIndexedInterceptorElementValue(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ RUNTIME_ASSERT(obj->HasIndexedInterceptor());
+ CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
+
+ Object* result = obj->GetElementWithInterceptor(*obj, index);
+ return result;
+}
+
+
+// Verifies that the debugger break id in args[0] matches the current
+// break state. Returns true on success; throws (returns a Failure) when
+// execution is not currently broken with that id. Also used as a helper
+// by the other debugger runtime entries below.
+static Object* Runtime_CheckExecutionState(Arguments args) {
+ ASSERT(args.length() >= 1);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ // Check that the break id is valid and that there is a valid frame
+ // where execution is broken.
+ if (break_id != Top::break_id() ||
+ Top::break_frame_id() == StackFrame::NO_ID) {
+ return Top::Throw(Heap::illegal_execution_state_symbol());
+ }
+
+ return Heap::true_value();
+}
+
+
+// Returns the number of JavaScript frames on the broken stack.
+// args[0]: break id for checking execution state
+static Object* Runtime_GetFrameCount(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ // Check arguments.
+ Object* result = Runtime_CheckExecutionState(args);
+ if (result->IsFailure()) return result;
+
+ // Count all frames which are relevant to debugging stack trace.
+ int n = 0;
+ StackFrame::Id id = Top::break_frame_id();
+ for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) n++;
+ return Smi::FromInt(n);
+}
+
+
+// Indices of the fixed elements in the array built by
+// Runtime_GetFrameDetails; the dynamic part (arguments and locals)
+// starts at kFrameDetailsFirstDynamicIndex.
+static const int kFrameDetailsFrameIdIndex = 0;
+static const int kFrameDetailsReceiverIndex = 1;
+static const int kFrameDetailsFunctionIndex = 2;
+static const int kFrameDetailsArgumentCountIndex = 3;
+static const int kFrameDetailsLocalCountIndex = 4;
+static const int kFrameDetailsSourcePositionIndex = 5;
+static const int kFrameDetailsConstructCallIndex = 6;
+static const int kFrameDetailsDebuggerFrameIndex = 7;
+static const int kFrameDetailsFirstDynamicIndex = 8;
+
+// Return an array with frame details
+// args[0]: number: break id
+// args[1]: number: frame index
+//
+// The array returned contains the following information:
+// 0: Frame id
+// 1: Receiver
+// 2: Function
+// 3: Argument count
+// 4: Local count
+// 5: Source position
+// 6: Constructor call
+// 7: Debugger frame
+// Arguments name, value
+// Locals name, value
+static Object* Runtime_GetFrameDetails(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+
+ // Check arguments.
+ Object* result = Runtime_CheckExecutionState(args);
+ if (result->IsFailure()) return result;
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+
+ // Find the relevant frame with the requested index.
+ StackFrame::Id id = Top::break_frame_id();
+ int count = 0;
+ JavaScriptFrameIterator it(id);
+ for (; !it.done(); it.Advance()) {
+ if (count == index) break;
+ count++;
+ }
+ if (it.done()) return Heap::undefined_value();
+
+ // Traverse the saved contexts chain to find the active context for the
+ // selected frame.
+ SaveContext* save = Top::save_context();
+ while (save != NULL && reinterpret_cast<Address>(save) < it.frame()->sp()) {
+ save = save->prev();
+ }
+
+ // Get the frame id.
+ Handle<Object> frame_id(WrapFrameId(it.frame()->id()));
+
+ // Find source position.
+ int position = it.frame()->FindCode()->SourcePosition(it.frame()->pc());
+
+ // Check for constructor frame.
+ bool constructor = it.frame()->IsConstructor();
+
+ // Get code and read scope info from it for local variable information.
+ Handle<Code> code(it.frame()->FindCode());
+ ScopeInfo<> info(*code);
+
+ // Get the context.
+ Handle<Context> context(Context::cast(it.frame()->context()));
+
+ // Get the locals names and values into a temporary array.
+ //
+ // TODO(1240907): Hide compiler-introduced stack variables
+ // (e.g. .result)? For users of the debugger, they will probably be
+ // confusing.
+ Handle<FixedArray> locals = Factory::NewFixedArray(info.NumberOfLocals() * 2);
+ for (int i = 0; i < info.NumberOfLocals(); i++) {
+ // Name of the local.
+ locals->set(i * 2, *info.LocalName(i));
+
+ // Fetch the value of the local - either from the stack or from a
+ // heap-allocated context.
+ if (i < info.number_of_stack_slots()) {
+ locals->set(i * 2 + 1, it.frame()->GetExpression(i));
+ } else {
+ Handle<String> name = info.LocalName(i);
+ // Traverse the context chain to the function context as all local
+ // variables stored in the context will be on the function context.
+ while (context->previous() != NULL) {
+ context = Handle<Context>(context->previous());
+ }
+ ASSERT(context->is_function_context());
+ locals->set(i * 2 + 1,
+ context->get(ScopeInfo<>::ContextSlotIndex(*code, *name,
+ NULL)));
+ }
+ }
+
+ // Now advance to the arguments adapter frame (if any). It contains all
+ // the provided parameters whereas the function frame always has the number
+ // of arguments matching the functions parameters. The rest of the
+ // information (except for what is collected above) is the same.
+ it.AdvanceToArgumentsFrame();
+
+ // Find the number of arguments to fill. At least fill the number of
+ // parameters for the function and fill more if more parameters are provided.
+ int argument_count = info.number_of_parameters();
+ if (argument_count < it.frame()->GetProvidedParametersCount()) {
+ argument_count = it.frame()->GetProvidedParametersCount();
+ }
+
+ // Calculate the size of the result.
+ int details_size = kFrameDetailsFirstDynamicIndex +
+ 2 * (argument_count + info.NumberOfLocals());
+ Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+
+ // Add the frame id.
+ details->set(kFrameDetailsFrameIdIndex, *frame_id);
+
+ // Add the function (same as in function frame).
+ details->set(kFrameDetailsFunctionIndex, it.frame()->function());
+
+ // Add the arguments count.
+ details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
+
+ // Add the locals count
+ details->set(kFrameDetailsLocalCountIndex,
+ Smi::FromInt(info.NumberOfLocals()));
+
+ // Add the source position.
+ if (position != kNoPosition) {
+ details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
+ } else {
+ details->set(kFrameDetailsSourcePositionIndex, Heap::undefined_value());
+ }
+
+ // Add the constructor information.
+ details->set(kFrameDetailsConstructCallIndex, Heap::ToBoolean(constructor));
+
+ // Add information on whether this frame is invoked in the debugger context.
+ details->set(kFrameDetailsDebuggerFrameIndex,
+ Heap::ToBoolean(*save->context() == *Debug::debug_context()));
+
+ // Fill the dynamic part.
+ int details_index = kFrameDetailsFirstDynamicIndex;
+
+ // Add arguments name and value.
+ for (int i = 0; i < argument_count; i++) {
+ // Name of the argument.
+ if (i < info.number_of_parameters()) {
+ details->set(details_index++, *info.parameter_name(i));
+ } else {
+ details->set(details_index++, Heap::undefined_value());
+ }
+
+ // Parameter value.
+ if (i < it.frame()->GetProvidedParametersCount()) {
+ details->set(details_index++, it.frame()->GetParameter(i));
+ } else {
+ details->set(details_index++, Heap::undefined_value());
+ }
+ }
+
+ // Add locals name and value from the temporary copy from the function frame.
+ for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
+ details->set(details_index++, locals->get(i));
+ }
+
+ // Add the receiver (same as in function frame).
+ // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
+ // THE FRAME ITERATOR TO WRAP THE RECEIVER.
+ Handle<Object> receiver(it.frame()->receiver());
+ if (!receiver->IsJSObject()) {
+ // If the receiver is NOT a JSObject we have hit an optimization
+ // where a value object is not converted into a wrapped JS objects.
+ // To hide this optimization from the debugger, we wrap the receiver
+ // by creating correct wrapper object based on the calling frame's
+ // global context.
+ it.Advance();
+ Handle<Context> calling_frames_global_context(
+ Context::cast(Context::cast(it.frame()->context())->global_context()));
+ receiver = Factory::ToObject(receiver, calling_frames_global_context);
+ }
+ details->set(kFrameDetailsReceiverIndex, *receiver);
+
+ ASSERT_EQ(details_size, details_index);
+ return *Factory::NewJSArrayWithElements(details);
+}
+
+
+// Returns an array of {address, text} objects describing the C stack,
+// or undefined if stack walking fails.
+// args[0]: break id for checking execution state
+static Object* Runtime_GetCFrames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ Object* result = Runtime_CheckExecutionState(args);
+ if (result->IsFailure()) return result;
+
+ // Cap the walk to a fixed number of frames.
+ static const int kMaxCFramesSize = 200;
+ OS::StackFrame frames[kMaxCFramesSize];
+ int frames_count = OS::StackWalk(frames, kMaxCFramesSize);
+ if (frames_count == OS::kStackWalkError) {
+ return Heap::undefined_value();
+ }
+
+ Handle<String> address_str = Factory::LookupAsciiSymbol("address");
+ Handle<String> text_str = Factory::LookupAsciiSymbol("text");
+ Handle<FixedArray> frames_array = Factory::NewFixedArray(frames_count);
+ for (int i = 0; i < frames_count; i++) {
+ Handle<JSObject> frame_value = Factory::NewJSObject(Top::object_function());
+ // NOTE(review): reinterpret_cast to int truncates the address on
+ // 64-bit targets — confirm this code only runs on 32-bit platforms.
+ frame_value->SetProperty(
+ *address_str,
+ *Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address)),
+ NONE);
+
+ // Get the stack walk text for this frame.
+ Handle<String> frame_text;
+ if (strlen(frames[i].text) > 0) {
+ Vector<const char> str(frames[i].text, strlen(frames[i].text));
+ frame_text = Factory::NewStringFromAscii(str);
+ }
+
+ // The "text" property is omitted when the walker produced no text.
+ if (!frame_text.is_null()) {
+ frame_value->SetProperty(*text_str, *frame_text, NONE);
+ }
+
+ frames_array->set(i, *frame_value);
+ }
+ return *Factory::NewJSArrayWithElements(frames_array);
+}
+
+
+// Returns the source break locations of a function as a JS array,
+// or undefined when the function has none.
+// args[0]: function
+static Object* Runtime_GetBreakLocations(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
+ Handle<SharedFunctionInfo> shared(raw_fun->shared());
+ // Find the number of break points
+ Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
+ if (break_locations->IsUndefined()) return Heap::undefined_value();
+ // Return array as JS array
+ return *Factory::NewJSArrayWithElements(
+ Handle<FixedArray>::cast(break_locations));
+}
+
+
+// Set a break point in a function
+// args[0]: function
+// args[1]: number: break source position (within the function source)
+// args[2]: number: break point object
+// Returns undefined.
+static Object* Runtime_SetFunctionBreakPoint(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
+ Handle<SharedFunctionInfo> shared(raw_fun->shared());
+ CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
+ RUNTIME_ASSERT(source_position >= 0);
+ Handle<Object> break_point_object_arg = args.at<Object>(2);
+
+ // Set break point.
+ Debug::SetBreakPoint(shared, source_position, break_point_object_arg);
+
+ return Heap::undefined_value();
+}
+
+
+// Finds the innermost SharedFunctionInfo in the given script containing
+// the source position, compiling lazily as needed. Returns the
+// SharedFunctionInfo, or undefined when the script has no function.
+static Object* FindSharedFunctionInfoInScript(Handle<Script> script,
+ int position) {
+ // Iterate the heap looking for SharedFunctionInfo generated from the
+ // script. The inner most SharedFunctionInfo containing the source position
+ // for the requested break point is found.
+ // NOTE: This might require several heap iterations. If the SharedFunctionInfo
+ // which is found is not compiled it is compiled and the heap is iterated
+ // again as the compilation might create inner functions from the newly
+ // compiled function and the actual requested break point might be in one of
+ // these functions.
+ bool done = false;
+ // The current candidate for the source position:
+ int target_start_position = kNoPosition;
+ Handle<SharedFunctionInfo> target;
+ // The current candidate for the last function in script:
+ Handle<SharedFunctionInfo> last;
+ while (!done) {
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
+ if (obj->IsSharedFunctionInfo()) {
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+ if (shared->script() == *script) {
+ // If the SharedFunctionInfo found has the requested script data and
+ // contains the source position it is a candidate.
+ int start_position = shared->function_token_position();
+ if (start_position == kNoPosition) {
+ start_position = shared->start_position();
+ }
+ if (start_position <= position &&
+ position <= shared->end_position()) {
+ // If there is no candidate or this function is within the current
+ // candidate this is the new candidate.
+ if (target.is_null()) {
+ target_start_position = start_position;
+ target = shared;
+ } else {
+ if (target_start_position < start_position &&
+ shared->end_position() < target->end_position()) {
+ target_start_position = start_position;
+ target = shared;
+ }
+ }
+ }
+
+ // Keep track of the last function in the script.
+ // NOTE(review): this compares end_position() of the candidate
+ // against start_position() of the current last function;
+ // presumably it should compare against last->end_position() —
+ // confirm.
+ if (last.is_null() ||
+ shared->end_position() > last->start_position()) {
+ last = shared;
+ }
+ }
+ }
+ }
+
+ // Make sure some candidate is selected.
+ if (target.is_null()) {
+ if (!last.is_null()) {
+ // Position after the last function - use last.
+ target = last;
+ } else {
+ // Unable to find function - possibly script without any function.
+ return Heap::undefined_value();
+ }
+ }
+
+ // If the candidate found is compiled we are done. NOTE: when lazy
+ // compilation of inner functions is introduced some additional checking
+ // needs to be done here to compile inner functions.
+ done = target->is_compiled();
+ if (!done) {
+ // If the candidate is not compiled compile it to reveal any inner
+ // functions which might contain the requested source position.
+ CompileLazyShared(target, KEEP_EXCEPTION);
+ }
+ }
+
+ return *target;
+}
+
+
+// Change the state of a break point in a script. NOTE: Regarding performance
+// see the NOTE for GetScriptFromScriptData.
+// args[0]: script to set break point in
+// args[1]: number: break source position (within the script source)
+// args[2]: number: break point object
+// Returns undefined; silently does nothing when no function covers the
+// position.
+static Object* Runtime_SetScriptBreakPoint(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
+ RUNTIME_ASSERT(source_position >= 0);
+ Handle<Object> break_point_object_arg = args.at<Object>(2);
+
+ // Get the script from the script wrapper.
+ RUNTIME_ASSERT(wrapper->value()->IsScript());
+ Handle<Script> script(Script::cast(wrapper->value()));
+
+ Object* result = FindSharedFunctionInfoInScript(script, source_position);
+ if (!result->IsUndefined()) {
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+ // Find position within function. The script position might be before the
+ // source position of the first function.
+ int position;
+ if (shared->start_position() > source_position) {
+ position = 0;
+ } else {
+ position = source_position - shared->start_position();
+ }
+ Debug::SetBreakPoint(shared, position, break_point_object_arg);
+ }
+ return Heap::undefined_value();
+}
+
+
+// Clear a break point
+// args[0]: number: break point object
+// Returns undefined.
+static Object* Runtime_ClearBreakPoint(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ Handle<Object> break_point_object_arg = args.at<Object>(0);
+
+ // Clear break point.
+ Debug::ClearBreakPoint(break_point_object_arg);
+
+ return Heap::undefined_value();
+}
+
+
+// Change the state of break on exceptions
+// args[0]: number: the ExceptionBreakType (caught or uncaught)
+// args[1]: boolean indicating on/off
+// Returns undefined.
+static Object* Runtime_ChangeBreakOnException(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ ASSERT(args[0]->IsNumber());
+ ASSERT(args[1]->IsBoolean());
+
+ // Update break point state
+ ExceptionBreakType type =
+ static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+ bool enable = args[1]->ToBoolean()->IsTrue();
+ Debug::ChangeBreakOnException(type, enable);
+ return Heap::undefined_value();
+}
+
+
+// Prepare for stepping
+// args[0]: break id for checking execution state
+// args[1]: step action from the enumeration StepAction
+// args[2]: number of times to perform the step
+// Throws an illegal argument exception for invalid actions or counts.
+static Object* Runtime_PrepareStep(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+ // Check arguments.
+ Object* check_result = Runtime_CheckExecutionState(args);
+ if (check_result->IsFailure()) return check_result;
+ if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+
+ // Get the step action and check validity.
+ StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
+ if (step_action != StepIn &&
+ step_action != StepNext &&
+ step_action != StepOut &&
+ step_action != StepInMin &&
+ step_action != StepMin) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+
+ // Get the number of steps.
+ int step_count = NumberToInt32(args[2]);
+ if (step_count < 1) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+
+ // Prepare step. (Removed a redundant static_cast: step_action already
+ // has type StepAction.)
+ Debug::PrepareStep(step_action, step_count);
+ return Heap::undefined_value();
+}
+
+
+// Clear all stepping set by PrepareStep.
+// The single argument is unused by this function. Returns undefined.
+static Object* Runtime_ClearStepping(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ Debug::ClearStepping();
+ return Heap::undefined_value();
+}
+
+
+// Creates a copy of the with context chain. The copy of the context chain
+// is linked to the function context supplied.
+static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
+ Handle<Context> function_context) {
+ // At the bottom of the chain. Return the function context to link to.
+ if (context_chain->is_function_context()) {
+ return function_context;
+ }
+
+ // Recursively copy the with contexts. BUG FIX: the recursive call used to
+ // pass (function_context, previous) — the parameters swapped — which made
+ // the recursion bottom out immediately (function_context is a function
+ // context) and linked the new with context to the original, uncopied
+ // previous chain instead of to function_context.
+ Handle<Context> previous(context_chain->previous());
+ Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
+ return Factory::NewWithContext(
+ CopyWithContextChain(previous, function_context), extension);
+}
+
+
+// Helper function to find or create the arguments object for
+// Runtime_DebugEvaluate.
+// Looks for an existing 'arguments' binding first in the frame's stack
+// slots, then in the function context; if neither exists, materializes a
+// fresh arguments object from the actual parameters of the frame.
+static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
+ Handle<JSFunction> function,
+ Handle<Code> code,
+ const ScopeInfo<>* sinfo,
+ Handle<Context> function_context) {
+ // Try to find the value of 'arguments' to pass as parameter. If it is not
+ // found (that is the debugged function does not reference 'arguments' and
+ // does not support eval) then create an 'arguments' object.
+ int index;
+ if (sinfo->number_of_stack_slots() > 0) {
+ // StackSlotIndex returns -1 when 'arguments' has no stack slot.
+ index = ScopeInfo<>::StackSlotIndex(*code, Heap::arguments_symbol());
+ if (index != -1) {
+ return Handle<Object>(frame->GetExpression(index));
+ }
+ }
+
+ if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+ // ContextSlotIndex returns -1 when 'arguments' is not context-allocated.
+ index = ScopeInfo<>::ContextSlotIndex(*code, Heap::arguments_symbol(),
+ NULL);
+ if (index != -1) {
+ return Handle<Object>(function_context->get(index));
+ }
+ }
+
+ // No existing binding: build a new arguments object holding the actual
+ // parameters provided to the frame.
+ const int length = frame->GetProvidedParametersCount();
+ Handle<Object> arguments = Factory::NewArgumentsObject(function, length);
+ FixedArray* array = FixedArray::cast(JSObject::cast(*arguments)->elements());
+ ASSERT(array->length() == length);
+ for (int i = 0; i < length; i++) {
+ array->set(i, frame->GetParameter(i));
+ }
+ return arguments;
+}
+
+
+// Evaluate a piece of JavaScript in the context of a stack frame for
+// debugging. This is accomplished by creating a new context which in its
+// extension part has all the parameters and locals of the function on the
+// stack frame. A function which calls eval with the code to evaluate is then
+// compiled in this context and called in this context. As this context
+// replaces the context of the function on the stack frame a new (empty)
+// function is created as well to be used as the closure for the context.
+// This function and the context acts as replacements for the function on the
+// stack frame presenting the same view of the values of parameters and
+// local variables as if the piece of JavaScript was evaluated at the point
+// where the function on the stack frame is currently stopped.
+// args[0]: break id for checking execution state
+// args[1]: Smi-wrapped frame id of the frame to evaluate in
+// args[2]: source string to evaluate
+static Object* Runtime_DebugEvaluate(Arguments args) {
+ HandleScope scope;
+
+ // Check the execution state and decode arguments frame and source to be
+ // evaluated.
+ ASSERT(args.length() == 3);
+ Object* check_result = Runtime_CheckExecutionState(args);
+ if (check_result->IsFailure()) return check_result;
+ CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+ CONVERT_ARG_CHECKED(String, source, 2);
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator it(id);
+ JavaScriptFrame* frame = it.frame();
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<Code> code(function->code());
+ ScopeInfo<> sinfo(*code);
+
+ // Traverse the saved contexts chain to find the active context for the
+ // selected frame.
+ SaveContext* save = Top::save_context();
+ while (save != NULL && reinterpret_cast<Address>(save) < frame->sp()) {
+ save = save->prev();
+ }
+ ASSERT(save != NULL);
+ // savex restores the current context when this function returns.
+ SaveContext savex;
+ Top::set_context(*(save->context()));
+ Top::set_security_context(*(save->security_context()));
+
+ // Create the (empty) function replacing the function on the stack frame for
+ // the purpose of evaluating in the context created below. It is important
+ // that this function does not describe any parameters and local variables
+ // in the context. If it does then this will cause problems with the lookup
+ // in Context::Lookup, where context slots for parameters and local variables
+ // are looked at before the extension object.
+ Handle<JSFunction> go_between =
+ Factory::NewFunction(Factory::empty_string(), Factory::undefined_value());
+ go_between->set_context(function->context());
+#ifdef DEBUG
+ ScopeInfo<> go_between_sinfo(go_between->shared()->code());
+ ASSERT(go_between_sinfo.number_of_parameters() == 0);
+ ASSERT(go_between_sinfo.number_of_context_slots() == 0);
+#endif
+
+ // Allocate and initialize a context extension object with all the
+ // arguments, stack locals heap locals and extension properties of the
+ // debugged function.
+ Handle<JSObject> context_ext = Factory::NewJSObject(Top::object_function());
+ // First fill all parameters to the context extension.
+ for (int i = 0; i < sinfo.number_of_parameters(); ++i) {
+ SetProperty(context_ext,
+ sinfo.parameter_name(i),
+ Handle<Object>(frame->GetParameter(i)), NONE);
+ }
+ // Second fill all stack locals to the context extension.
+ for (int i = 0; i < sinfo.number_of_stack_slots(); i++) {
+ SetProperty(context_ext,
+ sinfo.stack_slot_name(i),
+ Handle<Object>(frame->GetExpression(i)), NONE);
+ }
+ // Third fill all context locals to the context extension.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->fcontext());
+ for (int i = Context::MIN_CONTEXT_SLOTS;
+ i < sinfo.number_of_context_slots();
+ ++i) {
+ int context_index =
+ ScopeInfo<>::ContextSlotIndex(*code, *sinfo.context_slot_name(i), NULL);
+ SetProperty(context_ext,
+ sinfo.context_slot_name(i),
+ Handle<Object>(function_context->get(context_index)), NONE);
+ }
+ // Finally copy any properties from the function context extension. This will
+ // be variables introduced by eval.
+ if (function_context->extension() != NULL &&
+ !function_context->IsGlobalContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ ASSERT(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ SetProperty(context_ext, key, GetProperty(ext, key), NONE);
+ }
+ }
+
+ // Allocate a new context for the debug evaluation and set the extension
+ // object built above.
+ Handle<Context> context =
+ Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+ context->set_extension(*context_ext);
+ // Copy any with contexts present and chain them in front of this context.
+ context = CopyWithContextChain(frame_context, context);
+
+ // Wrap the evaluation statement in a new function compiled in the newly
+ // created context. The function has one parameter which has to be called
+ // 'arguments'. This is to have access to what would have been 'arguments' in
+ // the function being debugged.
+ // function(arguments,__source__) {return eval(__source__);}
+ static const char* source_str =
+ "function(arguments,__source__){return eval(__source__);}";
+ static const int source_str_length = strlen(source_str);
+ Handle<String> function_source =
+ Factory::NewStringFromAscii(Vector<const char>(source_str,
+ source_str_length));
+ Handle<JSFunction> boilerplate =
+ Compiler::CompileEval(context->IsGlobalContext(), function_source);
+ if (boilerplate.is_null()) return Failure::Exception();
+ Handle<JSFunction> compiled_function =
+ Factory::NewFunctionFromBoilerplate(boilerplate, context);
+
+ // Invoke the result of the compilation to get the evaluation function.
+ bool has_pending_exception;
+ Handle<Object> receiver(frame->receiver());
+ Handle<Object> evaluation_function =
+ Execution::Call(compiled_function, receiver, 0, NULL,
+ &has_pending_exception);
+
+ Handle<Object> arguments = GetArgumentsObject(frame, function, code, &sinfo,
+ function_context);
+
+ // Invoke the evaluation function and return the result.
+ const int argc = 2;
+ Object** argv[argc] = { arguments.location(),
+ Handle<Object>::cast(source).location() };
+ Handle<Object> result =
+ Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
+ argc, argv, &has_pending_exception);
+ return *result;
+}
+
+
+// Evaluate a piece of JavaScript in the global context while the debugger
+// is active, using the top context from before the debugger was invoked.
+// args[0]: break id for checking execution state
+// args[1]: source string to evaluate
+static Object* Runtime_DebugEvaluateGlobal(Arguments args) {
+ HandleScope scope;
+
+ // Check the execution state and decode arguments frame and source to be
+ // evaluated.
+ ASSERT(args.length() == 2);
+ Object* check_result = Runtime_CheckExecutionState(args);
+ if (check_result->IsFailure()) return check_result;
+ CONVERT_ARG_CHECKED(String, source, 1);
+
+ // Enter the top context from before the debugger was invoked. Walk the
+ // SaveContext chain past any entries that refer to the debugger context.
+ SaveContext save;
+ SaveContext* top = &save;
+ while (top != NULL && *top->context() == *Debug::debug_context()) {
+ top = top->prev();
+ }
+ if (top != NULL) {
+ Top::set_context(*top->context());
+ Top::set_security_context(*top->security_context());
+ }
+
+ // Get the global context now set to the top context from before the
+ // debugger was invoked.
+ Handle<Context> context = Top::global_context();
+
+ // Compile the source to be evaluated.
+ Handle<JSFunction> boilerplate(Compiler::CompileEval(true, source));
+ if (boilerplate.is_null()) return Failure::Exception();
+ Handle<JSFunction> compiled_function =
+ Handle<JSFunction>(Factory::NewFunctionFromBoilerplate(boilerplate,
+ context));
+
+ // Invoke the result of the compilation to get the evaluation function.
+ bool has_pending_exception;
+ Handle<Object> receiver = Top::global();
+ Handle<Object> result =
+ Execution::Call(compiled_function, receiver, 0, NULL,
+ &has_pending_exception);
+ return *result;
+}
+
+
+// Helper function used by Runtime_DebugGetLoadedScripts below.
+// Scans the heap for Script objects (other than the empty script) and, when
+// 'instances' is non-NULL, stores up to 'instances_size' of them into it.
+// Always returns the total number of matching scripts found, so a first
+// call with (NULL, 0) can be used to size the result array.
+static int DebugGetLoadedScripts(FixedArray* instances, int instances_size) {
+ NoHandleAllocation ha;
+ AssertNoAllocation no_alloc;
+
+ // Get hold of the current empty script.
+ Context* context = Top::context()->global_context();
+ Script* empty = context->empty_script();
+
+ // Scan heap for Script objects.
+ int count = 0;
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
+ if (obj->IsScript() && obj != empty) {
+ if (instances != NULL && count < instances_size) {
+ instances->set(count, obj);
+ }
+ count++;
+ }
+ }
+
+ return count;
+}
+
+
+// Collect all scripts currently loaded in the heap and return them as a
+// JS array of script wrapper objects.
+// args[0]: break id for checking execution state (unused beyond arity check)
+static Object* Runtime_DebugGetLoadedScripts(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
+ // rid of all the cached script wrappers and the second gets rid of the
+ // scripts which are no longer referenced.
+ Heap::CollectGarbage(0, OLD_SPACE);
+ Heap::CollectGarbage(0, OLD_SPACE);
+
+ // Get the number of scripts.
+ int count;
+ count = DebugGetLoadedScripts(NULL, 0);
+
+ // Allocate an array to hold the result.
+ Handle<FixedArray> instances = Factory::NewFixedArray(count);
+
+ // Fill the script objects.
+ count = DebugGetLoadedScripts(*instances, count);
+
+ // Convert the script objects to proper JS objects.
+ for (int i = 0; i < count; i++) {
+ Handle<Script> script(Script::cast(instances->get(i)));
+ instances->set(i, *GetScriptWrapper(script));
+ }
+
+ // Return result as a JS array.
+ Handle<JSObject> result = Factory::NewJSObject(Top::array_function());
+ Handle<JSArray>::cast(result)->SetContent(*instances);
+ return *result;
+}
+
+
+// Helper function used by Runtime_DebugReferencedBy below.
+// Scans the heap for JSObjects holding a direct reference to 'target',
+// skipping context extension objects and arguments arrays. When 'instances'
+// is non-NULL, stores up to 'instances_size' referencing objects into it.
+// Returns the number of referencing objects found (capped by
+// max_references when it is non-zero).
+static int DebugReferencedBy(JSObject* target,
+ Object* instance_filter, int max_references,
+ FixedArray* instances, int instances_size,
+ JSFunction* context_extension_function,
+ JSFunction* arguments_function) {
+ NoHandleAllocation ha;
+ AssertNoAllocation no_alloc;
+
+ // Iterate the heap.
+ int count = 0;
+ JSObject* last = NULL;
+ HeapIterator iterator;
+ while (iterator.has_next() &&
+ (max_references == 0 || count < max_references)) {
+ // Only look at all JSObjects.
+ HeapObject* heap_obj = iterator.next();
+ if (heap_obj->IsJSObject()) {
+ // Skip context extension objects and argument arrays as these are
+ // checked in the context of functions using them.
+ JSObject* obj = JSObject::cast(heap_obj);
+ if (obj->map()->constructor() == context_extension_function ||
+ obj->map()->constructor() == arguments_function) {
+ continue;
+ }
+
+ // Check if the JS object has a reference to the object looked for.
+ if (obj->ReferencesObject(target)) {
+ // Check instance filter if supplied. This is normally used to avoid
+ // references from mirror objects (see Runtime_IsInPrototypeChain).
+ if (!instance_filter->IsUndefined()) {
+ // Walk the prototype chain; exclude obj if instance_filter occurs
+ // anywhere on it.
+ Object* V = obj;
+ while (true) {
+ Object* prototype = V->GetPrototype();
+ if (prototype->IsNull()) {
+ break;
+ }
+ if (instance_filter == prototype) {
+ obj = NULL; // Don't add this object.
+ break;
+ }
+ V = prototype;
+ }
+ }
+
+ if (obj != NULL) {
+ // Valid reference found; add it to the instance array if one was
+ // supplied, and update the count.
+ if (instances != NULL && count < instances_size) {
+ instances->set(count, obj);
+ }
+ last = obj;
+ count++;
+ }
+ }
+ }
+ }
+
+ // Check for circular reference only. This can happen when the object is only
+ // referenced from mirrors and has a circular reference in which case the
+ // object is not really alive and would have been garbage collected if not
+ // referenced from the mirror.
+ if (count == 1 && last == target) {
+ count = 0;
+ }
+
+ // Return the number of referencing objects found.
+ return count;
+}
+
+
+// Scan the heap for objects with direct references to an object
+// args[0]: the object to find references to
+// args[1]: constructor function for instances to exclude (Mirror)
+// args[2]: the maximum number of objects to return
+static Object* Runtime_DebugReferencedBy(Arguments args) {
+ ASSERT(args.length() == 3);
+
+ // First perform a full GC in order to avoid references from dead objects.
+ Heap::CollectGarbage(0, OLD_SPACE);
+
+ // Check parameters.
+ CONVERT_CHECKED(JSObject, target, args[0]);
+ Object* instance_filter = args[1];
+ RUNTIME_ASSERT(instance_filter->IsUndefined() ||
+ instance_filter->IsJSObject());
+ CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
+ RUNTIME_ASSERT(max_references >= 0);
+
+ // Get the constructor function for context extension and arguments array.
+ JSFunction* context_extension_function =
+ Top::context()->global_context()->context_extension_function();
+ JSObject* arguments_boilerplate =
+ Top::context()->global_context()->arguments_boilerplate();
+ JSFunction* arguments_function =
+ JSFunction::cast(arguments_boilerplate->map()->constructor());
+
+ // Get the number of referencing objects. First pass counts only so the
+ // result array can be allocated with the right size.
+ int count;
+ count = DebugReferencedBy(target, instance_filter, max_references,
+ NULL, 0,
+ context_extension_function, arguments_function);
+
+ // Allocate an array to hold the result.
+ Object* object = Heap::AllocateFixedArray(count);
+ if (object->IsFailure()) return object;
+ FixedArray* instances = FixedArray::cast(object);
+
+ // Fill the referencing objects in a second pass.
+ count = DebugReferencedBy(target, instance_filter, max_references,
+ instances, count,
+ context_extension_function, arguments_function);
+
+ // Return result as JS array.
+ Object* result =
+ Heap::AllocateJSObject(
+ Top::context()->global_context()->array_function());
+ if (!result->IsFailure()) JSArray::cast(result)->SetContent(instances);
+ return result;
+}
+
+
+// Helper function used by Runtime_DebugConstructedBy below.
+// Scans the heap for JSObjects whose map's constructor is 'constructor'.
+// When 'instances' is non-NULL, stores up to 'instances_size' of them into
+// it. Returns the number of matching objects found (capped by
+// max_references when it is non-zero).
+static int DebugConstructedBy(JSFunction* constructor, int max_references,
+ FixedArray* instances, int instances_size) {
+ AssertNoAllocation no_alloc;
+
+ // Iterate the heap.
+ int count = 0;
+ HeapIterator iterator;
+ while (iterator.has_next() &&
+ (max_references == 0 || count < max_references)) {
+ // Only look at all JSObjects.
+ HeapObject* heap_obj = iterator.next();
+ if (heap_obj->IsJSObject()) {
+ JSObject* obj = JSObject::cast(heap_obj);
+ if (obj->map()->constructor() == constructor) {
+ // Valid instance found; add it to the instance array if one was
+ // supplied, and update the count.
+ if (instances != NULL && count < instances_size) {
+ instances->set(count, obj);
+ }
+ count++;
+ }
+ }
+ }
+
+ // Return the number of referencing objects found.
+ return count;
+}
+
+
+// Scan the heap for objects constructed by a specific function.
+// args[0]: the constructor to find instances of
+// args[1]: the maximum number of objects to return
+static Object* Runtime_DebugConstructedBy(Arguments args) {
+ ASSERT(args.length() == 2);
+
+ // First perform a full GC in order to avoid dead objects.
+ Heap::CollectGarbage(0, OLD_SPACE);
+
+ // Check parameters.
+ CONVERT_CHECKED(JSFunction, constructor, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
+ RUNTIME_ASSERT(max_references >= 0);
+
+ // Get the number of matching objects. First pass counts only so the
+ // result array can be allocated with the right size.
+ int count;
+ count = DebugConstructedBy(constructor, max_references, NULL, 0);
+
+ // Allocate an array to hold the result.
+ Object* object = Heap::AllocateFixedArray(count);
+ if (object->IsFailure()) return object;
+ FixedArray* instances = FixedArray::cast(object);
+
+ // Fill the matching objects in a second pass.
+ count = DebugConstructedBy(constructor, max_references, instances, count);
+
+ // Return result as JS array.
+ Object* result =
+ Heap::AllocateJSObject(
+ Top::context()->global_context()->array_function());
+ if (!result->IsFailure()) JSArray::cast(result)->SetContent(instances);
+ return result;
+}
+
+
+// Return the prototype of the given JSObject.
+// args[0]: the object whose prototype is requested
+static Object* Runtime_GetPrototype(Arguments args) {
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+
+ return obj->GetPrototype();
+}
+
+
+// Trigger a hardware/OS level debug break (for attaching a native debugger).
+static Object* Runtime_SystemBreak(Arguments args) {
+ CPU::DebugBreak();
+ return Heap::undefined_value();
+}
+
+
+// Finds the script object from the script data. NOTE: This operation uses
+// heap traversal to find the function generated for the source position
+// for the requested break point. For lazily compiled functions several heap
+// traversals might be required rendering this operation as a rather slow
+// operation. However for setting break points which is normally done through
+// some kind of user interaction the performance is not crucial.
+// Returns the script wrapper for the first script whose name equals
+// script_name, or undefined when no such script exists.
+static Handle<Object> Runtime_GetScriptFromScriptName(
+ Handle<String> script_name) {
+ // Scan the heap for Script objects to find the script with the requested
+ // script data.
+ Handle<Script> script;
+ HeapIterator iterator;
+ while (script.is_null() && iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ // If a script is found check if it has the script data requested.
+ if (obj->IsScript()) {
+ if (Script::cast(obj)->name()->IsString()) {
+ if (String::cast(Script::cast(obj)->name())->Equals(*script_name)) {
+ script = Handle<Script>(Script::cast(obj));
+ }
+ }
+ }
+ }
+
+ // If no script with the requested script data is found return undefined.
+ if (script.is_null()) return Factory::undefined_value();
+
+ // Return the script found.
+ return GetScriptWrapper(script);
+}
+
+
+// Get the script object from script data. NOTE: Regarding performance
+// see the NOTE for Runtime_GetScriptFromScriptName above.
+// args[0]: script data for the script to find the source for
+static Object* Runtime_GetScript(Arguments args) {
+ HandleScope scope;
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(String, script_name, args[0]);
+
+ // Find the requested script.
+ Handle<Object> result =
+ Runtime_GetScriptFromScriptName(Handle<String>(script_name));
+ return *result;
+}
+
+
+// Print the disassembly of a function's code to stdout (DEBUG builds only;
+// a no-op returning undefined in release builds).
+// args[0]: the function whose code should be printed
+static Object* Runtime_FunctionGetAssemblerCode(Arguments args) {
+#ifdef DEBUG
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ // Get the function and make sure it is compiled.
+ CONVERT_ARG_CHECKED(JSFunction, func, 0);
+ if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+ return Failure::Exception();
+ }
+ func->code()->PrintLn();
+#endif // DEBUG
+ return Heap::undefined_value();
+}
+
+
+// Print an abort message and terminate the process. Never returns.
+// args[0]/args[1]: together encode the message location — args[0] appears
+// to be a (Smi-encoded) base pointer and args[1] a Smi byte offset added to
+// it. NOTE(review): assumes callers encode the message this way — confirm
+// against the code generator's Abort call sites.
+static Object* Runtime_Abort(Arguments args) {
+ ASSERT(args.length() == 2);
+ OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
+ Smi::cast(args[1])->value());
+ Top::PrintStack();
+ OS::Abort();
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// Return a JS array of [name, argument-count] pairs, one for every runtime
+// function in RUNTIME_FUNCTION_LIST. Used for testing.
+static Object* Runtime_ListNatives(Arguments args) {
+ ASSERT(args.length() == 1);
+ HandleScope scope;
+ Handle<JSArray> result = Factory::NewJSArray(0);
+ int index = 0;
+// Appends one [#Name, argc] pair to the result array.
+#define ADD_ENTRY(Name, argc) \
+ { \
+ HandleScope inner; \
+ Handle<String> name = \
+ Factory::NewStringFromAscii(Vector<const char>(#Name, strlen(#Name))); \
+ Handle<JSArray> pair = Factory::NewJSArray(0); \
+ SetElement(pair, 0, name); \
+ SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc))); \
+ SetElement(result, index++, pair); \
+ }
+ RUNTIME_FUNCTION_LIST(ADD_ENTRY)
+#undef ADD_ENTRY
+ return *result;
+}
+
+
+// Placeholder entry point; %IS_VAR is expanded as a macro by the parser and
+// this function must never actually be called.
+static Object* Runtime_IS_VAR(Arguments args) {
+ UNREACHABLE(); // implemented as macro in the parser
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Runtime
+
+// Expands each runtime function into a Function descriptor entry: JS name,
+// stub name, native entry point, argument count and stub id.
+#define F(name, nargs) \
+ { #name, "RuntimeStub_" #name, FUNCTION_ADDR(Runtime_##name), nargs, \
+ static_cast<int>(Runtime::k##name) },
+
+// Table of all runtime function descriptors, terminated by a NULL-name
+// sentinel entry. Indexed by Runtime::FunctionId.
+static Runtime::Function Runtime_functions[] = {
+ RUNTIME_FUNCTION_LIST(F)
+ { NULL, NULL, NULL, 0, -1 }
+};
+
+#undef F
+
+
+// Return the descriptor for the runtime function with the given id.
+// The table is ordered by FunctionId, so this is a direct index.
+Runtime::Function* Runtime::FunctionForId(FunctionId fid) {
+ ASSERT(0 <= fid && fid < kNofFunctions);
+ return &Runtime_functions[fid];
+}
+
+
+// Return the descriptor for the runtime function with the given JS name,
+// or NULL when no runtime function has that name.
+Runtime::Function* Runtime::FunctionForName(const char* name) {
+ // Linear scan of the descriptor table; the table is terminated by a
+ // sentinel entry whose name is NULL.
+ Function* candidate = Runtime_functions;
+ while (candidate->name != NULL) {
+ if (strcmp(candidate->name, name) == 0) return candidate;
+ candidate++;
+ }
+ return NULL;
+}
+
+
+// Attempt the garbage collection requested by a Failure result returned
+// from an allocation. Called from generated-code stubs.
+void Runtime::PerformGC(Object* result) {
+ Failure* failure = Failure::cast(result);
+ // Try to do a garbage collection; ignore it if it fails. The C
+ // entry stub will throw an out-of-memory exception in that case.
+ Heap::CollectGarbage(failure->requested(), failure->allocation_space());
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_RUNTIME_H_
+#define V8_RUNTIME_H_
+
+namespace v8 { namespace internal {
+
+// The interface to C++ runtime functions.
+
+// ----------------------------------------------------------------------------
+// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
+// either directly by id (via the code generator), or indirectly
+// via a native call by name (from within JS code).
+
+// Each entry is F(Name, nargs) where Name is both the C++ function suffix
+// (Runtime_Name) and the JS-visible native name (%Name), and nargs is the
+// expected argument count (-1 for a variable number of arguments). The
+// order of entries defines the FunctionId enumeration values below, so it
+// must match the Runtime_functions table built from this list.
+#define RUNTIME_FUNCTION_LIST(F) \
+ /* Property access */ \
+ F(AddProperty, 4) \
+ F(GetProperty, 2) \
+ F(DeleteProperty, 2) \
+ F(HasLocalProperty, 2) \
+ F(HasProperty, 2) \
+ F(HasElement, 2) \
+ F(IsPropertyEnumerable, 2) \
+ F(GetPropertyNames, 1) \
+ F(GetPropertyNamesFast, 1) \
+ F(GetArgumentsProperty, 1) \
+ \
+ F(IsInPrototypeChain, 2) \
+ \
+ F(IsConstructCall, 1) \
+ \
+ /* Utilities */ \
+ F(GetBuiltins, 1) \
+ F(GetCalledFunction, 1) \
+ F(GetFunctionDelegate, 1) \
+ F(NewArguments, 1) \
+ F(LazyCompile, 1) \
+ F(SetNewFunctionAttributes, 1) \
+ \
+ /* ConsStrings */ \
+ F(ConsStringFst, 1) \
+ F(ConsStringSnd, 1) \
+ \
+ /* Conversions */ \
+ F(ToBool, 1) \
+ F(Typeof, 1) \
+ \
+ F(StringToNumber, 1) \
+ F(StringFromCharCodeArray, 1) \
+ F(StringParseInt, 2) \
+ F(StringParseFloat, 1) \
+ F(StringToLowerCase, 1) \
+ F(StringToUpperCase, 1) \
+ F(CharFromCode, 1) \
+ F(URIEscape, 1) \
+ F(URIUnescape, 1) \
+ \
+ F(NumberToString, 1) \
+ F(NumberToInteger, 1) \
+ F(NumberToJSUint32, 1) \
+ F(NumberToJSInt32, 1) \
+ \
+ /* Arithmetic operations */ \
+ F(NumberAdd, 2) \
+ F(NumberSub, 2) \
+ F(NumberMul, 2) \
+ F(NumberDiv, 2) \
+ F(NumberMod, 2) \
+ F(NumberAlloc, 1) \
+ F(NumberUnaryMinus, 1) \
+ \
+ F(StringAdd, 2) \
+ F(StringBuilderConcat, 2) \
+ \
+ /* Bit operations */ \
+ F(NumberOr, 2) \
+ F(NumberAnd, 2) \
+ F(NumberXor, 2) \
+ F(NumberNot, 1) \
+ \
+ F(NumberShl, 2) \
+ F(NumberShr, 2) \
+ F(NumberSar, 2) \
+ \
+ F(NumberIsNaN, 1) \
+ F(NumberIsFinite, 1) \
+ \
+ /* Comparisons */ \
+ F(ObjectEquals, 2) \
+ F(NumberEquals, 2) \
+ F(StringEquals, 2) \
+ \
+ F(NumberCompare, 3) \
+ F(StringCompare, 2) \
+ \
+ /* Math */ \
+ F(Math_abs, 1) \
+ F(Math_acos, 1) \
+ F(Math_asin, 1) \
+ F(Math_atan, 1) \
+ F(Math_atan2, 2) \
+ F(Math_ceil, 1) \
+ F(Math_cos, 1) \
+ F(Math_exp, 1) \
+ F(Math_floor, 1) \
+ F(Math_log, 1) \
+ F(Math_pow, 2) \
+ F(Math_random, 1) \
+ F(Math_round, 1) \
+ F(Math_sin, 1) \
+ F(Math_sqrt, 1) \
+ F(Math_tan, 1) \
+ \
+ /* Regular expressions */ \
+ F(RegExpCompile, 3) \
+ F(RegExpExec, 3) \
+ F(RegExpExecGlobal, 2) \
+ \
+ /* Strings */ \
+ F(StringCharCodeAt, 2) \
+ F(StringIndexOf, 3) \
+ F(StringLastIndexOf, 3) \
+ F(StringLocaleCompare, 2) \
+ F(StringSlice, 3) \
+ \
+ /* Numbers */ \
+ F(NumberToRadixString, 2) \
+ F(NumberToFixed, 2) \
+ F(NumberToExponential, 2) \
+ F(NumberToPrecision, 2) \
+ \
+ /* Reflection */ \
+ F(FunctionSetInstanceClassName, 2) \
+ F(FunctionSetLength, 2) \
+ F(FunctionSetPrototype, 2) \
+ F(FunctionGetName, 1) \
+ F(FunctionGetSourceCode, 1) \
+ F(FunctionGetScript, 1) \
+ F(FunctionGetScriptSourcePosition, 1) \
+ F(GetScript, 1) \
+ \
+ F(ClassOf, 1) \
+ F(SetCode, 2) \
+ \
+ F(CreateApiFunction, 1) \
+ F(IsTemplate, 1) \
+ F(GetTemplateField, 2) \
+ \
+ /* Dates */ \
+ F(DateCurrentTime, 1) \
+ F(DateParseString, 1) \
+ F(DateLocalTimezone, 1) \
+ F(DateLocalTimeOffset, 1) \
+ F(DateDaylightSavingsOffset, 1) \
+ \
+ /* Numbers */ \
+ F(NumberMaxValue, 1) \
+ F(NumberMinValue, 1) \
+ F(NumberNaN, 1) \
+ F(NumberNegativeInfinity, 1) \
+ F(NumberPositiveInfinity, 1) \
+ \
+ /* Globals */ \
+ F(CompileString, 2) \
+ F(CompileScript, 4) \
+ F(GlobalPrint, 1) \
+ \
+ /* Eval */ \
+ F(EvalReceiver, 1) \
+ \
+ F(SetProperty, -1 /* 3 or 4 */) \
+ F(IgnoreAttributesAndSetProperty, 3) \
+ \
+ /* Arrays */ \
+ F(RemoveArrayHoles, 1) \
+ F(GetArrayKeys, 2) \
+ F(MoveArrayContents, 2) \
+ F(EstimateNumberOfElements, 1) \
+ \
+ /* Getters and Setters */ \
+ F(DefineAccessor, -1 /* 4 or 5 */) \
+ F(LookupAccessor, 3) \
+ \
+ /* Debugging */ \
+ F(AddDebugEventListener, 2) \
+ F(RemoveDebugEventListener, 1) \
+ F(Break, 1) \
+ F(DebugGetLocalPropertyDetails, 2) \
+ F(DebugGetProperty, 2) \
+ F(DebugLocalPropertyNames, 1) \
+ F(DebugLocalElementNames, 1) \
+ F(DebugPropertyTypeFromDetails, 1) \
+ F(DebugPropertyAttributesFromDetails, 1) \
+ F(DebugPropertyIndexFromDetails, 1) \
+ F(DebugInterceptorInfo, 1) \
+ F(DebugNamedInterceptorPropertyNames, 1) \
+ F(DebugIndexedInterceptorElementNames, 1) \
+ F(DebugNamedInterceptorPropertyValue, 2) \
+ F(DebugIndexedInterceptorElementValue, 2) \
+ F(CheckExecutionState, 1) \
+ F(GetFrameCount, 1) \
+ F(GetFrameDetails, 2) \
+ F(GetCFrames, 1) \
+ F(GetBreakLocations, 1) \
+ F(SetFunctionBreakPoint, 3) \
+ F(SetScriptBreakPoint, 3) \
+ F(ClearBreakPoint, 1) \
+ F(ChangeBreakOnException, 2) \
+ F(PrepareStep, 3) \
+ F(ClearStepping, 1) \
+ F(DebugEvaluate, 3) \
+ F(DebugEvaluateGlobal, 2) \
+ F(DebugGetLoadedScripts, 1) \
+ F(DebugReferencedBy, 3) \
+ F(DebugConstructedBy, 2) \
+ F(GetPrototype, 1) \
+ F(SystemBreak, 1) \
+ \
+ /* Literals */ \
+ F(MaterializeRegExpLiteral, 4)\
+ F(CreateArrayLiteral, 1) \
+ F(CreateObjectLiteralBoilerplate, 3) \
+ F(CloneObjectLiteralBoilerplate, 1) \
+ \
+ /* Statements */ \
+ F(NewClosure, 2) \
+ F(NewObject, 1) \
+ F(Throw, 1) \
+ F(ReThrow, 1) \
+ F(ThrowReferenceError, 1) \
+ F(StackGuard, 1) \
+ \
+ /* Contexts */ \
+ F(NewContext, 2) \
+ F(PushContext, 2) \
+ F(LookupContext, 2) \
+ F(LoadContextSlot, 2) \
+ F(LoadContextSlotNoReferenceError, 2) \
+ F(StoreContextSlot, 3) \
+ \
+ /* Declarations and initialization */ \
+ F(DeclareGlobals, 3) \
+ F(DeclareContextSlot, 5) \
+ F(InitializeVarGlobal, -1 /* 1 or 2 */) \
+ F(InitializeConstGlobal, -1 /* 1 or 2 */) \
+ F(InitializeConstContextSlot, 3) \
+ \
+ /* Debugging */ \
+ F(DebugPrint, 1) \
+ F(DebugTrace, 1) \
+ F(TraceEnter, 1) \
+ F(TraceExit, 1) \
+ F(DebugBreak, 1) \
+ F(FunctionGetAssemblerCode, 1) \
+ F(Abort, 2) \
+ \
+ /* Testing */ \
+ F(ListNatives, 1) \
+ \
+ /* Pseudo functions - handled as macros by parser */ \
+ F(IS_VAR, 1)
+
+
+// ----------------------------------------------------------------------------
+// Runtime provides access to all C++ runtime functions.
+
+// Static-only class giving access to all C++ runtime functions by id or
+// by JS-visible name.
+class Runtime : public AllStatic {
+ public:
+ // One enumerator per runtime function, in RUNTIME_FUNCTION_LIST order,
+ // plus kNofFunctions as the total count.
+ enum FunctionId {
+#define F(name, nargs) k##name,
+ RUNTIME_FUNCTION_LIST(F)
+ kNofFunctions
+#undef F
+ };
+ static Object* CreateArrayLiteral(Arguments args);
+
+ // Runtime function descriptor.
+ struct Function {
+ // The JS name of the function.
+ const char* name;
+
+ // The name of the stub that calls the runtime function.
+ const char* stub_name;
+
+ // The C++ (native) entry point.
+ byte* entry;
+
+ // The number of arguments expected; nargs < 0 if variable no. of
+ // arguments.
+ int nargs;
+ int stub_id;
+ };
+
+ // Get the runtime function with the given function id.
+ static Function* FunctionForId(FunctionId fid);
+
+ // Get the runtime function with the given name. Returns NULL when no
+ // runtime function has that name.
+ static Function* FunctionForName(const char* name);
+
+ // TODO(1240886): The following three methods are *not* handle safe,
+ // but accept handle arguments. This seems fragile.
+
+ // Support getting the characters in a string using [] notation as
+ // in Firefox/SpiderMonkey, Safari and Opera.
+ static Object* GetElementOrCharAt(Handle<Object> object, uint32_t index);
+
+ static Object* SetObjectProperty(Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr);
+
+ static Object* GetObjectProperty(Handle<Object> object, Object* key);
+
+ // Helper function used by code stubs.
+ static void PerformGC(Object* result);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_RUNTIME_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains runtime support implemented in JavaScript.
+
+// CAUTION: Some of the functions specified in this file are called
+// directly from compiled code. These are the functions with names in
+// ALL CAPS. The compiled code passes the first argument in 'this' and
+// it does not push the function onto the stack. This means that you
+// cannot use contexts in all these functions.
+
+
+/* -----------------------------------
+ - - - C o m p a r i s o n - - -
+ -----------------------------------
+*/
+
+// The following const declarations are shared with other native JS files.
+// They are all declared at this one spot to avoid const redeclaration errors.
+const $Object = global.Object;
+const $Array = global.Array;
+const $String = global.String;
+const $Number = global.Number;
+const $Function = global.Function;
+const $Boolean = global.Boolean;
+// NaN is obtained through the runtime so it cannot be shadowed by a
+// user-defined global of the same name.
+const $NaN = %NumberNaN(1);
+
+
+// ECMA-262, section 11.9.1, page 55.
+// Implements the abstract '==' comparison on 'this' and y. Returns the
+// smi 0 when the operands are equal and 1 when they are not (the
+// generated code expects small integers, not booleans).
+function EQUALS(y) {
+ var x = this;
+
+ // NOTE: We use iteration instead of recursion, because it is
+ // difficult to call EQUALS with the correct setting of 'this' in
+ // an efficient way.
+
+ while (true) {
+
+ if (IS_NUMBER(x)) {
+ if (y == null) return 1; // not equal
+ return %NumberEquals(x, %ToNumber(y));
+
+ } else if (IS_STRING(x)) {
+ if (IS_STRING(y)) return %StringEquals(x, y);
+ if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
+ if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+ if (y == null) return 1; // not equal
+ // y is an object: convert it to a primitive and loop again.
+ y = %ToPrimitive(y, NO_HINT);
+
+ } else if (IS_BOOLEAN(x)) {
+ if (IS_BOOLEAN(y)) return %ObjectEquals(x, y);
+ if (y == null) return 1; // not equal
+ return %NumberEquals(%ToNumber(x), %ToNumber(y));
+
+ } else if (x == null) {
+ // NOTE: This checks for both null and undefined.
+ return (y == null) ? 0 : 1;
+
+ } else {
+ // x is an object or function; compare by identity if y is too.
+ if (IS_OBJECT(y)) return %ObjectEquals(x, y);
+ if (IS_FUNCTION(y)) return %ObjectEquals(x, y);
+ // y is a primitive: convert x to a primitive and loop again.
+ x = %ToPrimitive(x, NO_HINT);
+
+ }
+ }
+};
+
+
+// ECMA-262, section 11.9.4, page 56.
+// Implements the '===' comparison on 'this' and x. Returns the smi 0
+// for equal and 1 for not equal, like EQUALS above.
+function STRICT_EQUALS(x) {
+ if (IS_NUMBER(this)) {
+ if (!IS_NUMBER(x)) return 1; // not equal
+ return %NumberEquals(this, x);
+ }
+
+ if (IS_STRING(this)) {
+ if (!IS_STRING(x)) return 1; // not equal
+ return %StringEquals(this, x);
+ }
+
+ if (IS_BOOLEAN(this)) {
+ if (!IS_BOOLEAN(x)) return 1; // not equal
+ if (this) return x ? 0 : 1;
+ else return x ? 1 : 0;
+ }
+
+ if (IS_UNDEFINED(this)) { // both undefined and undetectable
+ return IS_UNDEFINED(x) ? 0 : 1;
+ }
+
+ // Remaining cases (objects, functions, null) compare by identity.
+ return %ObjectEquals(this, x);
+};
+
+
+// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
+// the result when either (or both) the operands are NaN.
+function COMPARE(x, ncr) {
+ // Improve performance for floating point compares
+ if (IS_NUMBER(this) && IS_NUMBER(x)) {
+ return %NumberCompare(this, x, ncr);
+ }
+
+ // Convert both operands with number hint; a string comparison is only
+ // performed when both sides convert to strings.
+ var a = %ToPrimitive(this, NUMBER_HINT);
+ var b = %ToPrimitive(x, NUMBER_HINT);
+ if (IS_STRING(a) && IS_STRING(b)) {
+ return %StringCompare(a, b);
+ } else {
+ return %NumberCompare(%ToNumber(a), %ToNumber(b), ncr);
+ }
+};
+
+
+
+/* -----------------------------------
+ - - - A r i t h m e t i c - - -
+ -----------------------------------
+*/
+
+// ECMA-262, section 11.6.1, page 50.
+function ADD(x) {
+ // Fast case: Check for number operands and do the addition.
+ if (IS_NUMBER(this) && IS_NUMBER(x)) {
+ return %NumberAdd(this, x);
+ }
+
+ var a = %ToPrimitive(this, NO_HINT);
+ var b = %ToPrimitive(x, NO_HINT);
+
+ // If either primitive is a string the result is a concatenation;
+ // otherwise both operands are added as numbers.
+ if (IS_STRING(a)) {
+ return %StringAdd(a, %ToString(b));
+ } else if (IS_STRING(b)) {
+ return %StringAdd(%ToString(a), b);
+ } else {
+ return %NumberAdd(%ToNumber(a), %ToNumber(b));
+ }
+};
+
+
+// ECMA-262, section 11.6.2, page 50.
+function SUB(x) {
+ return %NumberSub(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.5.1, page 48.
+function MUL(x) {
+ return %NumberMul(%ToNumber(this), %ToNumber(x));
+};
+
+
+// Multiply and negate the result. NOTE(review): presumably emitted by
+// the code generator for expressions of the form -(a * b) -- confirm.
+function MULNEG(x) {
+ return %NumberUnaryMinus(%NumberMul(%ToNumber(this), %ToNumber(x)));
+};
+
+
+// ECMA-262, section 11.5.2, page 49.
+function DIV(x) {
+ return %NumberDiv(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.5.3, page 49.
+function MOD(x) {
+ return %NumberMod(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.4.4, page 47. Increment: ToNumber(this) + 1.
+function INC() {
+ return %NumberAdd(%ToNumber(this), 1);
+};
+
+
+// ECMA-262, section 11.4.5, page 48. Decrement: ToNumber(this) - 1.
+function DEC() {
+ return %NumberSub(%ToNumber(this), 1);
+};
+
+
+
+/* -------------------------------------------
+ - - - B i t o p e r a t i o n s - - -
+ -------------------------------------------
+*/
+
+// ECMA-262, section 11.10, page 57. The %Number* operations perform
+// the required ToInt32/ToUint32 truncation internally.
+function BIT_OR(x) {
+ return %NumberOr(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.10, page 57.
+function BIT_AND(x) {
+ return %NumberAnd(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.10, page 57.
+function BIT_XOR(x) {
+ return %NumberXor(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.4.7, page 47.
+function UNARY_MINUS() {
+ return %NumberUnaryMinus(%ToNumber(this));
+};
+
+
+// ECMA-262, section 11.4.8, page 48.
+function BIT_NOT() {
+ return %NumberNot(%ToNumber(this));
+};
+
+
+// ECMA-262, section 11.7.1, page 51. Shift left.
+function SHL(x) {
+ return %NumberShl(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.7.2, page 51. Arithmetic (signed) shift right.
+function SAR(x) {
+ return %NumberSar(%ToNumber(this), %ToNumber(x));
+};
+
+
+// ECMA-262, section 11.7.3, page 52. Logical (unsigned) shift right.
+function SHR(x) {
+ return %NumberShr(%ToNumber(this), %ToNumber(x));
+};
+
+
+
+/* -----------------------------
+ - - - H e l p e r s - - -
+ -----------------------------
+*/
+
+// ECMA-262, section 11.4.1, page 46.
+function DELETE(key) {
+ return %DeleteProperty(%ToObject(this), %ToString(key));
+};
+
+
+// ECMA-262, section 11.8.7, page 54.
+function IN(x) {
+ if (x == null || (!IS_OBJECT(x) && !IS_FUNCTION(x))) {
+ throw %MakeTypeError('invalid_in_operator_use', [this, x]);
+ }
+ // Small integer keys are looked up as elements, all other keys as
+ // named properties.
+ return %_IsSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
+};
+
+
+// ECMA-262, section 11.8.6, page 54. Implements 'V instanceof F' with
+// 'this' as V and the constructor as F.
+function INSTANCE_OF(F) {
+ var V = this;
+ if (!IS_FUNCTION(F)) {
+ throw %MakeTypeError('instanceof_function_expected', [V]);
+ }
+
+ // If V is not an object, return false.
+ if (IS_NULL(V) || (!IS_OBJECT(V) && !IS_FUNCTION(V))) {
+ return false;
+ }
+
+ // Get the prototype of F; if it is not an object, throw an error.
+ var O = F.prototype;
+ if (IS_NULL(O) || (!IS_OBJECT(O) && !IS_FUNCTION(O))) {
+ throw %MakeTypeError('instanceof_nonobject_proto', [O]);
+ }
+
+ // Return whether or not O is in the prototype chain of V.
+ return %IsInPrototypeChain(O, V);
+};
+
+
+// Get an array of property keys for the given object. Used in
+// for-in statements.
+function GET_KEYS() {
+ return %GetPropertyNames(this);
+};
+
+
+// Filter a given key against an object by checking if the object
+// has a property with the given key; return the key as a string if
+// it has. Otherwise returns null. Used in for-in statements to skip
+// properties deleted during the iteration.
+function FILTER_KEY(key) {
+ var string = %ToString(key);
+ if (%HasProperty(this, string)) return string;
+ return null;
+};
+
+
+// Called when a non-function value is invoked as a function. If the
+// value has a function delegate, the call is forwarded to it with the
+// original callee as the receiver; otherwise a TypeError is thrown.
+function CALL_NON_FUNCTION() {
+ var callee = %GetCalledFunction(0);
+ var delegate = %GetFunctionDelegate(callee);
+ if (!IS_FUNCTION(delegate)) {
+ throw %MakeTypeError('called_non_callable', [typeof callee]);
+ }
+
+ // Forward the original arguments to the delegate.
+ var parameters = %NewArguments(delegate);
+ return delegate.apply(callee, parameters);
+};
+
+
+// Validates the receiver and arguments of a Function.prototype.apply
+// call and returns the number of arguments to copy to the stack.
+function APPLY_PREPARE(args) {
+ var length;
+ // First check whether length is a positive Smi and args is an
+ // array. This is the fast case. If this fails, we do the slow case
+ // that takes care of more eventualities.
+ if (%_IsArray(args)) {
+ length = args.length;
+ if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {
+ return length;
+ }
+ }
+
+ length = (args == null) ? 0 : %ToUint32(args.length);
+
+ // We can handle any number of apply arguments if the stack is
+ // big enough, but sanity check the value to avoid overflow when
+ // multiplying with pointer size.
+ if (length > 0x800000) {
+ throw %MakeRangeError('apply_overflow', [length]);
+ }
+
+ if (!IS_FUNCTION(this)) {
+ throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
+ }
+
+ // Make sure the arguments list has the right type.
+ if (args != null &&
+ %ClassOf(args) != 'Array' &&
+ %ClassOf(args) != 'Arguments') {
+ throw %MakeTypeError('apply_wrong_args', []);
+ }
+
+ // Return the length which is the number of arguments to copy to the
+ // stack. It is guaranteed to be a small integer at this point.
+ return length;
+};
+
+
+// Called by generated code when the argument count check in apply
+// fails; always throws.
+function APPLY_OVERFLOW(length) {
+ throw %MakeRangeError('apply_overflow', [length]);
+};
+
+
+// Convert the receiver to an object - forward to ToObject.
+// (ALL CAPS functions receive their operand in 'this'; see the file
+// header comment.)
+function TO_OBJECT() {
+ return %ToObject(this);
+};
+
+
+// Convert the receiver to a number - forward to ToNumber.
+function TO_NUMBER() {
+ return %ToNumber(this);
+};
+
+
+// Convert the receiver to a string - forward to ToString.
+function TO_STRING() {
+ return %ToString(this);
+};
+
+
+/* -------------------------------------
+ - - - C o n v e r s i o n s - - -
+ -------------------------------------
+*/
+
+// ECMA-262, section 9.1, page 30. Use null/undefined for no hint,
+// (1) for number hint, and (2) for string hint.
+function ToPrimitive(x, hint) {
+ if (!IS_OBJECT(x) && !IS_FUNCTION(x)) return x;
+ if (x == null) return x; // check for null, undefined
+ // Dates default to string conversion; everything else to number.
+ if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
+ return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
+};
+
+
+// ECMA-262, section 9.3, page 31.
+function ToNumber(x) {
+ if (IS_NUMBER(x)) return x;
+ if (IS_STRING(x)) return %StringToNumber(x);
+ if (IS_BOOLEAN(x)) return x ? 1 : 0;
+ if (IS_UNDEFINED(x)) return %NumberNaN(1);
+ return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
+};
+
+
+// ECMA-262, section 9.8, page 35.
+function ToString(x) {
+ if (IS_STRING(x)) return x;
+ if (IS_NUMBER(x)) return %NumberToString(x);
+ if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
+ if (IS_UNDEFINED(x)) return 'undefined';
+ return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
+};
+
+
+// ECMA-262, section 9.2, page 30.
+function ToBoolean(x) {
+ if (IS_BOOLEAN(x)) return x;
+ if (IS_STRING(x)) return x.length != 0;
+ if (x == null) return false;
+ if (IS_NUMBER(x)) return !((x == 0) || %NumberIsNaN(x));
+ return true;
+};
+
+
+// ECMA-262, section 9.9, page 36.
+function ToObject(x) {
+ if (IS_STRING(x)) return new $String(x);
+ if (IS_NUMBER(x)) return new $Number(x);
+ if (IS_BOOLEAN(x)) return new $Boolean(x);
+ if (x == null) throw %MakeTypeError('null_to_object');
+ return x;
+};
+
+
+// ECMA-262, section 9.4, page 34.
+function ToInteger(x) {
+ if (%_IsSmi(x)) return x; // smis are already integers
+ return %NumberToInteger(ToNumber(x));
+};
+
+
+// ECMA-262, section 9.6, page 34.
+function ToUint32(x) {
+ return %NumberToJSUint32(ToNumber(x));
+};
+
+
+// ECMA-262, section 9.5, page 34
+function ToInt32(x) {
+ if (%_IsSmi(x)) return x; // smis fit in int32 by definition
+ return %NumberToJSInt32(ToNumber(x));
+};
+
+
+
+/* ---------------------------------
+ - - - U t i l i t i e s - - -
+ ---------------------------------
+*/
+
+// Returns if the given x is a primitive value - not an object or a
+// function.
+function IsPrimitive(x) {
+ if (!IS_OBJECT(x) && !IS_FUNCTION(x)) {
+ return true;
+ } else {
+ // Even though the type of null is "object", null is still
+ // considered a primitive value.
+ return IS_NULL(x);
+ }
+};
+
+
+// ECMA-262, section 8.6.2.6, page 28. [[DefaultValue]] with number
+// hint: try valueOf first, then toString.
+function DefaultNumber(x) {
+ if (IS_FUNCTION(x.valueOf)) {
+ var v = x.valueOf();
+ if (%IsPrimitive(v)) return v;
+ }
+
+ if (IS_FUNCTION(x.toString)) {
+ var s = x.toString();
+ if (%IsPrimitive(s)) return s;
+ }
+
+ throw %MakeTypeError('cannot_convert_to_primitive', []);
+};
+
+
+// ECMA-262, section 8.6.2.6, page 28. [[DefaultValue]] with string
+// hint: try toString first, then valueOf.
+function DefaultString(x) {
+ if (IS_FUNCTION(x.toString)) {
+ var s = x.toString();
+ if (%IsPrimitive(s)) return s;
+ }
+
+ if (IS_FUNCTION(x.valueOf)) {
+ var v = x.valueOf();
+ if (%IsPrimitive(v)) return v;
+ }
+
+ throw %MakeTypeError('cannot_convert_to_primitive', []);
+};
+
+
+// NOTE: Setting the prototype for Array must take place as early as
+// possible due to code generation for array literals. When
+// generating code for an array literal a boilerplate array is created
+// that is cloned when running the code. It is essential that the
+// boilerplate gets the right prototype.
+%FunctionSetPrototype($Array, new $Array(0));
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scanner.h"
+
+namespace v8 { namespace internal {
+
+// ----------------------------------------------------------------------------
+// Character predicates
+
+
+unibrow::Predicate<IdentifierStart, 128> Scanner::kIsIdentifierStart;
+unibrow::Predicate<IdentifierPart, 128> Scanner::kIsIdentifierPart;
+unibrow::Predicate<unibrow::LineTerminator, 128> Scanner::kIsLineTerminator;
+unibrow::Predicate<unibrow::WhiteSpace, 128> Scanner::kIsWhiteSpace;
+
+
+StaticResource<Scanner::Utf8Decoder> Scanner::utf8_decoder_;
+
+
+// ----------------------------------------------------------------------------
+// UTF8Buffer
+
+// Constructs an empty buffer; storage is allocated lazily by AddChar.
+UTF8Buffer::UTF8Buffer() : data_(NULL) {
+ Initialize(NULL, 0);
+}
+
+
+UTF8Buffer::~UTF8Buffer() {
+ DeleteArray(data_);
+}
+
+
+// Takes ownership of 'src'; any previously held storage is freed.
+void UTF8Buffer::Initialize(char* src, int length) {
+ DeleteArray(data_);
+ data_ = src;
+ size_ = length;
+ Reset();
+}
+
+
+// Appends one character in UTF-8 encoding, growing the buffer
+// (doubling, with a 1024-byte minimum) whenever fewer than
+// kMaxEncodedSize bytes of headroom remain.
+void UTF8Buffer::AddChar(uc32 c) {
+ const int min_size = 1024;
+ if (pos_ + static_cast<int>(unibrow::Utf8::kMaxEncodedSize) > size_) {
+ int new_size = size_ * 2;
+ if (new_size < min_size) {
+ new_size = min_size;
+ }
+ char* new_data = NewArray<char>(new_size);
+ memcpy(new_data, data_, pos_);
+ DeleteArray(data_);
+ data_ = new_data;
+ size_ = new_size;
+ }
+ if (static_cast<unsigned>(c) < unibrow::Utf8::kMaxOneByteChar) {
+ data_[pos_++] = c; // common case: 7bit ASCII
+ } else {
+ pos_ += unibrow::Utf8::Encode(&data_[pos_], c);
+ }
+ ASSERT(pos_ <= size_);
+}
+
+
+// ----------------------------------------------------------------------------
+// UTF16Buffer
+
+
+UTF16Buffer::UTF16Buffer()
+ : pos_(0),
+ pushback_buffer_(0),
+ last_(0),
+ stream_(NULL) { }
+
+
+// Binds the buffer to a source string and its character stream; the
+// stream is not owned by this object.
+void UTF16Buffer::Initialize(Handle<String> data,
+ unibrow::CharacterStream* input) {
+ data_ = data;
+ pos_ = 0;
+ stream_ = input;
+}
+
+
+Handle<String> UTF16Buffer::SubString(int start, int end) {
+ return internal::SubString(data_, start, end);
+}
+
+
+// Un-reads one character: the previously returned character is saved
+// on the pushback stack and 'ch' becomes the current last character.
+void UTF16Buffer::PushBack(uc32 ch) {
+ pushback_buffer()->Add(last_);
+ last_ = ch;
+ pos_--;
+}
+
+
+// Returns the next character, or -1 at end of input.
+uc32 UTF16Buffer::Advance() {
+ // NOTE: It is of importance to Persian / Farsi resources that we do
+ // *not* strip format control characters in the scanner; see
+ //
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=274152
+ //
+ // So, even though ECMA-262, section 7.1, page 11, dictates that we
+ // must remove Unicode format-control characters, we do not. This is
+ // in line with how IE and SpiderMonkey handles it.
+ if (!pushback_buffer()->is_empty()) {
+ pos_++;
+ return last_ = pushback_buffer()->RemoveLast();
+ } else if (stream_->has_more()) {
+ pos_++;
+ uc32 next = stream_->GetNext();
+ return last_ = next;
+ } else {
+ // note: currently the following increment is necessary to avoid a
+ // test-parser problem!
+ pos_++;
+ return last_ = static_cast<uc32>(-1);
+ }
+}
+
+
+// Seeks to 'pos'; only valid when no characters have been pushed back.
+void UTF16Buffer::SeekForward(int pos) {
+ pos_ = pos;
+ ASSERT(pushback_buffer()->is_empty());
+ stream_->Seek(pos);
+}
+
+
+// ----------------------------------------------------------------------------
+// Scanner
+
+// 'pre' selects pre-parsing mode (recorded in is_pre_parsing_).
+Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) {
+ Token::Initialize();
+}
+
+
+// Prepares the scanner for a new source; 'position' is the absolute
+// position of the stream start, used to translate token positions.
+void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
+ int position) {
+ // Initialize the source buffer.
+ source_.Initialize(source, stream);
+ position_ = position;
+
+ // Reset literals buffer
+ literals_.Reset();
+
+ // Set c0_ (one character ahead)
+ ASSERT(kCharacterLookaheadBufferSize == 1);
+ Advance();
+
+ // Skip initial whitespace (allowing HTML comment ends) and scan
+ // first token.
+ SkipWhiteSpace(true);
+ Scan();
+}
+
+
+// Positions are absolute; translate into buffer-relative offsets.
+Handle<String> Scanner::SubString(int start, int end) {
+ return source_.SubString(start - position_, end - position_);
+}
+
+
+// Returns the current token and scans the next one into next_.
+Token::Value Scanner::Next() {
+ // BUG 1215673: Find a thread safe way to set a stack limit in
+ // pre-parse mode. Otherwise, we cannot safely pre-parse from other
+ // threads.
+ current_ = next_;
+ // Check for stack-overflow before returning any tokens.
+ StackLimitCheck check;
+ if (check.HasOverflowed()) {
+ stack_overflow_ = true;
+ next_.token = Token::ILLEGAL;
+ } else {
+ Scan();
+ }
+ return current_.token;
+}
+
+
+// Marks the start of the next token's literal text in the shared
+// literals buffer.
+void Scanner::StartLiteral() {
+ next_.literal_pos = literals_.pos();
+}
+
+
+void Scanner::AddChar(uc32 c) {
+ literals_.AddChar(c);
+}
+
+
+// Marks the end of the literal and zero-terminates it in the buffer.
+void Scanner::TerminateLiteral() {
+ next_.literal_end = literals_.pos();
+ AddChar(0);
+}
+
+
+// Adds the lookahead character to the literal and advances.
+void Scanner::AddCharAdvance() {
+ AddChar(c0_);
+ Advance();
+}
+
+
+void Scanner::Advance() {
+ c0_ = source_.Advance();
+}
+
+
+// Pushes back the lookahead and makes 'ch' the new lookahead.
+void Scanner::PushBack(uc32 ch) {
+ source_.PushBack(ch);
+ c0_ = ch;
+}
+
+
+// Skips whitespace and '-->' HTML comment ends at the start of a line;
+// 'initial' seeds the line-terminator flag (true at source start).
+void Scanner::SkipWhiteSpace(bool initial) {
+ has_line_terminator_before_next_ = initial;
+
+ while (true) {
+ while (kIsWhiteSpace.get(c0_)) {
+ // IsWhiteSpace() includes line terminators!
+ if (kIsLineTerminator.get(c0_))
+ // Ignore line terminators, but remember them. This is necessary
+ // for automatic semicolon insertion.
+ has_line_terminator_before_next_ = true;
+ Advance();
+ }
+
+ // If there is an HTML comment end '-->' at the beginning of a
+ // line (with only whitespace in front of it), we treat the rest
+ // of the line as a comment. This is in line with the way
+ // SpiderMonkey handles it.
+ if (c0_ == '-' && has_line_terminator_before_next_) {
+ Advance();
+ if (c0_ == '-') {
+ Advance();
+ if (c0_ == '>') {
+ // Treat the rest of the line as a comment.
+ SkipSingleLineComment();
+ // Continue skipping white space after the comment.
+ continue;
+ }
+ PushBack('-'); // undo Advance()
+ }
+ PushBack('-'); // undo Advance()
+ }
+ return;
+ }
+}
+
+
+// Consumes the rest of the current line; the terminating line
+// terminator itself is left in the input.
+Token::Value Scanner::SkipSingleLineComment() {
+ Advance();
+
+ // The line terminator at the end of the line is not considered
+ // to be part of the single-line comment; it is recognized
+ // separately by the lexical grammar and becomes part of the
+ // stream of input elements for the syntactic grammar (see
+ // ECMA-262, section 7.4, page 12).
+ while (c0_ >= 0 && !kIsLineTerminator.get(c0_)) {
+ Advance();
+ }
+
+ return Token::COMMENT;
+}
+
+
+// Consumes a '/* ... */' comment (the leading '/' and '*' have already
+// been seen; c0_ is at the '*'). Returns Token::COMMENT on success and
+// Token::ILLEGAL for an unterminated comment.
+Token::Value Scanner::SkipMultiLineComment() {
+ ASSERT(c0_ == '*');
+ Advance();
+
+ while (c0_ >= 0) {
+ // Keep the full character width: narrowing c0_ to 'char' would let
+ // a non-ASCII character whose low byte happens to equal '*' falsely
+ // match the comment terminator below.
+ uc32 ch = c0_;
+ Advance();
+ // If we have reached the end of the multi-line comment, we
+ // consume the '/' and insert a whitespace. This way all
+ // multi-line comments are treated as whitespace - even the ones
+ // containing line terminators. This contradicts ECMA-262, section
+ // 7.4, page 12, that says that multi-line comments containing
+ // line terminators should be treated as a line terminator, but it
+ // matches the behaviour of SpiderMonkey and KJS.
+ if (ch == '*' && c0_ == '/') {
+ c0_ = ' ';
+ return Token::COMMENT;
+ }
+ }
+
+ // Unterminated multi-line comment.
+ return Token::ILLEGAL;
+}
+
+
+// Recognizes '<!--' comment openers; called after '<' with c0_ at '!'.
+// Falls back to returning Token::LT when no comment opener follows.
+Token::Value Scanner::ScanHtmlComment() {
+ // Check for <!-- comments.
+ ASSERT(c0_ == '!');
+ Advance();
+ if (c0_ == '-') {
+ Advance();
+ if (c0_ == '-') return SkipSingleLineComment();
+ PushBack('-'); // undo Advance()
+ }
+ PushBack('!'); // undo Advance()
+ ASSERT(c0_ == '!');
+ return Token::LT;
+}
+
+
+// Scans the next non-comment token into next_, recording its source
+// location and the line-terminator-before flag used for automatic
+// semicolon insertion.
+void Scanner::Scan() {
+ Token::Value token;
+ bool has_line_terminator = false;
+ do {
+ SkipWhiteSpace(has_line_terminator);
+
+ // Remember the line terminator in previous loop
+ has_line_terminator = has_line_terminator_before_next();
+
+ // Remember the position of the next token
+ next_.location.beg_pos = source_pos();
+
+ token = ScanToken();
+ } while (token == Token::COMMENT);
+
+ next_.location.end_pos = source_pos();
+ next_.token = token;
+}
+
+
+// Repositions the scanner at 'pos' and scans the token there. Seeks to
+// pos - 1 because Advance() consumes one character to refill c0_.
+void Scanner::SeekForward(int pos) {
+ source_.SeekForward(pos - 1);
+ Advance();
+ Scan();
+}
+
+
+// Scans 'length' hex digits and returns their value; on a non-hex
+// digit, pushes back what was read and returns 'c' (the escape
+// character) unchanged, mimicking other JS engines.
+uc32 Scanner::ScanHexEscape(uc32 c, int length) {
+ ASSERT(length <= 4); // prevent overflow
+
+ uc32 digits[4];
+ uc32 x = 0;
+ for (int i = 0; i < length; i++) {
+ digits[i] = c0_;
+ int d = HexValue(c0_);
+ if (d < 0) {
+ // According to ECMA-262, 3rd, 7.8.4, page 18, these hex escapes
+ // should be illegal, but other JS VMs just return the
+ // non-escaped version of the original character.
+
+ // Push back digits read, except the last one (in c0_).
+ for (int j = i-1; j >= 0; j--) {
+ PushBack(digits[j]);
+ }
+
+ return c;
+ }
+ x = x * 16 + d;
+ Advance();
+ }
+
+ return x;
+}
+
+
+// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
+// ECMA-262. Other JS VMs support them. Stops early at a non-octal
+// digit or when the value would exceed 255.
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
+ uc32 x = c - '0';
+ for (int i = 0; i < length; i++) {
+ int d = c0_ - '0';
+ if (d < 0 || d > 7) break;
+ int nx = x * 8 + d;
+ if (nx >= 256) break;
+ x = nx;
+ Advance();
+ }
+ return x;
+}
+
+
+// Scans one backslash escape inside a string literal and appends the
+// resulting character to the current literal (escaped newlines append
+// nothing).
+void Scanner::ScanEscape() {
+ uc32 c = c0_;
+ Advance();
+
+ // Skip escaped newlines.
+ if (kIsLineTerminator.get(c)) {
+ // Allow CR+LF newlines in multiline string literals.
+ if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
+ // Allow LF+CR newlines in multiline string literals.
+ if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
+ return;
+ }
+
+ switch (c) {
+ case '\'': // fall through
+ case '"' : // fall through
+ case '\\': break;
+ case 'b' : c = '\b'; break;
+ case 'f' : c = '\f'; break;
+ case 'n' : c = '\n'; break;
+ case 'r' : c = '\r'; break;
+ case 't' : c = '\t'; break;
+ case 'u' : c = ScanHexEscape(c, 4); break;
+ case 'v' : c = '\v'; break;
+ case 'x' : c = ScanHexEscape(c, 2); break;
+ case '0' : // fall through
+ case '1' : // fall through
+ case '2' : // fall through
+ case '3' : // fall through
+ case '4' : // fall through
+ case '5' : // fall through
+ case '6' : // fall through
+ case '7' : c = ScanOctalEscape(c, 2); break;
+ }
+
+ // According to ECMA-262, 3rd, 7.8.4 (p 18ff) these
+ // should be illegal, but they are commonly handled
+ // as non-escaped characters by JS VMs.
+ AddChar(c);
+}
+
+
+// Scans a single- or double-quoted string literal; c0_ is at the
+// opening quote. Unterminated strings and bare line terminators yield
+// Token::ILLEGAL.
+Token::Value Scanner::ScanString() {
+ uc32 quote = c0_;
+ Advance(); // consume quote
+
+ StartLiteral();
+ while (c0_ != quote && c0_ >= 0 && !kIsLineTerminator.get(c0_)) {
+ uc32 c = c0_;
+ Advance();
+ if (c == '\\') {
+ if (c0_ < 0) return Token::ILLEGAL;
+ ScanEscape();
+ } else {
+ AddChar(c);
+ }
+ }
+ if (c0_ != quote) {
+ return Token::ILLEGAL;
+ }
+ TerminateLiteral();
+
+ Advance(); // consume quote
+ return Token::STRING;
+}
+
+
+// Consumes the current character and returns 'tok'.
+Token::Value Scanner::Select(Token::Value tok) {
+ Advance();
+ return tok;
+}
+
+
+// Consumes the current character; if the following character is
+// 'next' it is consumed too and 'then' is returned, otherwise 'else_'.
+Token::Value Scanner::Select(uc32 next, Token::Value then, Token::Value else_) {
+ Advance();
+ if (c0_ == next) {
+ Advance();
+ return then;
+ } else {
+ return else_;
+ }
+}
+
+
+// Scans a single token starting at c0_. Multi-character operators are
+// resolved by hand; Select() consumes one character and picks between
+// the one- and two-character alternatives.
+Token::Value Scanner::ScanToken() {
+ switch (c0_) {
+ // strings
+ case '"': case '\'':
+ return ScanString();
+
+ case '<':
+ // < <= << <<= <!--
+ Advance();
+ if (c0_ == '=') return Select(Token::LTE);
+ if (c0_ == '<') return Select('=', Token::ASSIGN_SHL, Token::SHL);
+ if (c0_ == '!') return ScanHtmlComment();
+ return Token::LT;
+
+ case '>':
+ // > >= >> >>= >>> >>>=
+ Advance();
+ if (c0_ == '=') return Select(Token::GTE);
+ if (c0_ == '>') {
+ // >> >>= >>> >>>=
+ Advance();
+ if (c0_ == '=') return Select(Token::ASSIGN_SAR);
+ if (c0_ == '>') return Select('=', Token::ASSIGN_SHR, Token::SHR);
+ return Token::SAR;
+ }
+ return Token::GT;
+
+ case '=':
+ // = == ===
+ Advance();
+ if (c0_ == '=') return Select('=', Token::EQ_STRICT, Token::EQ);
+ return Token::ASSIGN;
+
+ case '!':
+ // ! != !==
+ Advance();
+ if (c0_ == '=') return Select('=', Token::NE_STRICT, Token::NE);
+ return Token::NOT;
+
+ case '+':
+ // + ++ +=
+ Advance();
+ if (c0_ == '+') return Select(Token::INC);
+ if (c0_ == '=') return Select(Token::ASSIGN_ADD);
+ return Token::ADD;
+
+ case '-':
+ // - -- -=
+ Advance();
+ if (c0_ == '-') return Select(Token::DEC);
+ if (c0_ == '=') return Select(Token::ASSIGN_SUB);
+ return Token::SUB;
+
+ case '*':
+ // * *=
+ return Select('=', Token::ASSIGN_MUL, Token::MUL);
+
+ case '%':
+ // % %=
+ return Select('=', Token::ASSIGN_MOD, Token::MOD);
+
+ case '/':
+ // / // /* /=
+ Advance();
+ if (c0_ == '/') return SkipSingleLineComment();
+ if (c0_ == '*') return SkipMultiLineComment();
+ if (c0_ == '=') return Select(Token::ASSIGN_DIV);
+ return Token::DIV;
+
+ case '&':
+ // & && &=
+ Advance();
+ if (c0_ == '&') return Select(Token::AND);
+ if (c0_ == '=') return Select(Token::ASSIGN_BIT_AND);
+ return Token::BIT_AND;
+
+ case '|':
+ // | || |=
+ Advance();
+ if (c0_ == '|') return Select(Token::OR);
+ if (c0_ == '=') return Select(Token::ASSIGN_BIT_OR);
+ return Token::BIT_OR;
+
+ case '^':
+ // ^ ^=
+ return Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
+
+ case '.':
+ // . Number
+ Advance();
+ if (IsDecimalDigit(c0_)) return ScanNumber(true);
+ return Token::PERIOD;
+
+ case ':':
+ return Select(Token::COLON);
+
+ case ';':
+ return Select(Token::SEMICOLON);
+
+ case ',':
+ return Select(Token::COMMA);
+
+ case '(':
+ return Select(Token::LPAREN);
+
+ case ')':
+ return Select(Token::RPAREN);
+
+ case '[':
+ return Select(Token::LBRACK);
+
+ case ']':
+ return Select(Token::RBRACK);
+
+ case '{':
+ return Select(Token::LBRACE);
+
+ case '}':
+ return Select(Token::RBRACE);
+
+ case '?':
+ return Select(Token::CONDITIONAL);
+
+ case '~':
+ return Select(Token::BIT_NOT);
+
+ default:
+ // Identifiers, numbers, end-of-source, and everything else.
+ if (kIsIdentifierStart.get(c0_))
+ return ScanIdentifier();
+ if (IsDecimalDigit(c0_))
+ return ScanNumber(false);
+ if (c0_ < 0)
+ return Token::EOS;
+ return Select(Token::ILLEGAL);
+ }
+
+ UNREACHABLE();
+ return Token::ILLEGAL;
+}
+
+
+// Scans zero or more decimal digits, adding them to the current
+// literal. (Callers that need "at least one digit" check c0_ first.)
+void Scanner::ScanDecimalDigits() {
+ while (IsDecimalDigit(c0_))
+ AddCharAdvance();
+}
+
+
+// Scans a numeric literal (decimal, hex, or octal, with optional
+// fraction and exponent). 'seen_period' is true when the caller has
+// already consumed a leading '.' and c0_ is the first fraction digit.
+Token::Value Scanner::ScanNumber(bool seen_period) {
+ ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
+
+ enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
+
+ StartLiteral();
+ if (seen_period) {
+ // we have already seen a decimal point of the float
+ AddChar('.');
+ ScanDecimalDigits(); // we know we have at least one digit
+
+ } else {
+ // if the first character is '0' we must check for octals and hex
+ if (c0_ == '0') {
+ AddCharAdvance();
+
+ // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
+ if (c0_ == 'x' || c0_ == 'X') {
+ // hex number
+ kind = HEX;
+ AddCharAdvance();
+ if (!IsHexDigit(c0_))
+ // we must have at least one hex digit after 'x'/'X'
+ return Token::ILLEGAL;
+ while (IsHexDigit(c0_))
+ AddCharAdvance();
+
+ } else if ('0' <= c0_ && c0_ <= '7') {
+ // (possible) octal number
+ kind = OCTAL;
+ while (true) {
+ // an '8' or '9' digit retroactively makes it decimal
+ if (c0_ == '8' || c0_ == '9') {
+ kind = DECIMAL;
+ break;
+ }
+ if (c0_ < '0' || '7' < c0_) break;
+ AddCharAdvance();
+ }
+ }
+ }
+
+ // Parse decimal digits and allow trailing fractional part.
+ if (kind == DECIMAL) {
+ ScanDecimalDigits(); // optional
+ if (c0_ == '.') {
+ AddCharAdvance();
+ ScanDecimalDigits(); // optional
+ }
+ }
+ }
+
+ // scan exponent, if any
+ if (c0_ == 'e' || c0_ == 'E') {
+ ASSERT(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
+ if (kind == OCTAL) return Token::ILLEGAL; // no exponent for octals allowed
+ // scan exponent
+ AddCharAdvance();
+ if (c0_ == '+' || c0_ == '-')
+ AddCharAdvance();
+ if (!IsDecimalDigit(c0_))
+ // we must have at least one decimal digit after 'e'/'E'
+ return Token::ILLEGAL;
+ ScanDecimalDigits();
+ }
+ TerminateLiteral();
+
+ // The source character immediately following a numeric literal must
+ // not be an identifier start or a decimal digit; see ECMA-262
+ // section 7.8.3, page 17 (note that we read only one decimal digit
+ // if the value is 0).
+ if (IsDecimalDigit(c0_) || kIsIdentifierStart.get(c0_))
+ return Token::ILLEGAL;
+
+ return Token::NUMBER;
+}
+
+
+// Scans a '\uXXXX' escape inside an identifier; c0_ is at the '\'.
+// Returns the decoded character or unibrow::Utf8::kBadChar on a
+// malformed escape.
+uc32 Scanner::ScanIdentifierUnicodeEscape() {
+ Advance();
+ if (c0_ != 'u') return unibrow::Utf8::kBadChar;
+ Advance();
+ uc32 c = ScanHexEscape('u', 4);
+ // We do not allow a unicode escape sequence to start another
+ // unicode escape sequence.
+ if (c == '\\') return unibrow::Utf8::kBadChar;
+ return c;
+}
+
+
+// Scans an identifier or keyword; c0_ is at the first character.
+Token::Value Scanner::ScanIdentifier() {
+ ASSERT(kIsIdentifierStart.get(c0_));
+
+ bool has_escapes = false;
+
+ StartLiteral();
+ // Scan identifier start character.
+ if (c0_ == '\\') {
+ has_escapes = true;
+ uc32 c = ScanIdentifierUnicodeEscape();
+ // Only allow legal identifier start characters.
+ if (!kIsIdentifierStart.get(c)) return Token::ILLEGAL;
+ AddChar(c);
+ } else {
+ AddCharAdvance();
+ }
+ // Scan the rest of the identifier characters.
+ while (kIsIdentifierPart.get(c0_)) {
+ if (c0_ == '\\') {
+ has_escapes = true;
+ uc32 c = ScanIdentifierUnicodeEscape();
+ // Only allow legal identifier part characters.
+ if (!kIsIdentifierPart.get(c)) return Token::ILLEGAL;
+ AddChar(c);
+ } else {
+ AddCharAdvance();
+ }
+ }
+ TerminateLiteral();
+
+ // We don't have any 1-letter keywords (this is probably a common case).
+ if ((next_.literal_end - next_.literal_pos) == 1)
+ return Token::IDENTIFIER;
+
+ // If the identifier contains unicode escapes, it must not be
+ // resolved to a keyword.
+ if (has_escapes)
+ return Token::IDENTIFIER;
+
+ return Token::Lookup(&literals_.data()[next_.literal_pos]);
+}
+
+
+
+// Returns true if the buffer contains exactly one well-formed
+// identifier.  Unicode escapes are not handled here; the buffer is
+// consumed up to the first offending character.
+bool Scanner::IsIdentifier(unibrow::CharacterStream* buffer) {
+  // Empty input is not an identifier.
+  if (!buffer->has_more()) return false;
+  // The first character must be an identifier start; every following
+  // character must be an identifier part.
+  bool valid = kIsIdentifierStart.get(buffer->GetNext());
+  while (valid && buffer->has_more()) {
+    valid = kIsIdentifierPart.get(buffer->GetNext());
+  }
+  return valid;
+}
+
+
+// Scans the body of a regular expression literal into the literal
+// buffer, uninterpreted (including a leading '=' when the scanner has
+// already consumed '/=').  Returns false if the pattern is terminated
+// by a line terminator or end of input before the closing '/'.
+bool Scanner::ScanRegExpPattern(bool seen_equal) {
+  // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
+  bool in_character_class = false;
+
+  // Previous token is either '/' or '/=', in the second case, the
+  // pattern starts at =.
+  next_.location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
+  next_.location.end_pos = source_pos() - (seen_equal ? 1 : 0);
+
+  // Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
+  // the scanner should pass uninterpreted bodies to the RegExp
+  // constructor.
+  StartLiteral();
+  if (seen_equal)
+    AddChar('=');
+
+  // A '/' inside a character class ([...]) does not end the pattern.
+  while (c0_ != '/' || in_character_class) {
+    if (kIsLineTerminator.get(c0_) || c0_ < 0)
+      return false;
+    if (c0_ == '\\') {  // escaped character
+      AddCharAdvance();
+      if (kIsLineTerminator.get(c0_) || c0_ < 0)
+        return false;
+      AddCharAdvance();
+    } else {  // unescaped character
+      if (c0_ == '[')
+        in_character_class = true;
+      if (c0_ == ']')
+        in_character_class = false;
+      AddCharAdvance();
+    }
+  }
+  Advance();  // consume '/'
+
+  TerminateLiteral();
+
+  return true;
+}
+
+bool Scanner::ScanRegExpFlags() {
+  // Collect the (possibly empty) run of identifier-part characters
+  // following the regexp body into the literal buffer.
+  StartLiteral();
+  while (kIsIdentifierPart.get(c0_)) {
+    AddCharAdvance();
+  }
+  TerminateLiteral();
+  // Extend the token location to cover the flags as well.
+  next_.location.end_pos = source_pos() - 1;
+  // Scanning flags always succeeds, since the flag list may be empty.
+  return true;
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SCANNER_H_
+#define V8_SCANNER_H_
+
+#include "token.h"
+#include "char-predicates-inl.h"
+
+namespace v8 { namespace internal {
+
+
+// Byte buffer used by the scanner to collect literal text
+// (identifiers, strings, numbers) in 0-terminated UTF-8 encoding.
+class UTF8Buffer {
+ public:
+  UTF8Buffer();
+  ~UTF8Buffer();
+
+  // Adopt the given backing store (ownership semantics defined in the
+  // implementation file — confirm before freeing src elsewhere).
+  void Initialize(char* src, int length);
+  // Append one character, UTF-8 encoded.
+  void AddChar(uc32 c);
+  void Reset() { pos_ = 0; }
+  int pos() const { return pos_; }
+  char* data() const { return data_; }
+
+ private:
+  char* data_;  // backing store
+  int size_;    // capacity of data_ in bytes
+  int pos_;     // number of bytes written so far
+};
+
+
+// Character stream over a source string, handing out one character at
+// a time with pushback support; used as the scanner's input.
+class UTF16Buffer {
+ public:
+  UTF16Buffer();
+
+  void Initialize(Handle<String> data, unibrow::CharacterStream* stream);
+  // Push ch back so that a subsequent Advance() returns it again.
+  void PushBack(uc32 ch);
+  uc32 Advance();  // returns a value < 0 when the buffer end is reached
+  uint16_t CharAt(int index);
+  int pos() const { return pos_; }
+  int size() const { return size_; }
+  Handle<String> SubString(int start, int end);
+  List<uc32>* pushback_buffer() { return &pushback_buffer_; }
+  // Move the read position forward; see Scanner::SeekForward for the
+  // restrictions on when this is valid.
+  void SeekForward(int pos);
+
+ private:
+  Handle<String> data_;         // the source string
+  int pos_;                     // read position
+  int size_;
+  List<uc32> pushback_buffer_;  // characters pushed back via PushBack
+  uc32 last_;
+  unibrow::CharacterStream* stream_;
+};
+
+
+// The JavaScript lexer.  Produces one token at a time with one token
+// of look-ahead (Next()/peek()) and collects the literal text of
+// identifiers, strings, and numbers in a UTF8Buffer.
+class Scanner {
+ public:
+
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+  // Construction
+  explicit Scanner(bool is_pre_parsing);
+
+  // Initialize the Scanner to scan source:
+  void Init(Handle<String> source,
+            unibrow::CharacterStream* stream,
+            int position);
+
+  // Returns the next token.
+  Token::Value Next();
+
+  // One token look-ahead (past the token returned by Next()).
+  Token::Value peek() const { return next_.token; }
+
+  // Returns true if there was a line terminator before the peek'ed token.
+  bool has_line_terminator_before_next() const {
+    return has_line_terminator_before_next_;
+  }
+
+  struct Location {
+    Location(int b, int e) : beg_pos(b), end_pos(e) { }
+    Location() : beg_pos(0), end_pos(0) { }
+    int beg_pos;
+    int end_pos;
+  };
+
+  // Returns the location information for the current token
+  // (the token returned by Next()).
+  Location location() const { return current_.location; }
+  Location peek_location() const { return next_.location; }
+
+  // Returns the literal string, if any, for the current token (the
+  // token returned by Next()). The string is 0-terminated and in
+  // UTF-8 format; it may contain embedded 0-characters. Literal
+  // strings are collected for identifiers, strings, and numbers.
+  const char* literal_string() const {
+    return &literals_.data()[current_.literal_pos];
+  }
+  int literal_length() const {
+    return current_.literal_end - current_.literal_pos;
+  }
+
+  Vector<const char> next_literal() const {
+    return Vector<const char>(next_literal_string(), next_literal_length());
+  }
+
+  // Returns the literal string for the next token (the token that
+  // would be returned if Next() were called).
+  const char* next_literal_string() const {
+    return &literals_.data()[next_.literal_pos];
+  }
+  // Returns the length of the next token (that would be returned if
+  // Next() were called).
+  int next_literal_length() const {
+    return next_.literal_end - next_.literal_pos;
+  }
+
+  // Scans the input as a regular expression pattern, previous
+  // character(s) must be /(=). Returns true if a pattern is scanned.
+  bool ScanRegExpPattern(bool seen_equal);
+  // Returns true if regexp flags are scanned (always since flags can
+  // be empty).
+  bool ScanRegExpFlags();
+
+  // Seek forward to the given position. This operation does not
+  // work in general, for instance when there are pushed back
+  // characters, but works for seeking forward until simple delimiter
+  // tokens, which is what it is used for.
+  void SeekForward(int pos);
+
+  Handle<String> SubString(int start_pos, int end_pos);
+  bool stack_overflow() { return stack_overflow_; }
+
+  static StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
+
+  // Tells whether the buffer contains an identifier (no escapes).
+  // Used for checking if a property name is an identifier.
+  static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+  static unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
+  static unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
+  static unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
+  static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+
+ private:
+  // Source.
+  UTF16Buffer source_;
+  int position_;
+
+  // Buffer to hold literal values (identifiers, strings, numbers)
+  // using 0-terminated UTF-8 encoding.
+  UTF8Buffer literals_;
+
+  bool stack_overflow_;
+  static StaticResource<Utf8Decoder> utf8_decoder_;
+
+  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+  uc32 c0_;
+
+  // The current and look-ahead token.
+  struct TokenDesc {
+    Token::Value token;
+    Location location;
+    int literal_pos, literal_end;
+  };
+
+  TokenDesc current_;  // desc for current token (as returned by Next())
+  TokenDesc next_;     // desc for next token (one token look-ahead)
+  bool has_line_terminator_before_next_;
+  bool is_pre_parsing_;
+
+  static const int kCharacterLookaheadBufferSize = 1;
+
+  // Literal buffer support
+  void StartLiteral();
+  void AddChar(uc32 ch);
+  void AddCharAdvance();
+  void TerminateLiteral();
+
+  // Low-level scanning support.
+  void Advance();
+  void PushBack(uc32 ch);
+
+  void SkipWhiteSpace(bool initial);
+  Token::Value SkipSingleLineComment();
+  Token::Value SkipMultiLineComment();
+
+  inline Token::Value Select(Token::Value tok);
+  inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_);
+
+  void Scan();
+  Token::Value ScanToken();
+  void ScanDecimalDigits();
+  Token::Value ScanNumber(bool seen_period);
+  Token::Value ScanIdentifier();
+  uc32 ScanHexEscape(uc32 c, int length);
+  uc32 ScanOctalEscape(uc32 c, int length);
+  void ScanEscape();
+  Token::Value ScanString();
+
+  // Scans a possible HTML comment -- begins with '<!'.
+  Token::Value ScanHtmlComment();
+
+  // Return the current source position.
+  int source_pos() {
+    return source_.pos() - kCharacterLookaheadBufferSize + position_;
+  }
+
+  // Decodes a unicode escape-sequence which is part of an identifier.
+  // If the escape sequence cannot be decoded the result is
+  // unibrow::Utf8::kBadChar.
+  uc32 ScanIdentifierUnicodeEscape();
+};
+
+} } // namespace v8::internal
+
+#endif // V8_SCANNER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "scopeinfo.h"
+#include "scopes.h"
+
+namespace v8 { namespace internal {
+
+
+// Comparator ordering local variables by their allocated slot index.
+// Rewritten parameters (living in the arguments object) may have a
+// NULL slot; treat those as index 0 until a better solution is found.
+static int CompareLocal(Variable* const* v, Variable* const* w) {
+  Slot* a = (*v)->slot();
+  Slot* b = (*w)->slot();
+  int left = (a == NULL) ? 0 : a->index();
+  int right = (b == NULL) ? 0 : b->index();
+  // Sorting by slot type as well might be worth considering.
+  return left - right;
+}
+
+
+// Builds a ScopeInfo from a fully analyzed Scope: records parameter
+// names, stack-allocated local names in slot order, and
+// context-allocated local names together with their variable modes.
+template<class Allocator>
+ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
+    : function_name_(Factory::empty_symbol()),
+      supports_eval_(scope->SupportsEval()),
+      parameters_(scope->num_parameters()),
+      stack_slots_(scope->num_stack_slots()),
+      context_slots_(scope->num_heap_slots()),
+      context_modes_(scope->num_heap_slots()) {
+  // Add parameters.
+  for (int i = 0; i < scope->num_parameters(); i++) {
+    ASSERT(parameters_.length() == i);
+    parameters_.Add(scope->parameter(i)->name());
+  }
+
+  // Add stack locals and collect heap locals.
+  // We are assuming that the locals' slots are allocated in
+  // increasing order, so we can simply add them to the
+  // ScopeInfo lists. However, due to usage analysis, this is
+  // not true for context-allocated locals: Some of them
+  // may be parameters which are allocated before the
+  // non-parameter locals. When the non-parameter locals are
+  // sorted according to usage, the allocated slot indices may
+  // not be in increasing order with the variable list anymore.
+  // Thus, we first collect the context-allocated locals, and then
+  // sort them by context slot index before adding them to the
+  // ScopeInfo list.
+  List<Variable*, Allocator> locals(32);  // 32 is a wild guess
+  ASSERT(locals.is_empty());
+  scope->CollectUsedVariables(&locals);
+  locals.Sort(&CompareLocal);
+
+  List<Variable*, Allocator> heap_locals(locals.length());
+  for (int i = 0; i < locals.length(); i++) {
+    Variable* var = locals[i];
+    // Only variables that are actually used get recorded.
+    if (var->var_uses()->is_used()) {
+      Slot* slot = var->slot();
+      if (slot != NULL) {
+        switch (slot->type()) {
+          case Slot::PARAMETER:
+            // explicitly added to parameters_ above - ignore
+            break;
+
+          case Slot::LOCAL:
+            // Relies on the increasing-slot-order assumption above.
+            ASSERT(stack_slots_.length() == slot->index());
+            stack_slots_.Add(var->name());
+            break;
+
+          case Slot::CONTEXT:
+            // Collected first, added after sorting (see comment above).
+            heap_locals.Add(var);
+            break;
+
+          case Slot::LOOKUP:
+          case Slot::GLOBAL:
+            // these are currently not used
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  // Add heap locals.
+  if (scope->num_heap_slots() > 0) {
+    // Add user-defined slots.
+    for (int i = 0; i < heap_locals.length(); i++) {
+      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_slots_.length());
+      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_modes_.length());
+      context_slots_.Add(heap_locals[i]->name());
+      context_modes_.Add(heap_locals[i]->mode());
+    }
+
+  } else {
+    ASSERT(heap_locals.length() == 0);
+  }
+
+  // Add the function context slot, if present.
+  // For now, this must happen at the very end because of the
+  // ordering of the scope info slots and the respective slot indices.
+  if (scope->is_function_scope()) {
+    Variable* var = scope->function();
+    if (var != NULL &&
+        var->var_uses()->is_used() &&
+        var->slot()->type() == Slot::CONTEXT) {
+      function_name_ = var->name();
+      // Note that we must not find the function name in the context slot
+      // list - instead it must be handled separately in the
+      // Contexts::Lookup() function. Thus record an empty symbol here so we
+      // get the correct number of context slots.
+      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_slots_.length());
+      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_modes_.length());
+      context_slots_.Add(Factory::empty_symbol());
+      context_modes_.Add(Variable::INTERNAL);
+    }
+  }
+}
+
+
+// Encoding format in the Code object:
+//
+// - function name
+// - supports eval info
+//
+// - number of variables in the context object (smi) (= function context
+// slot index + 1)
+// - list of pairs (name, Var mode) of context-allocated variables (starting
+// with context slot 0)
+// - NULL (sentinel)
+//
+// - number of parameters (smi)
+// - list of parameter names (starting with parameter 0 first)
+// - NULL (sentinel)
+//
+// - number of variables on the stack (smi)
+// - list of names of stack-allocated variables (starting with stack slot 0)
+// - NULL (sentinel)
+
+// The ScopeInfo representation could be simplified and the ScopeInfo
+// re-implemented (with almost the same interface). Here is a
+// suggestion for the new format:
+//
+// - have a single list with all variable names (parameters, stack locals,
+// context locals), followed by a list of non-Object* values containing
+// the variables information (what kind, index, attributes)
+// - searching the linear list of names is fast and yields an index into the
+// list if the variable name is found
+// - that list index is then used to find the variable information in the
+// subsequent list
+// - the list entries don't have to be in any particular order, so all the
+// current sorting business can go away
+// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
+// which returns all information at once
+// - when gathering the information from a Scope, we only need to iterate
+// through the local variables (parameters and context info is already
+// present)
+
+
+// Reads a Smi-encoded integer at *p into *x; returns the position
+// after the entry.
+static inline Object** ReadInt(Object** p, int* x) {
+  Smi* smi = reinterpret_cast<Smi*>(*p);
+  *x = smi->value();
+  return p + 1;
+}
+
+
+// Reads a Smi-encoded boolean at *p into *x (non-zero means true);
+// returns the position after the entry.
+static inline Object** ReadBool(Object** p, bool* x) {
+  Smi* smi = reinterpret_cast<Smi*>(*p);
+  *x = smi->value() != 0;
+  return p + 1;
+}
+
+
+// Reads a symbol (String*) at *p, wrapping it in a fresh handle;
+// returns the position after the entry.
+static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
+  String* raw = reinterpret_cast<String*>(*p);
+  *s = Handle<String>(raw);
+  return p + 1;
+}
+
+
+// Consumes the NULL sentinel that terminates a list, asserting that it
+// is actually there.
+static inline Object** ReadSentinel(Object** p) {
+  ASSERT(*p == NULL);
+  return p + 1;
+}
+
+
+// Reads a length-prefixed, sentinel-terminated list of symbols into
+// list; returns the position after the sentinel.
+template <class Allocator>
+static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
+  ASSERT(list->is_empty());
+  int count;
+  p = ReadInt(p, &count);
+  for (int i = 0; i < count; i++) {
+    Handle<String> symbol;
+    p = ReadSymbol(p, &symbol);
+    list->Add(symbol);
+  }
+  return ReadSentinel(p);
+}
+
+
+// Reads a length-prefixed, sentinel-terminated list of (symbol, mode)
+// pairs into the two parallel lists; returns the position after the
+// sentinel.
+template <class Allocator>
+static Object** ReadList(Object** p,
+                         List<Handle<String>, Allocator>* list,
+                         List<Variable::Mode, Allocator>* modes) {
+  ASSERT(list->is_empty());
+  int count;
+  p = ReadInt(p, &count);
+  for (int i = 0; i < count; i++) {
+    Handle<String> symbol;
+    int mode;
+    p = ReadSymbol(p, &symbol);
+    p = ReadInt(p, &mode);
+    list->Add(symbol);
+    modes->Add(static_cast<Variable::Mode>(mode));
+  }
+  return ReadSentinel(p);
+}
+
+
+// Reconstructs a ScopeInfo from the serialized form stored with a
+// Code object (see the encoding format comment above).  A NULL code
+// object, or one without scope info, yields an empty ScopeInfo.
+template<class Allocator>
+ScopeInfo<Allocator>::ScopeInfo(Code* code)
+    : function_name_(Factory::empty_symbol()),
+      supports_eval_(false),
+      parameters_(4),
+      stack_slots_(8),
+      context_slots_(8),
+      context_modes_(8) {
+  if (code == NULL || code->sinfo_size() == 0) return;
+
+  Object** p0 = &Memory::Object_at(code->sinfo_start());
+  Object** p = p0;
+  p = ReadSymbol(p, &function_name_);
+  p = ReadBool(p, &supports_eval_);
+  p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
+  p = ReadList<Allocator>(p, &parameters_);
+  p = ReadList<Allocator>(p, &stack_slots_);
+  // The scope info section must be consumed exactly.
+  ASSERT((p - p0) * kPointerSize == code->sinfo_size());
+}
+
+
+// Writes x as a Smi at *p; returns the position after the entry.
+static inline Object** WriteInt(Object** p, int x) {
+  p[0] = Smi::FromInt(x);
+  return p + 1;
+}
+
+
+// Writes the symbol referenced by s at *p; returns the position after
+// the entry.
+static inline Object** WriteSymbol(Object** p, Handle<String> s) {
+  p[0] = *s;
+  return p + 1;
+}
+
+
+// Writes the NULL sentinel that terminates a list; returns the
+// position after it.
+static inline Object** WriteSentinel(Object** p) {
+  p[0] = NULL;
+  return p + 1;
+}
+
+
+// Writes a length-prefixed, sentinel-terminated list of symbols;
+// returns the position after the sentinel.
+template <class Allocator>
+static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
+  const int count = list->length();
+  p = WriteInt(p, count);
+  for (int i = 0; i < count; i++) {
+    p = WriteSymbol(p, list->at(i));
+  }
+  return WriteSentinel(p);
+}
+
+
+// Writes a length-prefixed, sentinel-terminated list of (symbol, mode)
+// pairs taken from the two parallel lists; returns the position after
+// the sentinel.
+template <class Allocator>
+static Object** WriteList(Object** p,
+                          List<Handle<String>, Allocator>* list,
+                          List<Variable::Mode, Allocator>* modes) {
+  const int count = list->length();
+  p = WriteInt(p, count);
+  for (int i = 0; i < count; i++) {
+    p = WriteSymbol(p, list->at(i));
+    p = WriteInt(p, modes->at(i));
+  }
+  return WriteSentinel(p);
+}
+
+
+// Computes the size in bytes of the serialized scope info and, when
+// code is non-NULL, writes the data into code's scope info section,
+// which must have exactly that size.
+template<class Allocator>
+int ScopeInfo<Allocator>::Serialize(Code* code) {
+  // function name, supports eval, length & sentinel for 3 tables:
+  const int extra_slots = 1 + 1 + 2 * 3;
+  int size = (extra_slots +
+              context_slots_.length() * 2 +  // (name, mode) pairs
+              parameters_.length() +
+              stack_slots_.length()) * kPointerSize;
+
+  if (code != NULL) {
+    CHECK(code->sinfo_size() == size);
+    Object** p0 = &Memory::Object_at(code->sinfo_start());
+    Object** p = p0;
+    p = WriteSymbol(p, function_name_);
+    p = WriteInt(p, supports_eval_);
+    p = WriteList(p, &context_slots_, &context_modes_);
+    p = WriteList(p, &parameters_);
+    p = WriteList(p, &stack_slots_);
+    ASSERT((p - p0) * kPointerSize == size);
+  }
+
+  return size;
+}
+
+
+// GC support: the scope info section is a contiguous run of tagged
+// words, so let the visitor see them all for pointer updating.
+template<class Allocator>
+void ScopeInfo<Allocator>::IterateScopeInfo(Code* code, ObjectVisitor* v) {
+  v->VisitPointers(&Memory::Object_at(code->sinfo_start()),
+                   &Memory::Object_at(code->sinfo_start() +
+                                      code->sinfo_size()));
+}
+
+
+// Returns the address of the context-slot count within code's scope
+// info section.
+static Object** ContextEntriesAddr(Code* code) {
+  ASSERT(code->sinfo_size() > 0);
+  Object** base = &Memory::Object_at(code->sinfo_start());
+  return base + 2;  // skip the function name and supports-eval entries
+}
+
+
+// Returns the address of the parameter count, which follows the
+// context entries (count, (name, mode) pairs, NULL sentinel).
+static Object** ParameterEntriesAddr(Code* code) {
+  ASSERT(code->sinfo_size() > 0);
+  int context_count;
+  Object** p = ReadInt(ContextEntriesAddr(code), &context_count);
+  return p + context_count * 2 + 1;  // pairs plus sentinel
+}
+
+
+// Returns the address of the stack-slot count, which follows the
+// parameter entries (count, names, NULL sentinel).
+static Object** StackSlotEntriesAddr(Code* code) {
+  ASSERT(code->sinfo_size() > 0);
+  int parameter_count;
+  Object** p = ReadInt(ParameterEntriesAddr(code), &parameter_count);
+  return p + parameter_count + 1;  // names plus sentinel
+}
+
+
+// Reads the supports-eval flag directly out of the serialized scope
+// info, without building a full ScopeInfo.  Code without scope info is
+// taken not to support eval.
+template<class Allocator>
+bool ScopeInfo<Allocator>::SupportsEval(Code* code) {
+  bool result = false;
+  if (code->sinfo_size() > 0) {
+    // +1 skips the function name entry; the flag is stored next.
+    ReadBool(&Memory::Object_at(code->sinfo_start()) + 1, &result);
+  }
+#ifdef DEBUG
+  // Cross-check against a fully deserialized ScopeInfo.
+  { ScopeInfo info(code);
+    ASSERT(result == info.supports_eval_);
+  }
+#endif
+  return result;
+}
+
+
+// Returns the number of stack slots recorded for code, or 0 when the
+// code object carries no scope info.
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfStackSlots(Code* code) {
+  if (code->sinfo_size() == 0) return 0;
+  int count;
+  ReadInt(StackSlotEntriesAddr(code), &count);
+  return count;
+}
+
+
+// Returns the total number of context slots for code, including the
+// Context::MIN_CONTEXT_SLOTS fixed slots, or 0 when the code object
+// carries no scope info.
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) {
+  if (code->sinfo_size() == 0) return 0;
+  int count;
+  ReadInt(ContextEntriesAddr(code), &count);
+  return count + Context::MIN_CONTEXT_SLOTS;
+}
+
+
+// Returns the index of the stack slot with the given name, or -1 if
+// there is no such slot.  name must be a symbol, so a pointer
+// comparison against the stored symbols suffices.
+template<class Allocator>
+int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
+  ASSERT(name->IsSymbol());
+  if (code->sinfo_size() > 0) {
+    // Loop below depends on the NULL sentinel after the stack slot names.
+    ASSERT(NumberOfStackSlots(code) > 0 ||
+           *(StackSlotEntriesAddr(code) + 1) == NULL);
+    // slots start after length entry
+    Object** p0 = StackSlotEntriesAddr(code) + 1;
+    Object** p = p0;
+    while (*p != NULL) {
+      if (*p == name) return p - p0;
+      p++;
+    }
+  }
+  return -1;
+}
+
+
+// Returns the context slot index for the given slot name, or -1 if
+// there is no such slot.  When found and mode != NULL, *mode receives
+// the variable's mode.  Walks the NULL-terminated list of
+// (name, mode) pairs; name must be a symbol, so pointer comparison
+// suffices.
+template<class Allocator>
+int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
+                                           String* name,
+                                           Variable::Mode* mode) {
+  ASSERT(name->IsSymbol());
+  if (code->sinfo_size() > 0) {
+    // Loop below depends on the NULL sentinel after the context slot names.
+    ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS ||
+           *(ContextEntriesAddr(code) + 1) == NULL);
+    // slots start after length entry
+    Object** p0 = ContextEntriesAddr(code) + 1;
+    Object** p = p0;
+    // contexts may have no variable slots (in the presence of eval()).
+    while (*p != NULL) {
+      if (*p == name) {
+        // Entries are (name, mode) pairs, so matches land on even offsets.
+        ASSERT(((p - p0) & 1) == 0);
+        if (mode != NULL) {
+          ReadInt(p + 1, reinterpret_cast<int*>(mode));
+        }
+        return ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+      }
+      p += 2;
+    }
+  }
+  return -1;
+}
+
+
+// Returns the index of the parameter with the given name, or -1 if
+// there is no such parameter.  name must be a symbol (pointer
+// comparison).
+template<class Allocator>
+int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) {
+  ASSERT(name->IsSymbol());
+  if (code->sinfo_size() > 0) {
+    // We must read parameters from the end since for
+    // multiply declared parameters the value of the
+    // last declaration of that parameter is used
+    // inside a function (and thus we need to look
+    // at the last index). Was bug# 1110337.
+    //
+    // Eventually, we should only register such parameters
+    // once, with corresponding index. This requires a new
+    // implementation of the ScopeInfo code. See also other
+    // comments in this file regarding this.
+    Object** p = ParameterEntriesAddr(code);
+    int n;  // number of parameters
+    Object** p0 = ReadInt(p, &n);
+    p = p0 + n;
+    while (p > p0) {
+      p--;
+      if (*p == name) return p - p0;
+    }
+  }
+  return -1;
+}
+
+
+// Returns the context slot index holding the function name of a named
+// function expression, or -1 if the name is not stored.  The function
+// name, when present, is the first entry of the scope info, and its
+// context slot is the last of the context entries.
+template<class Allocator>
+int ScopeInfo<Allocator>::FunctionContextSlotIndex(Code* code, String* name) {
+  ASSERT(name->IsSymbol());
+  if (code->sinfo_size() > 0) {
+    Object** p = &Memory::Object_at(code->sinfo_start());
+    if (*p++ == name) {
+      // Skip the supports-eval flag that Serialize() stores between the
+      // function name and the context-slot count (see ContextEntriesAddr).
+      // Reading the count at the flag's position would yield 0 or 1.
+      p++;
+      int n;
+      ReadInt(p, &n);  // n = number of context slots
+      return n - 1;    // the function context slot is the last entry
+    }
+  }
+  return -1;
+}
+
+
+// Returns the name of local number i.  Locals are numbered with the
+// stack-allocated ones first, followed by the context-allocated ones;
+// context slot numbering starts after the Context::MIN_CONTEXT_SLOTS
+// fixed slots, so translate the index accordingly.
+template<class Allocator>
+Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
+  const int stack_count = number_of_stack_slots();
+  if (i < stack_count) return stack_slot_name(i);
+  return context_slot_name(i - stack_count + Context::MIN_CONTEXT_SLOTS);
+}
+
+
+// Returns the number of user-visible locals: stack-allocated locals
+// plus context-allocated ones, excluding the fixed context slots.
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfLocals() const {
+  int total = number_of_stack_slots();
+  int context = number_of_context_slots();
+  if (context > 0) {
+    ASSERT(context >= Context::MIN_CONTEXT_SLOTS);
+    total += context - Context::MIN_CONTEXT_SLOTS;
+  }
+  return total;
+}
+
+
+#ifdef DEBUG
+// Pretty-prints one named slot list, numbering entries after the given
+// count of internal (fixed) slots.  Empty lists print nothing.
+template <class Allocator>
+static void PrintList(const char* list_name,
+                      int nof_internal_slots,
+                      List<Handle<String>, Allocator>& list) {
+  if (list.length() == 0) return;
+  PrintF("\n  // %s\n", list_name);
+  if (nof_internal_slots > 0) {
+    PrintF("  %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
+  }
+  for (int i = 0; i < list.length(); i++) {
+    PrintF("  %2d ", i + nof_internal_slots);
+    list[i]->ShortPrint();
+    PrintF("\n");
+  }
+}
+
+
+// Prints a human-readable dump of this ScopeInfo (debug builds only).
+template<class Allocator>
+void ScopeInfo<Allocator>::Print() {
+  PrintF("ScopeInfo ");
+  if (function_name_->length() > 0)
+    function_name_->ShortPrint();
+  else
+    PrintF("/* no function name */");
+  PrintF("{");
+
+  if (supports_eval_)
+    PrintF("\n  // supports eval\n");
+
+  PrintList<Allocator>("parameters", 0, parameters_);
+  PrintList<Allocator>("stack slots", 0, stack_slots_);
+  PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
+                       context_slots_);
+
+  PrintF("}\n");
+}
+#endif // DEBUG
+
+
+// Make sure the classes get instantiated by the template system:
+// explicit instantiation here emits all ScopeInfo method definitions
+// from this translation unit for the allocators named below.
+template class ScopeInfo<FreeStoreAllocationPolicy>;
+template class ScopeInfo<PreallocatedStorage>;
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SCOPEINFO_H_
+#define V8_SCOPEINFO_H_
+
+#include "variables.h"
+
+namespace v8 { namespace internal {
+
+// Scope information represents information about a function's
+// scopes (currently only one, because we don't do any inlining)
+// and the allocation of the scope's variables. Scope information
+// is stored in a compressed form with Code objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+//
+// Historical note: In other VMs built by this team, ScopeInfo was
+// usually called DebugInfo since the information was used (among
+// other things) for on-demand debugging (Self, Smalltalk). However,
+// DebugInfo seems misleading, since this information is primarily used
+// in debugging-unrelated contexts.
+
+// Forward defined as
+// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+template<class Allocator>
+class ScopeInfo BASE_EMBEDDED {
+ public:
+  // Create a ScopeInfo instance from a scope.
+  explicit ScopeInfo(Scope* scope);
+
+  // Create a ScopeInfo instance from a Code object.
+  explicit ScopeInfo(Code* code);
+
+  // Writes the ScopeInfo data into a Code object and returns the
+  // amount of space that was needed. If no Code object is provided
+  // (NULL handle), Serialize() only returns the amount of space needed.
+  //
+  // This operation requires that the Code object has the correct amount
+  // of space for the ScopeInfo data; otherwise the operation fails (fatal
+  // error). Any existing scope info in the Code object is simply overwritten.
+  int Serialize(Code* code);
+
+  // Garbage collection support for scope info embedded in Code objects.
+  // This code is in ScopeInfo because only here we should have to know
+  // about the encoding.
+  static void IterateScopeInfo(Code* code, ObjectVisitor* v);
+
+
+  // --------------------------------------------------------------------------
+  // Lookup
+
+  Handle<String> function_name() const { return function_name_; }
+
+  bool supports_eval() const { return supports_eval_; }
+
+  Handle<String> parameter_name(int i) const { return parameters_[i]; }
+  int number_of_parameters() const { return parameters_.length(); }
+
+  Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
+  int number_of_stack_slots() const { return stack_slots_.length(); }
+
+  // i is an absolute context slot index, including the fixed slots.
+  Handle<String> context_slot_name(int i) const {
+    return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
+  }
+  // Includes the Context::MIN_CONTEXT_SLOTS fixed slots when any
+  // context-allocated variables exist; 0 otherwise.
+  int number_of_context_slots() const {
+    int l = context_slots_.length();
+    return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
+  }
+
+  Handle<String> LocalName(int i) const;
+  int NumberOfLocals() const;
+
+
+  // --------------------------------------------------------------------------
+  // The following functions provide quick access to scope info details
+  // for runtime routines w/o the need to explicitly create a ScopeInfo
+  // object.
+  //
+  // ScopeInfo is the only class which should have to know about the
+  // encoding of its information in a Code object, which is why these
+  // functions are in this class.
+
+  static bool SupportsEval(Code* code);
+
+  // Return the number of stack slots for code.
+  static int NumberOfStackSlots(Code* code);
+
+  // Return the number of context slots for code.
+  static int NumberOfContextSlots(Code* code);
+
+  // Lookup support for scope info embedded in Code objects. Returns
+  // the stack slot index for a given slot name if the slot is
+  // present; otherwise returns a value < 0. The name must be a symbol
+  // (canonicalized).
+  static int StackSlotIndex(Code* code, String* name);
+
+  // Lookup support for scope info embedded in Code objects. Returns the
+  // context slot index for a given slot name if the slot is present; otherwise
+  // returns a value < 0. The name must be a symbol (canonicalized).
+  // If the slot is present and mode != NULL, sets *mode to the corresponding
+  // mode for that variable.
+  static int ContextSlotIndex(Code* code, String* name, Variable::Mode* mode);
+
+  // Lookup support for scope info embedded in Code objects. Returns the
+  // parameter index for a given parameter name if the parameter is present;
+  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+  static int ParameterIndex(Code* code, String* name);
+
+  // Lookup support for scope info embedded in Code objects. Returns the
+  // function context slot index if the function name is present (named
+  // function expressions, only), otherwise returns a value < 0. The name
+  // must be a symbol (canonicalized).
+  static int FunctionContextSlotIndex(Code* code, String* name);
+
+
+  // --------------------------------------------------------------------------
+  // Debugging support
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+ private:
+  Handle<String> function_name_;  // empty symbol if unnamed
+  bool supports_eval_;
+  List<Handle<String>, Allocator > parameters_;
+  List<Handle<String>, Allocator > stack_slots_;
+  // Parallel lists: name and mode of each context-allocated variable.
+  List<Handle<String>, Allocator > context_slots_;
+  List<Variable::Mode, Allocator > context_modes_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_SCOPEINFO_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "prettyprinter.h"
+#include "scopeinfo.h"
+#include "scopes.h"
+
+namespace v8 { namespace internal {
+
+// ----------------------------------------------------------------------------
+// A Zone allocator for use with LocalsMap.
+
+class ZoneAllocator: public Allocator {
+ public:
+ /* nothing to do */
+ virtual ~ZoneAllocator() {}
+
+ // Allocates from the current Zone; memory is reclaimed in bulk when
+ // the Zone itself is deallocated.
+ virtual void* New(size_t size) { return Zone::New(size); }
+
+ /* ignored - Zone is freed in one fell swoop */
+ virtual void Delete(void* p) {}
+};
+
+
+// Shared allocator instance used by all LocalsMap hash maps.
+static ZoneAllocator LocalsMapAllocator;
+
+
+// ----------------------------------------------------------------------------
+// Implementation of LocalsMap
+//
+// Note: We are storing the handle locations as key values in the hash map.
+// When inserting a new variable via Declare(), we rely on the fact that
+// the handle location remains alive for the duration of that variable
+// use. Because a Variable holding a handle with the same location exists
+// this is ensured.
+
+// Hash map key equality: keys are String** (handle locations). Both names
+// must be symbols (canonicalized), so pointer identity suffices.
+static bool Match(void* key1, void* key2) {
+ String* name1 = *reinterpret_cast<String**>(key1);
+ String* name2 = *reinterpret_cast<String**>(key2);
+ ASSERT(name1->IsSymbol());
+ ASSERT(name2->IsSymbol());
+ return name1 == name2;
+}
+
+
+// Dummy constructor
+LocalsMap::LocalsMap(bool gotta_love_static_overloading) : HashMap() {}
+
+LocalsMap::LocalsMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
+LocalsMap::~LocalsMap() {}
+
+
+// Declares 'name' in 'scope', or returns the already-declared variable
+// with that name if one exists (the existing mode/flags win).
+Variable* LocalsMap::Declare(Scope* scope,
+ Handle<String> name,
+ Variable::Mode mode,
+ bool is_valid_LHS,
+ bool is_this) {
+ HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
+ if (p->value == NULL) {
+ // The variable has not been declared yet -> insert it.
+ ASSERT(p->key == name.location());
+ p->value = new Variable(scope, name, mode, is_valid_LHS, is_this);
+ }
+ return reinterpret_cast<Variable*>(p->value);
+}
+
+
+// Returns the variable declared under 'name', or NULL if not found.
+Variable* LocalsMap::Lookup(Handle<String> name) {
+ HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
+ if (p != NULL) {
+ ASSERT(*reinterpret_cast<String**>(p->key) == *name);
+ ASSERT(p->value != NULL);
+ return reinterpret_cast<Variable*>(p->value);
+ }
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Scope
+
+
+// Dummy constructor
+Scope::Scope()
+ : inner_scopes_(0),
+ locals_(false),
+ temps_(0),
+ params_(0),
+ nonlocals_(0),
+ unresolved_(0),
+ decls_(0) {
+}
+
+
+// Creates a scope of the given type nested inside outer_scope; the
+// outer scope is NULL exactly for global and eval scopes (see ASSERT).
+Scope::Scope(Scope* outer_scope, Type type)
+ : outer_scope_(outer_scope),
+ inner_scopes_(4),
+ type_(type),
+ scope_name_(Factory::empty_symbol()),
+ locals_(),
+ temps_(4),
+ params_(4),
+ nonlocals_(4),
+ unresolved_(16),
+ decls_(4),
+ receiver_(NULL),
+ function_(NULL),
+ arguments_(NULL),
+ arguments_shadow_(NULL),
+ illegal_redecl_(NULL),
+ scope_inside_with_(false),
+ scope_contains_with_(false),
+ scope_calls_eval_(false),
+ outer_scope_calls_eval_(false),
+ inner_scope_calls_eval_(false),
+ force_eager_compilation_(false),
+ num_stack_slots_(0),
+ num_heap_slots_(0) {
+ // At some point we might want to provide outer scopes to
+ // eval scopes (by walking the stack and reading the scope info).
+ // In that case, the ASSERT below needs to be adjusted.
+ ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
+ ASSERT(!HasIllegalRedeclaration());
+}
+
+
+// Hooks this scope into its outer scope (if any) and declares the
+// implicit 'this' receiver and, for function scopes, 'arguments'.
+void Scope::Initialize(bool inside_with) {
+ // Add this scope as a new inner scope of the outer scope.
+ if (outer_scope_ != NULL) {
+ outer_scope_->inner_scopes_.Add(this);
+ scope_inside_with_ = outer_scope_->scope_inside_with_ || inside_with;
+ } else {
+ scope_inside_with_ = inside_with;
+ }
+
+ // Declare convenience variables.
+ // Declare and allocate receiver (even for the global scope, and even
+ // if naccesses_ == 0).
+ // NOTE: When loading parameters in the global scope, we must take
+ // care not to access them as properties of the global object, but
+ // instead load them directly from the stack. Currently, the only
+ // such parameter is 'this' which is passed on the stack when
+ // invoking scripts.
+ { Variable* var =
+ locals_.Declare(this, Factory::this_symbol(), Variable::VAR, false, true);
+ var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
+ receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
+ receiver_->BindTo(var);
+ }
+
+ if (is_function_scope()) {
+ // Declare 'arguments' variable which exists in all functions.
+ // Note that it may never be accessed, in which case it won't
+ // be allocated during variable allocation.
+ Declare(Factory::arguments_symbol(), Variable::VAR);
+ }
+}
+
+
+
+// Returns the variable declared under 'name' in this scope, or NULL.
+Variable* Scope::Lookup(Handle<String> name) {
+ return locals_.Lookup(name);
+}
+
+
+// Declares the CONST variable holding a named function expression's own
+// name. May be called at most once per function scope (see ASSERT).
+Variable* Scope::DeclareFunctionVar(Handle<String> name) {
+ ASSERT(is_function_scope() && function_ == NULL);
+ function_ = new Variable(this, name, Variable::CONST, true, false);
+ return function_;
+}
+
+
+// Declares a user-visible VAR or CONST variable in this scope.
+Variable* Scope::Declare(Handle<String> name, Variable::Mode mode) {
+ // DYNAMIC variables are introduced during variable allocation,
+ // INTERNAL variables are allocated explicitly, and TEMPORARY
+ // variables are allocated via NewTemporary().
+ ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+ return locals_.Declare(this, name, mode, true, false);
+}
+
+
+// Registers an already-declared variable as a parameter of this
+// function scope; parameter order matters for allocation.
+void Scope::AddParameter(Variable* var) {
+ ASSERT(is_function_scope());
+ ASSERT(Lookup(var->name()) == var);
+ params_.Add(var);
+}
+
+
+// Creates an unresolved VariableProxy for 'name' and records it for
+// later resolution by ResolveVariable().
+VariableProxy* Scope::NewUnresolved(Handle<String> name, bool inside_with) {
+ // Note that we must not share the unresolved variables with
+ // the same name because they may be removed selectively via
+ // RemoveUnresolved().
+ VariableProxy* proxy = new VariableProxy(name, false, inside_with);
+ unresolved_.Add(proxy);
+ return proxy;
+}
+
+
+// Removes 'var' from the unresolved list, if present. No-op when the
+// proxy is not found.
+void Scope::RemoveUnresolved(VariableProxy* var) {
+ // Most likely (always?) any variable we want to remove
+ // was just added before, so we search backwards.
+ for (int i = unresolved_.length(); i-- > 0;) {
+ if (unresolved_[i] == var) {
+ unresolved_.Remove(i);
+ return;
+ }
+ }
+}
+
+
+// Creates a new TEMPORARY variable and returns a proxy bound to it.
+VariableProxy* Scope::NewTemporary(Handle<String> name) {
+ Variable* var = new Variable(this, name, Variable::TEMPORARY, true, false);
+ VariableProxy* tmp = new VariableProxy(name, false, false);
+ tmp->BindTo(var);
+ temps_.Add(var);
+ return tmp;
+}
+
+
+// Records a declaration node encountered in this scope.
+void Scope::AddDeclaration(Declaration* declaration) {
+ decls_.Add(declaration);
+}
+
+
+// Remembers the first expression flagged as an illegal redeclaration;
+// subsequent calls leave the stored expression unchanged.
+void Scope::SetIllegalRedeclaration(Expression* expression) {
+ // Only set the illegal redeclaration expression the
+ // first time the function is called.
+ if (!HasIllegalRedeclaration()) {
+ illegal_redecl_ = expression;
+ }
+ ASSERT(HasIllegalRedeclaration());
+}
+
+
+// Runs 'visitor' over the recorded illegal redeclaration expression.
+void Scope::VisitIllegalRedeclaration(Visitor* visitor) {
+ ASSERT(HasIllegalRedeclaration());
+ illegal_redecl_->Accept(visitor);
+}
+
+
+// Appends all used temporaries and locals of this scope to 'locals'.
+template<class Allocator>
+void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
+ // Collect variables in this scope.
+ // Note that the function_ variable - if present - is not
+ // collected here but handled separately in ScopeInfo
+ // (which is the current user of this function).
+ for (int i = 0; i < temps_.length(); i++) {
+ Variable* var = temps_[i];
+ if (var->var_uses()->is_used()) {
+ locals->Add(var);
+ }
+ }
+ for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) {
+ Variable* var = reinterpret_cast<Variable*>(p->value);
+ if (var->var_uses()->is_used()) {
+ locals->Add(var);
+ }
+ }
+}
+
+
+// Make sure the method gets instantiated by the template system.
+template void Scope::CollectUsedVariables(
+ List<Variable*, FreeStoreAllocationPolicy>* locals);
+template void Scope::CollectUsedVariables(
+ List<Variable*, PreallocatedStorage>* locals);
+
+
+// Entry point for variable allocation on a root (eval or global) scope:
+// propagates scope info, resolves proxies, then allocates slots.
+void Scope::AllocateVariables() {
+ ASSERT(outer_scope_ == NULL); // eval or global scopes only
+
+ // 1) Propagate scope information.
+ // If we are in an eval scope, we may have other outer scopes about
+ // which we don't know anything at this point. Thus we must be conservative
+ // and assume they may invoke eval themselves. Eventually we could capture
+ // this information in the ScopeInfo and then use it here (by traversing
+ // the call chain stack, at compile time).
+ PropagateScopeInfo(is_eval_scope());
+
+ // 2) Resolve variables.
+ Scope* global_scope = NULL;
+ if (is_global_scope()) global_scope = this;
+ ResolveVariablesRecursively(global_scope);
+
+ // 3) Allocate variables.
+ AllocateVariablesRecursively();
+}
+
+
+// True if this scope or any inner scope contains an 'eval' call.
+bool Scope::SupportsEval() const {
+ return scope_calls_eval_ || inner_scope_calls_eval_;
+}
+
+
+// Lazy compilation is possible only when eager compilation has not been
+// forced and the outer context is trivial.
+bool Scope::AllowsLazyCompilation() const {
+ return !force_eager_compilation_ && HasTrivialOuterContext();
+}
+
+
+bool Scope::HasTrivialContext() const {
+ // A function scope has a trivial context if it always is the global
+ // context. We iteratively scan out the context chain to see if
+ // there is anything that makes this scope non-trivial; otherwise we
+ // return true.
+ for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
+ if (scope->is_eval_scope()) return false;
+ if (scope->scope_inside_with_) return false;
+ if (scope->num_heap_slots_ > 0) return false;
+ }
+ return true;
+}
+
+
+bool Scope::HasTrivialOuterContext() const {
+ Scope* outer = outer_scope_;
+ if (outer == NULL) return true;
+ // Note that the outer context may be trivial in general, but the current
+ // scope may be inside a 'with' statement in which case the outer context
+ // for this scope is not trivial.
+ return !scope_inside_with_ && outer->HasTrivialContext();
+}
+
+
+// Returns the number of heap-allocated contexts between this scope and
+// 'scope'; 'scope' must be on this scope's outer-scope chain.
+int Scope::ContextChainLength(Scope* scope) {
+ int n = 0;
+ for (Scope* s = this; s != scope; s = s->outer_scope_) {
+ ASSERT(s != NULL); // scope must be in the scope chain
+ if (s->num_heap_slots() > 0) n++;
+ }
+ return n;
+}
+
+
+#ifdef DEBUG
+// Returns a printable header string for the given scope type.
+static const char* Header(Scope::Type type) {
+ switch (type) {
+ case Scope::EVAL_SCOPE: return "eval";
+ case Scope::FUNCTION_SCOPE: return "function";
+ case Scope::GLOBAL_SCOPE: return "global";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// Prints 'str' indented by n spaces.
+static void Indent(int n, const char* str) {
+ PrintF("%*s%s", n, "", str);
+}
+
+
+static void PrintName(Handle<String> name) {
+ SmartPointer<char> s = name->ToCString(DISALLOW_NULLS);
+ PrintF("%s", *s);
+}
+
+
+// Prints one variable's mode, name, rewrite, and use counts — but only
+// if the variable is used or has been given a rewrite.
+static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
+ if (var->var_uses()->is_used() || var->rewrite() != NULL) {
+ Indent(indent, Variable::Mode2String(var->mode()));
+ PrintF(" ");
+ PrintName(var->name());
+ PrintF("; // ");
+ if (var->rewrite() != NULL) PrintF("%s, ", printer->Print(var->rewrite()));
+ if (var->is_accessed_from_inner_scope()) PrintF("inner scope access, ");
+ PrintF("var ");
+ var->var_uses()->Print();
+ PrintF(", obj ");
+ var->obj_uses()->Print();
+ PrintF("\n");
+ }
+}
+
+
+// Prints this scope and (when n >= 0) its inner scopes, indented by n
+// spaces; pass a negative n to suppress inner scopes.
+void Scope::Print(int n) {
+ int n0 = (n > 0 ? n : 0);
+ int n1 = n0 + 2; // indentation
+
+ // Print header.
+ Indent(n0, Header(type_));
+ if (scope_name_->length() > 0) {
+ PrintF(" ");
+ PrintName(scope_name_);
+ }
+
+ // Print parameters, if any.
+ if (is_function_scope()) {
+ PrintF(" (");
+ for (int i = 0; i < params_.length(); i++) {
+ if (i > 0) PrintF(", ");
+ PrintName(params_[i]->name());
+ }
+ PrintF(")");
+ }
+
+ PrintF(" {\n");
+
+ // Function name, if any (named function literals, only).
+ if (function_ != NULL) {
+ Indent(n1, "// (local) function name: ");
+ PrintName(function_->name());
+ PrintF("\n");
+ }
+
+ // Scope info.
+ if (HasTrivialOuterContext()) {
+ Indent(n1, "// scope has trivial outer context\n");
+ }
+ if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
+ if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
+ if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
+ if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
+ if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
+ if (num_stack_slots_ > 0) { Indent(n1, "// ");
+ PrintF("%d stack slots\n", num_stack_slots_); }
+ if (num_heap_slots_ > 0) { Indent(n1, "// ");
+ PrintF("%d heap slots\n", num_heap_slots_); }
+
+ // Print locals.
+ PrettyPrinter printer;
+ Indent(n1, "// function var\n");
+ if (function_ != NULL) {
+ PrintVar(&printer, n1, function_);
+ }
+
+ Indent(n1, "// temporary vars\n");
+ for (int i = 0; i < temps_.length(); i++) {
+ PrintVar(&printer, n1, temps_[i]);
+ }
+
+ Indent(n1, "// local vars\n");
+ for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) {
+ Variable* var = reinterpret_cast<Variable*>(p->value);
+ PrintVar(&printer, n1, var);
+ }
+
+ Indent(n1, "// nonlocal vars\n");
+ for (int i = 0; i < nonlocals_.length(); i++)
+ PrintVar(&printer, n1, nonlocals_[i]);
+
+ // Print inner scopes (disable by providing negative n).
+ if (n >= 0) {
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ PrintF("\n");
+ inner_scopes_[i]->Print(n1);
+ }
+ }
+
+ Indent(n0, "}\n");
+}
+#endif // DEBUG
+
+
+// Returns a DYNAMIC (runtime-lookup) variable for 'name', reusing an
+// existing non-local with the same name when possible.
+Variable* Scope::NonLocal(Handle<String> name) {
+ // Space optimization: reuse existing non-local with the same name.
+ for (int i = 0; i < nonlocals_.length(); i++) {
+ Variable* var = nonlocals_[i];
+ if (var->name().is_identical_to(name)) {
+ ASSERT(var->mode() == Variable::DYNAMIC);
+ return var;
+ }
+ }
+
+ // Otherwise create a new non-local and add it to the list.
+ Variable* var = new Variable(
+ NULL /* we don't know the scope */,
+ name, Variable::DYNAMIC, true, false);
+ nonlocals_.Add(var);
+
+ // Allocate it by giving it a dynamic lookup.
+ var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
+
+ return var;
+}
+
+
+// Lookup a variable starting with this scope. The result is either
+// the statically resolved (local!) variable belonging to an outer scope,
+// or NULL. It may be NULL because a) we couldn't find a variable, or b)
+// because the variable is just a guess (and may be shadowed by another
+// variable that is introduced dynamically via an 'eval' call or a 'with'
+// statement).
+Variable* Scope::LookupRecursive(Handle<String> name, bool inner_lookup) {
+ // If we find a variable, but the current scope calls 'eval', the found
+ // variable may not be the correct one (the 'eval' may introduce a
+ // property with the same name). In that case, remember that the variable
+ // found is just a guess.
+ bool guess = scope_calls_eval_;
+
+ // Try to find the variable in this scope.
+ Variable* var = Lookup(name);
+
+ if (var != NULL) {
+ // We found a variable. If this is not an inner lookup, we are done.
+ // (Even if there is an 'eval' in this scope which introduces the
+ // same variable again, the resulting variable remains the same.
+ // Note that enclosing 'with' statements are handled at the call site.)
+ if (!inner_lookup)
+ return var;
+
+ } else {
+ // We did not find a variable locally. Check against the function variable,
+ // if any. We can do this for all scopes, since the function variable is
+ // only present - if at all - for function scopes.
+ //
+ // This lookup corresponds to a lookup in the "intermediate" scope sitting
+ // between this scope and the outer scope. (ECMA-262, 3rd., requires that
+ // the name of named function literal is kept in an intermediate scope
+ // in between this scope and the next outer scope.)
+ if (function_ != NULL && function_->name().is_identical_to(name)) {
+ var = function_;
+
+ } else if (outer_scope_ != NULL) {
+ var = outer_scope_->LookupRecursive(name, true /* inner lookup */);
+ // We may have found a variable in an outer scope. However, if
+ // the current scope is inside a 'with', the actual variable may
+ // be a property introduced via the 'with' statement. Then, the
+ // variable we may have found is just a guess.
+ if (scope_inside_with_)
+ guess = true;
+ }
+
+ // If we did not find a variable, we are done.
+ if (var == NULL)
+ return NULL;
+ }
+
+ ASSERT(var != NULL);
+
+ // If this is a lookup from an inner scope, mark the variable.
+ if (inner_lookup)
+ var->is_accessed_from_inner_scope_ = true;
+
+ // If the variable we have found is just a guess, invalidate the result.
+ if (guess)
+ var = NULL;
+
+ return var;
+}
+
+
+// Binds 'proxy' to a variable, resolving it against this scope chain;
+// falls back to a dynamic (runtime) lookup or a global variable.
+void Scope::ResolveVariable(Scope* global_scope, VariableProxy* proxy) {
+ ASSERT(global_scope == NULL || global_scope->is_global_scope());
+
+ // If the proxy is already resolved there's nothing to do
+ // (functions and consts may be resolved by the parser).
+ if (proxy->var() != NULL) return;
+
+ // Otherwise, try to resolve the variable.
+ Variable* var = LookupRecursive(proxy->name(), false);
+
+ if (proxy->inside_with()) {
+ // If we are inside a local 'with' statement, all bets are off
+ // and we cannot resolve the proxy to a local variable even if
+ // we found an outer matching variable.
+ // Note that we must do a lookup anyway, because if we find one,
+ // we must mark that variable as potentially accessed from this
+ // inner scope (the property may not be in the 'with' object).
+ var = NonLocal(proxy->name());
+
+ } else {
+ // We are not inside a local 'with' statement.
+
+ if (var == NULL) {
+ // We did not find the variable. We have a global variable
+ // if we are in the global scope (we know already that we
+ // are outside a 'with' statement) or if there is no way
+ // that the variable might be introduced dynamically (through
+ // a local or outer eval() call, or an outer 'with' statement),
+ // or we don't know about the outer scope (because we are
+ // in an eval scope).
+ if (!is_global_scope() &&
+ (is_eval_scope() || outer_scope_calls_eval_ ||
+ scope_calls_eval_ || scope_inside_with_)) {
+ // We must look up the variable at runtime, and we don't
+ // know anything else.
+ var = NonLocal(proxy->name());
+
+ } else {
+ // We must have a global variable.
+ ASSERT(global_scope != NULL);
+ var = new Variable(global_scope, proxy->name(),
+ Variable::DYNAMIC, true, false);
+ // Ideally we simply rewrite these variables into property
+ // accesses. Unfortunately, we cannot do this here at the
+ // moment because then we can't differentiate between
+ // global variable ('x') and global property ('this.x') access.
+ // If 'x' doesn't exist, the former leads to an error, while the
+ // latter returns undefined. Sigh...
+ // var->rewrite_ = new Property(new Literal(env_->global()),
+ // new Literal(proxy->name()));
+ }
+ }
+ }
+
+ proxy->BindTo(var);
+}
+
+
+// Resolves all unresolved proxies in this scope and all inner scopes.
+void Scope::ResolveVariablesRecursively(Scope* global_scope) {
+ ASSERT(global_scope == NULL || global_scope->is_global_scope());
+
+ // Resolve unresolved variables for this scope.
+ for (int i = 0; i < unresolved_.length(); i++) {
+ ResolveVariable(global_scope, unresolved_[i]);
+ }
+
+ // Resolve unresolved variables for inner scopes.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ inner_scopes_[i]->ResolveVariablesRecursively(global_scope);
+ }
+}
+
+
+// Propagates 'eval' usage information down the scope tree and forced
+// eager compilation back up from inner scopes. Returns true if this
+// scope or any inner scope calls 'eval'.
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval) {
+ if (outer_scope_calls_eval) {
+ outer_scope_calls_eval_ = true;
+ }
+
+ bool b = scope_calls_eval_ || outer_scope_calls_eval_;
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* inner_scope = inner_scopes_[i];
+ if (inner_scope->PropagateScopeInfo(b)) {
+ inner_scope_calls_eval_ = true;
+ }
+ if (inner_scope->force_eager_compilation_) {
+ force_eager_compilation_ = true;
+ }
+ }
+
+ return scope_calls_eval_ || inner_scope_calls_eval_;
+}
+
+
+// Decides whether 'var' must be allocated at all, first recording an
+// extra access if it might be reached via eval, 'with', an inner scope,
+// or as a global.
+bool Scope::MustAllocate(Variable* var) {
+ // Give var a read/write use if there is a chance it might be
+ // accessed via an eval() call, or if it is a global variable.
+ // This is only possible if the variable has a visible name.
+ if ((var->is_this() || var->name()->length() > 0) &&
+ (var->is_accessed_from_inner_scope_ ||
+ scope_calls_eval_ || inner_scope_calls_eval_ ||
+ scope_contains_with_ || var->is_global())) {
+ var->var_uses()->RecordAccess(1);
+ }
+ return var->var_uses()->is_used();
+}
+
+
+bool Scope::MustAllocateInContext(Variable* var) {
+ // If var is accessed from an inner scope, or if there is a
+ // possibility that it might be accessed from the current or
+ // an inner scope (through an eval() call), it must be allocated
+ // in the context.
+ // Exceptions: Global variables and temporary variables must
+ // never be allocated in the (FixedArray part of the) context.
+ return
+ var->mode() != Variable::TEMPORARY &&
+ (var->is_accessed_from_inner_scope_ ||
+ scope_calls_eval_ || inner_scope_calls_eval_ ||
+ scope_contains_with_ || var->is_global());
+}
+
+
+// True if one of the declared parameters is named 'arguments'.
+bool Scope::HasArgumentsParameter() {
+ for (int i = 0; i < params_.length(); i++) {
+ if (params_[i]->name().is_identical_to(Factory::arguments_symbol()))
+ return true;
+ }
+ return false;
+}
+
+
+// Assigns 'var' the next free stack (LOCAL) slot.
+void Scope::AllocateStackSlot(Variable* var) {
+ var->rewrite_ = new Slot(var, Slot::LOCAL, num_stack_slots_++);
+}
+
+
+// Assigns 'var' the next free slot in the heap-allocated context.
+void Scope::AllocateHeapSlot(Variable* var) {
+ var->rewrite_ = new Slot(var, Slot::CONTEXT, num_heap_slots_++);
+}
+
+
+// Allocates the parameters of this function scope, either directly
+// (PARAMETER/CONTEXT slots) or as rewrites into '.arguments[i]' when
+// the arguments object is needed.
+void Scope::AllocateParameterLocals() {
+ ASSERT(is_function_scope());
+ Variable* arguments = Lookup(Factory::arguments_symbol());
+ ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
+ if (MustAllocate(arguments) && !HasArgumentsParameter()) {
+ // 'arguments' is used. Unless there is also a parameter called
+ // 'arguments', we must be conservative and access all parameters via
+ // the arguments object: The i'th parameter is rewritten into
+ // '.arguments[i]' (*). If we have a parameter named 'arguments', a
+ // (new) value is always assigned to it via the function
+ // invocation. Then 'arguments' denotes that specific parameter value
+ // and cannot be used to access the parameters, which is why we don't
+ // need to rewrite in that case.
+ //
+ // (*) Instead of having a parameter called 'arguments', we may have an
+ // assignment to 'arguments' in the function body, at some arbitrary
+ // point in time (possibly through an 'eval()' call!). After that
+ // assignment any re-write of parameters would be invalid (was bug
+ // 881452). Thus, we introduce a shadow '.arguments'
+ // variable which also points to the arguments object. For rewrites we
+ // use '.arguments' which remains valid even if we assign to
+ // 'arguments'. To summarize: If we need to rewrite, we allocate an
+ // 'arguments' object dynamically upon function invocation. The compiler
+ // introduces 2 local variables 'arguments' and '.arguments', both of
+ // which originally point to the arguments object that was
+ // allocated. All parameters are rewritten into property accesses via
+ // the '.arguments' variable. Thus, any changes to properties of
+ // 'arguments' are reflected in the variables and vice versa. If the
+ // 'arguments' variable is changed, '.arguments' still points to the
+ // correct arguments object and the rewrites still work.
+
+ // We are using 'arguments'. Tell the code generator that it needs to
+ // allocate the arguments object by setting 'arguments_'.
+ arguments_ = new VariableProxy(Factory::arguments_symbol(), false, false);
+ arguments_->BindTo(arguments);
+
+ // We also need the '.arguments' shadow variable. Declare it and create
+ // and bind the corresponding proxy. It's ok to declare it only now
+ // because it's a local variable that is allocated after the parameters
+ // have been allocated.
+ //
+ // Note: This is "almost" a temporary variable but we cannot use
+ // NewTemporary() because the mode needs to be INTERNAL since this
+ // variable may be allocated in the heap-allocated context (temporaries
+ // are never allocated in the context).
+ Variable* arguments_shadow =
+ new Variable(this, Factory::arguments_shadow_symbol(),
+ Variable::INTERNAL, true, false);
+ arguments_shadow_ =
+ new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
+ arguments_shadow_->BindTo(arguments_shadow);
+ temps_.Add(arguments_shadow);
+
+ // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
+ for (int i = 0; i < params_.length(); i++) {
+ Variable* var = params_[i];
+ ASSERT(var->scope() == this);
+ if (MustAllocate(var)) {
+ if (MustAllocateInContext(var)) {
+ // It is ok to set this only now, because arguments is a local
+ // variable that is allocated after the parameters have been
+ // allocated.
+ arguments_shadow->is_accessed_from_inner_scope_ = true;
+ }
+ var->rewrite_ =
+ new Property(arguments_shadow_,
+ new Literal(Handle<Object>(Smi::FromInt(i))),
+ kNoPosition);
+ arguments_shadow->var_uses()->RecordUses(var->var_uses());
+ }
+ }
+
+ } else {
+ // The arguments object is not used, so we can access parameters directly.
+ // The same parameter may occur multiple times in the parameters_ list.
+ // If it does, and if it is not copied into the context object, it must
+ // receive the highest parameter index for that parameter; thus iteration
+ // order is relevant!
+ for (int i = 0; i < params_.length(); i++) {
+ Variable* var = params_[i];
+ ASSERT(var->scope() == this);
+ if (MustAllocate(var)) {
+ if (MustAllocateInContext(var)) {
+ ASSERT(var->rewrite_ == NULL ||
+ (var->slot() != NULL && var->slot()->type() == Slot::CONTEXT));
+ if (var->rewrite_ == NULL) {
+ // Only set the heap allocation if the parameter has not
+ // been allocated yet.
+ AllocateHeapSlot(var);
+ }
+ } else {
+ ASSERT(var->rewrite_ == NULL ||
+ (var->slot() != NULL &&
+ var->slot()->type() == Slot::PARAMETER));
+ // Set the parameter index always, even if the parameter
+ // was seen before! (We need to access the actual parameter
+ // supplied for the last occurrence of a multiply declared
+ // parameter.)
+ var->rewrite_ = new Slot(var, Slot::PARAMETER, i);
+ }
+ }
+ }
+ }
+}
+
+
+// Allocates a stack or context slot for 'var' if it is used and has not
+// been allocated a rewrite yet.
+void Scope::AllocateNonParameterLocal(Variable* var) {
+ ASSERT(var->scope() == this);
+ ASSERT(var->rewrite_ == NULL ||
+ (!var->IsVariable(Factory::result_symbol())) ||
+ (var->slot() == NULL || var->slot()->type() != Slot::LOCAL));
+ if (MustAllocate(var) && var->rewrite_ == NULL) {
+ if (MustAllocateInContext(var)) {
+ AllocateHeapSlot(var);
+ } else {
+ AllocateStackSlot(var);
+ }
+ }
+}
+
+
+// Declares FLAG_usage_computation, consulted below.
+DECLARE_bool(usage_computation);
+
+// Allocates all non-parameter locals: temporaries first, then declared
+// locals, then - last, see note below - the function name variable.
+void Scope::AllocateNonParameterLocals() {
+ // Each variable occurs exactly once in the locals_ list; all
+ // variables that have no rewrite yet are non-parameter locals.
+
+ // Sort them according to use such that the locals with more uses
+ // get allocated first.
+ if (FLAG_usage_computation) {
+ // This is currently not implemented.
+ }
+
+ for (int i = 0; i < temps_.length(); i++) {
+ AllocateNonParameterLocal(temps_[i]);
+ }
+
+ for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) {
+ Variable* var = reinterpret_cast<Variable*>(p->value);
+ AllocateNonParameterLocal(var);
+ }
+
+ // Note: For now, function_ must be allocated at the very end. If
+ // it gets allocated in the context, it must be the last slot in the
+ // context, because of the current ScopeInfo implementation (see
+ // ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
+ if (function_ != NULL) {
+ AllocateNonParameterLocal(function_);
+ }
+}
+
+
+// Allocates slots for this scope and all inner scopes (inner scopes
+// first), and decides whether a heap-allocated context is required.
+void Scope::AllocateVariablesRecursively() {
+ // The number of slots required for variables.
+ num_stack_slots_ = 0;
+ num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
+
+ // Allocate variables for inner scopes.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ inner_scopes_[i]->AllocateVariablesRecursively();
+ }
+
+ // Allocate variables for this scope.
+ // Parameters must be allocated first, if any.
+ if (is_function_scope()) AllocateParameterLocals();
+ AllocateNonParameterLocals();
+
+ // Allocate context if necessary.
+ bool must_have_local_context = false;
+ if (scope_calls_eval_ || scope_contains_with_) {
+ // The context for the eval() call or 'with' statement in this scope.
+ // Unless we are in the global or an eval scope, we need a local
+ // context even if we didn't statically allocate any locals in it,
+ // and the compiler will access the context variable. If we are
+ // not in an inner scope, the scope is provided from the outside.
+ must_have_local_context = is_function_scope();
+ }
+
+ // If we didn't allocate any locals in the local context, then we only
+ // need the minimal number of slots if we must have a local context.
+ if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
+ !must_have_local_context) {
+ num_heap_slots_ = 0;
+ }
+
+ // Allocation done.
+ ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SCOPES_H_
+#define V8_SCOPES_H_
+
+#include "ast.h"
+#include "hashmap.h"
+
+namespace v8 { namespace internal {
+
+
+// A hash map to support fast local variable declaration and lookup.
+
+class LocalsMap: public HashMap {
+ public:
+  LocalsMap();
+
+  // Dummy constructor. This constructor doesn't set up the map
+  // properly so don't use it unless you have a good reason.
+  explicit LocalsMap(bool gotta_love_static_overloading);
+
+  virtual ~LocalsMap();
+
+  // Declare a variable with the given name and mode in scope.
+  Variable* Declare(Scope* scope, Handle<String> name, Variable::Mode mode,
+                    bool is_valid_LHS, bool is_this);
+
+  // Return the variable declared under name, or NULL if not found.
+  Variable* Lookup(Handle<String> name);
+};
+
+
+// Global invariants after AST construction: Each reference (i.e. identifier)
+// to a JavaScript variable (including global properties) is represented by a
+// VariableProxy node. Immediately after AST construction and before variable
+// allocation, most VariableProxy nodes are "unresolved", i.e. not bound to a
+// corresponding variable (though some are bound during parse time). Variable
+// allocation binds each unresolved VariableProxy to one Variable and assigns
+// a location. Note that many VariableProxy nodes may refer to the same Java-
+// Script variable.
+
+class Scope: public ZoneObject {
+ public:
+  // ---------------------------------------------------------------------------
+  // Construction
+
+  enum Type {
+    EVAL_SCOPE,      // the top-level scope for an 'eval' source
+    FUNCTION_SCOPE,  // the top-level scope for a function
+    GLOBAL_SCOPE     // the top-level scope for a program or a top-level eval
+  };
+
+  Scope();
+  Scope(Scope* outer_scope, Type type);
+
+  virtual ~Scope() { }
+
+  // The scope name is only used for printing/debugging.
+  void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
+
+  void Initialize(bool inside_with);
+
+
+  // ---------------------------------------------------------------------------
+  // Declarations
+
+  // Lookup a variable in this scope. Returns the variable or NULL if not found.
+  virtual Variable* Lookup(Handle<String> name);
+
+  // Declare the function variable for a function literal. This variable
+  // is in an intermediate scope between this function scope and the
+  // outer scope. Only possible for function scopes; at most one variable.
+  Variable* DeclareFunctionVar(Handle<String> name);
+
+  // Declare a variable in this scope. If the variable has been
+  // declared before, the previously declared variable is returned.
+  virtual Variable* Declare(Handle<String> name, Variable::Mode mode);
+
+  // Add a parameter to the parameter list. The parameter must have been
+  // declared via Declare. The same parameter may occur more than once in
+  // the parameter list; they must be added in source order, from left to
+  // right.
+  void AddParameter(Variable* var);
+
+  // Create a new unresolved variable.
+  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with);
+
+  // Remove an unresolved variable. During parsing, an unresolved variable
+  // may have been added optimistically, but then only the variable name
+  // was used (typically for labels). If the variable was not declared, the
+  // addition introduced a new unresolved variable which may end up being
+  // allocated globally as a "ghost" variable. RemoveUnresolved removes
+  // such a variable again if it was added; otherwise this is a no-op.
+  void RemoveUnresolved(VariableProxy* var);
+
+  // Creates a new temporary variable in this scope and binds a proxy to it.
+  // The name is only used for printing and cannot be used to find the variable.
+  // In particular, the only way to get hold of the temporary is by keeping the
+  // VariableProxy* around.
+  virtual VariableProxy* NewTemporary(Handle<String> name);
+
+  // Adds the specific declaration node to the list of declarations in
+  // this scope. The declarations are processed as part of entering
+  // the scope; see codegen.cc:ProcessDeclarations.
+  void AddDeclaration(Declaration* declaration);
+
+  // ---------------------------------------------------------------------------
+  // Illegal redeclaration support.
+
+  // Set an expression node that will be executed when the scope is
+  // entered. We only keep track of one illegal redeclaration node per
+  // scope - the first one - so if you try to set it multiple times
+  // the additional requests will be silently ignored.
+  void SetIllegalRedeclaration(Expression* expression);
+
+  // Visit the illegal redeclaration expression. Do not call if the
+  // scope doesn't have an illegal redeclaration node.
+  void VisitIllegalRedeclaration(Visitor* visitor);
+
+  // Check if the scope has (at least) one illegal redeclaration.
+  bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
+
+
+  // ---------------------------------------------------------------------------
+  // Scope-specific info.
+
+  // Inform the scope that the corresponding code contains a with statement.
+  void RecordWithStatement() { scope_contains_with_ = true; }
+
+  // Inform the scope that the corresponding code contains an eval call.
+  void RecordEvalCall() { scope_calls_eval_ = true; }
+
+
+  // ---------------------------------------------------------------------------
+  // Predicates.
+
+  // Specific scope types.
+  bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
+  bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
+  bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
+
+  // The scope immediately surrounding this scope, or NULL.
+  Scope* outer_scope() const { return outer_scope_; }
+
+
+  // ---------------------------------------------------------------------------
+  // Accessors.
+
+  // The variable corresponding to the (function) receiver.
+  VariableProxy* receiver() const { return receiver_; }
+
+  // The variable holding the function literal for named function
+  // literals, or NULL.
+  // Only valid for function scopes.
+  Variable* function() const {
+    ASSERT(is_function_scope());
+    return function_;
+  }
+
+  // Parameters. The left-most parameter has index 0.
+  // Only valid for function scopes.
+  Variable* parameter(int index) const {
+    ASSERT(is_function_scope());
+    return params_[index];
+  }
+
+  int num_parameters() const { return params_.length(); }
+
+  // The local variable 'arguments' if we need to allocate it; NULL otherwise.
+  // If arguments() exist, arguments_shadow() exists, too.
+  VariableProxy* arguments() const { return arguments_; }
+
+  // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
+  // If arguments_shadow() exist, arguments() exists, too.
+  VariableProxy* arguments_shadow() const { return arguments_shadow_; }
+
+  // Declarations list.
+  ZoneList<Declaration*>* declarations() { return &decls_; }
+
+
+
+  // ---------------------------------------------------------------------------
+  // Variable allocation.
+
+  // Collect all used locals in this scope.
+  template<class Allocator>
+  void CollectUsedVariables(List<Variable*, Allocator>* locals);
+
+  // Resolve and fill in the allocation information for all variables in
+  // this scopes. Must be called *after* all scopes have been processed
+  // (parsed) to ensure that unresolved variables can be resolved properly.
+  void AllocateVariables();
+
+  // Result of variable allocation.
+  int num_stack_slots() const { return num_stack_slots_; }
+  int num_heap_slots() const { return num_heap_slots_; }
+
+  // True if this scope supports calling eval (has a properly
+  // initialized context).
+  bool SupportsEval() const;
+
+  // Make sure this scope and all outer scopes are eagerly compiled.
+  void ForceEagerCompilation() { force_eager_compilation_ = true; }
+
+  // Determine if we can use lazy compilation for this scope.
+  bool AllowsLazyCompilation() const;
+
+  // True if the outer context of this scope is always the global context.
+  bool HasTrivialOuterContext() const;
+
+  // The number of contexts between this and scope; zero if this == scope.
+  int ContextChainLength(Scope* scope);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging.
+
+#ifdef DEBUG
+  void Print(int n = 0);  // n = indentation; n < 0 => don't print recursively
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Implementation.
+ protected:
+  friend class ParserFactory;
+
+  // Scope tree.
+  Scope* outer_scope_;  // the immediately enclosing outer scope, or NULL
+  ZoneList<Scope*> inner_scopes_;  // the immediately enclosed inner scopes
+
+  // The scope type.
+  Type type_;
+
+  // Debugging support.
+  Handle<String> scope_name_;
+
+  // The variables declared in this scope:
+  // all user-declared variables (incl. parameters)
+  LocalsMap locals_;
+  // compiler-allocated (user-invisible) temporaries
+  ZoneList<Variable*> temps_;
+  // parameter list in source order
+  ZoneList<Variable*> params_;
+  // variables that must be looked up dynamically
+  ZoneList<Variable*> nonlocals_;
+  // unresolved variables referred to from this scope
+  ZoneList<VariableProxy*> unresolved_;
+  // declarations
+  ZoneList<Declaration*> decls_;
+  // convenience variable
+  VariableProxy* receiver_;
+  // function variable, if any; function scopes only
+  Variable* function_;
+  // convenience variable; function scopes only
+  VariableProxy* arguments_;
+  // convenience variable; function scopes only
+  VariableProxy* arguments_shadow_;
+
+  // Illegal redeclaration.
+  Expression* illegal_redecl_;
+
+  // Scope-specific information.
+  bool scope_inside_with_;  // this scope is inside a 'with' of some outer scope
+  bool scope_contains_with_;  // this scope contains a 'with' statement
+  bool scope_calls_eval_;  // this scope contains an 'eval' call
+
+  // Computed via PropagateScopeInfo.
+  bool outer_scope_calls_eval_;
+  bool inner_scope_calls_eval_;
+  bool force_eager_compilation_;
+
+  // Computed via AllocateVariables; function scopes only.
+  int num_stack_slots_;
+  int num_heap_slots_;
+
+  // Create a non-local variable with a given name.
+  // These variables are looked up dynamically at runtime.
+  Variable* NonLocal(Handle<String> name);
+
+  // Variable resolution.
+  Variable* LookupRecursive(Handle<String> name, bool inner_lookup);
+  void ResolveVariable(Scope* global_scope, VariableProxy* proxy);
+  void ResolveVariablesRecursively(Scope* global_scope);
+
+  // Scope analysis.
+  bool PropagateScopeInfo(bool outer_scope_calls_eval);
+  bool HasTrivialContext() const;
+
+  // Predicates.
+  bool MustAllocate(Variable* var);
+  bool MustAllocateInContext(Variable* var);
+  bool HasArgumentsParameter();
+
+  // Variable allocation.
+  void AllocateStackSlot(Variable* var);
+  void AllocateHeapSlot(Variable* var);
+  void AllocateParameterLocals();
+  void AllocateNonParameterLocal(Variable* var);
+  void AllocateNonParameterLocals();
+  void AllocateVariablesRecursively();
+};
+
+
+// An inert scope: Lookup, Declare, NewUnresolved and NewTemporary all
+// return NULL, and the constructor makes the scope its own outer scope.
+class DummyScope : public Scope {
+ public:
+  DummyScope() {
+    outer_scope_ = this;
+  }
+
+  virtual Variable* Lookup(Handle<String> name) { return NULL; }
+  virtual Variable* Declare(Handle<String> name, Variable::Mode mode) {
+    return NULL;
+  }
+  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
+    return NULL;
+  }
+  virtual VariableProxy* NewTemporary(Handle<String> name) { return NULL; }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_SCOPES_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "ic-inl.h"
+#include "natives.h"
+#include "platform.h"
+#include "runtime.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "v8threads.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+DEFINE_bool(debug_serialization, false,
+ "write debug information into the snapshot.");
+#endif
+
+
+// Encoding: a RelativeAddress must be able to fit in a pointer:
+// it is encoded as an Address with (from MS to LS bits):
+// 27 bits identifying a word in the space, in one of three formats:
+// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
+// - NEW space: 27 bits of word offset
+// - LO space: 27 bits of page number
+// 3 bits to encode the AllocationSpace
+// 2 bits identifying this as a HeapObject
+
+// Tag bits identifying the allocation space (see encoding comment above).
+const int kSpaceShift = kHeapObjectTagSize;
+const int kSpaceBits = kSpaceTagSize;
+const int kSpaceMask = kSpaceTagMask;
+
+// Word offset within a page (MAP and OLD/CODE paged spaces).
+const int kOffsetShift = kSpaceShift + kSpaceBits;
+const int kOffsetBits = 11;
+const int kOffsetMask = (1 << kOffsetBits) - 1;
+
+// Page number (paged spaces).
+const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
+const int kPageShift = kOffsetShift + kOffsetBits;
+const int kPageMask = (1 << kPageBits) - 1;
+
+// Combined page/offset field (NEW space word offset, LO space page number).
+const int kPageAndOffsetShift = kOffsetShift;
+const int kPageAndOffsetBits = kPageBits + kOffsetBits;
+const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
+
+
+// Extract the allocation space tag from an encoded relative address.
+static inline AllocationSpace Space(Address addr) {
+  int tag = reinterpret_cast<int>(addr) >> kSpaceShift;
+  return static_cast<AllocationSpace>(tag & kSpaceMask);
+}
+
+
+// Extract the page number from an encoded relative address (paged spaces).
+static inline int PageIndex(Address addr) {
+  int shifted = reinterpret_cast<int>(addr) >> kPageShift;
+  return shifted & kPageMask;
+}
+
+
+// Extract the word offset within a page and scale it back to bytes.
+static inline int PageOffset(Address addr) {
+  int words = (reinterpret_cast<int>(addr) >> kOffsetShift) & kOffsetMask;
+  return words << kObjectAlignmentBits;
+}
+
+
+// New space uses the combined page+offset field as a single word offset;
+// scale it back to bytes.
+static inline int NewSpaceOffset(Address addr) {
+  int words =
+      (reinterpret_cast<int>(addr) >> kPageAndOffsetShift) & kPageAndOffsetMask;
+  return words << kObjectAlignmentBits;
+}
+
+
+// Large object space encodes the page number in the combined field.
+static inline int LargeObjectIndex(Address addr) {
+  int shifted = reinterpret_cast<int>(addr) >> kPageAndOffsetShift;
+  return shifted & kPageAndOffsetMask;
+}
+
+
+// A RelativeAddress encodes a heap address that is independent of
+// the actual memory addresses in real heap. The general case (for the
+// OLD, CODE and MAP spaces) is as a (space id, page number, page offset)
+// triple. The NEW space has page number == 0, because there are no
+// pages. The LARGE_OBJECT space has page offset = 0, since there is
+// exactly one object per page. RelativeAddresses are encodable as
+// Addresses, so that they can replace the map() pointers of
+// HeapObjects. The encoded Addresses are also encoded as HeapObjects
+// and allow for marking (is_marked() see mark(), clear_mark()...) as
+// used by the Mark-Compact collector.
+
+class RelativeAddress {
+ public:
+  RelativeAddress(AllocationSpace space, int page_index, int page_offset)
+      : space_(space), page_index_(page_index), page_offset_(page_offset) {}
+
+  // Return the encoding of 'this' as an Address. Decode with constructor.
+  Address Encode() const;
+
+  AllocationSpace space() const { return space_; }
+  int page_index() const { return page_index_; }
+  int page_offset() const { return page_offset_; }
+
+  // True for spaces whose encoding carries a separate page number and
+  // page offset (CODE, OLD and MAP).
+  bool in_paged_space() const {
+    return space_ == CODE_SPACE || space_ == OLD_SPACE || space_ == MAP_SPACE;
+  }
+
+  // Advance the offset within the current page by 'offset' bytes.
+  void next_address(int offset) { page_offset_ += offset; }
+  // Move to the start of the next page, positioned at 'init_offset'.
+  void next_page(int init_offset = 0) {
+    page_index_++;
+    page_offset_ = init_offset;
+  }
+
+#ifdef DEBUG
+  void Verify();
+#endif
+
+ private:
+  AllocationSpace space_;
+  int page_index_;
+  int page_offset_;
+};
+
+
+// Pack (space, page_index, page_offset) into a tagged Address per the
+// encoding described at the top of this file.
+Address RelativeAddress::Encode() const {
+  ASSERT(page_index_ >= 0);
+  int word_offset = 0;
+  int result = 0;
+  switch (space_) {
+    case MAP_SPACE:
+    case OLD_SPACE:
+    case CODE_SPACE:
+      // Paged spaces: page number plus word offset within the page.
+      ASSERT_EQ(0, page_index_ & ~kPageMask);
+      word_offset = page_offset_ >> kObjectAlignmentBits;
+      ASSERT_EQ(0, word_offset & ~kOffsetMask);
+      result = (page_index_ << kPageShift) | (word_offset << kOffsetShift);
+      break;
+    case NEW_SPACE:
+      // New space: a single word offset; there are no pages.
+      ASSERT_EQ(0, page_index_);
+      word_offset = page_offset_ >> kObjectAlignmentBits;
+      ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask);
+      result = word_offset << kPageAndOffsetShift;
+      break;
+    case LO_SPACE:
+      // Large object space: a page number; exactly one object per page.
+      ASSERT_EQ(0, page_offset_);
+      ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
+      result = page_index_ << kPageAndOffsetShift;
+      break;
+  }
+  // OR in AllocationSpace and kHeapObjectTag
+  ASSERT_EQ(0, space_ & ~kSpaceMask);
+  result |= (space_ << kSpaceShift) | kHeapObjectTag;
+  return reinterpret_cast<Address>(result);
+}
+
+
+#ifdef DEBUG
+// Check the per-space invariants of this relative address.
+void RelativeAddress::Verify() {
+  ASSERT(page_offset_ >= 0 && page_index_ >= 0);
+  switch (space_) {
+    case MAP_SPACE:
+    case OLD_SPACE:
+    case CODE_SPACE:
+      // Paged spaces: the offset must lie within a page's object area.
+      ASSERT(Page::kObjectStartOffset <= page_offset_ &&
+             page_offset_ <= Page::kPageSize);
+      break;
+    case NEW_SPACE:
+      // New space has no pages.
+      ASSERT(page_index_ == 0);
+      break;
+    case LO_SPACE:
+      // Large object space: one object per page, offset always 0.
+      ASSERT(page_offset_ == 0);
+      break;
+  }
+}
+#endif
+
+// A SimulatedHeapSpace simulates the allocation of objects in a page in
+// the heap. It uses linear allocation - that is, it doesn't simulate the
+// use of a free list. This simulated
+// allocation must exactly match that done by Heap.
+
+class SimulatedHeapSpace {
+ public:
+  // The default constructor initializes to an invalid state.
+  SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {}
+
+  // Sets 'this' to the first address in 'space' that would be
+  // returned by allocation in an empty heap.
+  void InitEmptyHeap(AllocationSpace space);
+
+  // Sets 'this' to the next address in 'space' that would be returned
+  // by allocation in the current heap. Intended only for testing
+  // serialization and deserialization in the current address space.
+  void InitCurrentHeap(AllocationSpace space);
+
+  // Returns the RelativeAddress where the next
+  // object of 'size' bytes will be allocated, and updates 'this' to
+  // point to the next free address beyond that object.
+  RelativeAddress Allocate(int size);
+
+ private:
+  // The next free address in the simulated space.
+  RelativeAddress current_;
+};
+
+
+// Position the simulated space at the first address an empty heap
+// would hand out: the first object slot of page 0 for paged spaces,
+// offset/page 0 for new space and large object space.
+void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
+  int start_offset = 0;
+  if (space == MAP_SPACE || space == OLD_SPACE || space == CODE_SPACE) {
+    start_offset = Page::kObjectStartOffset;
+  }
+  current_ = RelativeAddress(space, 0, start_offset);
+}
+
+
+// Position the simulated space at the real heap's current allocation
+// point for 'space'.
+void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
+  switch (space) {
+    case MAP_SPACE:
+    case OLD_SPACE:
+    case CODE_SPACE: {
+      PagedSpace* ps;
+      if (space == MAP_SPACE) {
+        ps = Heap::map_space();
+      } else if (space == OLD_SPACE) {
+        ps = Heap::old_space();
+      } else {
+        ASSERT(space == CODE_SPACE);
+        ps = Heap::code_space();
+      }
+      // Find the index of the page containing the allocation top.
+      Address top = ps->top();
+      Page* top_page = Page::FromAllocationTop(top);
+      int page_index = 0;
+      PageIterator it(ps, PageIterator::PAGES_IN_USE);
+      while (it.has_next()) {
+        if (it.next() == top_page) break;
+        page_index++;
+      }
+      current_ = RelativeAddress(space, page_index, top_page->Offset(top));
+      break;
+    }
+    case NEW_SPACE:
+      // New space has no pages; use the byte offset from the space start.
+      current_ =
+          RelativeAddress(space, 0, Heap::NewSpaceTop() - Heap::NewSpaceStart());
+      break;
+    case LO_SPACE:
+      // One object per page: count the large objects already allocated.
+      int page_index = 0;
+      for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) {
+        page_index++;
+      }
+      current_ = RelativeAddress(space, page_index, 0);
+      break;
+  }
+}
+
+
+// Simulate a linear allocation of 'size' bytes and return the address
+// at which the object would be placed.
+RelativeAddress SimulatedHeapSpace::Allocate(int size) {
+#ifdef DEBUG
+  current_.Verify();
+#endif
+  // Round the request up to the allocation granularity.
+  int alloc_size = OBJECT_SIZE_ALIGN(size);
+  if (current_.in_paged_space() &&
+      current_.page_offset() + alloc_size > Page::kPageSize) {
+    // The object does not fit in the current page; start a new one.
+    ASSERT(alloc_size <= Page::kMaxHeapObjectSize);
+    current_.next_page(Page::kObjectStartOffset);
+  }
+  RelativeAddress result = current_;
+  if (current_.space() == LO_SPACE) {
+    // Large object space allocates exactly one object per page.
+    current_.next_page();
+  } else {
+    current_.next_address(alloc_size);
+  }
+#ifdef DEBUG
+  current_.Verify();
+  result.Verify();
+#endif
+  return result;
+}
+
+// -----------------------------------------------------------------------------
+// Coding of external references.
+
+// The encoding of an external reference: type code in the high 16 bits,
+// id in the low 16 bits.
+static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
+  uint32_t type_bits = static_cast<uint32_t>(type) << 16;
+  return type_bits | id;
+}
+
+
+// Return the counter's backing int, or a shared dummy cell when the
+// counter is disabled (e.g. deserializing without counters set up).
+static int* GetInternalPointer(StatsCounter* counter) {
+  static int dummy_counter = 0;
+  if (!counter->Enabled()) return &dummy_counter;
+  return counter->GetInternalPointer();
+}
+
+
+// ExternalReferenceTable is a helper class that defines the relationship
+// between external references and their encodings. It is used to build
+// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
+class ExternalReferenceTable {
+ public:
+  // Lazily created singleton accessor.
+  static ExternalReferenceTable* instance() {
+    if (!instance_) instance_ = new ExternalReferenceTable();
+    return instance_;
+  }
+
+  // Number of registered references.
+  int size() const { return refs_.length(); }
+
+  // The runtime address of reference i.
+  Address address(int i) { return refs_[i].address; }
+
+  // The serialized encoding of reference i (see EncodeExternal).
+  uint32_t code(int i) { return refs_[i].code; }
+
+  // A human-readable name for reference i.
+  const char* name(int i) { return refs_[i].name; }
+
+  // The largest id registered for the given type code.
+  int max_id(int code) { return max_id_[code]; }
+
+ private:
+  static ExternalReferenceTable* instance_;
+
+  ExternalReferenceTable();
+
+  struct ExternalReferenceEntry {
+    Address address;
+    uint32_t code;
+    const char* name;
+  };
+
+  // Register one reference; also maintains max_id_ for its type.
+  void Add(Address address, TypeCode type, uint16_t id, const char* name) {
+    CHECK_NE(NULL, address);
+    ExternalReferenceEntry entry;
+    entry.address = address;
+    entry.code = EncodeExternal(type, id);
+    entry.name = name;
+    CHECK_NE(0, entry.code);
+    refs_.Add(entry);
+    if (id > max_id_[type]) max_id_[type] = id;
+  }
+
+  List<ExternalReferenceEntry> refs_;
+  int max_id_[kTypeCodeCount];
+};
+
+
+// Lazily initialized by ExternalReferenceTable::instance().
+ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;
+
+
+// Populate the table with every external reference the serializer must
+// be able to encode, grouped by type code.
+ExternalReferenceTable::ExternalReferenceTable() : refs_(64) {
+  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
+    max_id_[type_code] = 0;
+  }
+
+  // Define all entries in the table.
+
+  // Builtins with C implementations (C_BUILTIN entries).
+#define DEF_ENTRY_C(name, ignore) \
+  Add(Builtins::c_function_address(Builtins::c_##name), \
+      C_BUILTIN, \
+      Builtins::c_##name, \
+      "Builtins::" #name);
+
+  BUILTIN_LIST_C(DEF_ENTRY_C)
+#undef DEF_ENTRY_C
+
+  // Builtin code entry points (BUILTIN entries, both C and assembler lists).
+#define DEF_ENTRY_C(name, ignore) \
+  Add(Builtins::builtin_address(Builtins::name), \
+      BUILTIN, \
+      Builtins::name, \
+      "Builtins::" #name);
+#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name, _)
+
+  BUILTIN_LIST_C(DEF_ENTRY_C)
+  BUILTIN_LIST_A(DEF_ENTRY_A)
+#undef DEF_ENTRY_C
+#undef DEF_ENTRY_A
+
+  // Runtime functions
+#define RUNTIME_ENTRY(name, nargs) \
+  Add(Runtime::FunctionForId(Runtime::k##name)->entry, \
+      RUNTIME_FUNCTION, \
+      Runtime::k##name, \
+      "Runtime::" #name);
+
+  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
+#undef RUNTIME_ENTRY
+
+  // IC utilities
+#define IC_ENTRY(name) \
+  Add(IC::AddressFromUtilityId(IC::k##name), \
+      IC_UTILITY, \
+      IC::k##name, \
+      "IC::" #name);
+
+  IC_UTIL_LIST(IC_ENTRY)
+#undef IC_ENTRY
+
+  // Debug addresses
+  Add(Debug_Address(Debug::k_after_break_target_address).address(),
+      DEBUG_ADDRESS,
+      Debug::k_after_break_target_address << kDebugIdShift,
+      "Debug::after_break_target_address()");
+  Add(Debug_Address(Debug::k_debug_break_return_address).address(),
+      DEBUG_ADDRESS,
+      Debug::k_debug_break_return_address << kDebugIdShift,
+      "Debug::debug_break_return_address()");
+  // NOTE(review): the NewArray'd names below are never freed --
+  // presumably acceptable since the table is a process-lifetime
+  // singleton; confirm.
+  const char* debug_register_format = "Debug::register_address(%i)";
+  size_t dr_format_length = strlen(debug_register_format);
+  for (int i = 0; i < kNumJSCallerSaved; ++i) {
+    char* name = NewArray<char>(dr_format_length + 1);
+    OS::SNPrintF(name, dr_format_length, debug_register_format, i);
+    Add(Debug_Address(Debug::k_register_address, i).address(),
+        DEBUG_ADDRESS,
+        Debug::k_register_address << kDebugIdShift | i,
+        name);
+  }
+
+  // Stat counters
+#define COUNTER_ENTRY(name, caption) \
+  Add(reinterpret_cast<Address>(GetInternalPointer(&Counters::name)), \
+      STATS_COUNTER, \
+      Counters::k_##name, \
+      "Counters::" #name);
+
+  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
+  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
+#undef COUNTER_ENTRY
+
+  // Top addresses
+  const char* top_address_format = "Top::get_address_from_id(%i)";
+  size_t top_format_length = strlen(top_address_format);
+  for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
+    char* name = NewArray<char>(top_format_length + 1);
+    OS::SNPrintF(name, top_format_length, top_address_format, i);
+    Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, name);
+  }
+
+  // Extensions
+  Add(FUNCTION_ADDR(GCExtension::GC), EXTENSION, 1,
+      "GCExtension::GC");
+  Add(FUNCTION_ADDR(PrintExtension::Print), EXTENSION, 2,
+      "PrintExtension::Print");
+  Add(FUNCTION_ADDR(LoadExtension::Load), EXTENSION, 3,
+      "LoadExtension::Load");
+  Add(FUNCTION_ADDR(QuitExtension::Quit), EXTENSION, 4,
+      "QuitExtension::Quit");
+
+  // Accessors
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+  Add((Address)&Accessors::name, \
+      ACCESSOR, \
+      Accessors::k##name, \
+      "Accessors::" #name);
+
+  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+
+  // Stub cache tables
+  Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
+      STUB_CACHE_TABLE,
+      1,
+      "StubCache::primary_->key");
+  Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
+      STUB_CACHE_TABLE,
+      2,
+      "StubCache::primary_->value");
+  Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
+      STUB_CACHE_TABLE,
+      3,
+      "StubCache::secondary_->key");
+  Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
+      STUB_CACHE_TABLE,
+      4,
+      "StubCache::secondary_->value");
+
+  // Runtime entries
+  Add(FUNCTION_ADDR(Runtime::PerformGC),
+      RUNTIME_ENTRY,
+      1,
+      "Runtime::PerformGC");
+  Add(FUNCTION_ADDR(StackFrameIterator::RestoreCalleeSavedForTopHandler),
+      RUNTIME_ENTRY,
+      2,
+      "StackFrameIterator::RestoreCalleeSavedForTopHandler");
+
+  // Miscellaneous
+  Add(ExternalReference::builtin_passed_function().address(),
+      UNCLASSIFIED,
+      1,
+      "Builtins::builtin_passed_function");
+  Add(ExternalReference::the_hole_value_location().address(),
+      UNCLASSIFIED,
+      2,
+      "Factory::the_hole_value().location()");
+  Add(ExternalReference::address_of_stack_guard_limit().address(),
+      UNCLASSIFIED,
+      3,
+      "StackGuard::address_of_limit()");
+  Add(ExternalReference::debug_break().address(),
+      UNCLASSIFIED,
+      4,
+      "Debug::Break()");
+  Add(ExternalReference::new_space_start().address(),
+      UNCLASSIFIED,
+      5,
+      "Heap::NewSpaceStart()");
+  Add(ExternalReference::new_space_allocation_limit_address().address(),
+      UNCLASSIFIED,
+      6,
+      "Heap::NewSpaceAllocationLimitAddress()");
+  Add(ExternalReference::new_space_allocation_top_address().address(),
+      UNCLASSIFIED,
+      7,
+      "Heap::NewSpaceAllocationTopAddress()");
+  Add(ExternalReference::debug_step_in_fp_address().address(),
+      UNCLASSIFIED,
+      8,
+      "Debug::step_in_fp_addr()");
+}
+
+
+// Seed the hash map with every known external reference so Encode and
+// NameOfAddress can look addresses up by table index.
+ExternalReferenceEncoder::ExternalReferenceEncoder()
+    : encodings_(Match) {
+  ExternalReferenceTable* table = ExternalReferenceTable::instance();
+  int count = table->size();
+  for (int index = 0; index < count; ++index) {
+    Put(table->address(index), index);
+  }
+}
+
+
+// Return the serialized encoding for an external address, or 0 when
+// the address is not in the table.
+uint32_t ExternalReferenceEncoder::Encode(Address key) const {
+  int index = IndexOf(key);
+  if (index < 0) return 0;
+  return ExternalReferenceTable::instance()->code(index);
+}
+
+
+// Return the table name for an external address, or NULL when the
+// address is not in the table.
+const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
+  int index = IndexOf(key);
+  if (index < 0) return NULL;
+  return ExternalReferenceTable::instance()->name(index);
+}
+
+
+// Return the table index stored for 'key', or -1 if absent.
+int ExternalReferenceEncoder::IndexOf(Address key) const {
+  if (key == NULL) return -1;
+  // HashMap::Lookup is not const; the const_cast is safe because the
+  // insert flag is false, so the map is not mutated.
+  HashMap::Entry* entry =
+      const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false);
+  return entry == NULL ? -1 : reinterpret_cast<int>(entry->value);
+}
+
+
+// Insert (or overwrite) the table index stored for 'key'.
+void ExternalReferenceEncoder::Put(Address key, int index) {
+  HashMap::Entry* slot = encodings_.Lookup(key, Hash(key), true);
+  slot->value = reinterpret_cast<void *>(index);
+}
+
+
+// Build the decoding tables: one Address array per type code, indexed
+// by reference id and sized from the table's max_id for that type.
+ExternalReferenceDecoder::ExternalReferenceDecoder()
+    : encodings_(NewArray<Address*>(kTypeCodeCount)) {
+  ExternalReferenceTable* external_references =
+      ExternalReferenceTable::instance();
+  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
+    int max = external_references->max_id(type) + 1;
+    encodings_[type] = NewArray<Address>(max + 1);
+  }
+  for (int i = 0; i < external_references->size(); ++i) {
+    Put(external_references->code(i), external_references->address(i));
+  }
+}
+
+
+// Free each per-type decoding array, then the array of arrays itself.
+ExternalReferenceDecoder::~ExternalReferenceDecoder() {
+  for (int type_code = kFirstTypeCode;
+       type_code < kTypeCodeCount;
+       ++type_code) {
+    DeleteArray(encodings_[type_code]);
+  }
+  DeleteArray(encodings_);
+}
+
+
+//------------------------------------------------------------------------------
+// Implementation of Serializer
+
+
+// Helper class to write the bytes of the serialized heap.
+
+class SnapshotWriter {
+ public:
+  SnapshotWriter() {
+    len_ = 0;
+    max_ = 8 << 10;  // 8K initial size
+    str_ = NewArray<char>(max_);
+  }
+
+  ~SnapshotWriter() {
+    DeleteArray(str_);
+  }
+
+  // Copy the snapshot bytes into a freshly allocated buffer owned by
+  // the caller.
+  void GetString(char** str, int* len) {
+    *str = NewArray<char>(len_);
+    memcpy(*str, str_, len_);
+    *len = len_;
+  }
+
+  // Make room for 'bytes' bytes at position 'pos', growing the buffer
+  // and shifting the tail as needed.
+  void Reserve(int bytes, int pos);
+
+  // Put* operations append at the current end of the snapshot.
+  void PutC(char c) {
+    InsertC(c, len_);
+  }
+
+  void PutInt(int i) {
+    InsertInt(i, len_);
+  }
+
+  void PutBytes(const byte* a, int size) {
+    InsertBytes(a, len_, size);
+  }
+
+  void PutString(const char* s) {
+    InsertString(s, len_);
+  }
+
+  // Insert* operations write at an arbitrary position and return the
+  // position just past the inserted data.
+  int InsertC(char c, int pos) {
+    Reserve(1, pos);
+    str_[pos] = c;
+    len_++;
+    return pos + 1;
+  }
+
+  int InsertInt(int i, int pos) {
+    return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i));
+  }
+
+  int InsertBytes(const byte* a, int pos, int size) {
+    Reserve(size, pos);
+    memcpy(&str_[pos], a, size);
+    len_ += size;
+    return pos + size;
+  }
+
+  int InsertString(const char* s, int pos);
+
+  int length() { return len_; }
+
+ private:
+  char* str_;  // the snapshot
+  int len_;    // the current length of str_
+  int max_;    // the allocated size of str_
+};
+
+
+// Open a gap of 'bytes' bytes at position 'pos'.  The caller fills the
+// gap and updates len_ (see the Insert* methods).
+void SnapshotWriter::Reserve(int bytes, int pos) {
+  CHECK(0 <= pos && pos <= len_);
+  // Grow the buffer (doubling) until the extra bytes fit.
+  while (len_ + bytes >= max_) {
+    max_ *= 2;
+    char* old = str_;
+    str_ = NewArray<char>(max_);
+    memcpy(str_, old, len_);
+    DeleteArray(old);
+  }
+  // Inserting in the middle: copy the prefix, then the tail shifted by
+  // 'bytes', leaving the gap uninitialized for the caller to fill.
+  if (pos < len_) {
+    char* old = str_;
+    str_ = NewArray<char>(max_);
+    memcpy(str_, old, pos);
+    memcpy(str_ + pos + bytes, old + pos, len_ - pos);
+    DeleteArray(old);
+  }
+}
+
+// Write 's' at 'pos' as '[' <length> ']' <bytes> -- the format that
+// SnapshotReader::GetString expects.  Returns the position past the data.
+int SnapshotWriter::InsertString(const char* s, int pos) {
+  // strlen returns size_t; the writer works in ints, so cast explicitly.
+  int size = static_cast<int>(strlen(s));
+  pos = InsertC('[', pos);
+  pos = InsertInt(size, pos);
+  pos = InsertC(']', pos);
+  return InsertBytes(reinterpret_cast<const byte*>(s), pos, size);
+}
+
+
+// Set up a serializer with a fresh snapshot writer and one simulated heap
+// space per allocation space.
+Serializer::Serializer()
+  : global_handles_(4) {
+  writer_ = new SnapshotWriter();
+  reference_encoder_ = NULL;
+  root_ = true;
+  roots_ = 0;
+  objects_ = 0;
+  for (int space = 0; space <= LAST_SPACE; space++) {
+    allocator_[space] = new SimulatedHeapSpace();
+  }
+}
+
+
+// Tear down the simulated spaces, the reference encoder (if Serialize()
+// ever created one) and the snapshot writer.
+Serializer::~Serializer() {
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    delete allocator_[i];
+  }
+  // delete on a null pointer is a no-op, so no guard is needed.
+  delete reference_encoder_;
+  delete writer_;
+}
+
+
+bool Serializer::serialization_enabled_ = true;
+
+
+#ifdef DEBUG
+// Upper bound on tag length, shared with Deserializer::Synchronize, which
+// reads tags back into a fixed buffer of this many characters.
+static const int kMaxTagLength = 32;
+
+// Write a debug-only synchronization tag: 'S', the tag length, then the
+// tag bytes.  The deserializer checks these to detect stream skew.
+void Serializer::Synchronize(const char* tag) {
+  if (FLAG_debug_serialization) {
+    int length = strlen(tag);
+    ASSERT(length <= kMaxTagLength);
+    writer_->PutC('S');
+    writer_->PutInt(length);
+    writer_->PutBytes(reinterpret_cast<const byte*>(tag), length);
+  }
+}
+#endif
+
+
+// Point each simulated allocator at an empty heap of the corresponding
+// allocation space before serialization begins.
+void Serializer::InitializeAllocators() {
+  for (int space = 0; space <= LAST_SPACE; space++) {
+    allocator_[space]->InitEmptyHeap(static_cast<AllocationSpace>(space));
+  }
+}
+
+
+// Serialize the heap.  Destructive: object maps are overwritten with
+// forwarding addresses, and serialization is disabled when done.
+void Serializer::Serialize() {
+  // No active threads.
+  CHECK_EQ(NULL, ThreadState::FirstInUse());
+  // No active or weak handles.
+  CHECK(HandleScopeImplementer::instance()->Blocks()->is_empty());
+  CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
+  // We need a counter function during serialization to resolve the
+  // references to counters in the code on the heap.
+  CHECK(StatsTable::HasCounterFunction());
+  CHECK(enabled());
+  InitializeAllocators();
+  reference_encoder_ = new ExternalReferenceEncoder();
+  PutHeader();
+  Heap::IterateRoots(this);
+  PutLog();
+  PutContextStack();
+  disable();
+}
+
+
+// Copy the snapshot into a freshly allocated buffer; ownership of *str
+// passes to the caller (see SnapshotWriter::GetString).
+void Serializer::Finalize(char** str, int* len) {
+  writer_->GetString(str, len);
+}
+
+
+// Serialize roots by writing them into the stream.  Serialize pointers
+// in HeapObjects by changing them to the encoded address where the
+// object will be allocated on deserialization.
+
+void Serializer::VisitPointers(Object** start, Object** end) {
+  // root_ distinguishes the top-level root iteration from the recursive
+  // body visits that Encode/PutObject trigger; save and restore it so
+  // recursion below does not clobber the caller's mode.
+  bool root = root_;
+  root_ = false;
+  for (Object** p = start; p < end; ++p) {
+    bool serialized;
+    if (root) {
+      roots_++;
+      Address a = Encode(*p, &serialized);
+      // If the object was not just serialized,
+      // write its encoded address instead.
+      if (!serialized) PutEncodedAddress(a);
+    } else {
+      // Rewrite the pointer in the HeapObject.
+      *p = reinterpret_cast<Object*>(Encode(*p, &serialized));
+    }
+  }
+  root_ = root;
+}
+
+
+// Replace each external address with its 32-bit reference encoding; the
+// inverse transformation is Deserializer::VisitExternalReferences.
+void Serializer::VisitExternalReferences(Address* start, Address* end) {
+  for (Address* p = start; p < end; ++p) {
+    uint32_t code = reference_encoder_->Encode(*p);
+    // Only NULL may (and must) encode to 0.
+    CHECK(*p == NULL ? code == 0 : code != 0);
+    *p = reinterpret_cast<Address>(code);
+  }
+}
+
+
+// Overwrite a runtime-entry target in the instruction stream with its
+// reference encoding; decoded again by Deserializer::VisitRuntimeEntry.
+void Serializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+  Address target = rinfo->target_address();
+  uint32_t encoding = reference_encoder_->Encode(target);
+  CHECK(target == NULL ? encoding == 0 : encoding != 0);
+  uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->pc());
+  *pc = encoding;
+}
+
+
+// Visitor that records the location of every global handle it is shown.
+class GlobalHandlesRetriever: public ObjectVisitor {
+ public:
+  explicit GlobalHandlesRetriever(List<Object**>* handles)
+    : global_handles_(handles) {}
+
+  virtual void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current != end; ++current) {
+      global_handles_->Add(current);
+    }
+  }
+
+ private:
+  List<Object**>* global_handles_;  // Accumulates handle locations.
+};
+
+
+// Write the command-line flags as 'F' <count> '[' flag ('|' flag)* ']'.
+// Consumes the flag list: each string and the list itself are deallocated.
+void Serializer::PutFlags() {
+  writer_->PutC('F');
+  List<char*>* argv = FlagList::argv();
+  writer_->PutInt(argv->length());
+  writer_->PutC('[');
+  for (int i = 0; i < argv->length(); i++) {
+    if (i > 0) writer_->PutC('|');
+    writer_->PutString((*argv)[i]);
+    DeleteArray((*argv)[i]);
+  }
+  writer_->PutC(']');
+  // Remember where the flags end so PutLog() can insert the log there.
+  flags_end_ = writer_->length();
+  delete argv;
+}
+
+
+// Write global information into the header of the snapshot: the flags, a
+// debug-serialization marker, the paged-space sizes, and one 'N'
+// placeholder per global handle.  Mirrored by Deserializer::GetHeader.
+void Serializer::PutHeader() {
+  PutFlags();
+  writer_->PutC('D');
+#ifdef DEBUG
+  writer_->PutC(FLAG_debug_serialization ? '1' : '0');
+#else
+  writer_->PutC('0');
+#endif
+  // Write sizes of paged memory spaces.
+  writer_->PutC('S');
+  writer_->PutC('[');
+  writer_->PutInt(Heap::old_space()->Size());
+  writer_->PutC('|');
+  writer_->PutInt(Heap::code_space()->Size());
+  writer_->PutC('|');
+  writer_->PutInt(Heap::map_space()->Size());
+  writer_->PutC(']');
+  // Write global handles.
+  writer_->PutC('G');
+  writer_->PutC('[');
+  GlobalHandlesRetriever ghr(&global_handles_);
+  GlobalHandles::IterateRoots(&ghr);
+  for (int i = 0; i < global_handles_.length(); i++) {
+    writer_->PutC('N');
+  }
+  writer_->PutC(']');
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ DECLARE_bool(log_code);
+ DECLARE_string(logfile);
+#endif
+
+
+// If code logging is on, shut the logger down and splice the contents of
+// the log file into the stream right after the flags (at flags_end_).
+void Serializer::PutLog() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log_code) {
+    Logger::TearDown();
+    int pos = writer_->InsertC('L', flags_end_);
+    bool exists;
+    Vector<const char> log = ReadFile(FLAG_logfile, &exists);
+    // NOTE(review): InsertString takes a C string, so this assumes
+    // ReadFile returns a NUL-terminated buffer -- confirm.
+    writer_->InsertString(log.start(), pos);
+    log.Dispose();
+  }
+#endif
+}
+
+
+// Linear search: return the position of 'element' in 'list', or -1 if it
+// is not present.
+static int IndexOf(const List<Object**>& list, Object** element) {
+  const int count = list.length();
+  for (int i = 0; i < count; i++) {
+    if (list[i] == element) return i;
+  }
+  return -1;
+}
+
+
+// Write a stack of handles, bottom first, as '[' <count>
+// ('|' <global-handle-index>)* ']'.  Every handle must already be in
+// global_handles_, which PutHeader populated.
+void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) {
+  writer_->PutC('[');
+  writer_->PutInt(stack.length());
+  for (int i = stack.length() - 1; i >= 0; i--) {
+    writer_->PutC('|');
+    int gh_index = IndexOf(global_handles_, stack[i].location());
+    CHECK_GE(gh_index, 0);
+    writer_->PutInt(gh_index);
+  }
+  writer_->PutC(']');
+}
+
+
+// Drain the entered- and security-context stacks into the stream; the
+// deserializer rebuilds both in GetContextStack().
+void Serializer::PutContextStack() {
+  HandleScopeImplementer* impl = HandleScopeImplementer::instance();
+  List<Handle<Object> > entered_contexts(2);
+  while (impl->HasEnteredContexts()) {
+    entered_contexts.Add(impl->RemoveLastEnteredContext());
+  }
+  PutGlobalHandleStack(entered_contexts);
+  List<Handle<Object> > security_contexts(2);
+  while (impl->HasSecurityContexts()) {
+    security_contexts.Add(impl->RemoveLastSecurityContext());
+  }
+  PutGlobalHandleStack(security_contexts);
+}
+
+
+// Write an encoded address as 'P' followed by its raw bits.
+void Serializer::PutEncodedAddress(Address addr) {
+  writer_->PutC('P');
+  writer_->PutInt(reinterpret_cast<int>(addr));
+}
+
+
+// Return the encoded RelativeAddress where 'o' will live after
+// deserialization, serializing its contents first if this is its first
+// visit.  Smis encode as themselves.  *serialized reports whether
+// PutObject ran for this call.
+Address Serializer::Encode(Object* o, bool* serialized) {
+  *serialized = false;
+  if (o->IsSmi()) {
+    return reinterpret_cast<Address>(o);
+  } else {
+    HeapObject* obj = HeapObject::cast(o);
+    if (is_marked(obj)) {
+      // Already serialized: encoded address is in map.
+      intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
+      return reinterpret_cast<Address>(clear_mark_bit(map_word));
+    } else {
+      // First visit: serialize the object.
+      *serialized = true;
+      return PutObject(obj);
+    }
+  }
+}
+
+
+// Serialize 'obj' and return the encoded address where it will live after
+// deserialization.  Stream layout: '[' <type> <size in words> P<addr>
+// <recursively embedded objects> '|' P<map addr> <raw body>, plus
+// P<addr> ']' as an epilogue under debug serialization.
+Address Serializer::PutObject(HeapObject* obj) {
+  Map* map = obj->map();
+  InstanceType type = map->instance_type();
+  int size = obj->SizeFromMap(map);
+
+  // Simulate the allocation of obj to predict where it will be
+  // allocated during deserialization.
+  Address addr = Allocate(obj).Encode();
+
+  if (type == CODE_TYPE) {
+    Code* code = Code::cast(obj);
+    // Ensure Code objects contain Object pointers, not Addresses.
+    code->ConvertICTargetsFromAddressToObject();
+    LOG(CodeMoveEvent(code->address(), addr));
+  }
+
+  // Put the encoded address in the map() of the object, and mark the
+  // object. Do this to break recursion before visiting any pointers
+  // in the object.
+  obj->set_map(reinterpret_cast<Map*>(addr));
+  set_mark(obj);
+
+  // Write out the object prologue: type, size, and simulated address of obj.
+  writer_->PutC('[');
+  CHECK_EQ(0, size & kObjectAlignmentMask);
+  writer_->PutInt(type);
+  writer_->PutInt(size >> kObjectAlignmentBits);
+  PutEncodedAddress(addr);  // encodes AllocationSpace
+
+  // Get the map's encoded address, possibly serializing it on first
+  // visit. Note that we do not update obj->map(), since
+  // it already contains the forwarding address of 'obj'.
+  bool serialized;
+  Address map_addr = Encode(map, &serialized);
+
+  // Visit all the pointers in the object other than the map. This
+  // will rewrite these pointers in place in the body of the object
+  // with their encoded RelativeAddresses, and recursively serialize
+  // any as-yet-unvisited objects.
+  obj->IterateBody(type, size, this);
+
+  // Mark end of recursively embedded objects, start of object body.
+  writer_->PutC('|');
+  // Write out the encoded address for the map.
+  PutEncodedAddress(map_addr);
+
+  // Write out the raw contents of the object following the map
+  // pointer containing the now-updated pointers. No compression, but
+  // fast to deserialize.
+  writer_->PutBytes(obj->address() + HeapObject::kSize,
+                    size - HeapObject::kSize);
+
+#ifdef DEBUG
+  if (FLAG_debug_serialization) {
+    // Write out the object epilogue to catch synchronization errors.
+    PutEncodedAddress(addr);
+    writer_->PutC(']');
+  }
+#endif
+
+  objects_++;
+  return addr;
+}
+
+
+// Simulate the allocation of 'obj', returning the RelativeAddress where
+// it will be placed when the snapshot is deserialized.
+RelativeAddress Serializer::Allocate(HeapObject* obj) {
+  // Find out which AllocationSpace 'obj' is in.
+  AllocationSpace s;
+  bool found = false;
+  for (int i = 0; !found && i <= LAST_SPACE; i++) {
+    s = static_cast<AllocationSpace>(i);
+    found = Heap::InSpace(obj, s);
+  }
+  CHECK(found);
+  int size = obj->Size();
+  return allocator_[s]->Allocate(size);
+}
+
+
+//------------------------------------------------------------------------------
+// Implementation of Deserializer
+
+
+static const int kInitArraySize = 32;
+
+
+// Create a deserializer reading from 'str' of length 'len'; the buffer is
+// borrowed, not owned.
+Deserializer::Deserializer(char* str, int len)
+  : reader_(str, len),
+    map_pages_(kInitArraySize), old_pages_(kInitArraySize),
+    code_pages_(kInitArraySize), large_objects_(kInitArraySize),
+    global_handles_(4) {
+  root_ = true;
+  roots_ = 0;
+  objects_ = 0;
+  reference_decoder_ = NULL;
+#ifdef DEBUG
+  expect_debug_information_ = false;
+#endif
+}
+
+
+// Tear down the reference decoder, if Deserialize() ever created one.
+Deserializer::~Deserializer() {
+  // delete on a null pointer is a no-op, so no guard is needed.
+  delete reference_decoder_;
+}
+
+
+// Read an encoded address and (debug-only) check it equals 'expected'.
+// USE() keeps release builds warning-free when the ASSERT compiles away.
+void Deserializer::ExpectEncodedAddress(Address expected) {
+  Address a = GetEncodedAddress();
+  USE(a);
+  ASSERT(a == expected);
+}
+
+
+#ifdef DEBUG
+// Read a synchronization tag written by Serializer::Synchronize and check
+// it matches 'tag'; used to diagnose serializer/deserializer skew.
+void Deserializer::Synchronize(const char* tag) {
+  if (expect_debug_information_) {
+    char buf[kMaxTagLength];
+    reader_.ExpectC('S');
+    int length = reader_.GetInt();
+    ASSERT(length <= kMaxTagLength);
+    reader_.GetBytes(reinterpret_cast<Address>(buf), length);
+    ASSERT_EQ(strlen(tag), length);
+    ASSERT(strncmp(tag, buf, length) == 0);
+  }
+}
+#endif
+
+
+// Deserialize the snapshot into an empty heap, then rebuild the
+// remembered sets that the raw byte copy did not maintain.
+void Deserializer::Deserialize() {
+  // No active threads.
+  ASSERT_EQ(NULL, ThreadState::FirstInUse());
+  // No active handles.
+  ASSERT(HandleScopeImplementer::instance()->Blocks()->is_empty());
+  reference_decoder_ = new ExternalReferenceDecoder();
+  // By setting linear allocation only, we forbid the use of free list
+  // allocation which is not predicted by SimulatedAddress.
+  Heap::SetLinearAllocationOnly(true);
+  GetHeader();
+  Heap::IterateRoots(this);
+  GetContextStack();
+  Heap::SetLinearAllocationOnly(false);
+  Heap::RebuildRSets();
+}
+
+
+// For roots, read the next object or back-reference from the stream; for
+// pointers inside objects already read, resolve the encoded address the
+// serializer left in the slot.
+void Deserializer::VisitPointers(Object** start, Object** end) {
+  // Save and restore root_ so recursion via GetObject keeps the mode.
+  bool root = root_;
+  root_ = false;
+  for (Object** p = start; p < end; ++p) {
+    if (root) {
+      roots_++;
+      // Read the next object or encoded object pointer from the stream.
+      int c = reader_.GetC();
+      if (c == '[') {
+        *p = GetObject();  // embedded object
+      } else {
+        ASSERT(c == 'P');  // pointer to previously serialized object
+        *p = Resolve(reinterpret_cast<Address>(reader_.GetInt()));
+      }
+    } else {
+      // A pointer internal to a HeapObject that we've already
+      // read: resolve it to a true address (or Smi)
+      *p = Resolve(reinterpret_cast<Address>(*p));
+    }
+  }
+  root_ = root;
+}
+
+
+// Inverse of Serializer::VisitExternalReferences: each slot holds a
+// 32-bit reference encoding; replace it with the decoded address.
+void Deserializer::VisitExternalReferences(Address* start, Address* end) {
+  for (Address* slot = start; slot < end; ++slot) {
+    uint32_t encoding = reinterpret_cast<uint32_t>(*slot);
+    *slot = reference_decoder_->Decode(encoding);
+  }
+}
+
+
+// Inverse of Serializer::VisitRuntimeEntry: the serializer stored the
+// reference encoding in the instruction stream; decode it and restore
+// the real target address.
+void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+  uint32_t* encoding_slot = reinterpret_cast<uint32_t*>(rinfo->pc());
+  Address target = reference_decoder_->Decode(*encoding_slot);
+  rinfo->set_target_address(target);
+}
+
+
+DECLARE_bool(use_ic);
+DECLARE_bool(debug_code);
+DECLARE_bool(lazy);
+
+// Read the flags section written by Serializer::PutFlags and re-apply the
+// flags that must be inherited from the snapshot.  argv[0] is left unused
+// so the indices mimic a conventional argv layout.
+void Deserializer::GetFlags() {
+  reader_.ExpectC('F');
+  int argc = reader_.GetInt() + 1;
+  char** argv = NewArray<char*>(argc);
+  reader_.ExpectC('[');
+  for (int i = 1; i < argc; i++) {
+    if (i > 1) reader_.ExpectC('|');
+    argv[i] = reader_.GetString();
+  }
+  reader_.ExpectC(']');
+  has_log_ = false;
+  for (int i = 1; i < argc; i++) {
+    if (strcmp("--log-code", argv[i]) == 0) {
+      has_log_ = true;
+    } else if (strcmp("--nouse-ic", argv[i]) == 0) {
+      FLAG_use_ic = false;
+    } else if (strcmp("--debug-code", argv[i]) == 0) {
+      FLAG_debug_code = true;
+    } else if (strcmp("--nolazy", argv[i]) == 0) {
+      FLAG_lazy = false;
+    }
+    DeleteArray(argv[i]);
+  }
+
+  DeleteArray(argv);
+}
+
+
+// Read the log section (spliced in by Serializer::PutLog) if the snapshot
+// was made with --log-code, and replay it as the log preamble.  The
+// section is consumed even when logging support is compiled out.
+void Deserializer::GetLog() {
+  if (has_log_) {
+    reader_.ExpectC('L');
+    char* snapshot_log = reader_.GetString();
+#ifdef ENABLE_LOGGING_AND_PROFILING
+    if (FLAG_log_code) {
+      LOG(Preamble(snapshot_log));
+    }
+#endif
+    DeleteArray(snapshot_log);
+  }
+}
+
+
+// Grow 'space' to at least 'capacity' bytes and record every one of its
+// pages in 'page_list' so the deserializer can resolve page indices.
+static void InitPagedSpace(PagedSpace* space,
+                           int capacity,
+                           List<Page*>* page_list) {
+  space->EnsureCapacity(capacity);
+  // TODO(1240712): PagedSpace::EnsureCapacity can return false due to
+  // a failure to allocate from the OS to expand the space.
+  PageIterator it(space, PageIterator::ALL_PAGES);
+  while (it.has_next()) {
+    page_list->Add(it.next());
+  }
+}
+
+
+// Read the header written by Serializer::PutHeader: the debug marker, the
+// paged-space sizes, and one placeholder per global handle.
+void Deserializer::GetHeader() {
+  reader_.ExpectC('D');
+#ifdef DEBUG
+  expect_debug_information_ = reader_.GetC() == '1';
+#else
+  // In release mode, don't attempt to read a snapshot containing
+  // synchronization tags.
+  if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
+#endif
+  // Ensure sufficient capacity in paged memory spaces to avoid growth
+  // during deserialization.
+  reader_.ExpectC('S');
+  reader_.ExpectC('[');
+  InitPagedSpace(Heap::old_space(), reader_.GetInt(), &old_pages_);
+  reader_.ExpectC('|');
+  InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
+  reader_.ExpectC('|');
+  InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
+  reader_.ExpectC(']');
+  // Create placeholders for global handles, to be filled in during
+  // IterateRoots.
+  reader_.ExpectC('G');
+  reader_.ExpectC('[');
+  int c = reader_.GetC();
+  while (c != ']') {
+    ASSERT(c == 'N');
+    global_handles_.Add(GlobalHandles::Create(NULL).location());
+    c = reader_.GetC();
+  }
+}
+
+
+// Read a stack written by Serializer::PutGlobalHandleStack; entries are
+// bottom-first indices into global_handles_.
+void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) {
+  reader_.ExpectC('[');
+  const int count = reader_.GetInt();
+  for (int i = 0; i < count; i++) {
+    reader_.ExpectC('|');
+    int handle_index = reader_.GetInt();
+    stack->Add(global_handles_[handle_index]);
+  }
+  reader_.ExpectC(']');
+}
+
+
+// Rebuild the entered- and security-context stacks that
+// Serializer::PutContextStack drained into the stream.
+void Deserializer::GetContextStack() {
+  HandleScopeImplementer* impl = HandleScopeImplementer::instance();
+  List<Handle<Object> > entered_contexts(2);
+  GetGlobalHandleStack(&entered_contexts);
+  for (int i = 0; i < entered_contexts.length(); i++) {
+    impl->AddEnteredContext(entered_contexts[i]);
+  }
+  List<Handle<Object> > security_contexts(2);
+  GetGlobalHandleStack(&security_contexts);
+  for (int i = 0; i < security_contexts.length(); i++) {
+    impl->AddSecurityContext(security_contexts[i]);
+  }
+}
+
+
+// Read an encoded address: 'P' followed by its raw bits.
+Address Deserializer::GetEncodedAddress() {
+  reader_.ExpectC('P');
+  return reinterpret_cast<Address>(reader_.GetInt());
+}
+
+
+// Read one object: the counterpart of Serializer::PutObject.  Allocates
+// raw space at the simulated address, reads embedded objects, then the
+// map and raw body, and finally resolves the body's encoded pointers.
+Object* Deserializer::GetObject() {
+  // Read the prologue: type, size and encoded address.
+  InstanceType type = static_cast<InstanceType>(reader_.GetInt());
+  int size = reader_.GetInt() << kObjectAlignmentBits;
+  Address a = GetEncodedAddress();
+
+  // Get a raw object of the right size in the right space.
+  Object* o = Heap::AllocateRaw(size, Space(a));
+  ASSERT(!o->IsFailure());
+  // Check that the simulation of heap allocation was correct.
+  ASSERT(o == Resolve(a));
+
+  // Read any recursively embedded objects.
+  int c = reader_.GetC();
+  while (c == '[') {
+    GetObject();
+    c = reader_.GetC();
+  }
+  ASSERT(c == '|');
+
+  // Read, resolve and set the map pointer: don't rely on map being initialized.
+  Address map_addr = GetEncodedAddress();
+  Map* map = reinterpret_cast<Map*>(Resolve(map_addr));
+  HeapObject* obj = reinterpret_cast<HeapObject*>(o);
+  obj->set_map(map);
+
+  // Read the uninterpreted contents of the object after the map.
+  reader_.GetBytes(obj->address() + HeapObject::kSize,
+                   size - HeapObject::kSize);
+
+#ifdef DEBUG
+  if (expect_debug_information_) {
+    // Read in the epilogue to check that we're still synchronized.
+    ExpectEncodedAddress(a);
+    reader_.ExpectC(']');
+  }
+#endif
+
+  // Resolve the encoded pointers we just read in.
+  obj->IterateBody(type, size, this);
+
+  if (type == CODE_TYPE) {
+    Code* code = Code::cast(obj);
+    // Convert relocations from Object* to Address in Code objects.
+    code->ConvertICTargetsFromObjectToAddress();
+    LOG(CodeMoveEvent(a, code->address()));
+  }
+  objects_++;
+  return o;
+}
+
+
+// Map a (page index, page offset) pair from an encoded address back to
+// the HeapObject in 'space', using the cached page list.
+static inline Object* ResolvePaged(int page_index,
+                                   int page_offset,
+                                   PagedSpace* space,
+                                   List<Page*>* page_list) {
+#ifdef DEBUG
+  space->CheckLinearAllocationOnly();
+#endif
+
+  ASSERT(page_index < page_list->length());
+  Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
+  return HeapObject::FromAddress(address);
+}
+
+
+// Append the elements of 'source' to 'target' in reverse order.
+template<typename T>
+void ConcatReversed(List<T> * target, const List<T> & source) {
+  for (int i = source.length(); --i >= 0; ) {
+    target->Add(source[i]);
+  }
+}
+
+
+// Given an encoded address, return the object it refers to.  Smis decode
+// as themselves; HeapObjects are located from the space tag and offsets
+// packed into the encoding.
+Object* Deserializer::Resolve(Address encoded) {
+  Object* o = reinterpret_cast<Object*>(encoded);
+  if (o->IsSmi()) return o;
+
+  // Encoded addresses of HeapObjects always have 'HeapObject' tags.
+  ASSERT(o->IsHeapObject());
+
+  switch (Space(encoded)) {
+    // For Map space and Old space, we cache the known Pages in
+    // map_pages and old_pages respectively. Even though MapSpace
+    // keeps a list of page addresses, we don't rely on it since
+    // GetObject uses AllocateRaw, and that appears not to update
+    // the page list.
+    case MAP_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::map_space(), &map_pages_);
+    case OLD_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::old_space(), &old_pages_);
+    case CODE_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::code_space(), &code_pages_);
+    case NEW_SPACE:
+      return HeapObject::FromAddress(Heap::NewSpaceStart() +
+                                     NewSpaceOffset(encoded));
+    case LO_SPACE:
+      // Cache the known large_objects, allocated one per 'page'
+      int index = LargeObjectIndex(encoded);
+      if (index >= large_objects_.length()) {
+        // Extend the cache with any large objects allocated since the
+        // last lookup, reversed so indices match allocation order.
+        int new_object_count =
+          Heap::lo_space()->PageCount() - large_objects_.length();
+        List<Object*> new_objects(new_object_count);
+        LargeObjectIterator it(Heap::lo_space());
+        for (int i = 0; i < new_object_count; i++) {
+          new_objects.Add(it.next());
+        }
+#ifdef DEBUG
+        for (int i = large_objects_.length() - 1; i >= 0; i--) {
+          ASSERT(it.next() == large_objects_[i]);
+        }
+#endif
+        ConcatReversed(&large_objects_, new_objects);
+        ASSERT(index < large_objects_.length());
+      }
+      return large_objects_[index];  // s.page_offset() is ignored.
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SERIALIZE_H_
+#define V8_SERIALIZE_H_
+
+#include "hashmap.h"
+
+namespace v8 { namespace internal {
+
+// A TypeCode is used to distinguish different kinds of external
+// reference.  The type code and a per-type id are combined into a 32-bit
+// reference encoding (see kReferenceIdBits below and
+// ExternalReferenceDecoder::Lookup).
+enum TypeCode {
+  UNCLASSIFIED,  // One-of-a-kind references.
+  BUILTIN,
+  RUNTIME_FUNCTION,
+  IC_UTILITY,
+  DEBUG_ADDRESS,
+  STATS_COUNTER,
+  TOP_ADDRESS,
+  C_BUILTIN,
+  EXTENSION,
+  ACCESSOR,
+  RUNTIME_ENTRY,
+  STUB_CACHE_TABLE
+};
+
+const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kFirstTypeCode = UNCLASSIFIED;
+
+// An encoded reference packs the per-type id into the low 16 bits and the
+// TypeCode into the bits above (see ExternalReferenceDecoder::Lookup).
+const int kReferenceIdBits = 16;
+const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
+const int kReferenceTypeShift = kReferenceIdBits;
+const int kDebugRegisterBits = 4;
+const int kDebugIdShift = kDebugRegisterBits;
+
+
+// Maps external reference addresses to their 32-bit reference encodings.
+// Member functions are defined in serialize.cc.
+class ExternalReferenceEncoder {
+ public:
+  ExternalReferenceEncoder();
+
+  // Return the 32-bit encoding of 'key'; NULL encodes to 0 (see the
+  // CHECKs in Serializer::VisitExternalReferences).
+  uint32_t Encode(Address key) const;
+
+  // Return a name for the reference at 'key' (defined in serialize.cc).
+  const char* NameOfAddress(Address key) const;
+
+ private:
+  // Hash map from addresses to indices (see Put/IndexOf).
+  HashMap encodings_;
+  static uint32_t Hash(Address key) {
+    return reinterpret_cast<uint32_t>(key) >> 2;
+  }
+
+  int IndexOf(Address key) const;
+
+  static bool Match(void* key1, void* key2) { return key1 == key2; }
+
+  void Put(Address key, int index);
+};
+
+
+// Inverse of ExternalReferenceEncoder: maps 32-bit encodings back to
+// external reference addresses via per-type lookup tables.
+class ExternalReferenceDecoder {
+ public:
+  ExternalReferenceDecoder();
+  ~ExternalReferenceDecoder();
+
+  // Return the address encoded by 'key'; 0 decodes to NULL.
+  Address Decode(uint32_t key) const {
+    if (key == 0) return NULL;
+    return *Lookup(key);
+  }
+
+ private:
+  // Table of tables: encodings_[type][id] holds the decoded address.
+  Address** encodings_;
+
+  // Return the slot for 'key', splitting it into type and id parts.
+  Address* Lookup(uint32_t key) const {
+    int type = key >> kReferenceTypeShift;
+    ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
+    int id = key & kReferenceIdMask;
+    return &encodings_[type][id];
+  }
+
+  void Put(uint32_t key, Address value) {
+    *Lookup(key) = value;
+  }
+};
+
+
+// A Serializer recursively visits objects to construct a serialized
+// representation of the Heap stored in a string. Serialization is
+// destructive. We use a similar mechanism to the GC to ensure that
+// each object is visited once, namely, we modify the map pointer of
+// each visited object to contain the relative address in the
+// appropriate space where that object will be allocated when the heap
+// is deserialized.
+
+
+// Helper classes defined in serialize.cc.
+class RelativeAddress;
+class SimulatedHeapSpace;
+class SnapshotWriter;
+
+
+class Serializer: public ObjectVisitor {
+ public:
+  Serializer();
+
+  virtual ~Serializer();
+
+  // Serialize the current state of the heap. This operation destroys the
+  // heap contents and the contents of the roots into the heap.
+  void Serialize();
+
+  // Returns the serialized buffer. Ownership is transferred to the
+  // caller. Only the destructor and getters may be called after this call.
+  void Finalize(char** str, int* len);
+
+  // Statistics from the last serialization run.
+  int roots() { return roots_; }
+  int objects() { return objects_; }
+
+#ifdef DEBUG
+  // insert "tag" into the serialized stream
+  virtual void Synchronize(const char* tag);
+#endif
+
+  static bool enabled() { return serialization_enabled_; }
+
+  static void disable() { serialization_enabled_ = false; }
+
+ private:
+  // ObjectVisitor hooks used while iterating roots and object bodies.
+  virtual void VisitPointers(Object** start, Object** end);
+  virtual void VisitExternalReferences(Address* start, Address* end);
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo);
+
+  // Write an encoded address ('P' + bits) into the stream.
+  void PutEncodedAddress(Address addr);
+  // Write the global flags into the file.
+  void PutFlags();
+  // Write global information into the header of the file.
+  void PutHeader();
+  // Write the contents of the log into the file.
+  void PutLog();
+  // Serialize 'obj', and return its encoded RelativeAddress.
+  Address PutObject(HeapObject* obj);
+  // Write a stack of handles to the file bottom first.
+  void PutGlobalHandleStack(const List<Handle<Object> >& stack);
+  // Write the context stack into the file.
+  void PutContextStack();
+
+  // Return the encoded RelativeAddress where this object will be
+  // allocated on deserialization. On the first visit of 'o',
+  // serialize its contents. On return, *serialized will be true iff
+  // 'o' has just been serialized.
+  Address Encode(Object* o, bool* serialized);
+
+  // Simulate the allocation of 'obj', returning the address where it will
+  // be allocated on deserialization
+  RelativeAddress Allocate(HeapObject* obj);
+
+  // Reset the per-space simulated allocators to empty heaps.
+  void InitializeAllocators();
+
+  SnapshotWriter* writer_;
+  bool root_;  // serializing a root?
+  int roots_;  // number of roots visited
+  int objects_;  // number of objects serialized
+
+  static bool serialization_enabled_;
+
+  int flags_end_;  // The position right after the flags.
+
+  // An array of per-space SimulatedHeapSpaces used as memory allocators.
+  SimulatedHeapSpace* allocator_[LAST_SPACE+1];
+  // A list of global handles at serialization time.
+  List<Object**> global_handles_;
+
+  ExternalReferenceEncoder* reference_encoder_;
+
+  DISALLOW_EVIL_CONSTRUCTORS(Serializer);
+};
+
+// Helper class to read the bytes of the serialized heap.
+
+class SnapshotReader {
+ public:
+  // Reads from 'str' of length 'len'; the buffer is borrowed, not owned.
+  SnapshotReader(char* str, int len): str_(str), end_(str + len) {}
+
+  // Consume one character and (debug-only) check that it matches.
+  void ExpectC(char expected) {
+    int c = GetC();
+    USE(c);
+    ASSERT(c == expected);
+  }
+
+  // Return the next character, or EOF at the end of the buffer.
+  int GetC() {
+    if (str_ >= end_) return EOF;
+    return *str_++;
+  }
+
+  // Read a raw binary int, as written by SnapshotWriter::PutInt.
+  int GetInt() {
+    // Bounds-check in debug mode, matching GetBytes below.
+    ASSERT(str_ + sizeof(int) <= end_);
+    int result = *reinterpret_cast<int*>(str_);
+    str_ += sizeof(result);
+    return result;
+  }
+
+  // Copy 'size' raw bytes into 'a'.
+  void GetBytes(Address a, int size) {
+    ASSERT(str_ + size <= end_);
+    memcpy(a, str_, size);
+    str_ += size;
+  }
+
+  // Read a string in the '[' <length> ']' <bytes> format produced by
+  // SnapshotWriter::InsertString.  The caller owns the NUL-terminated
+  // result array.
+  char* GetString() {
+    ExpectC('[');
+    int size = GetInt();
+    ExpectC(']');
+    char* s = NewArray<char>(size + 1);
+    GetBytes(reinterpret_cast<Address>(s), size);
+    s[size] = 0;
+    return s;
+  }
+
+ private:
+  char* str_;  // Current read position.
+  char* end_;  // One past the last readable byte.
+};
+
+
+// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+
+class Deserializer: public ObjectVisitor {
+ public:
+  // Create a deserializer. The snapshot is held in str and has size len.
+  // Ownership of str is not assumed by the Deserializer.
+  Deserializer(char* str, int len);
+
+  virtual ~Deserializer();
+
+  // Read the flags from the header of the file, and set those that
+  // should be inherited from the snapshot.
+  void GetFlags();
+
+  // Read saved profiling information from the file and log it if required.
+  void GetLog();
+
+  // Deserialize the snapshot into an empty heap.
+  void Deserialize();
+
+  // Statistics from the last deserialization run.
+  int roots() { return roots_; }
+  int objects() { return objects_; }
+
+#ifdef DEBUG
+  // Check for the presence of "tag" in the serialized stream
+  virtual void Synchronize(const char* tag);
+#endif
+
+ private:
+  // ObjectVisitor hooks used while iterating roots and object bodies.
+  virtual void VisitPointers(Object** start, Object** end);
+  virtual void VisitExternalReferences(Address* start, Address* end);
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo);
+
+  // Read an encoded address ('P' + bits) from the stream.
+  Address GetEncodedAddress();
+
+  // Read other global information (except flags) from the header of the file.
+  void GetHeader();
+  // Read a stack of handles from the file bottom first.
+  void GetGlobalHandleStack(List<Handle<Object> >* stack);
+  // Read the context stack from the file.
+  void GetContextStack();
+
+  // Read one object; the counterpart of Serializer::PutObject.
+  Object* GetObject();
+
+  // Get the encoded address. In debug mode we make sure
+  // it matches the given expectations.
+  void ExpectEncodedAddress(Address expected);
+
+  // Given an encoded address (the result of
+  // RelativeAddress::Encode), return the object to which it points,
+  // which will be either an Smi or a HeapObject in the current heap.
+  Object* Resolve(Address encoded_address);
+
+  SnapshotReader reader_;
+  bool root_;  // Deserializing a root?
+  int roots_;  // number of roots visited
+  int objects_;  // number of objects serialized
+
+  bool has_log_;  // The file has log information.
+
+  // Resolve caches the following:
+  List<Page*> map_pages_;  // All pages in the map space.
+  List<Page*> old_pages_;  // All pages in the old space.
+  List<Page*> code_pages_;  // All pages in the code space.
+  List<Object*> large_objects_;  // All known large objects.
+  // A list of global handles at deserialization time.
+  List<Object**> global_handles_;
+
+  ExternalReferenceDecoder* reference_decoder_;
+
+#ifdef DEBUG
+  // True if the snapshot contains synchronization tags (see GetHeader).
+  bool expect_debug_information_;
+#endif
+
+  DISALLOW_EVIL_CONSTRUCTORS(Deserializer);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_SERIALIZE_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A simple interactive shell. Enable with --shell.
+
+#include "../public/debug.h"
+
+namespace v8 { namespace internal {
+
+// Debug event handler for interactive debugging.
+void handle_debug_event(v8::DebugEvent event,
+ v8::Handle<v8::Object> exec_state,
+ v8::Handle<v8::Object> event_data,
+ v8::Handle<Value> data);
+
+
+// Static entry points for the interactive shell (enabled with --shell).
+class Shell {
+ public:
+  // Print a representation of 'obj'; implementation lives in the .cc file.
+  static void PrintObject(v8::Handle<v8::Value> obj);
+  // Run the read-eval loop, executing code in the specified
+  // environment.
+  static void Run(v8::Handle<v8::Context> context);
+};
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "disasm.h"
+#include "constants-arm.h"
+#include "simulator-arm.h"
+
+#if !defined(__arm__)
+
+// Only build the simulator if not compiling for real ARM hardware.
+namespace assembler { namespace arm {
+
+using ::v8::internal::Object;
+using ::v8::internal::PrintF;
+using ::v8:: internal::ReadLine;
+using ::v8:: internal::DeleteArray;
+
+
+// When set, InstructionDecode prints a disassembly of every simulated
+// instruction (see Simulator::InstructionDecode below).
+DEFINE_bool(trace_sim, false, "trace simulator execution");
+
+
+// The Debugger class is used by the simulator while debugging simulated ARM
+// code.
+class Debugger {
+ public:
+  explicit Debugger(Simulator* sim);
+  ~Debugger();
+
+  // Called when the simulator hits a stop instruction: prints the stop
+  // message and enters the interactive shell.
+  void Stop(Instr* instr);
+  // Interactive command loop (si/cont/print/printobject/disasm/gdb/
+  // break/del/flags).
+  void Debug();
+
+ private:
+  // The instruction bits used to mark a breakpoint in simulated code
+  // (condition AL with the break_point SWI code).
+  static const instr_t kBreakpointInstr =
+      (AL << 28 | 7 << 25 | 1 << 24 | break_point);
+
+  Simulator* sim_;  // the simulator being debugged (not owned)
+
+  // Parse a register name or numeric literal into *value.
+  bool GetValue(char* desc, int32_t* value);
+
+  // Set or delete a breakpoint. Returns true if successful.
+  bool SetBreakpoint(Instr* breakpc);
+  bool DeleteBreakpoint(Instr* breakpc);
+
+  // Undo and redo all breakpoints. This is needed to bracket disassembly and
+  // execution to skip past breakpoints when run from the debugger.
+  void UndoBreakpoints();
+  void RedoBreakpoints();
+};
+
+
+// The debugger operates on an existing simulator instance and takes no
+// ownership of it.
+Debugger::Debugger(Simulator* sim) : sim_(sim) {
+}
+
+
+// Nothing to release: sim_ is not owned by the debugger.
+Debugger::~Debugger() {}
+
+// Handle a stop instruction hit by the simulator: print the attached
+// message, step past the instruction, and enter the interactive shell.
+// NOTE(review): the low 28 bits of the instruction are reinterpreted as a
+// char* to the message -- this assumes the emitter embedded a pointer that
+// fits in 28 bits; confirm against the stop-instruction assembler code.
+void Debugger::Stop(Instr* instr) {
+  const char* str = (const char*)(instr->InstructionBits() & 0x0fffffff);
+  PrintF("Simulator hit %s\n", str);
+  // Advance the pc so that continuing does not re-trigger the stop.
+  sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
+  Debug();
+}
+
+
+// Register name table for the debugger shell. Canonical names r0..r15 are
+// followed by the aliases pc, lr, sp, ip, fp, sl; the empty string
+// terminates the table. reg_nums maps each name, in the same order, to its
+// register number. Declared const: binding string literals to a mutable
+// char* is a deprecated conversion and the tables are never written.
+static const char* reg_names[] = { "r0",  "r1",  "r2",  "r3",
+                                   "r4",  "r5",  "r6",  "r7",
+                                   "r8",  "r9",  "r10", "r11",
+                                   "r12", "r13", "r14", "r15",
+                                   "pc",  "lr",  "sp",  "ip",
+                                   "fp",  "sl",  ""};
+
+static const int reg_nums[] = {  0,  1,  2,  3,
+                                 4,  5,  6,  7,
+                                 8,  9, 10, 11,
+                                12, 13, 14, 15,
+                                15, 14, 13, 12,
+                                11, 10};
+
+
+// Look up a register name (canonical or alias) in reg_names and return the
+// corresponding register number, or -1 if the name is unknown. The table is
+// terminated by an empty string. The parameter is const-correct; callers
+// passing char* are unaffected.
+static int RegNameToRegNum(const char* name) {
+  int reg = 0;
+  while (*reg_names[reg] != 0) {
+    if (strcmp(reg_names[reg], name) == 0) {
+      return reg_nums[reg];
+    }
+    reg++;
+  }
+  return -1;
+}
+
+
+// Parse desc as either a register name or a numeric literal (sscanf %i:
+// decimal, 0x-prefixed hex, or 0-prefixed octal) and store the result in
+// *value. Returns false if the string is neither.
+bool Debugger::GetValue(char* desc, int32_t* value) {
+  int regnum = RegNameToRegNum(desc);
+  if (regnum >= 0) {
+    if (regnum == 15) {
+      // r15 is the pc; read it through the raw accessor so no read offset
+      // is applied.
+      *value = sim_->get_pc();
+    } else {
+      *value = sim_->get_register(regnum);
+    }
+    return true;
+  }
+  // Not a register name: fall back to numeric parsing. (The original
+  // trailing "return false" after the if/else was unreachable; removed.)
+  return sscanf(desc, "%i", value) == 1;
+}
+
+
+// Install a breakpoint at the given address. Only one breakpoint is
+// supported at a time; the request is rejected, with no side-effects, if
+// one is already active. The breakpoint instruction itself is only written
+// into the code when the debugger shell continues (see RedoBreakpoints).
+bool Debugger::SetBreakpoint(Instr* breakpc) {
+  bool available = (sim_->break_pc_ == NULL);
+  if (available) {
+    // Remember the location and the original instruction bits.
+    sim_->break_pc_ = breakpc;
+    sim_->break_instr_ = breakpc->InstructionBits();
+  }
+  return available;
+}
+
+
+// Remove the active breakpoint (if any), restoring the original instruction
+// bits. The argument is unused; this always clears the single supported
+// breakpoint and always reports success.
+bool Debugger::DeleteBreakpoint(Instr* breakpc) {
+  Instr* active = sim_->break_pc_;
+  if (active != NULL) {
+    active->SetInstructionBits(sim_->break_instr_);
+  }
+  sim_->break_instr_ = 0;
+  sim_->break_pc_ = NULL;
+  return true;
+}
+
+
+// Temporarily restore the original instruction at the breakpoint location,
+// making the breakpoint invisible to shell commands (see Debug()).
+void Debugger::UndoBreakpoints() {
+  Instr* active = sim_->break_pc_;
+  if (active != NULL) {
+    active->SetInstructionBits(sim_->break_instr_);
+  }
+}
+
+
+// Re-arm the breakpoint by writing the breakpoint instruction back into the
+// code, so execution stops there again after leaving the shell.
+void Debugger::RedoBreakpoints() {
+  Instr* active = sim_->break_pc_;
+  if (active != NULL) {
+    active->SetInstructionBits(kBreakpointInstr);
+  }
+}
+
+
+// Interactive debugger shell. Reads commands from stdin at a "sim> " prompt
+// and executes them against the simulator state until the user continues
+// ("c"/"cont") or input ends.
+void Debugger::Debug() {
+  intptr_t last_pc = -1;
+  bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+
+  // make sure to have a proper terminating character if reaching the limit
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  // Undo all set breakpoints while running in the debugger shell. This will
+  // make them invisible to all commands.
+  UndoBreakpoints();
+
+  while (!done) {
+    // Disassemble the instruction at the current pc, but only when the pc
+    // has changed since the previous prompt.
+    if (last_pc != sim_->get_pc()) {
+      disasm::Disassembler dasm;
+      char buffer[256]; // use a reasonably large buffer
+      dasm.InstructionDecode(buffer, sizeof(buffer),
+                             reinterpret_cast<byte*>(sim_->get_pc()));
+      PrintF(" 0x%x %s\n", sim_->get_pc(), buffer);
+      last_pc = sim_->get_pc();
+    }
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int args = sscanf(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        // Execute exactly one instruction.
+        sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+        // Execute the one instruction we broke at with breakpoints disabled.
+        sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+        // Leave the debugger shell.
+        done = true;
+      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            PrintF("%s: %d 0x%x\n", arg1, value, value);
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("print value\n");
+        }
+      } else if ((strcmp(cmd, "po") == 0)
+                 || (strcmp(cmd, "printobject") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            USE(obj);
+            PrintF("%s: \n", arg1);
+            // Heap-object printing is only compiled into debug builds.
+#if defined(DEBUG)
+            obj->PrintLn();
+#endif // defined(DEBUG)
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("printobject value\n");
+        }
+      } else if (strcmp(cmd, "disasm") == 0) {
+        disasm::Disassembler dasm;
+        char buffer[256]; // use a reasonably large buffer
+
+        byte* cur = NULL;
+        byte* end = NULL;
+
+        if (args == 1) {
+          // No arguments: disassemble 10 instructions starting at the pc.
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
+          end = cur + (10 * Instr::kInstrSize);
+        } else if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte*>(value);
+            // no length parameter passed, assume 10 instructions
+            end = cur + (10 * Instr::kInstrSize);
+          }
+        } else {
+          // Two arguments: start address and instruction count.
+          int32_t value1;
+          int32_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte*>(value1);
+            end = cur + (value2 * Instr::kInstrSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, sizeof(buffer), cur);
+          PrintF(" 0x%x %s\n", cur, buffer);
+          cur += Instr::kInstrSize;
+        }
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("relinquishing control to gdb\n");
+        // NOTE(review): "int $3" is an ia32 breakpoint; this works because
+        // the simulator itself runs on the host, not on ARM.
+        asm("int $3");
+        PrintF("regaining control from gdb\n");
+      } else if (strcmp(cmd, "break") == 0) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
+              PrintF("setting breakpoint failed\n");
+            }
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("break addr\n");
+        }
+      } else if (strcmp(cmd, "del") == 0) {
+        if (!DeleteBreakpoint(NULL)) {
+          PrintF("deleting breakpoint failed\n");
+        }
+      } else if (strcmp(cmd, "flags") == 0) {
+        PrintF("N flag: %d; ", sim_->n_flag_);
+        PrintF("Z flag: %d; ", sim_->z_flag_);
+        PrintF("C flag: %d; ", sim_->c_flag_);
+        PrintF("V flag: %d\n", sim_->v_flag_);
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+      }
+    }
+    DeleteArray(line);
+  }
+
+  // Add all the breakpoints back to stop execution and enter the debugger
+  // shell when hit.
+  RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+Simulator::Simulator() {
+  // Setup simulator support first. Some of this information is needed to
+  // setup the architecture state.
+  size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
+  // NOTE(review): the malloc result is not checked; an allocation failure
+  // would only surface later as a fault on the first stack access.
+  stack_ = reinterpret_cast<char*>(malloc(stack_size));
+  pc_modified_ = false;
+  icount_ = 0;       // number of instructions executed so far
+  break_pc_ = NULL;  // no breakpoint installed initially
+  break_instr_ = 0;
+
+  // Setup architecture state.
+  // All registers are initialized to zero to start with.
+  for (int i = 0; i < num_registers; i++) {
+    registers_[i] = 0;
+  }
+  n_flag_ = false;
+  z_flag_ = false;
+  c_flag_ = false;
+  v_flag_ = false;
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To be safe in potential stack underflows we leave
+  // some buffer below.
+  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+  // The lr and pc are initialized to a known bad value that will cause an
+  // access violation if the simulator ever tries to execute it.
+  registers_[pc] = bad_lr;
+  registers_[lr] = bad_lr;
+}
+
+
+// This is the Simulator singleton. Currently only one thread is supported by
+// V8. If we had multiple threads, then we should have a Simulator instance on
+// a per thread basis. NULL until the first call to Simulator::current().
+static Simulator* the_sim = NULL;
+
+
+// Get the active Simulator for the current thread. See comment above about
+// using a singleton currently. The instance is created lazily on first use
+// and is never deleted here.
+Simulator* Simulator::current() {
+  if (the_sim == NULL) {
+    the_sim = new Simulator();
+  }
+  return the_sim;
+}
+
+
+// Write a register in the architecture state. Writing the pc additionally
+// records that the pc was explicitly modified, so the main decode loop
+// knows not to advance it afterwards.
+void Simulator::set_register(int reg, int32_t value) {
+  ASSERT((reg >= 0) && (reg < num_registers));
+  pc_modified_ = pc_modified_ || (reg == pc);
+  registers_[reg] = value;
+}
+
+
+// Read a register from the architecture state. Reading the pc yields the
+// stored value plus Instr::kPCReadOffset; all other registers are returned
+// as stored.
+int32_t Simulator::get_register(int reg) const {
+  ASSERT((reg >= 0) && (reg < num_registers));
+  if (reg == pc) {
+    return registers_[pc] + Instr::kPCReadOffset;
+  }
+  return registers_[reg];
+}
+
+
+// Raw write access to the pc register; also marks the pc as explicitly
+// modified so the decode loop does not advance it again.
+void Simulator::set_pc(int32_t value) {
+  registers_[pc] = value;
+  pc_modified_ = true;
+}
+
+
+// Raw access to the PC register without the special adjustment when reading.
+// (Compare get_register, which adds Instr::kPCReadOffset for reg == pc.)
+int32_t Simulator::get_pc() const {
+  return registers_[pc];
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+// This is the lowest address the simulated sp may grow down to.
+uintptr_t Simulator::StackLimit() const {
+  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // pushing values.
+  return reinterpret_cast<uintptr_t>(stack_) + 256;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+// NOTE(review): the instruction pointer is printed with %x, which assumes a
+// 32-bit host; confirm if this file is ever built on 64-bit hosts.
+void Simulator::Format(Instr* instr, const char* format) {
+  PrintF("Simulator found unsupported instruction:\n 0x%x: %s\n",
+         instr, format);
+  UNIMPLEMENTED();
+}
+
+
+// Checks if the current instruction should be executed based on its
+// condition bits, evaluated against the simulated N/Z/C/V flags.
+bool Simulator::ConditionallyExecute(Instr* instr) {
+  switch (instr->ConditionField()) {
+    case EQ: return z_flag_;
+    case NE: return !z_flag_;
+    case CS: return c_flag_;
+    case CC: return !c_flag_;
+    case MI: return n_flag_;
+    case PL: return !n_flag_;
+    case VS: return v_flag_;
+    case VC: return !v_flag_;
+    case HI: return c_flag_ && !z_flag_;
+    case LS: return !c_flag_ || z_flag_;
+    case GE: return n_flag_ == v_flag_;
+    case LT: return n_flag_ != v_flag_;
+    case GT: return !z_flag_ && (n_flag_ == v_flag_);
+    case LE: return z_flag_ || (n_flag_ != v_flag_);
+    case AL: return true;
+    default: UNREACHABLE();
+  }
+  // Not reached; keeps compilers happy about the missing return.
+  return false;
+}
+
+
+// Calculate and set the Negative and Zero flags from an ALU result.
+void Simulator::SetNZFlags(int32_t val) {
+  n_flag_ = (val < 0);
+  z_flag_ = (val == 0);
+}
+
+
+// Set the Carry flag (from an ALU carry-out or the shifter carry-out).
+void Simulator::SetCFlag(bool val) {
+  c_flag_ = val;
+}
+
+
+// Set the oVerflow flag.
+void Simulator::SetVFlag(bool val) {
+  v_flag_ = val;
+}
+
+
+// Calculate the C flag value for additions: a carry-out occurs exactly when
+// the unsigned sum left + right wraps past 2^32 - 1.
+bool Simulator::CarryFrom(int32_t left, int32_t right) {
+  uint32_t uleft = static_cast<uint32_t>(left);
+  uint32_t uright = static_cast<uint32_t>(right);
+  return uright > (0xffffffffU - uleft);
+}
+
+
+// Calculate the C flag value for subtractions: a borrow occurs exactly when
+// the unsigned subtrahend exceeds the unsigned minuend.
+bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+  return static_cast<uint32_t>(right) > static_cast<uint32_t>(left);
+}
+
+
+// Calculate the V flag value for additions and subtractions, using sign-bit
+// tests: signed overflow occurred iff the operands' signs permit it and the
+// result's sign disagrees with the first operand's sign.
+bool Simulator::OverflowFrom(int32_t alu_out,
+                             int32_t left, int32_t right, bool addition) {
+  // (left ^ alu_out) < 0 iff the result's sign differs from left's sign.
+  bool result_sign_differs = ((left ^ alu_out) < 0);
+  if (addition) {
+    // Addition can only overflow when both operands have the same sign.
+    return ((left ^ right) >= 0) && result_sign_differs;
+  }
+  // Subtraction can only overflow when the operands have different signs.
+  return ((left ^ right) < 0) && result_sign_differs;
+}
+
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with register.
+// Returns the shifted value of Rm and stores the shifter carry-out in
+// *carry_out. The shift amount comes either from an immediate field
+// (bit 4 clear) or from the low byte of register Rs (bit 4 set).
+int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
+  Shift shift = instr->ShiftField();
+  int shift_amount = instr->ShiftAmountField();
+  int32_t result = get_register(instr->RmField());
+  if (instr->Bit(4) == 0) {
+    // by immediate
+    if ((shift == ROR) && (shift_amount == 0)) {
+      UNIMPLEMENTED();
+      return result;
+    } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+      // An encoded amount of 0 means a shift by 32 for LSR and ASR.
+      shift_amount = 32;
+    }
+    switch (shift) {
+      case ASR: {
+        if (shift_amount == 0) {
+          if (result < 0) {
+            result = 0xffffffff;
+            *carry_out = true;
+          } else {
+            result = 0;
+            *carry_out = false;
+          }
+        } else {
+          // Shift by amount-1 first so the last bit shifted out can be
+          // captured as the carry before the final single-bit shift.
+          result >>= (shift_amount - 1);
+          *carry_out = (result & 1) == 1;
+          result >>= 1;
+        }
+        break;
+      }
+
+      case LSL: {
+        if (shift_amount == 0) {
+          // LSL #0: value unchanged, carry is the existing C flag.
+          *carry_out = c_flag_;
+        } else {
+          result <<= (shift_amount - 1);
+          *carry_out = (result < 0);
+          result <<= 1;
+        }
+        break;
+      }
+
+      case LSR: {
+        if (shift_amount == 0) {
+          result = 0;
+          *carry_out = c_flag_;
+        } else {
+          // Logical shift must not sign-extend, so do it on the unsigned
+          // representation.
+          uint32_t uresult = static_cast<uint32_t>(result);
+          uresult >>= (shift_amount - 1);
+          *carry_out = (uresult & 1) == 1;
+          uresult >>= 1;
+          result = static_cast<int32_t>(uresult);
+        }
+        break;
+      }
+
+      case ROR: {
+        UNIMPLEMENTED();
+        break;
+      }
+
+      default: {
+        UNREACHABLE();
+        break;
+      }
+    }
+  } else {
+    // by register: only the low byte of Rs is used as the shift amount.
+    int rs = instr->RsField();
+    shift_amount = get_register(rs) & 0xff;
+    switch (shift) {
+      case ASR: {
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else if (shift_amount < 32) {
+          result >>= (shift_amount - 1);
+          *carry_out = (result & 1) == 1;
+          result >>= 1;
+        } else {
+          // Shifts of 32 or more replicate the sign bit into all bits.
+          ASSERT(shift_amount >= 32);
+          if (result < 0) {
+            *carry_out = true;
+            result = 0xffffffff;
+          } else {
+            *carry_out = false;
+            result = 0;
+          }
+        }
+        break;
+      }
+
+      case LSL: {
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else if (shift_amount < 32) {
+          result <<= (shift_amount - 1);
+          *carry_out = (result < 0);
+          result <<= 1;
+        } else if (shift_amount == 32) {
+          *carry_out = (result & 1) == 1;
+          result = 0;
+        } else {
+          ASSERT(shift_amount > 32);
+          *carry_out = false;
+          result = 0;
+        }
+        break;
+      }
+
+      case LSR: {
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else if (shift_amount < 32) {
+          uint32_t uresult = static_cast<uint32_t>(result);
+          uresult >>= (shift_amount - 1);
+          *carry_out = (uresult & 1) == 1;
+          uresult >>= 1;
+          result = static_cast<int32_t>(uresult);
+        } else if (shift_amount == 32) {
+          *carry_out = (result < 0);
+          result = 0;
+        } else {
+          *carry_out = false;
+          result = 0;
+        }
+        break;
+      }
+
+      case ROR: {
+        UNIMPLEMENTED();
+        break;
+      }
+
+      default: {
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+  return result;
+}
+
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with immediate: the 8-bit
+// immediate rotated right by twice the 4-bit rotate field.
+int32_t Simulator::GetImm(Instr* instr, bool* carry_out) {
+  int rotate = instr->RotateField() * 2;
+  int immed8 = instr->Immed8Field();
+  // Guard rotate == 0 separately: the general expression would evaluate
+  // immed8 << (32 - 0), and a shift by the full bit-width is undefined
+  // behavior in C++.
+  int imm = (rotate == 0)
+      ? immed8
+      : ((immed8 >> rotate) | (immed8 << (32 - rotate)));
+  // With no rotation the carry is the existing C flag; otherwise it is
+  // bit 31 of the rotated immediate.
+  *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
+  return imm;
+}
+
+
+// Count the number of set bits in a register-list bit vector (popcount).
+static int count_bits(int bit_vector) {
+  // Operate on an unsigned copy: right-shifting a negative signed value is
+  // implementation-defined and, with an arithmetic shift, never clears the
+  // sign bit -- the loop would not terminate.
+  unsigned int bits = static_cast<unsigned int>(bit_vector);
+  int count = 0;
+  while (bits != 0) {
+    // Parenthesized deliberately: != binds tighter than &, so the original
+    // "bits & 1 != 0" only worked by accident.
+    if ((bits & 1) != 0) {
+      count++;
+    }
+    bits >>= 1;
+  }
+  return count;
+}
+
+
+// Addressing Mode 4 - Load and Store Multiple
+// Computes the address range for the register-list transfer, performs the
+// base-register writeback if requested, then loads or stores each register
+// named in the rlist bit vector, lowest register at the lowest address.
+void Simulator::HandleRList(Instr* instr, bool load) {
+  int rn = instr->RnField();
+  int32_t rn_val = get_register(rn);
+  int rlist = instr->RlistField();
+  int num_regs = count_bits(rlist);
+
+  intptr_t start_address = 0;
+  intptr_t end_address = 0;
+  switch (instr->PUField()) {
+    case 0: {
+      // Print("da");
+      UNIMPLEMENTED();
+      break;
+    }
+    case 1: {
+      // Print("ia");
+      start_address = rn_val;
+      end_address = rn_val + (num_regs * 4) - 4;
+      rn_val = rn_val + (num_regs * 4);
+      break;
+    }
+    case 2: {
+      // Print("db");
+      start_address = rn_val - (num_regs * 4);
+      end_address = rn_val - 4;
+      rn_val = start_address;
+      break;
+    }
+    case 3: {
+      // Print("ib");
+      UNIMPLEMENTED();
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  // Writeback of the updated base register, if the W bit is set.
+  if (instr->HasW()) {
+    set_register(rn, rn_val);
+  }
+  intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+  int reg = 0;
+  while (rlist != 0) {
+    if ((rlist & 1) != 0) {
+      if (load) {
+        set_register(reg, *address);
+      } else {
+        *address = get_register(reg);
+      }
+      address += 1;
+    }
+    reg++;
+    rlist >>= 1;
+  }
+  // Sanity check: we must have ended one word past end_address.
+  ASSERT(end_address == ((intptr_t)address) - 4);
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the r1 result register contains a bogus
+// value, which is fine because it is caller-saved.
+// (arg0/arg1 are taken from r0/r1; see SoftwareInterrupt.)
+typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
+
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime (call_rt_r5 / call_rt_r2) and to implement breakpoints
+// (break_point).
+void Simulator::SoftwareInterrupt(Instr* instr) {
+  switch (instr->SwiField()) {
+    case call_rt_r5:
+    case call_rt_r2: {
+      // The two runtime-call SWIs are identical except for the register
+      // holding the call target; the previously duplicated branches have
+      // been merged.
+      int target_reg = (instr->SwiField() == call_rt_r5) ? r5 : r2;
+      SimulatorRuntimeCall target =
+          reinterpret_cast<SimulatorRuntimeCall>(get_register(target_reg));
+      intptr_t arg0 = get_register(r0);
+      intptr_t arg1 = get_register(r1);
+      // All runtime calls are assumed to return 64 bits of result; see the
+      // comment above the SimulatorRuntimeCall typedef.
+      int64_t result = target(arg0, arg1);
+      int32_t lo_res = static_cast<int32_t>(result);
+      int32_t hi_res = static_cast<int32_t>(result >> 32);
+      set_register(r0, lo_res);
+      set_register(r1, hi_res);
+      // Resume at the instruction following the SWI.
+      set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+      break;
+    }
+    case break_point: {
+      // Enter the interactive debugger shell.
+      Debugger dbg(this);
+      dbg.Debug();
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+}
+
+
+// Handle execution based on instruction types.
+
+// Instruction types 0 and 1 are both rolled into one function because they
+// only differ in the handling of the shifter_operand. Type 0 additionally
+// covers the "special" encodings: multiply/multiply-long (bits 7..4 == 9)
+// and the extra (halfword / signed-byte) loads and stores.
+void Simulator::DecodeType01(Instr* instr) {
+  int type = instr->TypeField();
+  if ((type == 0) && instr->IsSpecialType0()) {
+    // multiply instruction or extra loads and stores
+    if (instr->Bits(7, 4) == 9) {
+      if (instr->Bit(24) == 0) {
+        // multiply instructions
+        int rd = instr->RdField();
+        int rm = instr->RmField();
+        int rs = instr->RsField();
+        int32_t rs_val = get_register(rs);
+        int32_t rm_val = get_register(rm);
+        if (instr->Bit(23) == 0) {
+          if (instr->Bit(21) == 0) {
+            // Format(instr, "mul'cond's 'rd, 'rm, 'rs");
+            int32_t alu_out = rm_val * rs_val;
+            set_register(rd, alu_out);
+            if (instr->HasS()) {
+              SetNZFlags(alu_out);
+            }
+          } else {
+            Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
+          }
+        } else {
+          // Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
+          int rn = instr->RnField();
+          int32_t hi_res = 0;
+          int32_t lo_res = 0;
+          if (instr->Bit(22) == 0) {
+            // signed multiply
+            UNIMPLEMENTED();
+          } else {
+            // unsigned multiply
+            // BUG FIX: the operands must be zero-extended to 64 bits.
+            // Assigning the int32_t values directly sign-extended them,
+            // producing a wrong product for operands with bit 31 set.
+            uint64_t left_op = static_cast<uint32_t>(rm_val);
+            uint64_t right_op = static_cast<uint32_t>(rs_val);
+            uint64_t result = left_op * right_op;
+            hi_res = static_cast<int32_t>(result >> 32);
+            lo_res = static_cast<int32_t>(result & 0xffffffff);
+          }
+          set_register(rn, hi_res);
+          set_register(rd, lo_res);
+          if (instr->HasS()) {
+            UNIMPLEMENTED();
+          }
+        }
+      } else {
+        UNIMPLEMENTED(); // not used by V8
+      }
+    } else {
+      // extra load/store instructions (halfword and signed-byte transfers)
+      int rd = instr->RdField();
+      int rn = instr->RnField();
+      int32_t rn_val = get_register(rn);
+      int32_t addr = 0;
+      if (instr->Bit(22) == 0) {
+        // register offset
+        int rm = instr->RmField();
+        int32_t rm_val = get_register(rm);
+        switch (instr->PUField()) {
+          case 0: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val -= rm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 1: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val += rm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 2: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+            rn_val -= rm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          case 3: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+            rn_val += rm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          default: {
+            // The PU field is a 2-bit field.
+            UNREACHABLE();
+            break;
+          }
+        }
+      } else {
+        // immediate offset, split across the ImmedH/ImmedL fields
+        int32_t imm_val = (instr->ImmedHField() << 4) | instr->ImmedLField();
+        switch (instr->PUField()) {
+          case 0: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val -= imm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 1: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val += imm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 2: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+            rn_val -= imm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          case 3: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+            rn_val += imm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          default: {
+            // The PU field is a 2-bit field.
+            UNREACHABLE();
+            break;
+          }
+        }
+      }
+      if (instr->HasH()) {
+        // halfword transfer, signed or unsigned
+        if (instr->HasSign()) {
+          int16_t* haddr = reinterpret_cast<int16_t*>(addr);
+          if (instr->HasL()) {
+            int16_t val = *haddr;
+            set_register(rd, val);
+          } else {
+            int16_t val = get_register(rd);
+            *haddr = val;
+          }
+        } else {
+          uint16_t* haddr = reinterpret_cast<uint16_t*>(addr);
+          if (instr->HasL()) {
+            uint16_t val = *haddr;
+            set_register(rd, val);
+          } else {
+            uint16_t val = get_register(rd);
+            *haddr = val;
+          }
+        }
+      } else {
+        // signed byte loads
+        ASSERT(instr->HasSign());
+        ASSERT(instr->HasL());
+        int8_t* baddr = reinterpret_cast<int8_t*>(addr);
+        int8_t val = *baddr;
+        set_register(rd, val);
+      }
+      return;
+    }
+  } else {
+    // data-processing instructions
+    int rd = instr->RdField();
+    int rn = instr->RnField();
+    int32_t rn_val = get_register(rn);
+    int32_t shifter_operand = 0;
+    bool shifter_carry_out = 0;
+    if (type == 0) {
+      shifter_operand = GetShiftRm(instr, &shifter_carry_out);
+    } else {
+      ASSERT(instr->TypeField() == 1);
+      shifter_operand = GetImm(instr, &shifter_carry_out);
+    }
+    int32_t alu_out;
+
+    switch (instr->OpcodeField()) {
+      case AND: {
+        // Format(instr, "and'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "and'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val & shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case EOR: {
+        // Format(instr, "eor'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "eor'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val ^ shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case SUB: {
+        // Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "sub'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val - shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(!BorrowFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+        }
+        break;
+      }
+
+      case RSB: {
+        // Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
+        alu_out = shifter_operand - rn_val;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(!BorrowFrom(shifter_operand, rn_val));
+          SetVFlag(OverflowFrom(alu_out, shifter_operand, rn_val, false));
+        }
+        break;
+      }
+
+      case ADD: {
+        // Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "add'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val + shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(CarryFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
+        }
+        break;
+      }
+
+      case ADC: {
+        // Not implemented: Format prints an error and aborts.
+        Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+        Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+        break;
+      }
+
+      case SBC: {
+        Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
+        Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+        break;
+      }
+
+      case RSC: {
+        Format(instr, "rsc'cond's 'rd, 'rn, 'shift_rm");
+        Format(instr, "rsc'cond's 'rd, 'rn, 'imm");
+        break;
+      }
+
+      case TST: {
+        if (instr->HasS()) {
+          // Format(instr, "tst'cond 'rn, 'shift_rm");
+          // Format(instr, "tst'cond 'rn, 'imm");
+          alu_out = rn_val & shifter_operand;
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        } else {
+          UNIMPLEMENTED();
+        }
+        break;
+      }
+
+      case TEQ: {
+        if (instr->HasS()) {
+          // Format(instr, "teq'cond 'rn, 'shift_rm");
+          // Format(instr, "teq'cond 'rn, 'imm");
+          alu_out = rn_val ^ shifter_operand;
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        } else {
+          UNIMPLEMENTED();
+        }
+        break;
+      }
+
+      case CMP: {
+        if (instr->HasS()) {
+          // Format(instr, "cmp'cond 'rn, 'shift_rm");
+          // Format(instr, "cmp'cond 'rn, 'imm");
+          alu_out = rn_val - shifter_operand;
+          SetNZFlags(alu_out);
+          SetCFlag(!BorrowFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+        } else {
+          UNIMPLEMENTED();
+        }
+        break;
+      }
+
+      case CMN: {
+        if (instr->HasS()) {
+          Format(instr, "cmn'cond 'rn, 'shift_rm");
+          Format(instr, "cmn'cond 'rn, 'imm");
+        } else {
+          UNIMPLEMENTED();
+        }
+        break;
+      }
+
+      case ORR: {
+        // Format(instr, "orr'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "orr'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val | shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case MOV: {
+        // Format(instr, "mov'cond's 'rd, 'shift_rm");
+        // Format(instr, "mov'cond's 'rd, 'imm");
+        alu_out = shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case BIC: {
+        // Format(instr, "bic'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "bic'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val & ~shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case MVN: {
+        // Format(instr, "mvn'cond's 'rd, 'shift_rm");
+        // Format(instr, "mvn'cond's 'rd, 'imm");
+        alu_out = ~shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      default: {
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+}
+
+
+// Decode type 2 instructions: load/store of a word or byte with a 12-bit
+// immediate offset, including the pre/post-index and writeback modes
+// selected by the P/U/W bits.
+void Simulator::DecodeType2(Instr* instr) {
+  int rd = instr->RdField();
+  int rn = instr->RnField();
+  int32_t rn_val = get_register(rn);
+  int32_t im_val = instr->Offset12Field();
+  int32_t addr = 0;
+  switch (instr->PUField()) {
+    case 0: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
+      ASSERT(!instr->HasW());
+      addr = rn_val;
+      rn_val -= im_val;
+      set_register(rn, rn_val);
+      break;
+    }
+    case 1: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
+      ASSERT(!instr->HasW());
+      addr = rn_val;
+      rn_val += im_val;
+      set_register(rn, rn_val);
+      break;
+    }
+    case 2: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+      rn_val -= im_val;
+      addr = rn_val;
+      if (instr->HasW()) {
+        set_register(rn, rn_val);
+      }
+      break;
+    }
+    case 3: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+      rn_val += im_val;
+      addr = rn_val;
+      if (instr->HasW()) {
+        set_register(rn, rn_val);
+      }
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  if (instr->HasB()) {
+    // byte transfer
+    byte* baddr = reinterpret_cast<byte*>(addr);
+    if (instr->HasL()) {
+      byte val = *baddr;
+      set_register(rd, val);
+    } else {
+      byte val = get_register(rd);
+      *baddr = val;
+    }
+  } else {
+    // word transfer
+    intptr_t* iaddr = reinterpret_cast<intptr_t*>(addr);
+    if (instr->HasL()) {
+      set_register(rd, *iaddr);
+    } else {
+      *iaddr = get_register(rd);
+    }
+  }
+}
+
+
+// Decode type 3 instructions: load/store of a word with a (shifted)
+// register offset. Post-index modes and byte transfers are unimplemented.
+void Simulator::DecodeType3(Instr* instr) {
+  int rd = instr->RdField();
+  int rn = instr->RnField();
+  int32_t rn_val = get_register(rn);
+  bool shifter_carry_out = 0;
+  int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
+  int32_t addr = 0;
+  switch (instr->PUField()) {
+    case 0: {
+      ASSERT(!instr->HasW());
+      Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+      break;
+    }
+    case 1: {
+      ASSERT(!instr->HasW());
+      Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+      break;
+    }
+    case 2: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
+      addr = rn_val - shifter_operand;
+      if (instr->HasW()) {
+        set_register(rn, addr);
+      }
+      break;
+    }
+    case 3: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      addr = rn_val + shifter_operand;
+      if (instr->HasW()) {
+        set_register(rn, addr);
+      }
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  if (instr->HasB()) {
+    UNIMPLEMENTED();
+  } else {
+    intptr_t* iaddr = reinterpret_cast<intptr_t*>(addr);
+    if (instr->HasL()) {
+      set_register(rd, *iaddr);
+    } else {
+      *iaddr = get_register(rd);
+    }
+  }
+}
+
+
+// Decode type 4 instructions: load/store multiple (ldm/stm). The L bit
+// selects between load and store; the register-list handling is shared.
+void Simulator::DecodeType4(Instr* instr) {
+  ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
+  // ldm'cond'pu 'rn'w, 'rlist  /  stm'cond'pu 'rn'w, 'rlist
+  HandleRList(instr, instr->HasL());
+}
+
+
+// Decode type 5 instructions: branch and branch-with-link.
+void Simulator::DecodeType5(Instr* instr) {
+  // Format(instr, "b'l'cond 'target");
+  // The signed 24-bit immediate is a word offset; the +8 presumably
+  // accounts for the pc read-ahead (see Instr::kPCReadOffset), applied
+  // explicitly here because the raw pc is used below.
+  int off = (instr->SImmed24Field() << 2) + 8;
+  intptr_t pc = get_pc();
+  if (instr->HasLink()) {
+    // Save the return address (the instruction after the branch) in lr.
+    set_register(lr, pc + Instr::kInstrSize);
+  }
+  set_pc(pc+off);
+}
+
+
+// Decode type 6 instructions: none are implemented in the simulator.
+void Simulator::DecodeType6(Instr* instr) {
+  UNIMPLEMENTED();
+}
+
+
+// Decode type 7 instructions. Bit 24 set means a software interrupt, which
+// is how the simulator calls into the runtime and hits breakpoints; other
+// type 7 encodings are not implemented.
+void Simulator::DecodeType7(Instr* instr) {
+  if (instr->Bit(24) == 0) {
+    UNIMPLEMENTED();
+  } else {
+    // Format(instr, "swi 'swi");
+    SoftwareInterrupt(instr);
+  }
+}
+
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instr* instr) {
+ pc_modified_ = false;
+ if (instr->ConditionField() == special_condition) {
+ Debugger dbg(this);
+ dbg.Stop(instr);
+ return;
+ }
+ if (FLAG_trace_sim) {
+ disasm::Disassembler dasm;
+ char buffer[256]; // use a reasonably large buffer
+ dasm.InstructionDecode(buffer,
+ sizeof(buffer),
+ reinterpret_cast<byte*>(instr));
+ PrintF(" 0x%x %s\n", instr, buffer);
+ }
+ if (ConditionallyExecute(instr)) {
+ switch (instr->TypeField()) {
+ case 0:
+ case 1: {
+ DecodeType01(instr);
+ break;
+ }
+ case 2: {
+ DecodeType2(instr);
+ break;
+ }
+ case 3: {
+ DecodeType3(instr);
+ break;
+ }
+ case 4: {
+ DecodeType4(instr);
+ break;
+ }
+ case 5: {
+ DecodeType5(instr);
+ break;
+ }
+ case 6: {
+ DecodeType6(instr);
+ break;
+ }
+ case 7: {
+ DecodeType7(instr);
+ break;
+ }
+ default: {
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ }
+ if (!pc_modified_) {
+ set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ }
+}
+
+
+DEFINE_int(stop_sim_at, -1, "Simulator stop after x number of instructions");
+
+
+//
+void Simulator::execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+ while (program_counter != end_sim_pc) {
+ Instr* instr = reinterpret_cast<Instr*>(program_counter);
+ icount_++;
+ if (icount_ == FLAG_stop_sim_at) {
+ Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+}
+
+
+Object* Simulator::call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
+ int32_t p3, int32_t p4) {
+ // Setup parameters
+ set_register(r0, p0);
+ set_register(r1, p1);
+ set_register(r2, p2);
+ set_register(r3, p3);
+ intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
+ *(--stack_pointer) = p4;
+ set_register(sp, reinterpret_cast<int32_t>(stack_pointer));
+
+ // Prepare to execute the code at entry
+ set_register(pc, entry);
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ set_register(lr, end_sim_pc);
+
+ // Start the simulation
+ execute();
+
+ int result = get_register(r0);
+ return reinterpret_cast<Object*>(result);
+}
+
+} } // namespace assembler::arm
+
+#endif // !defined(__arm__)
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Declares a Simulator for ARM instructions if we are not generating a native
+// ARM binary. This Simulator allows us to run and debug ARM code generation on
+// regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forwards to the real entry
// on an ARM HW platform.
+
+#ifndef V8_SIMULATOR_ARM_H_
+#define V8_SIMULATOR_ARM_H_
+
+#if defined(__arm__)
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ entry(p0, p1, p2, p3, p4)
+
// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (reinterpret_cast<uintptr_t>(this) - limit)
+
+#else // defined(__arm__)
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ assembler::arm::Simulator::current()->call((int32_t)entry, (int32_t)p0, \
+ (int32_t)p1, (int32_t)p2, (int32_t)p3, (int32_t)p4)
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (assembler::arm::Simulator::current()->StackLimit())
+
+
+#include "constants-arm.h"
+
+
+namespace assembler { namespace arm {
+
// Simulates the execution of ARM machine code on a non-ARM host so that
// generated code can be run and debugged on regular desktop machines.
class Simulator {
 public:
  friend class Debugger;

  // Simulated ARM register file; r13-r15 double as sp, lr and pc.
  enum Register {
    no_reg = -1,
    r0 = 0, r1, r2, r3, r4, r5, r6, r7,
    r8, r9, r10, r11, r12, r13, r14, r15,
    num_registers,
    sp = 13,
    lr = 14,
    pc = 15
  };

  Simulator();
  ~Simulator();

  // The currently executing Simulator instance. Potentially there can be one
  // for each native thread.
  static Simulator* current();

  // Accessors for register state. Reading the pc value adheres to the ARM
  // architecture specification and is off by 8 from the currently executing
  // instruction.
  void set_register(int reg, int32_t value);
  int32_t get_register(int reg) const;

  // Special case of set_register and get_register to access the raw PC value.
  void set_pc(int32_t value);
  int32_t get_pc() const;

  // Accessor to the internal simulator stack area.
  uintptr_t StackLimit() const;

  // Executes ARM instructions until the PC reaches end_sim_pc.
  void execute();

  // V8 generally calls into generated code with 5 parameters. This is a
  // convenience function, which sets up the simulator state and grabs the
  // result on return.
  v8::internal::Object* call(int32_t entry, int32_t p0, int32_t p1,
                             int32_t p2, int32_t p3, int32_t p4);

 private:
  // Sentinel pc/lr values used to control the simulation.
  enum special_values {
    // Known bad pc value to ensure that the simulator does not execute
    // without being properly setup.
    bad_lr = -1,
    // A pc value used to signal the simulator to stop execution. Generally
    // the lr is set to this value on transition from native C code to
    // simulated execution, so that the simulator can "return" to the native
    // C code.
    end_sim_pc = -2
  };

  // Unsupported instructions use Format to print an error and stop execution.
  void Format(Instr* instr, const char* format);

  // Checks if the current instruction should be executed based on its
  // condition bits.
  bool ConditionallyExecute(Instr* instr);

  // Helper functions to set the conditional flags in the architecture state.
  void SetNZFlags(int32_t val);
  void SetCFlag(bool val);
  void SetVFlag(bool val);
  bool CarryFrom(int32_t left, int32_t right);
  bool BorrowFrom(int32_t left, int32_t right);
  bool OverflowFrom(int32_t alu_out,
                    int32_t left,
                    int32_t right,
                    bool addition);

  // Helper functions to decode common "addressing" modes.
  int32_t GetShiftRm(Instr* instr, bool* carry_out);
  int32_t GetImm(Instr* instr, bool* carry_out);
  void HandleRList(Instr* instr, bool load);
  void SoftwareInterrupt(Instr* instr);

  // Executing is handled based on the instruction type.
  void DecodeType01(Instr* instr);  // both type 0 and type 1 rolled into one
  void DecodeType2(Instr* instr);
  void DecodeType3(Instr* instr);
  void DecodeType4(Instr* instr);
  void DecodeType5(Instr* instr);
  void DecodeType6(Instr* instr);
  void DecodeType7(Instr* instr);

  // Executes one instruction.
  void InstructionDecode(Instr* instr);

  // Architecture state: the 16 general registers and the NZCV flags.
  int32_t registers_[16];
  bool n_flag_;
  bool z_flag_;
  bool c_flag_;
  bool v_flag_;

  // Simulator support.
  char* stack_;       // the simulator's own stack area
  bool pc_modified_;  // set when the executed instruction wrote the pc
  int icount_;        // number of instructions executed so far

  // Registered breakpoints.
  Instr* break_pc_;
  // NOTE(review): presumably the instruction originally at break_pc_,
  // saved while the breakpoint is planted -- confirm in the .cc file.
  instr_t break_instr_;
};
+
+} } // namespace assembler::arm
+
+#endif // defined(__arm__)
+
+#endif // V8_SIMULATOR_ARM_H_
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Since there is no simulator for the ia32 architecture this file is empty.
+
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SIMULATOR_IA32_H_
+#define V8_SIMULATOR_IA32_H_
+
+
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
// Fix: no trailing semicolon inside the macro -- the call site supplies it.
// This matches the ARM variant of CALL_GENERATED_CODE and keeps the macro
// safe in contexts such as if/else bodies and expression positions.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
  entry(p0, p1, p2, p3, p4)
+
// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (reinterpret_cast<uintptr_t>(this) - limit)
+
+#endif // V8_SIMULATOR_IA32_H_
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SMART_POINTER_H_
+#define V8_SMART_POINTER_H_
+
+namespace v8 { namespace internal {
+
+
+// A 'scoped pointer' that calls delete[] on its pointer when the
+// destructor is called.
// A 'scoped pointer' that calls delete[] on its pointer when the
// destructor is called. Ownership is moved (not shared) on copy and
// assignment.
template<typename T>
class SmartPointer {
 public:
  // Construct a scoped pointer from a plain one.
  inline SmartPointer(T* pointer) : p(pointer) {}


  // When the destructor of the scoped pointer is executed the plain pointer
  // is deleted using delete[]. This implies that you must allocate with
  // new[...], not new(...).
  inline ~SmartPointer() { if (p) delete [] p; }


  // Copy constructor removes the pointer from the original to avoid double
  // freeing.
  inline SmartPointer(const SmartPointer<T>& rhs) : p(rhs.p) {
    // The const_cast is needed because the parameter is const yet the
    // source must be emptied; the "copy" is really an ownership transfer.
    const_cast<SmartPointer<T>&>(rhs).p = NULL;
  }


  // You can get the underlying pointer out with the * operator.
  inline T* operator*() { return p; }


  // You can use -> as if it was a plain pointer.
  inline T* operator->() { return p; }


  // We don't have implicit conversion to a T* since that hinders migration:
  // You would not be able to change a method from returning a T* to
  // returning an SmartPointer<T> and then get errors wherever it is used.


  // If you want to take out the plain pointer and don't want it automatically
  // deleted then call Detach(). Afterwards, the smart pointer is empty
  // (NULL).
  inline T* Detach() {
    T* temp = p;
    p = NULL;
    return temp;
  }


  // Assignment requires an empty (NULL) SmartPointer as the receiver. Like
  // the copy constructor it removes the pointer in the original to avoid
  // double freeing. Note that self-assignment would trip the ASSERT unless
  // the pointer is already NULL.
  inline SmartPointer& operator=(const SmartPointer<T>& rhs) {
    ASSERT(p == NULL);
    p = rhs.p;
    const_cast<SmartPointer<T>&>(rhs).p = NULL;
    return *this;
  }


 private:
  // The owned array, or NULL when empty/detached.
  T* p;
};
+
+} } // namespace v8::internal
+
+#endif // V8_SMART_POINTER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The common functionality when building with or without snapshots.
+
+#include "v8.h"
+
+#include "api.h"
+#include "serialize.h"
+#include "snapshot.h"
+
+namespace v8 { namespace internal {
+
+bool Snapshot::Deserialize(char* content, int len) {
+ Deserializer des(content, len);
+ des.GetFlags();
+ return V8::Initialize(&des);
+}
+
+
+bool Snapshot::Initialize(const char* snapshot_file) {
+ if (snapshot_file) {
+ int len;
+ char* str = ReadChars(snapshot_file, &len);
+ if (!str) return false;
+ bool result = Deserialize(str, len);
+ DeleteArray(str);
+ return result;
+ } else if (size_ > 0) {
+ return Deserialize(data_, size_);
+ }
+ return false;
+}
+
+
+bool Snapshot::WriteToFile(const char* snapshot_file) {
+ Serializer ser;
+ ser.Serialize();
+ char* str;
+ int len;
+ ser.Finalize(&str, &len);
+
+ int written = WriteChars(snapshot_file, str, len);
+
+ DeleteArray(str);
+ return written == len;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Used for building without snapshots.
+
+#include "v8.h"
+
+#include "snapshot.h"
+
+namespace v8 { namespace internal {
+
// Placeholder internal snapshot for builds without one: size_ of 0 makes
// Snapshot::Initialize skip the internal snapshot entirely.
char Snapshot::data_[] = { 0 };
int Snapshot::size_ = 0;
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SNAPSHOT_H_
+#define V8_SNAPSHOT_H_
+
+namespace v8 { namespace internal {
+
// Static access to the heap snapshot: deserializing one into the VM at
// startup and serializing the current heap out to a file.
class Snapshot {
 public:
  // Initialize the VM from the given snapshot file. If snapshot_file is
  // NULL, use the internal snapshot instead. Returns false if no snapshot
  // could be found.
  static bool Initialize(const char* snapshot_file = NULL);

  // Disable the use of the internal snapshot.
  static void DisableInternal() { size_ = 0; }

  // Write snapshot to the given file. Returns true if snapshot was written
  // successfully.
  static bool WriteToFile(const char* snapshot_file);

 private:
  // The internal snapshot's bytes and size; a size of 0 disables it.
  static char data_[];
  static int size_;

  // Deserializes content and initializes the VM from it.
  static bool Deserialize(char* content, int len);

  // All-static class: no instances.
  DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
+
+} } // namespace v8::internal
+
+#endif // V8_SNAPSHOT_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPACES_INL_H_
+#define V8_SPACES_INL_H_
+
+#include "memory.h"
+#include "spaces.h"
+
+namespace v8 { namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
+
+bool HeapObjectIterator::has_next() {
+ if (cur_addr_ < cur_limit_) {
+ return true; // common case
+ }
+ ASSERT(cur_addr_ == cur_limit_);
+ return HasNextInNextPage(); // slow path
+}
+
+
+HeapObject* HeapObjectIterator::next() {
+ ASSERT(has_next());
+
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ ASSERT_OBJECT_SIZE(obj_size);
+
+ cur_addr_ += obj_size;
+ ASSERT(cur_addr_ <= cur_limit_);
+
+ return obj;
+}
+
+
+// -----------------------------------------------------------------------------
+// PageIterator
+
+bool PageIterator::has_next() {
+ return cur_page_ != stop_page_;
+}
+
+
+Page* PageIterator::next() {
+ ASSERT(has_next());
+ Page* result = cur_page_;
+ cur_page_ = cur_page_->next_page();
+ return result;
+}
+
+
+// -----------------------------------------------------------------------------
+// Page
+
// Returns the page following this one in its chunk's page list; the link
// is maintained by the MemoryAllocator (see GetNextPage/SetNextPage).
Page* Page::next_page() {
  return MemoryAllocator::GetNextPage(this);
}
+
+
+Address Page::AllocationTop() {
+ PagedSpace* owner = MemoryAllocator::PageOwner(this);
+ if (Heap::old_space() == owner) {
+ return Heap::old_space()->PageAllocationTop(this);
+ } else if (Heap::code_space() == owner) {
+ return Heap::code_space()->PageAllocationTop(this);
+ } else {
+ ASSERT(Heap::map_space() == owner);
+ return Heap::map_space()->PageAllocationTop(this);
+ }
+}
+
+
// Zeroes this page's entire remembered set area.
void Page::ClearRSet() {
  // This method can be called in all rset states.
  memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}
+
+
// Given an address a (32 bits):
//   | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The rset address is computed as:
//   page_address + words * 4
//
// Returns the byte address of the remembered-set word holding the bit for
// the slot at (address + offset) and stores that bit's mask in *bitmask.
Address Page::ComputeRSetBitPosition(Address address, int offset,
                                     uint32_t* bitmask) {
  ASSERT(Page::is_rset_in_use());

  Page* page = Page::FromAddress(address);
  // One rset bit per pointer-aligned slot in the page.
  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
                                             kObjectAlignmentBits);
  *bitmask = 1 << (bit_offset % kBitsPerInt);

  Address rset_address =
      page->address() + (bit_offset / kBitsPerInt) * kIntSize;
  // The remembered set address is either in the normal remembered set range
  // of a page or else we have a large object page.
  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
         || page->IsLargeObjectPage());

  if (rset_address >= page->RSetEnd()) {
    // We have a large object page, and the remembered set address is actually
    // past the end of the object. The address of the remembered set in this
    // case is the extra remembered set start address at the address of the
    // end of the object:
    //   (page->ObjectAreaStart() + object size)
    // plus the offset of the computed remembered set address from the start
    // of the object:
    //   (rset_address - page->ObjectAreaStart()).
    // Ie, we can just add the object size.
    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
    rset_address +=
        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
                                           + Array::kLengthOffset));
  }
  return rset_address;
}
+
+
+void Page::SetRSet(Address address, int offset) {
+ uint32_t bitmask = 0;
+ Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+ Memory::uint32_at(rset_address) |= bitmask;
+
+ ASSERT(IsRSetSet(address, offset));
+}
+
+
+// Clears the corresponding remembered set bit for a given address.
+void Page::UnsetRSet(Address address, int offset) {
+ uint32_t bitmask = 0;
+ Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+ Memory::uint32_at(rset_address) &= ~bitmask;
+
+ ASSERT(!IsRSetSet(address, offset));
+}
+
+
+bool Page::IsRSetSet(Address address, int offset) {
+ uint32_t bitmask = 0;
+ Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+ return (Memory::uint32_at(rset_address) & bitmask) != 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+bool MemoryAllocator::IsValidChunk(int chunk_id) {
+ if (!IsValidChunkId(chunk_id)) return false;
+
+ ChunkInfo& c = chunks_[chunk_id];
+ return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
+}
+
+
// Returns whether chunk_id lies in the range of possible chunk ids.
bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}
+
+
+bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
+ ASSERT(p->is_valid());
+
+ int chunk_id = GetChunkId(p);
+ if (!IsValidChunkId(chunk_id)) return false;
+
+ ChunkInfo& c = chunks_[chunk_id];
+ return (c.address() <= p->address()) &&
+ (p->address() < c.address() + c.size()) &&
+ (space == c.owner());
+}
+
+
// Returns the page following p in its chunk's page list. The link address
// lives in the high bits of p's opaque_header; the low (page-alignment)
// bits hold the chunk id (see GetChunkId and SetNextPage).
Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  int raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}
+
+
// Returns the id of the chunk containing page p, stored in the low
// (page-alignment) bits of the page's opaque_header.
int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return p->opaque_header & Page::kPageAlignmentMask;
}
+
+
+void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
+ ASSERT(prev->is_valid());
+ int chunk_id = prev->opaque_header & Page::kPageAlignmentMask;
+ ASSERT_PAGE_ALIGNED(next->address());
+ prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
+}
+
+
// Returns the paged space that owns the chunk containing page.
PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}
+
+
+// -----------------------------------------------------------------------------
+// Space
+
// Returns whether addr lies on a page owned by this space.
bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  ASSERT(p->is_valid());

  return MemoryAllocator::IsPageInSpace(p, this);
}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectChunk
+
// Returns the single heap object stored in this large-object chunk.
HeapObject* LargeObjectChunk::GetObject() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the heap object in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return HeapObject::FromAddress(page->ObjectAreaStart());
}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectSpace
+
// Returns the number of extra remembered-set bytes needed for a large
// object of object_size bytes: one bit per pointer-size slot beyond the
// normal object area, rounded up to a whole number of ints.
int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
  int extra_rset_bits =
      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
              kBitsPerInt);
  return extra_rset_bits / kBitsPerByte;
}
+
+
// Bump-pointer allocation of size_in_bytes in the semispace described by
// alloc_info. Returns a retry-after-GC failure when there is not enough
// room left before the limit.
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                      AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) {
    // Out of room: the caller must trigger a collection and retry.
    return Failure::RetryAfterGC(size_in_bytes, NEW_SPACE);
  }

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  // alloc_info determines which semispace is being allocated into; check
  // that the new top stays within that semispace's bounds.
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? to_space_ : from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}
+
+} } // namespace v8::internal
+
+#endif // V8_SPACES_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "mark-compact.h"
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+// --collect-heap-spill-statistics only takes effect together with
+// --heap-stats (declared in another compilation unit).
+DECLARE_bool(heap_stats);
+DEFINE_bool(collect_heap_spill_statistics, false,
+ "report heap spill statistics along with heap_stats "
+ "(requires heap_stats)");
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+DECLARE_bool(log_gc);
+#endif
+
+// For paged spaces, top and limit should always be in the same page and top
+// should not be greater than limit.
+#define ASSERT_PAGED_ALLOCATION_INFO(info) \
+ ASSERT((Page::FromAllocationTop((info).top) == \
+ Page::FromAllocationTop((info).limit)) \
+ &&((info).top <= (info).limit))
+
+
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+ ASSERT((space)->low() <= (info).top \
+ && (info).top <= (space)->high() \
+ && (info).limit == (space)->high())
+
+// ----------------------------------------------------------------------------
+// SpaceIterator
+
+// Create a space iterator positioned at the first space. SpaceIterator
+// depends on the AllocationSpace enumeration starting with NEW_SPACE.
+SpaceIterator::SpaceIterator() : current_space_(NEW_SPACE), iterator_(NULL) {
+ ASSERT(NEW_SPACE == 0);
+}
+
+
+// Delete the active iterator, if any. Applying delete to a null pointer is
+// a guaranteed no-op in C++, so no explicit guard is needed.
+SpaceIterator::~SpaceIterator() {
+ delete iterator_;
+}
+
+
+// There are more spaces to visit as long as we have not yet produced an
+// iterator for the last space in the enumeration.
+bool SpaceIterator::has_next() {
+ return !(current_space_ == LAST_SPACE);
+}
+
+
+// Dispose of the previously handed-out iterator (if any), advance past the
+// last space if necessary, and return an iterator for the current space.
+ObjectIterator* SpaceIterator::next() {
+ if (iterator_ != NULL) {
+ // The current space has been iterated; move on to the next one.
+ delete iterator_;
+ iterator_ = NULL;
+ current_space_++;
+ if (current_space_ > LAST_SPACE) return NULL;
+ }
+ // Build the iterator for the (possibly new) current space.
+ return CreateIterator();
+}
+
+
+// Create an iterator for the space to iterate.
+ObjectIterator* SpaceIterator::CreateIterator() {
+ ASSERT(iterator_ == NULL);
+
+ switch (current_space_) {
+ case NEW_SPACE:
+ iterator_ = new SemiSpaceIterator(Heap::new_space());
+ break;
+ case OLD_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::old_space());
+ break;
+ case CODE_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::code_space());
+ break;
+ case MAP_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::map_space());
+ break;
+ case LO_SPACE:
+ iterator_ = new LargeObjectIterator(Heap::lo_space());
+ break;
+ }
+
+ // Return the newly allocated iterator;
+ ASSERT(iterator_ != NULL);
+ return iterator_;
+}
+
+
+// ----------------------------------------------------------------------------
+// HeapObjectIterator
+
+// Iterate all objects in 'space', from its bottom to the allocation top.
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
+ Initialize(space->bottom(), space->top(), NULL);
+}
+
+
+// As above, with a callback used to compute object sizes.
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
+ HeapObjectCallback size_func) {
+ Initialize(space->bottom(), space->top(), size_func);
+}
+
+
+// Iterate objects from 'start' to the space's allocation top.
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
+ Initialize(start, space->top(), NULL);
+}
+
+
+// As above, with a callback used to compute object sizes.
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
+ HeapObjectCallback size_func) {
+ Initialize(start, space->top(), size_func);
+}
+
+
+// Shared constructor body: record the iteration range [cur, end) and the
+// per-page limit for the starting page.
+void HeapObjectIterator::Initialize(Address cur, Address end,
+ HeapObjectCallback size_f) {
+ cur_addr_ = cur;
+ end_addr_ = end;
+ end_page_ = Page::FromAllocationTop(end);
+ size_func_ = size_f;
+ Page* p = Page::FromAllocationTop(cur_addr_);
+ // On the last page iteration stops at end_addr_; on earlier pages it stops
+ // at that page's own allocation top.
+ cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
+
+#ifdef DEBUG
+ Verify();
+#endif
+}
+
+
+// Called when the current page is exhausted: advance to the next page and
+// return true if objects remain there. Updates cur_addr_ and cur_limit_ as
+// a side effect.
+bool HeapObjectIterator::HasNextInNextPage() {
+ if (cur_addr_ == end_addr_) return false;
+
+ Page* cur_page = Page::FromAllocationTop(cur_addr_);
+ cur_page = cur_page->next_page();
+ ASSERT(cur_page->is_valid());
+
+ cur_addr_ = cur_page->ObjectAreaStart();
+ cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
+
+ ASSERT(cur_addr_ < cur_limit_);
+#ifdef DEBUG
+ Verify();
+#endif
+ return true;
+}
+
+
+#ifdef DEBUG
+// Invariant check: the current address and its limit lie on the same page,
+// with cur_addr_ not past cur_limit_.
+void HeapObjectIterator::Verify() {
+ Page* p = Page::FromAllocationTop(cur_addr_);
+ ASSERT(p == Page::FromAllocationTop(cur_limit_));
+ ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// PageIterator
+
+// Iterate the pages of 'space'. The stop page depends on the mode: one past
+// the allocation-top page, one past the mark-compact relocation-top page, or
+// the invalid null page (i.e. every linked page).
+PageIterator::PageIterator(PagedSpace* space, Mode mode) {
+ cur_page_ = space->first_page_;
+ switch (mode) {
+ case PAGES_IN_USE:
+ stop_page_ = space->AllocationTopPage()->next_page();
+ break;
+ case PAGES_USED_BY_MC:
+ stop_page_ = space->MCRelocationTopPage()->next_page();
+ break;
+ case ALL_PAGES:
+ stop_page_ = Page::FromAddress(NULL);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Page
+
+#ifdef DEBUG
+// Debug-only flag recording whether remembered sets are currently usable.
+Page::RSetState Page::rset_state_ = Page::IN_USE;
+#endif
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+//
+// Maximum number of bytes the allocator may hand out (page-rounded).
+int MemoryAllocator::capacity_ = 0;
+// Bytes currently allocated, including the initial reserved chunk.
+int MemoryAllocator::size_ = 0;
+
+VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
+
+// 270 is an estimate based on the static default heap size of a pair of 256K
+// semispaces and a 64M old generation.
+const int kEstimatedNumberOfChunks = 270;
+List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
+ kEstimatedNumberOfChunks);
+// Stack of free chunk ids; top_ is the number of ids currently on it.
+List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
+int MemoryAllocator::max_nof_chunks_ = 0;
+int MemoryAllocator::top_ = 0;
+
+
+// Push a chunk id onto the free-id stack.
+void MemoryAllocator::Push(int free_chunk_id) {
+ ASSERT(max_nof_chunks_ > 0);
+ ASSERT(top_ < max_nof_chunks_);
+ free_chunk_ids_[top_] = free_chunk_id;
+ top_++;
+}
+
+
+// Pop a chunk id from the free-id stack.
+int MemoryAllocator::Pop() {
+ ASSERT(top_ > 0);
+ top_--;
+ return free_chunk_ids_[top_];
+}
+
+
+// Initialize the allocator: round the byte capacity up to whole pages and
+// pre-populate the chunk table and the free-chunk-id stack.
+bool MemoryAllocator::Setup(int capacity) {
+ capacity_ = RoundUp(capacity, Page::kPageSize);
+
+ // Over-estimate the size of chunks_ array. It assumes the expansion of old
+ // space is always in the unit of a chunk (kChunkSize) except the last
+ // expansion.
+ //
+ // Due to alignment, allocated space might be one page less than required
+ // number (kPagesPerChunk) of pages for old spaces.
+ //
+ // Reserve two chunk ids for semispaces, one for map space and one for old
+ // space.
+ max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 4;
+ if (max_nof_chunks_ > kMaxNofChunks) return false;
+
+ size_ = 0;
+ ChunkInfo info; // uninitialized element.
+ for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
+ chunks_.Add(info);
+ free_chunk_ids_.Add(i);
+ }
+ top_ = max_nof_chunks_;
+ return true;
+}
+
+
+// Release everything: delete all outstanding chunks, clear the tables,
+// release the initial reserved chunk, and zero the accounting state.
+void MemoryAllocator::TearDown() {
+ for (int i = 0; i < max_nof_chunks_; i++) {
+ if (chunks_[i].address() != NULL) DeleteChunk(i);
+ }
+ chunks_.Clear();
+ free_chunk_ids_.Clear();
+
+ if (initial_chunk_ != NULL) {
+ LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
+ delete initial_chunk_;
+ initial_chunk_ = NULL;
+ }
+
+ ASSERT(top_ == max_nof_chunks_); // all chunks are free
+ top_ = 0;
+ capacity_ = 0;
+ size_ = 0;
+ max_nof_chunks_ = 0;
+}
+
+
+// Allocate 'requested' bytes from the OS; the size actually obtained is
+// returned through *allocated. Returns NULL when the allocation would
+// exceed the allocator's capacity. Accounting (size_ and the counter)
+// tracks the actual size.
+void* MemoryAllocator::AllocateRawMemory(const size_t requested,
+ size_t* allocated) {
+ if (size_ + static_cast<int>(requested) > capacity_) return NULL;
+
+ void* mem = OS::Allocate(requested, allocated);
+ // Explicit narrowing cast: accounting is kept in ints (see size_), and
+ // the capacity check above already casts the same way.
+ int alloced = static_cast<int>(*allocated);
+ size_ += alloced;
+ Counters::memory_allocated.Increment(alloced);
+ return mem;
+}
+
+
+// Return 'length' bytes at 'mem' to the OS and update the int-based
+// accounting. The narrowing casts are explicit to mirror AllocateRawMemory.
+void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+ OS::Free(mem, length);
+ Counters::memory_allocated.Decrement(static_cast<int>(length));
+ size_ -= static_cast<int>(length);
+ ASSERT(size_ >= 0);
+}
+
+
+// Reserve (but do not commit) 'requested' bytes of virtual address space
+// for the heap. Returns the base address, or NULL on failure.
+void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
+ ASSERT(initial_chunk_ == NULL);
+
+ initial_chunk_ = new VirtualMemory(requested);
+ CHECK(initial_chunk_ != NULL);
+ if (!initial_chunk_->IsReserved()) {
+ delete initial_chunk_;
+ initial_chunk_ = NULL;
+ return NULL;
+ }
+
+ // We are sure that we have mapped a block of requested addresses.
+ ASSERT(initial_chunk_->size() == requested);
+ LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
+ // NOTE(review): size_t added to int accounting — assumes 'requested'
+ // fits in an int; confirm for large heap configurations.
+ size_ += requested;
+ return initial_chunk_->address();
+}
+
+
+// Number of whole pages that fit inside [start, start + size).
+static int PagesInChunk(Address start, size_t size) {
+ // The first page starts on the first page-aligned address from start onward
+ // and the last page ends on the last page-aligned address before
+ // start+size. Page::kPageSize is a power of two so we can divide by
+ // shifting.
+ return (RoundDown(start + size, Page::kPageSize)
+ - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
+}
+
+
+// Allocate a fresh chunk holding up to 'requested_pages' pages, initialize
+// its pages and record the owning space. The actual page count is returned
+// through 'allocated_pages'; the invalid null page signals failure.
+Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
+ PagedSpace* owner) {
+ if (requested_pages <= 0) return Page::FromAddress(NULL);
+ size_t chunk_size = requested_pages * Page::kPageSize;
+
+ // There is not enough space to guarantee the desired number pages can be
+ // allocated.
+ if (size_ + static_cast<int>(chunk_size) > capacity_) {
+ // Request as many pages as we can.
+ chunk_size = capacity_ - size_;
+ requested_pages = chunk_size >> Page::kPageSizeBits;
+
+ if (requested_pages <= 0) return Page::FromAddress(NULL);
+ }
+
+ void* chunk = AllocateRawMemory(chunk_size, &chunk_size);
+ if (chunk == NULL) return Page::FromAddress(NULL);
+ LOG(NewEvent("PagedChunk", chunk, chunk_size));
+
+ // Alignment may leave fewer usable pages than requested; zero usable
+ // pages means the chunk is useless and is returned immediately.
+ *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+ if (*allocated_pages == 0) {
+ FreeRawMemory(chunk, chunk_size);
+ LOG(DeleteEvent("PagedChunk", chunk));
+ return Page::FromAddress(NULL);
+ }
+
+ int chunk_id = Pop();
+ chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
+
+ return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+}
+
+
+// Commit a range inside the initial reserved chunk and carve it into pages
+// owned by 'owner'. The page count is returned through 'num_pages'; the
+// invalid null page signals commit failure.
+Page* MemoryAllocator::CommitPages(Address start, size_t size,
+ PagedSpace* owner, int* num_pages) {
+ ASSERT(start != NULL);
+ *num_pages = PagesInChunk(start, size);
+ ASSERT(*num_pages > 0);
+ ASSERT(initial_chunk_ != NULL);
+ ASSERT(initial_chunk_->address() <= start);
+ ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ + initial_chunk_->size());
+
+ if (!initial_chunk_->Commit(start, size)) {
+ return Page::FromAddress(NULL);
+ }
+ Counters::memory_allocated.Increment(size);
+
+ // So long as we correctly overestimated the number of chunks we should not
+ // run out of chunk ids.
+ CHECK(!OutOfChunkIds());
+ int chunk_id = Pop();
+ chunks_[chunk_id].init(start, size, owner);
+ return InitializePagesInChunk(chunk_id, *num_pages, owner);
+}
+
+
+// Commit a block inside the initial reserved chunk without creating pages
+// (used for the semispaces). Returns false if the commit fails.
+bool MemoryAllocator::CommitBlock(Address start, size_t size) {
+ ASSERT(start != NULL);
+ ASSERT(size > 0);
+ ASSERT(initial_chunk_ != NULL);
+ ASSERT(initial_chunk_->address() <= start);
+ ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ + initial_chunk_->size());
+
+ if (!initial_chunk_->Commit(start, size)) return false;
+ Counters::memory_allocated.Increment(size);
+ return true;
+}
+
+
+// Thread the pages of a chunk into a singly linked list via their opaque
+// headers (next-page address | chunk id) and return the first page.
+Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+ PagedSpace* owner) {
+ ASSERT(IsValidChunk(chunk_id));
+ ASSERT(pages_in_chunk > 0);
+
+ Address chunk_start = chunks_[chunk_id].address();
+
+ Address low = RoundUp(chunk_start, Page::kPageSize);
+
+#ifdef DEBUG
+ size_t chunk_size = chunks_[chunk_id].size();
+ Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
+ ASSERT(pages_in_chunk <=
+ ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
+#endif
+
+ // Each page's header points at the following page; all carry the chunk id.
+ Address page_addr = low;
+ for (int i = 0; i < pages_in_chunk; i++) {
+ Page* p = Page::FromAddress(page_addr);
+ p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+ p->is_normal_page = 1;
+ page_addr += Page::kPageSize;
+ }
+
+ // Set the next page of the last page to 0.
+ Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+ last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+ return Page::FromAddress(low);
+}
+
+
+// Free page 'p' and every page after it, deleting whole chunks at a time.
+// If 'p' is not its chunk's first page, that chunk is kept (its tail pages
+// remain linked) and 'p' is returned; otherwise the invalid page is
+// returned.
+Page* MemoryAllocator::FreePages(Page* p) {
+ if (!p->is_valid()) return p;
+
+ // Find the first page in the same chunk as 'p'
+ Page* first_page = FindFirstPageInSameChunk(p);
+ Page* page_to_return = Page::FromAddress(NULL);
+
+ if (p != first_page) {
+ // Find the last page in the same chunk as 'prev'.
+ Page* last_page = FindLastPageInSameChunk(p);
+ first_page = GetNextPage(last_page); // first page in next chunk
+
+ // set the next_page of last_page to NULL
+ SetNextPage(last_page, Page::FromAddress(NULL));
+ page_to_return = p; // return 'p' when exiting
+ }
+
+ while (first_page->is_valid()) {
+ int chunk_id = GetChunkId(first_page);
+ ASSERT(IsValidChunk(chunk_id));
+
+ // Find the first page of the next chunk before deleting this chunk.
+ first_page = GetNextPage(FindLastPageInSameChunk(first_page));
+
+ // Free the current chunk.
+ DeleteChunk(chunk_id);
+ }
+
+ return page_to_return;
+}
+
+
+// Release a chunk's memory and return its id to the free stack. Memory
+// inside the initial reserved chunk is uncommitted; anything else is freed
+// back to the OS.
+void MemoryAllocator::DeleteChunk(int chunk_id) {
+ ASSERT(IsValidChunk(chunk_id));
+
+ ChunkInfo& c = chunks_[chunk_id];
+
+ // We cannot free a chunk contained in the initial chunk because it was not
+ // allocated with AllocateRawMemory. Instead we uncommit the virtual
+ // memory.
+ bool in_initial_chunk = false;
+ if (initial_chunk_ != NULL) {
+ Address start = static_cast<Address>(initial_chunk_->address());
+ Address end = start + initial_chunk_->size();
+ in_initial_chunk = (start <= c.address()) && (c.address() < end);
+ }
+
+ if (in_initial_chunk) {
+ // TODO(1240712): VirtualMemory::Uncommit has a return value which
+ // is ignored here.
+ initial_chunk_->Uncommit(c.address(), c.size());
+ Counters::memory_allocated.Decrement(c.size());
+ } else {
+ LOG(DeleteEvent("PagedChunk", c.address()));
+ FreeRawMemory(c.address(), c.size());
+ }
+ c.init(NULL, 0, NULL);
+ Push(chunk_id);
+}
+
+
+// The first page of a chunk starts at the first page boundary at or above
+// the chunk's base address.
+Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
+ int id = GetChunkId(p);
+ ASSERT(IsValidChunk(id));
+ return Page::FromAddress(RoundUp(chunks_[id].address(), Page::kPageSize));
+}
+
+
+// The chunk's usable area ends at the last page boundary at or below its
+// end address; the last page begins one page size before that boundary.
+Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
+ int id = GetChunkId(p);
+ ASSERT(IsValidChunk(id));
+ Address base = chunks_[id].address();
+ Address limit = RoundDown(base + chunks_[id].size(), Page::kPageSize);
+ ASSERT(base <= p->address() && p->address() < limit);
+ return Page::FromAddress(limit - Page::kPageSize);
+}
+
+
+#ifdef DEBUG
+// Print capacity/usage statistics. NOTE(review): "%%%d" emits a literal '%'
+// before the number (e.g. "available: %47") — confirm the intended layout.
+void MemoryAllocator::ReportStatistics() {
+ float pct = static_cast<float>(capacity_ - size_) / capacity_;
+ PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
+ capacity_, size_, static_cast<int>(pct*100));
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// PagedSpace implementation
+
+// Create a paged space with a maximum capacity expressed in object-area
+// bytes (max_capacity rounded down to whole pages, then scaled by each
+// page's usable object area). No memory is committed until Setup().
+PagedSpace::PagedSpace(int max_capacity, AllocationSpace id) {
+ ASSERT(id == OLD_SPACE || id == CODE_SPACE || id == MAP_SPACE);
+ max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
+ * Page::kObjectAreaSize;
+ identity_ = id;
+ accounting_stats_.Clear();
+
+ allocation_mode_ = LINEAR;
+
+ allocation_info_.top = NULL;
+ allocation_info_.limit = NULL;
+
+ mc_forwarding_info_.top = NULL;
+ mc_forwarding_info_.limit = NULL;
+}
+
+
+// Acquire the space's first pages, either by committing the supplied
+// virtual memory range or by allocating a fresh chunk, then clear the
+// remembered sets and point allocation at the first page.
+bool PagedSpace::Setup(Address start, size_t size) {
+ if (HasBeenSetup()) return false;
+
+ int num_pages = 0;
+ // Try to use the virtual memory range passed to us. If it is too small to
+ // contain at least one page, ignore it and allocate instead.
+ if (PagesInChunk(start, size) > 0) {
+ first_page_ = MemoryAllocator::CommitPages(start, size, this, &num_pages);
+ } else {
+ int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
+ max_capacity_ / Page::kObjectAreaSize);
+ first_page_ =
+ MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
+ if (!first_page_->is_valid()) return false;
+ }
+
+ // We are sure that the first page is valid and that we have at least one
+ // page.
+ ASSERT(first_page_->is_valid());
+ ASSERT(num_pages > 0);
+ accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
+ ASSERT(Capacity() <= max_capacity_);
+
+ for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+ p->ClearRSet();
+ }
+
+ // Use first_page_ for allocation.
+ SetAllocationInfo(&allocation_info_, first_page_);
+
+ return true;
+}
+
+
+// A paged space counts as set up once it has committed at least one page.
+bool PagedSpace::HasBeenSetup() {
+ return Capacity() > 0;
+}
+
+
+// Free all of this space's pages (whole chunks at a time) and reset the
+// accounting statistics.
+void PagedSpace::TearDown() {
+ first_page_ = MemoryAllocator::FreePages(first_page_);
+ ASSERT(!first_page_->is_valid());
+
+ accounting_stats_.Clear();
+}
+
+
+// Clear the remembered set of every page in this space.
+void PagedSpace::ClearRSet() {
+ PageIterator pages(this, PageIterator::ALL_PAGES);
+ while (pages.has_next()) pages.next()->ClearRSet();
+}
+
+
+// Locate the heap object covering 'addr' by linearly scanning its page.
+// Returns an exception failure if the address is outside the space.
+Object* PagedSpace::FindObject(Address addr) {
+#ifdef DEBUG
+ // Note: this function can only be called before or after mark-compact GC
+ // because it accesses map pointers.
+ ASSERT(!MarkCompactCollector::in_use());
+#endif
+
+ if (!Contains(addr)) return Failure::Exception();
+
+ Page* p = Page::FromAddress(addr);
+ Address cur = p->ObjectAreaStart();
+ Address end = p->AllocationTop();
+ while (cur < end) {
+ HeapObject* obj = HeapObject::FromAddress(cur);
+ Address next = cur + obj->Size();
+ if ((cur <= addr) && (addr < next)) return obj;
+ cur = next;
+ }
+
+ // addr lies in the unused tail of the page.
+ return Failure::Exception();
+}
+
+
+// Point the allocation area at page p's entire object area.
+void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
+ alloc_info->limit = p->ObjectAreaEnd();
+ alloc_info->top = p->ObjectAreaStart();
+ ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
+}
+
+
+// Prepare for mark-compact relocation: number the pages and point the
+// forwarding allocation area at the first page.
+void PagedSpace::MCResetRelocationInfo() {
+ // Set page indexes.
+ int i = 0;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (it.has_next()) {
+ Page* p = it.next();
+ p->mc_page_index = i++;
+ }
+
+ // Set mc_forwarding_info_ to the first page in the space.
+ SetAllocationInfo(&mc_forwarding_info_, first_page_);
+ // All the bytes in the space are 'available'. We will rediscover
+ // allocated and wasted bytes during GC.
+ accounting_stats_.Reset();
+}
+
+
+// Toggle between LINEAR_ONLY allocation and normal LINEAR mode.
+void PagedSpace::SetLinearAllocationOnly(bool linear_only) {
+ if (!linear_only) {
+ ASSERT(allocation_mode_ == LINEAR_ONLY);
+ allocation_mode_ = LINEAR;
+ return;
+ }
+ // Note that the free list is not cleared: if we later switch back to
+ // FREE_LIST mode it remains available. Resetting it would require
+ // correct accounting for the wasted bytes.
+ allocation_mode_ = LINEAR_ONLY;
+}
+
+
+// Map 'addr' to a space-relative offset using the mark-compact page index
+// assigned by MCResetRelocationInfo().
+int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
+#ifdef DEBUG
+ // The Contains function considers the address at the beginning of a
+ // page in the page, MCSpaceOffsetForAddress considers it is in the
+ // previous page.
+ if (Page::IsAlignedToPageSize(addr)) {
+ ASSERT(Contains(addr - kPointerSize));
+ } else {
+ ASSERT(Contains(addr));
+ }
+#endif
+
+ // If addr is at the end of a page, it belongs to previous page
+ Page* p = Page::IsAlignedToPageSize(addr)
+ ? Page::FromAllocationTop(addr)
+ : Page::FromAddress(addr);
+ int index = p->mc_page_index;
+ return (index * Page::kPageSize) + p->Offset(addr);
+}
+
+
+// Grow the space by up to one chunk of pages, linking the new pages after
+// 'last_page'. Returns false when at capacity or allocation fails.
+bool PagedSpace::Expand(Page* last_page) {
+ ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
+ ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+
+ if (Capacity() == max_capacity_) return false;
+
+ ASSERT(Capacity() < max_capacity_);
+ // Last page must be valid and its next page is invalid.
+ ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
+
+ int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
+ if (available_pages <= 0) return false;
+
+ int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
+ Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
+ if (!p->is_valid()) return false;
+
+ accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
+ ASSERT(Capacity() <= max_capacity_);
+
+ MemoryAllocator::SetNextPage(last_page, p);
+
+ // Clear remembered set of new pages.
+ while (p->is_valid()) {
+ p->ClearRSet();
+ p = p->next_page();
+ }
+
+ return true;
+}
+
+
+#ifdef DEBUG
+// Count the pages currently linked into this space.
+int PagedSpace::CountTotalPages() {
+ int total = 0;
+ Page* p = first_page_;
+ while (p->is_valid()) {
+ total++;
+ p = p->next_page();
+ }
+ return total;
+}
+#endif
+
+
+// Give back roughly half of the free pages at the end of the space,
+// releasing whole chunks at a time.
+void PagedSpace::Shrink() {
+ // Release half of free pages.
+ Page* top_page = AllocationTopPage();
+ ASSERT(top_page->is_valid());
+
+ // Loop over the pages from the top page to the end of the space to count
+ // the number of pages to keep and find the last page to keep.
+ int free_pages = 0;
+ int pages_to_keep = 0; // Of the free pages.
+ Page* last_page_to_keep = top_page;
+ Page* current_page = top_page->next_page();
+ // Loop over the pages to the end of the space.
+ while (current_page->is_valid()) {
+ // Keep every odd-numbered page, one page for every two in the space.
+ if ((free_pages & 0x1) == 1) {
+ pages_to_keep++;
+ last_page_to_keep = last_page_to_keep->next_page();
+ }
+ free_pages++;
+ current_page = current_page->next_page();
+ }
+
+ // Free pages after last_page_to_keep, and adjust the next_page link.
+ Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
+ MemoryAllocator::SetNextPage(last_page_to_keep, p);
+
+ // Since pages are only freed in whole chunks, we may have kept more than
+ // pages_to_keep.
+ while (p->is_valid()) {
+ pages_to_keep++;
+ p = p->next_page();
+ }
+
+ // The difference between free_pages and pages_to_keep is the number of
+ // pages actually freed.
+ ASSERT(pages_to_keep <= free_pages);
+ int bytes_freed = (free_pages - pages_to_keep) * Page::kObjectAreaSize;
+ accounting_stats_.ShrinkSpace(bytes_freed);
+
+ ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
+}
+
+
+// Expand the space until it has at least 'capacity' bytes of object area.
+// Returns false if expansion fails before reaching the target.
+bool PagedSpace::EnsureCapacity(int capacity) {
+ if (Capacity() >= capacity) return true;
+
+ // Start from the allocation top and loop to the last page in the space.
+ Page* last_page = AllocationTopPage();
+ Page* next_page = last_page->next_page();
+ while (next_page->is_valid()) {
+ last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
+ next_page = last_page->next_page();
+ }
+
+ // Expand the space until it has the required capacity or expansion fails.
+ do {
+ if (!Expand(last_page)) return false;
+ ASSERT(last_page->next_page()->is_valid());
+ last_page =
+ MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
+ } while (Capacity() < capacity);
+
+ return true;
+}
+
+
+#ifdef DEBUG
+// Debug printing hook; intentionally empty for paged spaces.
+void PagedSpace::Print() { }
+#endif
+
+
+// -----------------------------------------------------------------------------
+// NewSpace implementation
+
+// Create a new (scavenge) space backed by two semispaces. The maximum
+// capacity must be a power of two so address masks can be derived from it.
+// No memory is committed until Setup().
+NewSpace::NewSpace(int initial_semispace_capacity,
+ int maximum_semispace_capacity) {
+ ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
+ ASSERT(IsPowerOf2(maximum_semispace_capacity));
+ maximum_capacity_ = maximum_semispace_capacity;
+ capacity_ = initial_semispace_capacity;
+ to_space_ = new SemiSpace(capacity_, maximum_capacity_);
+ from_space_ = new SemiSpace(capacity_, maximum_capacity_);
+
+ // Allocate and setup the histogram arrays if necessary.
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+ promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+
+#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
+ promoted_histogram_[name].set_name(#name);
+ INSTANCE_TYPE_LIST(SET_NAME)
+#undef SET_NAME
+#endif
+}
+
+
+// Commit both semispaces inside the reserved range [start, start + size)
+// and derive the address masks used for fast containment checks.
+bool NewSpace::Setup(Address start, int size) {
+ ASSERT(size == 2 * maximum_capacity_);
+ ASSERT(IsAddressAligned(start, size, 0));
+
+ if (to_space_ == NULL
+ || !to_space_->Setup(start, maximum_capacity_)) {
+ return false;
+ }
+ if (from_space_ == NULL
+ || !from_space_->Setup(start + maximum_capacity_, maximum_capacity_)) {
+ return false;
+ }
+
+ start_ = start;
+ address_mask_ = ~(size - 1);
+ object_mask_ = address_mask_ | kHeapObjectTag;
+ // NOTE(review): casting a pointer to uint32_t assumes a 32-bit address
+ // space — confirm before porting to 64-bit targets.
+ object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
+
+ allocation_info_.top = to_space_->low();
+ allocation_info_.limit = to_space_->high();
+ mc_forwarding_info_.top = NULL;
+ mc_forwarding_info_.limit = NULL;
+
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ return true;
+}
+
+
+// Release the histogram arrays and both semispaces, and reset all
+// allocation state.
+void NewSpace::TearDown() {
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ if (allocated_histogram_) {
+ DeleteArray(allocated_histogram_);
+ allocated_histogram_ = NULL;
+ }
+ if (promoted_histogram_) {
+ DeleteArray(promoted_histogram_);
+ promoted_histogram_ = NULL;
+ }
+#endif
+
+ start_ = NULL;
+ capacity_ = 0;
+ allocation_info_.top = NULL;
+ allocation_info_.limit = NULL;
+ mc_forwarding_info_.top = NULL;
+ mc_forwarding_info_.limit = NULL;
+
+ if (to_space_ != NULL) {
+ to_space_->TearDown();
+ delete to_space_;
+ to_space_ = NULL;
+ }
+
+ if (from_space_ != NULL) {
+ from_space_->TearDown();
+ delete from_space_;
+ from_space_ = NULL;
+ }
+}
+
+
+// Exchange the roles of the two semispaces.
+void NewSpace::Flip() {
+ SemiSpace* old_from = from_space_;
+ from_space_ = to_space_;
+ to_space_ = old_from;
+}
+
+
+// Double the committed size of both semispaces and extend the allocation
+// limit accordingly.
+bool NewSpace::Double() {
+ ASSERT(capacity_ <= maximum_capacity_ / 2);
+ // TODO(1240712): Failure to double the from space can result in
+ // semispaces of different sizes. In the event of that failure, the
+ // to space doubling should be rolled back before returning false.
+ if (!to_space_->Double() || !from_space_->Double()) return false;
+ capacity_ *= 2;
+ allocation_info_.limit = to_space_->high();
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ return true;
+}
+
+
+// Restart linear allocation at the bottom of the to space.
+void NewSpace::ResetAllocationInfo() {
+ allocation_info_.limit = to_space_->high();
+ allocation_info_.top = to_space_->low();
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+// During mark-compact, relocation targets start at the bottom of the
+// from space.
+void NewSpace::MCResetRelocationInfo() {
+ mc_forwarding_info_.limit = from_space_->high();
+ mc_forwarding_info_.top = from_space_->low();
+ ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
+}
+
+
+// Adopt the mark-compact forwarding top as the new allocation top.
+void NewSpace::MCCommitRelocationInfo() {
+ // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
+ // valid allocation info for the to space.
+ allocation_info_.top = mc_forwarding_info_.top;
+ allocation_info_.limit = to_space_->high();
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+#ifdef DEBUG
+// We do not use the SemispaceIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void NewSpace::Verify() {
+ // The allocation pointer should be in the space or at the very end.
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ // There should be objects packed in from the low address up to the
+ // allocation pointer.
+ Address current = to_space_->low();
+ while (current < top()) {
+ HeapObject* object = HeapObject::FromAddress(current);
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map* map = object->map();
+ ASSERT(map->IsMap());
+ ASSERT(Heap::map_space()->Contains(map));
+
+ // The object should not be code or a map.
+ ASSERT(!object->IsMap());
+ ASSERT(!object->IsCode());
+
+ // The object itself should look OK.
+ object->Verify();
+
+ // All the interior pointers should be contained in the heap.
+ VerifyPointersVisitor visitor;
+ int size = object->Size();
+ object->IterateBody(map->instance_type(), size, &visitor);
+
+ // Advance by the object's full size so no gap goes unchecked.
+ current += size;
+ }
+
+ // The allocation pointer should not be in the middle of an object.
+ ASSERT(current == top());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// SemiSpace implementation
+
+// A semispace starts with no backing store; Setup() commits the initial
+// capacity inside the heap's reserved chunk.
+SemiSpace::SemiSpace(int initial_capacity, int maximum_capacity)
+ : capacity_(initial_capacity), maximum_capacity_(maximum_capacity),
+ start_(NULL), age_mark_(NULL) {
+}
+
+
+// Commit the initial capacity at 'start' and derive the containment masks.
+bool SemiSpace::Setup(Address start, int size) {
+ ASSERT(size == maximum_capacity_);
+ if (!MemoryAllocator::CommitBlock(start, capacity_)) return false;
+
+ start_ = start;
+ address_mask_ = ~(size - 1);
+ object_mask_ = address_mask_ | kHeapObjectTag;
+ // NOTE(review): pointer-to-uint32_t cast assumes a 32-bit address space —
+ // confirm before porting to 64-bit targets.
+ object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
+
+ age_mark_ = start_;
+ return true;
+}
+
+
+// Forget the backing store. NOTE(review): the committed memory is not
+// uncommitted here — presumably released with the heap's initial chunk by
+// MemoryAllocator::TearDown(); confirm.
+void SemiSpace::TearDown() {
+ start_ = NULL;
+ capacity_ = 0;
+}
+
+
+// Commit another capacity_ bytes immediately above the current half and
+// double the recorded capacity.
+bool SemiSpace::Double() {
+ if (!MemoryAllocator::CommitBlock(high(), capacity_)) return false;
+ capacity_ *= 2;
+ return true;
+}
+
+
+#ifdef DEBUG
+// Debug printing hook; intentionally empty for semispaces.
+void SemiSpace::Print() { }
+#endif
+
+
+// -----------------------------------------------------------------------------
+// SemiSpaceIterator implementation.
+// Iterate all objects in the to space, from bottom to allocation top.
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+ Initialize(space, space->bottom(), space->top(), NULL);
+}
+
+
+// As above, with a callback used to compute object sizes.
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
+ HeapObjectCallback size_func) {
+ Initialize(space, space->bottom(), space->top(), size_func);
+}
+
+
+// Iterate objects from 'start' to the allocation top.
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
+ Initialize(space, start, space->top(), NULL);
+}
+
+
+// Shared constructor body: the range [start, end) must lie inside the
+// to space.
+void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
+ Address end,
+ HeapObjectCallback size_func) {
+ ASSERT(space->ToSpaceContains(start));
+ ASSERT(space->ToSpaceLow() <= end
+ && end <= space->ToSpaceHigh());
+ space_ = space->to_space_;
+ current_ = start;
+ limit_ = end;
+ size_func_ = size_func;
+}
+
+
+#ifdef DEBUG
+// A static array of histogram info for each type.
+static HistogramInfo heap_histograms[LAST_TYPE+1];
+// Accumulated spill statistics for JS objects (see CollectHistogramInfo).
+static JSObject::SpillInformation js_spill_information;
+
+// heap_histograms is shared, always clear it before using it.
+static void ClearHistograms() {
+ // We reset the name each time, though it hasn't changed.
+#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
+ INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
+ INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
+#undef CLEAR_HISTOGRAM
+
+ // The spill statistics are gathered alongside the histograms; clear both.
+ js_spill_information.Clear();
+}
+
+
+// Per-kind accumulated code sizes, gathered during heap iteration.
+static int code_kind_statistics[Code::NUMBER_OF_KINDS];
+
+
+// Zero the per-kind code statistics before a new gathering pass.
+static void ClearCodeKindStatistics() {
+ for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+ code_kind_statistics[i] = 0;
+ }
+}
+
+
+// Print a histogram of accumulated code sizes per code kind. The switch
+// builds a kind-to-name table; a missing enumerator is a compile-time
+// warning rather than a runtime hole.
+static void ReportCodeKindStatistics() {
+ const char* table[Code::NUMBER_OF_KINDS];
+
+#define CASE(name) \
+ case Code::name: table[Code::name] = #name; \
+ break
+
+ for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+ switch (static_cast<Code::Kind>(i)) {
+ CASE(FUNCTION);
+ CASE(STUB);
+ CASE(BUILTIN);
+ CASE(LOAD_IC);
+ CASE(KEYED_LOAD_IC);
+ CASE(STORE_IC);
+ CASE(KEYED_STORE_IC);
+ CASE(CALL_IC);
+ }
+ }
+
+#undef CASE
+
+ PrintF("\n Code kind histograms: \n");
+ for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+ if (code_kind_statistics[i] > 0) {
+ PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
+ }
+ }
+ PrintF("\n");
+}
+
+
+// Record one heap object in the shared heap_histograms table (count and
+// bytes by instance type), optionally collecting JSObject spill statistics.
+// Returns the object's size so callers can advance an iteration.
+static int CollectHistogramInfo(HeapObject* obj) {
+ InstanceType type = obj->map()->instance_type();
+ ASSERT(0 <= type && type <= LAST_TYPE);
+ // ClearHistograms must have run first, so the name is always set.
+ ASSERT(heap_histograms[type].name() != NULL);
+ heap_histograms[type].increment_number(1);
+ heap_histograms[type].increment_bytes(obj->Size());
+
+ if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
+ JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
+ }
+
+ return obj->Size();
+}
+
+
+// Print the object histogram gathered by CollectHistogramInfo: one line per
+// instance type with a nonzero count, then all string types summed into a
+// single STRING_TYPE line, then (optionally) the JSObject spill statistics.
+static void ReportHistogram(bool print_spill) {
+ PrintF("\n Object Histogram:\n");
+ for (int i = 0; i <= LAST_TYPE; i++) {
+ if (heap_histograms[i].number() > 0) {
+ PrintF(" %-33s%10d (%10d bytes)\n",
+ heap_histograms[i].name(),
+ heap_histograms[i].number(),
+ heap_histograms[i].bytes());
+ }
+ }
+ PrintF("\n");
+
+ // Summarize string types.
+ int string_number = 0;
+ int string_bytes = 0;
+#define INCREMENT(type, size, name) \
+ string_number += heap_histograms[type].number(); \
+ string_bytes += heap_histograms[type].bytes();
+ STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+ if (string_number > 0) {
+ PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
+ string_bytes);
+ }
+
+ if (FLAG_collect_heap_spill_statistics && print_spill) {
+ js_spill_information.Print();
+ }
+}
+#endif // DEBUG
+
+
+// Support for statistics gathering for --heap-stats and --log-gc.
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+// Reset the allocation and promotion histograms for every instance type.
+void NewSpace::ClearHistograms() {
+ int type = 0;
+ while (type <= LAST_TYPE) {
+ allocated_histogram_[type].clear();
+ promoted_histogram_[type].clear();
+ ++type;
+ }
+}
+
+// Because the copying collector does not touch garbage objects, we iterate
+// the new space before a collection to get a histogram of allocated objects.
+// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
+// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
+// flag is set.
+void NewSpace::CollectStatistics() {
+ ClearHistograms();
+ // Walk every live object in to-space and record it as "allocated".
+ SemiSpaceIterator it(this);
+ while (it.has_next()) RecordAllocation(it.next());
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// Emit one heap-sample log record for the given histogram: a begin event,
+// one item per type with a nonzero count (all string types lumped into a
+// single STRING_TYPE item), and an end event. |description| labels the
+// sample (e.g. "allocated" or "promoted").
+static void DoReportStatistics(HistogramInfo* info, const char* description) {
+ LOG(HeapSampleBeginEvent("NewSpace", description));
+ // Lump all the string types together.
+ int string_number = 0;
+ int string_bytes = 0;
+#define INCREMENT(type, size, name) \
+ string_number += info[type].number(); \
+ string_bytes += info[type].bytes();
+ STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+ if (string_number > 0) {
+ LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+ }
+
+ // Then do the other types.
+ for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+ if (info[i].number() > 0) {
+ LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+ info[i].bytes()));
+ }
+ }
+ LOG(HeapSampleEndEvent("NewSpace", description));
+}
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+
+// Report new-space statistics: under DEBUG/--heap-stats print capacity and
+// an object histogram of allocations; under --log-gc emit heap-sample log
+// events for both the allocated and promoted histograms.
+void NewSpace::ReportStatistics() {
+#ifdef DEBUG
+ if (FLAG_heap_stats) {
+ float pct = static_cast<float>(Available()) / Capacity();
+ PrintF(" capacity: %d, available: %d, %%%d\n",
+ Capacity(), Available(), static_cast<int>(pct*100));
+ PrintF("\n Object Histogram:\n");
+ for (int i = 0; i <= LAST_TYPE; i++) {
+ if (allocated_histogram_[i].number() > 0) {
+ PrintF(" %-33s%10d (%10d bytes)\n",
+ allocated_histogram_[i].name(),
+ allocated_histogram_[i].number(),
+ allocated_histogram_[i].bytes());
+ }
+ }
+ PrintF("\n");
+ }
+#endif // DEBUG
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (FLAG_log_gc) {
+ DoReportStatistics(allocated_histogram_, "allocated");
+ DoReportStatistics(promoted_histogram_, "promoted");
+ }
+#endif // ENABLE_LOGGING_AND_PROFILING
+}
+
+
+// Count one newly allocated object (number and bytes) in the allocation
+// histogram, bucketed by instance type.
+void NewSpace::RecordAllocation(HeapObject* obj) {
+ InstanceType type = obj->map()->instance_type();
+ ASSERT(0 <= type && type <= LAST_TYPE);
+ allocated_histogram_[type].increment_number(1);
+ allocated_histogram_[type].increment_bytes(obj->Size());
+}
+
+
+// Count one promoted object (number and bytes) in the promotion histogram,
+// bucketed by instance type.
+void NewSpace::RecordPromotion(HeapObject* obj) {
+ InstanceType type = obj->map()->instance_type();
+ ASSERT(0 <= type && type <= LAST_TYPE);
+ promoted_histogram_[type].increment_number(1);
+ promoted_histogram_[type].increment_bytes(obj->Size());
+}
+#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces implementation
+
+// Make this free block look like a valid heap object of the given size so
+// heap iteration can skip over it.
+void FreeListNode::set_size(int size_in_bytes) {
+ ASSERT(size_in_bytes > 0);
+ ASSERT(IsAligned(size_in_bytes, kPointerSize));
+
+ // We write a map and possibly size information to the block. If the block
+ // is big enough to be a ByteArray with at least one extra word (the next
+ // pointer), we set its map to be the byte array map and its size to an
+ // appropriate array length for the desired size from HeapObject::Size().
+ // If the block is too small (eg, one or two words), to hold both a size
+ // field and a next pointer, we give it a filler map that gives it the
+ // correct size.
+ if (size_in_bytes > Array::kHeaderSize) {
+ set_map(Heap::byte_array_map());
+ ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+ } else if (size_in_bytes == kPointerSize) {
+ set_map(Heap::one_word_filler_map());
+ } else if (size_in_bytes == 2 * kPointerSize) {
+ set_map(Heap::two_word_filler_map());
+ } else {
+ // Sizes between 2 words and the array header size cannot occur.
+ UNREACHABLE();
+ }
+}
+
+
+// Read the next-block pointer stored in the node's body. Only byte-array
+// nodes (blocks large enough for a next field) have one.
+Address FreeListNode::next() {
+ ASSERT(map() == Heap::byte_array_map());
+ return Memory::Address_at(address() + kNextOffset);
+}
+
+
+// Write the next-block pointer into the node's body. Only byte-array nodes
+// (blocks large enough for a next field) have one.
+void FreeListNode::set_next(Address next) {
+ ASSERT(map() == Heap::byte_array_map());
+ Memory::Address_at(address() + kNextOffset) = next;
+}
+
+
+// Create an empty free list for the given owning space; |owner| is used to
+// tag allocation-retry failures with the right space.
+OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
+ Reset();
+}
+
+
+// Empty the free list: zero the available-byte count, clear every exact-size
+// bucket, and make the size list contain only the head sentinel.
+void OldSpaceFreeList::Reset() {
+ available_ = 0;
+ for (int i = 0; i < kFreeListsLength; i++) {
+ free_[i].head_node_ = NULL;
+ }
+ needs_rebuild_ = false;
+ finger_ = kHead;
+ free_[kHead].next_size_ = kEnd;
+}
+
+
+// Rebuild the chain of non-empty size classes: walk all size indices above
+// kHead and link together those whose bucket has at least one node,
+// terminating the chain with kEnd.
+void OldSpaceFreeList::RebuildSizeList() {
+ ASSERT(needs_rebuild_);
+ int last = kHead;
+ int index = kHead + 1;
+ while (index < kFreeListsLength) {
+ if (free_[index].head_node_ != NULL) {
+ free_[last].next_size_ = index;
+ last = index;
+ }
+ index++;
+ }
+ free_[last].next_size_ = kEnd;
+ needs_rebuild_ = false;
+}
+
+
+// Add the block [start, start + size_in_bytes) to the free list. Returns the
+// number of bytes wasted (the whole block, if it is too small to be listed;
+// zero otherwise).
+int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
+#ifdef DEBUG
+ // Zap the freed region so stale pointers into it are caught.
+ for (int i = 0; i < size_in_bytes; i += kPointerSize) {
+ Memory::Address_at(start + i) = kZapValue;
+ }
+#endif
+ FreeListNode* node = FreeListNode::FromAddress(start);
+ node->set_size(size_in_bytes);
+
+ // Early return to drop too-small blocks on the floor (one or two word
+ // blocks cannot hold a map pointer, a size field, and a pointer to the
+ // next block in the free list).
+ if (size_in_bytes < kMinBlockSize) {
+ return size_in_bytes;
+ }
+
+ // Insert other blocks at the head of an exact free list.
+ int index = size_in_bytes >> kPointerSizeLog2;
+ node->set_next(free_[index].head_node_);
+ free_[index].head_node_ = node->address();
+ available_ += size_in_bytes;
+ // The size list is now stale; it is rebuilt lazily on the next Allocate.
+ needs_rebuild_ = true;
+ return 0;
+}
+
+
+// Allocate size_in_bytes from the free list. An exact-fit bucket is tried
+// first; otherwise the smallest sufficiently large size class is found via
+// the size list and the remainder is split off and returned to the free
+// list (or counted as waste if it is too small to hold a node). Returns a
+// RetryAfterGC failure when no block is large enough. *wasted_bytes
+// receives the bytes lost to a too-small remainder, else zero.
+Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
+ ASSERT(0 < size_in_bytes);
+ ASSERT(size_in_bytes <= kMaxBlockSize);
+ ASSERT(IsAligned(size_in_bytes, kPointerSize));
+
+ if (needs_rebuild_) RebuildSizeList();
+ int index = size_in_bytes >> kPointerSizeLog2;
+ // Check for a perfect fit.
+ if (free_[index].head_node_ != NULL) {
+ FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
+ // If this was the last block of its size, remove the size.
+ if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
+ available_ -= size_in_bytes;
+ *wasted_bytes = 0;
+ return node;
+ }
+ // Search the size list for the best fit. The finger caches the previous
+ // search position; it is only usable if it is below the requested size.
+ int prev = finger_ < index ? finger_ : kHead;
+ int cur = FindSize(index, &prev);
+ ASSERT(index < cur);
+ if (cur == kEnd) {
+ // No large enough size in list.
+ *wasted_bytes = 0;
+ return Failure::RetryAfterGC(size_in_bytes, owner_);
+ }
+ // rem is the size class of the remainder after carving size_in_bytes out
+ // of a block of size class cur.
+ int rem = cur - index;
+ int rem_bytes = rem << kPointerSizeLog2;
+ FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
+ FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
+ size_in_bytes);
+ // Distinguish the cases prev < rem < cur and rem <= prev < cur
+ // to avoid many redundant tests and calls to Insert/RemoveSize.
+ if (prev < rem) {
+ // Simple case: insert rem between prev and cur.
+ finger_ = prev;
+ free_[prev].next_size_ = rem;
+ // If this was the last block of size cur, remove the size.
+ if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+ free_[rem].next_size_ = free_[cur].next_size_;
+ } else {
+ free_[rem].next_size_ = cur;
+ }
+ // Add the remainder block.
+ rem_node->set_size(rem_bytes);
+ rem_node->set_next(free_[rem].head_node_);
+ free_[rem].head_node_ = rem_node->address();
+ } else {
+ // If this was the last block of size cur, remove the size.
+ if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+ finger_ = prev;
+ free_[prev].next_size_ = free_[cur].next_size_;
+ }
+ if (rem_bytes < kMinBlockSize) {
+ // Too-small remainder is wasted.
+ rem_node->set_size(rem_bytes);
+ available_ -= size_in_bytes + rem_bytes;
+ *wasted_bytes = rem_bytes;
+ return cur_node;
+ }
+ // Add the remainder block and, if needed, insert its size.
+ rem_node->set_size(rem_bytes);
+ rem_node->set_next(free_[rem].head_node_);
+ free_[rem].head_node_ = rem_node->address();
+ if (rem_node->next() == NULL) InsertSize(rem);
+ }
+ available_ -= size_in_bytes;
+ *wasted_bytes = 0;
+ return cur_node;
+}
+
+
+// Create an empty free list of map-sized blocks.
+MapSpaceFreeList::MapSpaceFreeList() {
+ Reset();
+}
+
+
+// Empty the list: no available bytes, no head node.
+void MapSpaceFreeList::Reset() {
+ available_ = 0;
+ head_ = NULL;
+}
+
+
+// Return the map-sized block at |start| to the head of the free list.
+void MapSpaceFreeList::Free(Address start) {
+#ifdef DEBUG
+ // Zap the freed region so stale pointers into it are caught.
+ for (int i = 0; i < Map::kSize; i += kPointerSize) {
+ Memory::Address_at(start + i) = kZapValue;
+ }
+#endif
+ FreeListNode* node = FreeListNode::FromAddress(start);
+ node->set_size(Map::kSize);
+ node->set_next(head_);
+ head_ = node->address();
+ available_ += Map::kSize;
+}
+
+
+// Pop a map-sized block off the head of the free list, or return a
+// RetryAfterGC failure for the map space if the list is empty.
+Object* MapSpaceFreeList::Allocate() {
+ if (head_ == NULL) {
+ return Failure::RetryAfterGC(Map::kSize, MAP_SPACE);
+ }
+
+ FreeListNode* node = FreeListNode::FromAddress(head_);
+ head_ = node->next();
+ available_ -= Map::kSize;
+ return node;
+}
+
+
+// -----------------------------------------------------------------------------
+// OldSpace implementation
+
+// Prepare this space's accounting and allocation mode for a mark-compact
+// collection. |will_compact| selects between compacting and non-compacting
+// bookkeeping; in both cases the free list is cleared.
+void OldSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ // Reset relocation info. During a compacting collection, everything in
+ // the space is considered 'available' and we will rediscover live data
+ // and waste during the collection.
+ MCResetRelocationInfo();
+ mc_end_of_relocation_ = bottom();
+ ASSERT(Available() == Capacity());
+ } else {
+ // During a non-compacting collection, everything below the linear
+ // allocation pointer is considered allocated (everything above is
+ // available) and we will rediscover available and wasted bytes during
+ // the collection.
+ accounting_stats_.AllocateBytes(free_list_.available());
+ accounting_stats_.FillWastedBytes(Waste());
+ }
+
+ // Clear the free list and switch to linear allocation if we are in FREE_LIST
+ free_list_.Reset();
+ if (allocation_mode_ == FREE_LIST) allocation_mode_ = LINEAR;
+}
+
+
+// Advance the end-of-relocation pointer past an object of size_in_bytes
+// that was relocated to |address|, skipping to the next page's object area
+// when the current page's relocation top has been reached.
+void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
+ ASSERT(Contains(address));
+ Address current_top = mc_end_of_relocation_;
+ Page* current_page = Page::FromAllocationTop(current_top);
+
+ // No more objects relocated to this page? Move to the next.
+ ASSERT(current_top <= current_page->mc_relocation_top);
+ if (current_top == current_page->mc_relocation_top) {
+ // The space should already be properly expanded.
+ CHECK(next_page->is_valid());
+ Page* next_page = current_page->next_page();
+ mc_end_of_relocation_ = next_page->ObjectAreaStart();
+ }
+ // Objects must be relocated in order, with no gaps.
+ ASSERT(mc_end_of_relocation_ == address);
+ mc_end_of_relocation_ += size_in_bytes;
+}
+
+
+// After mark-compact relocation, adopt the forwarding allocation pointer as
+// the real one and rebuild the free list from the unused tails of the pages
+// that received relocated objects.
+void OldSpace::MCCommitRelocationInfo() {
+ // Update fast allocation info.
+ allocation_info_.top = mc_forwarding_info_.top;
+ allocation_info_.limit = mc_forwarding_info_.limit;
+ ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+
+ // The space is compacted and we haven't yet built free lists or
+ // wasted any space.
+ ASSERT(Waste() == 0);
+ ASSERT(AvailableFree() == 0);
+
+ // Build the free list for the space.
+ int computed_size = 0;
+ PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+ while (it.has_next()) {
+ Page* p = it.next();
+ // Space below the relocation pointer is allocated.
+ computed_size += p->mc_relocation_top - p->ObjectAreaStart();
+ if (it.has_next()) {
+ // Free the space at the top of the page. We cannot use
+ // p->mc_relocation_top after the call to Free (because Free will clear
+ // remembered set bits).
+ int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
+ if (extra_size > 0) {
+ int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
+ // The bytes we have just "freed" to add to the free list were
+ // already accounted as available.
+ accounting_stats_.WasteBytes(wasted_bytes);
+ }
+ }
+ }
+
+ // Make sure the computed size - based on the used portion of the pages in
+ // use - matches the size obtained while computing forwarding addresses.
+ ASSERT(computed_size == Size());
+}
+
+
+// Fast-path allocation of size_in_bytes in this space: bump-pointer
+// allocation in LINEAR/LINEAR_ONLY mode, free-list allocation otherwise.
+// Falls through to SlowAllocateRaw when the fast path fails.
+Object* OldSpace::AllocateRawInternal(int size_in_bytes,
+ AllocationInfo* alloc_info) {
+ ASSERT(HasBeenSetup());
+
+ if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
+ // Try linear allocation in the current page.
+ Address cur_top = alloc_info->top;
+ Address new_top = cur_top + size_in_bytes;
+ if (new_top <= alloc_info->limit) {
+ Object* obj = HeapObject::FromAddress(cur_top);
+ alloc_info->top = new_top;
+ ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
+
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ ASSERT(Size() <= Capacity());
+ return obj;
+ }
+ } else {
+ // For now we should not try free list allocation during m-c relocation.
+ ASSERT(alloc_info == &allocation_info_);
+ int wasted_bytes;
+ Object* object = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ if (!object->IsFailure()) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ return object;
+ }
+ }
+ // Fast allocation failed.
+ return SlowAllocateRaw(size_in_bytes, alloc_info);
+}
+
+
+// Slow cases for AllocateRawInternal. In linear allocation mode, try
+// to allocate in the next page in the space. If there are no more
+// pages, switch to free-list allocation if permitted, otherwise try
+// to grow the space. In free-list allocation mode, try to grow the
+// space and switch to linear allocation. On success, recurses into
+// AllocateRawInternal to perform the actual allocation; otherwise
+// returns a RetryAfterGC failure.
+Object* OldSpace::SlowAllocateRaw(int size_in_bytes,
+ AllocationInfo* alloc_info) {
+ if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
+ Page* top_page = TopPageOf(*alloc_info);
+ // Until we implement free-list allocation during global gc, we have two
+ // cases: one for normal allocation and one for m-c relocation allocation.
+ if (alloc_info == &allocation_info_) { // Normal allocation.
+ int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
+ // Add the extra space at the top of this page to the free list.
+ if (free_size > 0) {
+ int wasted_bytes = free_list_.Free(alloc_info->top, free_size);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ alloc_info->top += free_size;
+ ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
+ }
+
+ // Move to the next page in this space if there is one; switch
+ // to free-list allocation, if we can; try to expand the space otherwise
+ if (top_page->next_page()->is_valid()) {
+ SetAllocationInfo(alloc_info, top_page->next_page());
+ } else if (allocation_mode_ == LINEAR) {
+ allocation_mode_ = FREE_LIST;
+ } else if (Expand(top_page)) {
+ ASSERT(top_page->next_page()->is_valid());
+ SetAllocationInfo(alloc_info, top_page->next_page());
+ } else {
+ return Failure::RetryAfterGC(size_in_bytes, identity());
+ }
+ } else { // Allocation during m-c relocation.
+ // During m-c 'allocation' while computing forwarding addresses, we do
+ // not yet add blocks to the free list because they still contain live
+ // objects. We also cache the m-c forwarding allocation pointer in the
+ // current page.
+
+ // If there are no more pages try to expand the space. This can only
+ // happen when promoting objects from the new space.
+ if (!top_page->next_page()->is_valid()) {
+ if (!Expand(top_page)) {
+ return Failure::RetryAfterGC(size_in_bytes, identity());
+ }
+ }
+
+ // Move to the next page.
+ ASSERT(top_page->next_page()->is_valid());
+ top_page->mc_relocation_top = alloc_info->top;
+ SetAllocationInfo(alloc_info, top_page->next_page());
+ }
+ } else { // Free-list allocation.
+ // We failed to allocate from the free list; try to expand the space and
+ // switch back to linear allocation.
+ ASSERT(alloc_info == &allocation_info_);
+ Page* top_page = TopPageOf(*alloc_info);
+ if (!top_page->next_page()->is_valid()) {
+ if (!Expand(top_page)) {
+ return Failure::RetryAfterGC(size_in_bytes, identity());
+ }
+ }
+
+ // We surely have more pages, move to the next page and switch to linear
+ // allocation.
+ ASSERT(top_page->next_page()->is_valid());
+ SetAllocationInfo(alloc_info, top_page->next_page());
+ ASSERT(allocation_mode_ == FREE_LIST);
+ allocation_mode_ = LINEAR;
+ }
+
+ // Perform the allocation.
+ return AllocateRawInternal(size_in_bytes, alloc_info);
+}
+
+
+#ifdef DEBUG
+// We do not assume that the PageIterator works, because it depends on the
+// invariants we are checking during verification.
+// Checks, for every page up to the allocation top: that the page is packed
+// with valid objects (map in map space, object not itself a map), and that
+// interior pointers have correct remembered-set bits.
+void OldSpace::Verify() {
+ // The allocation pointer should be valid, and it should be in a page in the
+ // space.
+ ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+ Page* top_page = Page::FromAllocationTop(allocation_info_.top);
+ ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
+
+ // Loop over all the pages.
+ bool above_allocation_top = false;
+ Page* current_page = first_page_;
+ while (current_page->is_valid()) {
+ if (above_allocation_top) {
+ // We don't care what's above the allocation top.
+ } else {
+ // Unless this is the last page in the space containing allocated
+ // objects, the allocation top should be at the object area end.
+ Address top = current_page->AllocationTop();
+ if (current_page == top_page) {
+ ASSERT(top == allocation_info_.top);
+ // The next page will be above the allocation top.
+ above_allocation_top = true;
+ } else {
+ ASSERT(top == current_page->ObjectAreaEnd());
+ }
+
+ // It should be packed with objects from the bottom to the top.
+ Address current = current_page->ObjectAreaStart();
+ while (current < top) {
+ HeapObject* object = HeapObject::FromAddress(current);
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map* map = object->map();
+ ASSERT(map->IsMap());
+ ASSERT(Heap::map_space()->Contains(map));
+
+ // The object should not be a map.
+ ASSERT(!object->IsMap());
+
+ // The object itself should look OK.
+ // This is blocked by bug #1006953.
+ // object->Verify();
+
+ // All the interior pointers should be contained in the heap and have
+ // their remembered set bits set if they point to new space. Code
+ // objects do not have remembered set bits that we care about.
+ VerifyPointersAndRSetVisitor rset_visitor;
+ VerifyPointersVisitor no_rset_visitor;
+ int size = object->Size();
+ if (object->IsCode()) {
+ // IC targets must be object pointers while the body is iterated.
+ Code::cast(object)->ConvertICTargetsFromAddressToObject();
+ object->IterateBody(map->instance_type(), size, &no_rset_visitor);
+ Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+ } else {
+ object->IterateBody(map->instance_type(), size, &rset_visitor);
+ }
+
+ current += size;
+ }
+
+ // The allocation pointer should not be in the middle of an object.
+ ASSERT(current == top);
+ }
+
+ current_page = current_page->next_page();
+ }
+}
+
+
+// Accumulated code-size statistics for one code comment: the comment text,
+// the total code bytes covered, and the number of occurrences.
+struct CommentStatistic {
+ const char* comment;
+ int size;
+ int count;
+ void Clear() {
+ comment = NULL;
+ size = 0;
+ count = 0;
+ }
+};
+
+
+// must be small, since an iteration is used for lookup
+const int kMaxComments = 64;
+// The extra slot at index kMaxComments collects overflow as "Unknown".
+static CommentStatistic comments_statistics[kMaxComments+1];
+
+
+// Print the code statistics gathered by CollectCodeStatistics: first the
+// per-kind histogram, then size/count (and average) for every comment seen.
+void PagedSpace::ReportCodeStatistics() {
+ ReportCodeKindStatistics();
+ PrintF("Code comment statistics (\" [ comment-txt : size/ "
+ "count (average)\"):\n");
+ for (int i = 0; i <= kMaxComments; i++) {
+ const CommentStatistic& cs = comments_statistics[i];
+ if (cs.size > 0) {
+ PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
+ cs.size/cs.count);
+ }
+ }
+ PrintF("\n");
+}
+
+
+// Clear all code statistics: the per-kind counters and every comment entry,
+// re-initializing the overflow slot with the "Unknown" label.
+void PagedSpace::ResetCodeStatistics() {
+ ClearCodeKindStatistics();
+ for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
+ comments_statistics[kMaxComments].comment = "Unknown";
+ comments_statistics[kMaxComments].size = 0;
+ comments_statistics[kMaxComments].count = 0;
+}
+
+
+// Adds |delta| code bytes to the 'comments_statistics' entry for |comment|,
+// creating a new entry in the first free slot if needed; if the table is
+// full the overflow ("Unknown") slot is used. Performance OK as long as
+// 'kMaxComments' is small, since lookup is a linear scan.
+static void EnterComment(const char* comment, int delta) {
+ // Do not count empty comments
+ if (delta <= 0) return;
+ // Default to the overflow slot in case no free/matching entry is found.
+ CommentStatistic* cs = &comments_statistics[kMaxComments];
+ // Search for a free or matching entry in 'comments_statistics': 'cs'
+ // points to result.
+ for (int i = 0; i < kMaxComments; i++) {
+ if (comments_statistics[i].comment == NULL) {
+ cs = &comments_statistics[i];
+ cs->comment = comment;
+ break;
+ } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
+ cs = &comments_statistics[i];
+ break;
+ }
+ }
+ // Update entry for 'comment'
+ cs->size += delta;
+ cs->count += 1;
+}
+
+
+// Call for each nested comment start (start marked with '[ xxx', end marked
+// with ']'). RelocIterator 'it' must point to a comment reloc info.
+// Recursively accounts nested comments; the flat (non-nested) code bytes
+// are attributed to this comment via EnterComment.
+static void CollectCommentStatistics(RelocIterator* it) {
+ ASSERT(!it->done());
+ ASSERT(it->rinfo()->rmode() == comment);
+ const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
+ if (tmp[0] != '[') {
+ // Not a nested comment; skip
+ return;
+ }
+
+ // Search for end of nested comment or a new nested comment
+ const char* const comment_txt =
+ reinterpret_cast<const char*>(it->rinfo()->data());
+ const byte* prev_pc = it->rinfo()->pc();
+ int flat_delta = 0;
+ it->next();
+ while (true) {
+ // All nested comments must be terminated properly, and therefore exit
+ // from loop.
+ ASSERT(!it->done());
+ if (it->rinfo()->rmode() == comment) {
+ const char* const txt =
+ reinterpret_cast<const char*>(it->rinfo()->data());
+ // Count the code bytes since the last comment boundary as flat.
+ flat_delta += it->rinfo()->pc() - prev_pc;
+ if (txt[0] == ']') break; // End of nested comment
+ // A new comment
+ CollectCommentStatistics(it);
+ // Skip code that was covered with previous comment
+ prev_pc = it->rinfo()->pc();
+ }
+ it->next();
+ }
+ EnterComment(comment_txt, flat_delta);
+}
+
+
+// Collects code size statistics:
+// - by code kind
+// - by code comment
+// Iterates every Code object in this space, adding its size to the per-kind
+// table and attributing instruction bytes to comments (bytes not covered by
+// any comment are charged to "NoComment").
+void PagedSpace::CollectCodeStatistics() {
+ HeapObjectIterator obj_it(this);
+ while (obj_it.has_next()) {
+ HeapObject* obj = obj_it.next();
+ if (obj->IsCode()) {
+ Code* code = Code::cast(obj);
+ code_kind_statistics[code->kind()] += code->Size();
+ RelocIterator it(code);
+ int delta = 0;
+ const byte* prev_pc = code->instruction_start();
+ while (!it.done()) {
+ if (it.rinfo()->rmode() == comment) {
+ // Bytes up to this comment were not covered by any comment.
+ delta += it.rinfo()->pc() - prev_pc;
+ CollectCommentStatistics(&it);
+ prev_pc = it.rinfo()->pc();
+ }
+ it.next();
+ }
+
+ // Account the trailing uncommented bytes as well.
+ ASSERT(code->instruction_start() <= prev_pc &&
+ prev_pc <= code->relocation_start());
+ delta += code->relocation_start() - prev_pc;
+ EnterComment("NoComment", delta);
+ }
+ }
+}
+
+
+// Print this space's statistics: capacity/waste/availability, remembered-set
+// bit counts (distinguishing fixed-array element pointers from other
+// pointers, and how many actually point into new space), and finally an
+// object histogram of the live objects.
+void OldSpace::ReportStatistics() {
+ int pct = Available() * 100 / Capacity();
+ PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
+ Capacity(), Waste(), Available(), pct);
+
+ // Report remembered set statistics.
+ int rset_marked_pointers = 0;
+ int rset_marked_arrays = 0;
+ int rset_marked_array_elements = 0;
+ int cross_gen_pointers = 0;
+ int cross_gen_array_elements = 0;
+
+ PageIterator page_it(this, PageIterator::PAGES_IN_USE);
+ while (page_it.has_next()) {
+ Page* p = page_it.next();
+
+ // Scan the page's remembered set one word at a time.
+ for (Address rset_addr = p->RSetStart();
+ rset_addr < p->RSetEnd();
+ rset_addr += kIntSize) {
+ int rset = Memory::int_at(rset_addr);
+ if (rset != 0) {
+ // Bits were set
+ int intoff = rset_addr - p->address();
+ int bitoff = 0;
+ for (; bitoff < kBitsPerInt; ++bitoff) {
+ if ((rset & (1 << bitoff)) != 0) {
+ // Map the bit back to the slot address it covers.
+ int bitpos = intoff*kBitsPerByte + bitoff;
+ Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
+ Object** obj = reinterpret_cast<Object**>(slot);
+ if (*obj == Heap::fixed_array_map()) {
+ rset_marked_arrays++;
+ FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
+
+ rset_marked_array_elements += fa->length();
+ // Manually inline FixedArray::IterateBody
+ Address elm_start = slot + FixedArray::kHeaderSize;
+ Address elm_stop = elm_start + fa->length() * kPointerSize;
+ for (Address elm_addr = elm_start;
+ elm_addr < elm_stop; elm_addr += kPointerSize) {
+ // Filter non-heap-object pointers
+ Object** elm_p = reinterpret_cast<Object**>(elm_addr);
+ if (Heap::InNewSpace(*elm_p))
+ cross_gen_array_elements++;
+ }
+ } else {
+ rset_marked_pointers++;
+ if (Heap::InNewSpace(*obj))
+ cross_gen_pointers++;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ pct = rset_marked_pointers == 0 ?
+ 0 : cross_gen_pointers * 100 / rset_marked_pointers;
+ PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
+ rset_marked_pointers, cross_gen_pointers, pct);
+ PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
+ PrintF(" elements %d, ", rset_marked_array_elements);
+ pct = rset_marked_array_elements == 0 ? 0
+ : cross_gen_array_elements * 100 / rset_marked_array_elements;
+ PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
+ PrintF(" total rset-marked bits %d\n",
+ (rset_marked_pointers + rset_marked_arrays));
+ pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
+ : (cross_gen_pointers + cross_gen_array_elements) * 100 /
+ (rset_marked_pointers + rset_marked_array_elements);
+ PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
+ (rset_marked_pointers + rset_marked_array_elements),
+ (cross_gen_pointers + cross_gen_array_elements),
+ pct);
+
+ // Object histogram of everything currently live in this space.
+ ClearHistograms();
+ HeapObjectIterator obj_it(this);
+ while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+ ReportHistogram(true);
+}
+
+
+// Dump the range of remembered set words between [start, end) corresponding
+// to the pointers starting at object_p. The allocation_top is an object
+// pointer which should not be read past. This is important for large object
+// pages, where some bits in the remembered set range do not correspond to
+// allocated addresses.
+// Output legend: '.' = clear bit, 'X' = set bit pointing to new space,
+// 'o' = set bit pointing to old space, '|' = allocation top, '#' = past it.
+static void PrintRSetRange(Address start, Address end, Object** object_p,
+ Address allocation_top) {
+ Address rset_address = start;
+
+ // If the range starts on an odd numbered word (eg, for large object extra
+ // remembered set ranges), print some spaces.
+ if ((reinterpret_cast<uint32_t>(start) / kIntSize) % 2 == 1) {
+ PrintF(" ");
+ }
+
+ // Loop over all the words in the range.
+ while (rset_address < end) {
+ uint32_t rset_word = Memory::uint32_at(rset_address);
+ int bit_position = 0;
+
+ // Loop over all the bits in the word.
+ while (bit_position < kBitsPerInt) {
+ if (object_p == reinterpret_cast<Object**>(allocation_top)) {
+ // Print a bar at the allocation pointer.
+ PrintF("|");
+ } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
+ // Do not dereference object_p past the allocation pointer.
+ PrintF("#");
+ } else if ((rset_word & (1 << bit_position)) == 0) {
+ // Print a dot for zero bits.
+ PrintF(".");
+ } else if (Heap::InNewSpace(*object_p)) {
+ // Print an X for one bits for pointers to new space.
+ PrintF("X");
+ } else {
+ // Print a circle for one bits for pointers to old space.
+ PrintF("o");
+ }
+
+ // Print a space after every 8th bit except the last.
+ if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
+ PrintF(" ");
+ }
+
+ // Advance to next bit.
+ bit_position++;
+ object_p++;
+ }
+
+ // Print a newline after every odd numbered word, otherwise a space.
+ if ((reinterpret_cast<uint32_t>(rset_address) / kIntSize) % 2 == 1) {
+ PrintF("\n");
+ } else {
+ PrintF(" ");
+ }
+
+ // Advance to next remembered set word.
+ rset_address += kIntSize;
+ }
+}
+
+
+// Dump the remembered set of every page in use, labeled with |space_name|,
+// using PrintRSetRange up to each page's allocation top.
+void PagedSpace::DoPrintRSet(const char* space_name) {
+ PageIterator it(this, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ Page* p = it.next();
+ PrintF("%s page 0x%x:\n", space_name, p);
+ PrintRSetRange(p->RSetStart(), p->RSetEnd(),
+ reinterpret_cast<Object**>(p->ObjectAreaStart()),
+ p->AllocationTop());
+ PrintF("\n");
+ }
+}
+
+
+// Dump this space's remembered sets, labeled "old".
+void OldSpace::PrintRSet() { DoPrintRSet("old"); }
+#endif
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+// Prepare the map space for a mark-compact collection: for a compacting GC,
+// reset relocation info and record each page's address in the page index;
+// otherwise fold the free list into the allocation accounting. The free
+// list is cleared in both cases.
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ // Reset relocation info.
+ MCResetRelocationInfo();
+
+ // Initialize map index entry.
+ int page_count = 0;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (it.has_next()) {
+ ASSERT_MAP_PAGE_INDEX(page_count);
+
+ Page* p = it.next();
+ ASSERT(p->mc_page_index == page_count);
+
+ page_addresses_[page_count++] = p->address();
+ }
+
+ // During a compacting collection, everything in the space is considered
+ // 'available' (set by the call to MCResetRelocationInfo) and we will
+ // rediscover live and wasted bytes during the collection.
+ ASSERT(Available() == Capacity());
+ } else {
+ // During a non-compacting collection, everything below the linear
+ // allocation pointer except wasted top-of-page blocks is considered
+ // allocated and we will rediscover available bytes during the
+ // collection.
+ accounting_stats_.AllocateBytes(free_list_.available());
+ }
+
+ // Clear the free list and switch to linear allocation if not already
+ // required.
+ free_list_.Reset();
+ if (allocation_mode_ != LINEAR_ONLY) allocation_mode_ = LINEAR;
+}
+
+
+// After mark-compact relocation, adopt the forwarding allocation pointer as
+// the real one and account each non-final page's unused tail as waste.
+void MapSpace::MCCommitRelocationInfo() {
+ // Update fast allocation info.
+ allocation_info_.top = mc_forwarding_info_.top;
+ allocation_info_.limit = mc_forwarding_info_.limit;
+ ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+
+ // The space is compacted and we haven't yet wasted any space.
+ ASSERT(Waste() == 0);
+
+ // Update allocation_top of each page in use and compute waste.
+ int computed_size = 0;
+ PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+ while (it.has_next()) {
+ Page* page = it.next();
+ Address page_top = page->AllocationTop();
+ computed_size += page_top - page->ObjectAreaStart();
+ if (it.has_next()) {
+ accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
+ }
+ }
+
+ // Make sure the computed size - based on the used portion of the
+ // pages in use - matches the size we adjust during allocation.
+ ASSERT(computed_size == Size());
+}
+
+
+// Fast-path allocation in the map space: bump-pointer allocation in
+// LINEAR/LINEAR_ONLY mode, otherwise pop a block from the map free list.
+// Falls through to SlowAllocateRaw when the fast path fails.
+Object* MapSpace::AllocateRawInternal(int size_in_bytes,
+ AllocationInfo* alloc_info) {
+ ASSERT(HasBeenSetup());
+ // When doing free-list allocation, we implicitly assume that we always
+ // allocate a map-sized block.
+ ASSERT(size_in_bytes == Map::kSize);
+
+ if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
+ // Try linear allocation in the current page.
+ Address cur_top = alloc_info->top;
+ Address new_top = cur_top + size_in_bytes;
+ if (new_top <= alloc_info->limit) {
+ Object* obj = HeapObject::FromAddress(cur_top);
+ alloc_info->top = new_top;
+ ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
+
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ return obj;
+ }
+ } else {
+ // We should not do free list allocation during m-c compaction.
+ ASSERT(alloc_info == &allocation_info_);
+ Object* object = free_list_.Allocate();
+ if (!object->IsFailure()) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ return object;
+ }
+ }
+ // Fast allocation failed.
+ return SlowAllocateRaw(size_in_bytes, alloc_info);
+}
+
+
+// Slow case for AllocateRawInternal. In linear allocation mode, try to
+// allocate in the next page in the space. If there are no more pages, switch
+// to free-list allocation. In free-list allocation mode, try to grow the
+// space and switch to linear allocation.
+Object* MapSpace::SlowAllocateRaw(int size_in_bytes,
+                                  AllocationInfo* alloc_info) {
+  if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
+    Page* top_page = TopPageOf(*alloc_info);
+
+    // We do not do free-list allocation during compacting GCs.
+    if (alloc_info == &mc_forwarding_info_) {
+      // We expect to always have more pages, because the map space cannot
+      // grow during GC. Move to the next page.
+      CHECK(top_page->next_page()->is_valid());
+      // Record where relocated objects end on the page we are leaving so
+      // the collector can find the page's relocation top later.
+      top_page->mc_relocation_top = alloc_info->top;
+      SetAllocationInfo(alloc_info, top_page->next_page());
+    } else {  // Normal allocation.
+      // Move to the next page in this space (counting the top-of-page block
+      // as waste) if there is one, otherwise switch to free-list allocation if
+      // permitted, otherwise try to expand the heap
+      if (top_page->next_page()->is_valid() ||
+          (allocation_mode_ == LINEAR_ONLY && Expand(top_page))) {
+        // The abandoned tail of the page is always exactly kPageExtra bytes.
+        int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
+        ASSERT(free_size == kPageExtra);
+        accounting_stats_.WasteBytes(free_size);
+        SetAllocationInfo(alloc_info, top_page->next_page());
+      } else if (allocation_mode_ == LINEAR) {
+        // Out of pages: fall back to allocating from the free list.
+        allocation_mode_ = FREE_LIST;
+      } else {
+        // LINEAR_ONLY and expansion failed: caller must GC and retry.
+        return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
+      }
+    }
+  } else {  // Free-list allocation.
+    ASSERT(alloc_info == &allocation_info_);
+    // We failed to allocate from the free list (ie, it must be empty) so try
+    // to expand the space and switch back to linear allocation.
+    Page* top_page = TopPageOf(*alloc_info);
+    if (!top_page->next_page()->is_valid()) {
+      if (!Expand(top_page)) {
+        return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
+      }
+    }
+
+    // We have more pages now so we can move to the next and switch to linear
+    // allocation.
+    ASSERT(top_page->next_page()->is_valid());
+    int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
+    ASSERT(free_size == kPageExtra);
+    accounting_stats_.WasteBytes(free_size);
+    SetAllocationInfo(alloc_info, top_page->next_page());
+    ASSERT(allocation_mode_ == FREE_LIST);
+    allocation_mode_ = LINEAR;
+  }
+
+  // Perform the allocation.
+  return AllocateRawInternal(size_in_bytes, alloc_info);
+}
+
+
+#ifdef DEBUG
+// We do not assume that the PageIterator works, because it depends on the
+// invariants we are checking during verification.
+// Debug-only heap verifier for the map space: checks the allocation
+// pointer, the per-page allocation tops, and that every page in use is
+// densely packed with valid map (or byte array) objects.
+void MapSpace::Verify() {
+  // The allocation pointer should be valid, and it should be in a page in the
+  // space.
+  ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
+  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
+  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
+
+  // Loop over all the pages.
+  bool above_allocation_top = false;
+  Page* current_page = first_page_;
+  while (current_page->is_valid()) {
+    if (above_allocation_top) {
+      // We don't care what's above the allocation top.
+    } else {
+      // Unless this is the last page in the space containing allocated
+      // objects, the allocation top should be at a constant offset from the
+      // object area end.
+      Address top = current_page->AllocationTop();
+      if (current_page == top_page) {
+        ASSERT(top == allocation_info_.top);
+        // The next page will be above the allocation top.
+        above_allocation_top = true;
+      } else {
+        ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
+      }
+
+      // It should be packed with objects from the bottom to the top.
+      Address current = current_page->ObjectAreaStart();
+      while (current < top) {
+        HeapObject* object = HeapObject::FromAddress(current);
+
+        // The first word should be a map, and we expect all map pointers to
+        // be in map space.
+        Map* map = object->map();
+        ASSERT(map->IsMap());
+        ASSERT(Heap::map_space()->Contains(map));
+
+        // The object should be a map or a byte array.
+        ASSERT(object->IsMap() || object->IsByteArray());
+
+        // The object itself should look OK.
+        // This is blocked by bug #1006953.
+        // object->Verify();
+
+        // All the interior pointers should be contained in the heap and
+        // have their remembered set bits set if they point to new space.
+        VerifyPointersAndRSetVisitor visitor;
+        int size = object->Size();
+        object->IterateBody(map->instance_type(), size, &visitor);
+
+        // Advance past the object; objects are contiguous in the page.
+        current += size;
+      }
+
+      // The allocation pointer should not be in the middle of an object.
+      ASSERT(current == top);
+    }
+
+    current_page = current_page->next_page();
+  }
+}
+
+
+// Debug-only statistics dump for the map space: capacity/waste summary,
+// remembered-set pointer counts, and an object-type histogram.
+void MapSpace::ReportStatistics() {
+  // Percentage of the capacity that is still available.
+  int pct = Available() * 100 / Capacity();
+  PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
+         Capacity(), Waste(), Available(), pct);
+
+  // Report remembered set statistics.
+  int rset_marked_pointers = 0;
+  int cross_gen_pointers = 0;
+
+  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
+  while (page_it.has_next()) {
+    Page* p = page_it.next();
+
+    // Scan the page's remembered set one int-sized word at a time.
+    for (Address rset_addr = p->RSetStart();
+         rset_addr < p->RSetEnd();
+         rset_addr += kIntSize) {
+      int rset = Memory::int_at(rset_addr);
+      if (rset != 0) {
+        // Bits were set: decode each set bit back to the slot address it
+        // covers and classify the pointer stored in that slot.
+        int intoff = rset_addr - p->address();
+        int bitoff = 0;
+        for (; bitoff < kBitsPerInt; ++bitoff) {
+          if ((rset & (1 << bitoff)) != 0) {
+            int bitpos = intoff*kBitsPerByte + bitoff;
+            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
+            Object** obj = reinterpret_cast<Object**>(slot);
+            rset_marked_pointers++;
+            if (Heap::InNewSpace(*obj))
+              cross_gen_pointers++;
+          }
+        }
+      }
+    }
+  }
+
+  // Fraction of rset-marked slots that actually point into new space.
+  pct = rset_marked_pointers == 0 ?
+        0 : cross_gen_pointers * 100 / rset_marked_pointers;
+  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
+         rset_marked_pointers, cross_gen_pointers, pct);
+
+  // Object-type histogram over all objects currently in the space.
+  ClearHistograms();
+  HeapObjectIterator obj_it(this);
+  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+  ReportHistogram(false);
+}
+
+
+// Dump this space's remembered set, labelling the output as "map".
+void MapSpace::PrintRSet() {
+  DoPrintRSet("map");
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectIterator
+
+// Iterate all objects in the space; with no size function supplied,
+// objects report their own sizes.
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space)
+    : current_(space->first_chunk_), size_func_(NULL) {}
+
+
+// Iterate all objects in the space, using size_func to compute each
+// object's size.
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
+                                         HeapObjectCallback size_func)
+    : current_(space->first_chunk_), size_func_(size_func) {}
+
+
+// Return the object in the current chunk and advance to the next chunk.
+HeapObject* LargeObjectIterator::next() {
+  ASSERT(has_next());
+  HeapObject* result = current_->GetObject();
+  current_ = current_->next();
+  return result;
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectChunk
+
+// Allocate raw memory for a chunk large enough to hold an object of
+// size_in_bytes. On success, *chunk_size receives the actual size
+// obtained from the OS; returns NULL on failure.
+LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
+                                        size_t* chunk_size) {
+  size_t requested = ChunkSizeFor(size_in_bytes);
+  void* memory = MemoryAllocator::AllocateRawMemory(requested, chunk_size);
+  if (memory == NULL) return NULL;
+  LOG(NewEvent("LargeObjectChunk", memory, *chunk_size));
+  if (*chunk_size >= requested) {
+    return reinterpret_cast<LargeObjectChunk*>(memory);
+  }
+  // The OS delivered less memory than requested; release it and fail.
+  MemoryAllocator::FreeRawMemory(memory, *chunk_size);
+  LOG(DeleteEvent("LargeObjectChunk", memory));
+  return NULL;
+}
+
+
+// Compute how much raw memory to request from the OS for an object of
+// size_in_bytes: the object itself, the page header, plus alignment
+// slack so the object area can start on a page boundary even when the
+// OS allocation granularity is smaller than a page.
+int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
+  int alignment_slack = 0;
+  int os_alignment = OS::AllocateAlignment();
+  if (os_alignment < Page::kPageSize) {
+    alignment_slack = Page::kPageSize - os_alignment;
+  }
+  return size_in_bytes + alignment_slack + Page::kObjectStartOffset;
+}
+
+// -----------------------------------------------------------------------------
+// LargeObjectSpace
+
+// Construct an empty large object space; Setup() initializes it for use.
+LargeObjectSpace::LargeObjectSpace() {
+  first_chunk_ = NULL;
+  size_ = 0;
+  page_count_ = 0;
+}
+
+
+// Reset the space to its empty initial state. Always succeeds.
+bool LargeObjectSpace::Setup() {
+  size_ = 0;
+  page_count_ = 0;
+  first_chunk_ = NULL;
+  return true;
+}
+
+
+// Release every chunk owned by the space and reset the bookkeeping.
+void LargeObjectSpace::TearDown() {
+  while (first_chunk_ != NULL) {
+    LargeObjectChunk* doomed = first_chunk_;
+    // Unlink before freeing so the list head never dangles.
+    first_chunk_ = doomed->next();
+    LOG(DeleteEvent("LargeObjectChunk", doomed->address()));
+    MemoryAllocator::FreeRawMemory(doomed->address(), doomed->size());
+  }
+
+  size_ = 0;
+  page_count_ = 0;
+}
+
+
+// Allocate a fresh chunk holding a single large object. requested_size
+// covers the object plus any trailing extra area; the extra
+// (requested_size - object_size) bytes immediately after the object are
+// zeroed (used as the extra remembered set for large fixed arrays).
+// Returns a retry-after-GC failure if the OS allocation fails.
+Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
+                                              int object_size) {
+  ASSERT(0 < object_size && object_size <= requested_size);
+  size_t chunk_size;
+  LargeObjectChunk* chunk =
+      LargeObjectChunk::New(requested_size, &chunk_size);
+  if (chunk == NULL) {
+    return Failure::RetryAfterGC(requested_size, LO_SPACE);
+  }
+
+  // Account for the new chunk and link it at the head of the chunk list.
+  size_ += chunk_size;
+  page_count_++;
+  chunk->set_next(first_chunk_);
+  chunk->set_size(chunk_size);
+  first_chunk_ = chunk;
+
+  // Set the object address and size in the page header and clear its
+  // remembered set.
+  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+  Address object_address = page->ObjectAreaStart();
+  // Clear the low order bit of the second word in the page to flag it as a
+  // large object page. If the chunk_size happened to be written there, its
+  // low order bit should already be clear.
+  ASSERT((chunk_size & 0x1) == 0);
+  page->is_normal_page &= ~0x1;
+  page->ClearRSet();
+  int extra_bytes = requested_size - object_size;
+  if (extra_bytes > 0) {
+    // The extra memory for the remembered set should be cleared.
+    memset(object_address + object_size, 0, extra_bytes);
+  }
+
+  return HeapObject::FromAddress(object_address);
+}
+
+
+// Allocate a large object with no extra remembered-set area past it.
+Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
+  ASSERT(size_in_bytes > 0);
+  return AllocateRawInternal(size_in_bytes, size_in_bytes);
+}
+
+
+// Allocate a large fixed array, reserving extra space past the object
+// for the remembered-set bits covering its elements.
+Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
+  int total_size = size_in_bytes + ExtraRSetBytesFor(size_in_bytes);
+  return AllocateRawInternal(total_size, size_in_bytes);
+}
+
+
+// GC support
+Object* LargeObjectSpace::FindObject(Address a) {
+ for (LargeObjectChunk* chunk = first_chunk_;
+ chunk != NULL;
+ chunk = chunk->next()) {
+ Address chunk_address = chunk->address();
+ if (chunk_address <= a && a < chunk_address + chunk->size()) {
+ return chunk->GetObject();
+ }
+ }
+ return Failure::Exception();
+}
+
+
+// Clear the remembered set of every fixed array in the space, both the
+// normal per-page region and the extra region stored past the object.
+void LargeObjectSpace::ClearRSet() {
+  ASSERT(Page::is_rset_in_use());
+
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    HeapObject* object = it.next();
+    // Of the object kinds kept in large object space (code, sequential
+    // strings, fixed arrays), only fixed arrays need remembered sets.
+    if (!object->IsFixedArray()) continue;
+
+    // Clear the normal remembered set region of the page.
+    Page::FromAddress(object->address())->ClearRSet();
+
+    // Clear the extra remembered set stored just past the object.
+    int object_size = object->Size();
+    memset(object->address() + object_size,
+           0,
+           ExtraRSetBytesFor(object_size));
+  }
+}
+
+
+// Visit, via copy_object_func, the remembered-set-marked slots of every
+// fixed array in the space: first the slots covered by the page's
+// normal remembered set, then the slots covered by the extra remembered
+// set stored directly after the array.
+void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
+  ASSERT(Page::is_rset_in_use());
+
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    HeapObject* object = it.next();
+    if (object->IsFixedArray()) {
+      // Iterate the normal page remembered set range. The range ends at
+      // the object end if the object is shorter than the object area.
+      Page* page = Page::FromAddress(object->address());
+      Address object_end = object->address() + object->Size();
+      Heap::IterateRSetRange(page->ObjectAreaStart(),
+                             Min(page->ObjectAreaEnd(), object_end),
+                             page->RSetStart(),
+                             copy_object_func);
+
+      // Iterate the extra array elements. Their remembered-set words
+      // start at object_end, immediately after the array itself.
+      if (object_end > page->ObjectAreaEnd()) {
+        Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
+                               object_end, copy_object_func);
+      }
+    }
+  }
+}
+
+
+// Sweep the large object space: clear the mark bit on surviving objects
+// and unlink and free the chunk of every unmarked (dead) object.
+void LargeObjectSpace::FreeUnmarkedObjects() {
+  LargeObjectChunk* previous = NULL;
+  LargeObjectChunk* current = first_chunk_;
+  while (current != NULL) {
+    HeapObject* object = current->GetObject();
+    if (is_marked(object)) {
+      // Survivor: clear the mark for the next collection and move on.
+      clear_mark(object);
+      previous = current;
+      current = current->next();
+    } else {
+      // Save the chunk's bounds before unlinking it.
+      Address chunk_address = current->address();
+      size_t chunk_size = current->size();
+
+      // Cut the chunk out from the chunk list.
+      current = current->next();
+      if (previous == NULL) {
+        first_chunk_ = current;
+      } else {
+        previous->set_next(current);
+      }
+
+      // Free the chunk.
+      if (object->IsCode()) {
+        LOG(CodeDeleteEvent(object->address()));
+      }
+      size_ -= chunk_size;
+      page_count_--;
+      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
+    }
+  }
+}
+
+
+// True if object lives in this space, i.e. on a large object page.
+bool LargeObjectSpace::Contains(HeapObject* object) {
+  Page* page = Page::FromAddress(object->address());
+
+  // A page flagged as a large object page must actually contain one of
+  // our objects (slow check only).
+  SLOW_ASSERT(!page->IsLargeObjectPage()
+              || !FindObject(object->address())->IsFailure());
+
+  return page->IsLargeObjectPage();
+}
+
+
+#ifdef DEBUG
+// We do not assume that the large object iterator works, because it depends
+// on the invariants we are checking during verification.
+// Debug-only heap verifier for the large object space: checks each
+// chunk's object placement, map, allowed type, and interior pointers.
+void LargeObjectSpace::Verify() {
+  for (LargeObjectChunk* chunk = first_chunk_;
+       chunk != NULL;
+       chunk = chunk->next()) {
+    // Each chunk contains an object that starts at the large object page's
+    // object area start.
+    HeapObject* object = chunk->GetObject();
+    Page* page = Page::FromAddress(object->address());
+    ASSERT(object->address() == page->ObjectAreaStart());
+
+    // The first word should be a map, and we expect all map pointers to be
+    // in map space.
+    Map* map = object->map();
+    ASSERT(map->IsMap());
+    ASSERT(Heap::map_space()->Contains(map));
+
+    // We have only code, sequential strings, fixed arrays, and byte arrays
+    // in large object space.
+    ASSERT(object->IsCode() || object->IsSeqString()
+           || object->IsFixedArray() || object->IsByteArray());
+
+    // The object itself should look OK.
+    // This is blocked by bug #1006953.
+    // object->Verify();
+
+    // Byte arrays and strings don't have interior pointers.
+    if (object->IsCode()) {
+      // Convert IC targets to objects so the pointer visitor sees real
+      // heap pointers, then convert back afterwards.
+      VerifyPointersVisitor code_visitor;
+      Code::cast(object)->ConvertICTargetsFromAddressToObject();
+      object->IterateBody(map->instance_type(),
+                          object->Size(),
+                          &code_visitor);
+      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+    } else if (object->IsFixedArray()) {
+      // We loop over fixed arrays ourselves, rather then using the visitor,
+      // because the visitor doesn't support the start/offset iteration
+      // needed for IsRSetSet.
+      FixedArray* array = FixedArray::cast(object);
+      for (int j = 0; j < array->length(); j++) {
+        Object* element = array->get(j);
+        if (element->IsHeapObject()) {
+          HeapObject* element_object = HeapObject::cast(element);
+          ASSERT(Heap::Contains(element_object));
+          ASSERT(element_object->map()->IsMap());
+          // Pointers into new space must have their rset bit set.
+          if (Heap::InNewSpace(element_object)) {
+            ASSERT(Page::IsRSetSet(object->address(),
+                                   FixedArray::kHeaderSize + j * kPointerSize));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+// Print every object in the space (debug builds only).
+void LargeObjectSpace::Print() {
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    HeapObject* object = it.next();
+    object->Print();
+  }
+}
+
+
+// Print the space's total size, its object count, and a per-type
+// histogram of the objects it holds.
+void LargeObjectSpace::ReportStatistics() {
+  PrintF(" size: %d\n", size_);
+
+  ClearHistograms();
+  int num_objects = 0;
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    CollectHistogramInfo(it.next());
+    num_objects++;
+  }
+
+  PrintF(" number of objects %d\n", num_objects);
+  if (num_objects > 0) ReportHistogram(false);
+}
+
+
+// Add the size of every code object in this space to the global
+// per-kind code statistics table.
+void LargeObjectSpace::CollectCodeStatistics() {
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    HeapObject* heap_object = it.next();
+    if (!heap_object->IsCode()) continue;
+    Code* code = Code::cast(heap_object);
+    code_kind_statistics[code->kind()] += code->Size();
+  }
+}
+
+
+// Print the remembered set of every fixed array in the space: first the
+// normal per-page region, then the extra region covering array elements
+// past the end of the page's object area.
+void LargeObjectSpace::PrintRSet() {
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    HeapObject* object = it.next();
+    if (object->IsFixedArray()) {
+      Page* page = Page::FromAddress(object->address());
+
+      Address allocation_top = object->address() + object->Size();
+      // Cast explicitly: passing a Page* through varargs for %x is a
+      // format/argument mismatch (undefined behavior). Pointers are
+      // 4 bytes on the supported target, so unsigned int is wide enough.
+      PrintF("large page 0x%x:\n",
+             reinterpret_cast<unsigned int>(page));
+      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
+                     reinterpret_cast<Object**>(object->address()),
+                     allocation_top);
+      // Remembered-set words for elements beyond the page's object area
+      // are stored immediately after the array itself.
+      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
+      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
+                                    kBitsPerInt);
+      PrintF("------------------------------------------------------------"
+             "-----------\n");
+      PrintRSetRange(allocation_top,
+                     allocation_top + extra_rset_bits / kBitsPerByte,
+                     reinterpret_cast<Object**>(object->address()
+                                                + Page::kObjectAreaSize),
+                     allocation_top);
+      PrintF("\n");
+    }
+  }
+}
+#endif // DEBUG
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPACES_H_
+#define V8_SPACES_H_
+
+#include "list-inl.h"
+#include "log.h"
+
+namespace v8 { namespace internal {
+
+// -----------------------------------------------------------------------------
+// Heap structures:
+//
+// A JS heap consists of a young generation, an old generation, and a large
+// object space. The young generation is divided into two semispaces. A
+// scavenger implements Cheney's copying algorithm. The old generation is
+// separated into a map space and an old object space. The map space contains
+// all (and only) map objects, the rest of old objects go into the old space.
+// The old generation is collected by a mark-sweep-compact collector.
+//
+// The semispaces of the young generation are contiguous. The old and map
+// spaces consist of a list of pages. A page has a page header, a remembered
+// set area, and an object area. A page size is deliberately chosen as 8K
+// bytes. The first word of a page is an opaque page header that has the
+// address of the next page and its ownership information. The second word may
+// have the allocation top address of this page. The next 248 bytes are
+// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
+// remembered set bit corresponds to a pointer in the object area.
+//
+// There is a separate large object space for objects larger than
+// Page::kMaxHeapObjectSize, so that they do not have to move during
+// collection. The large object space is paged and uses the same remembered
+// set implementation. Pages in large object space may be larger than 8K.
+//
+// NOTE: The mark-compact collector rebuilds the remembered set after a
+// collection. It reuses first a few words of the remembered set for
+// bookkeeping relocation information.
+
+
+// Some assertion macros used in the debugging mode.
+
+// Asserts that an address lies on a page boundary.
+#define ASSERT_PAGE_ALIGNED(address) \
+  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+
+// Asserts that an address is aligned to the heap object alignment.
+#define ASSERT_OBJECT_ALIGNED(address) \
+  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
+
+// Asserts that an object size is positive and fits on a single page.
+#define ASSERT_OBJECT_SIZE(size) \
+  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+
+// Asserts that a page offset lies within the page's object area bounds.
+#define ASSERT_PAGE_OFFSET(offset) \
+  ASSERT((Page::kObjectStartOffset <= offset) \
+      && (offset <= Page::kPageSize))
+
+// Asserts that a map-space page index is in range.
+#define ASSERT_MAP_PAGE_INDEX(index) \
+  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
+
+
+// Forward declarations.
+class PagedSpace;
+class MemoryAllocator;
+struct AllocationInfo;
+
+// -----------------------------------------------------------------------------
+// A page normally has 8K bytes. Large object pages may be larger. A page
+// address is always aligned to the 8K page size. A page is divided into
+// three areas: the first two words are used for bookkeeping, the next 248
+// bytes are used as remembered set, and the rest of the page is the object
+// area.
+//
+// Pointers are aligned to the pointer size (4 bytes), only 1 bit is needed
+// for a pointer in the remembered set. Given an address, its remembered set
+// bit position (offset from the start of the page) is calculated by dividing
+// its page offset by 32. Therefore, the object area in a page starts at the
+// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
+// the first two words (64 bits) in a page can be used for other purposes.
+//
+// The mark-compact collector transforms a map pointer into a page index and a
+// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
+// 8K) in total. Because a map pointer is aligned to the pointer size (4
+// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
+// page index + 11 for the offset in the page) are required to encode a map
+// pointer.
+//
+// The only way to get a page pointer is by calling factory methods:
+// Page* p = Page::FromAddress(addr); or
+// Page* p = Page::FromAllocationTop(top);
+class Page {
+ public:
+  // Returns the page containing a given address. The address ranges
+  // from [page_addr .. page_addr + kPageSize[
+  //
+  // Note that this function only works for addresses in normal paged
+  // spaces and addresses in the first 8K of large object pages (ie,
+  // the start of large objects but not necessarily derived pointers
+  // within them).
+  INLINE(static Page* FromAddress(Address a)) {
+    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
+  }
+
+  // Returns the page containing an allocation top. Because an allocation
+  // top address can be the upper bound of the page, we need to subtract
+  // it with kPointerSize first. The address ranges from
+  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
+  INLINE(static Page* FromAllocationTop(Address top)) {
+    Page* p = FromAddress(top - kPointerSize);
+    ASSERT_PAGE_OFFSET(p->Offset(top));
+    return p;
+  }
+
+  // Returns the start address of this page. A Page object's address is
+  // the page's start address because the header is laid out at offset 0.
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  // Checks whether this is a valid page address.
+  bool is_valid() { return address() != NULL; }
+
+  // Returns the next page of this page.
+  inline Page* next_page();
+
+  // Return the end of allocation in this page.
+  inline Address AllocationTop();
+
+  // Returns the start address of the object area in this page.
+  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
+
+  // Returns the end address (exclusive) of the object area in this page.
+  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+
+  // Returns the start address of the remembered set area.
+  Address RSetStart() { return address() + kRSetStartOffset; }
+
+  // Returns the end address of the remembered set area (exclusive).
+  Address RSetEnd() { return address() + kRSetEndOffset; }
+
+  // Checks whether an address is page aligned.
+  static bool IsAlignedToPageSize(Address a) {
+    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
+  }
+
+  // True if this page is a large object page: such pages have the
+  // low-order bit of the second header word cleared.
+  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
+
+  // Returns the offset of a given address to this page.
+  INLINE(int Offset(Address a)) {
+    int offset = a - address();
+    ASSERT_PAGE_OFFSET(offset);
+    return offset;
+  }
+
+  // Returns the address corresponding to a given offset into this page.
+  Address OffsetToAddress(int offset) {
+    ASSERT_PAGE_OFFSET(offset);
+    return address() + offset;
+  }
+
+  // ---------------------------------------------------------------------
+  // Remembered set support
+
+  // Clears remembered set in this page.
+  inline void ClearRSet();
+
+  // Return the address of the remembered set word corresponding to an
+  // object address/offset pair, and the bit encoded as a single-bit
+  // mask in the output parameter 'bitmask'.
+  INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
+                                               uint32_t* bitmask));
+
+  // Sets the corresponding remembered set bit for a given address.
+  INLINE(static void SetRSet(Address address, int offset));
+
+  // Clears the corresponding remembered set bit for a given address.
+  static inline void UnsetRSet(Address address, int offset);
+
+  // Checks whether the remembered set bit for a given address is set.
+  static inline bool IsRSetSet(Address address, int offset);
+
+#ifdef DEBUG
+  // Use a state to mark whether remembered set space can be used for other
+  // purposes.
+  enum RSetState { IN_USE, NOT_IN_USE };
+  static bool is_rset_in_use() { return rset_state_ == IN_USE; }
+  static void set_rset_state(RSetState state) { rset_state_ = state; }
+#endif
+
+  // 8K bytes per page.
+  static const int kPageSizeBits = 13;
+
+  // Page size in bytes.
+  static const int kPageSize = 1 << kPageSizeBits;
+
+  // Page size mask.
+  static const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+  // The end offset of the remembered set in a page
+  // (heaps are aligned to pointer size).
+  static const int kRSetEndOffset = kPageSize / kBitsPerPointer;
+
+  // The start offset of the remembered set in a page. The bits that
+  // would cover the header/rset region itself are unused, which frees
+  // the first words of the page for the header (see file comment).
+  static const int kRSetStartOffset = kRSetEndOffset / kBitsPerPointer;
+
+  // The start offset of the object area in a page.
+  static const int kObjectStartOffset = kRSetEndOffset;
+
+  // Object area size in bytes.
+  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
+
+  // Maximum object size that fits in a page.
+  static const int kMaxHeapObjectSize = kObjectAreaSize;
+
+  //---------------------------------------------------------------------------
+  // Page header description.
+  //
+  // If a page is not in a large object space, the first word,
+  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
+  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
+  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
+  // or [next_page_start, next_page_end[. It cannot point to a valid address
+  // in the current page. If a page is in the large object space, the first
+  // word *may* (if the page start and large object chunk start are the
+  // same) contain the address of the next large object chunk.
+  int opaque_header;
+
+  // If the page is not in the large object space, the low-order bit of the
+  // second word is set. If the page is in the large object space, the
+  // second word *may* (if the page start and large object chunk start are
+  // the same) contain the large object chunk size. In either case, the
+  // low-order bit for large object pages will be cleared.
+  int is_normal_page;
+
+  // The following fields overlap with remembered set, they can only
+  // be used in the mark-compact collector when remembered set is not
+  // used.
+
+  // The allocation pointer after relocating objects to this page.
+  Address mc_relocation_top;
+
+  // The index of the page in its owner space.
+  int mc_page_index;
+
+  // The forwarding address of the first live object in this page.
+  Address mc_first_forwarded;
+
+#ifdef DEBUG
+ private:
+  static RSetState rset_state_;  // state of the remembered set
+#endif
+};
+
+
+// ----------------------------------------------------------------------------
+// A space acquires chunks of memory from the operating system. The memory
+// allocator manages chunks for the paged heap spaces (old space and map
+// space). A paged chunk consists of pages. Pages in a chunk have contiguous
+// addresses and are linked as a list.
+//
+// The allocator keeps an initial chunk which is used for the new space. The
+// leftover regions of the initial chunk are used for the initial chunks of
+// old space and map space if they are big enough to hold at least one page.
+// The allocator assumes that there is one old space and one map space. Each
+// space is expanded by allocating kPagesPerChunk pages at a time, except
+// possibly for the final expansion before the space runs out of memory. The
+// first chunk may contain fewer than kPagesPerChunk pages as well.
+//
+// The memory allocator also allocates chunks for the large object space, but
+// they are managed by the space itself. The new space does not expand.
+
+class MemoryAllocator : public AllStatic {
+ public:
+ // Initializes its internal bookkeeping structures.
+ // Max capacity of the total space.
+ static bool Setup(int max_capacity);
+
+ // Deletes valid chunks.
+ static void TearDown();
+
+ // Reserves an initial address range of virtual memory to be split between
+ // the two new space semispaces, the old space, and the map space. The
+ // memory is not yet committed or assigned to spaces and split into pages.
+ // The initial chunk is unmapped when the memory allocator is torn down.
+ // This function should only be called when there is not already a reserved
+ // initial chunk (initial_chunk_ should be NULL). It returns the start
+ // address of the initial chunk if successful, with the side effect of
+ // setting the initial chunk, or else NULL if unsuccessful and leaves the
+ // initial chunk NULL.
+ static void* ReserveInitialChunk(const size_t requested);
+
+ // Commits pages from an as-yet-unmanaged block of virtual memory into a
+ // paged space. The block should be part of the initial chunk reserved via
+ // a call to ReserveInitialChunk. The number of pages is always returned in
+ // the output parameter num_pages. This function assumes that the start
+ // address is non-null and that it is big enough to hold at least one
+ // page-aligned page. The call always succeeds, and num_pages is always
+ // greater than zero.
+ static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+ int* num_pages);
+
+ // Commit a contiguous block of memory from the initial chunk. Assumes that
+ // the address is not NULL, the size is greater than zero, and that the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ static bool CommitBlock(Address start, size_t size);
+
+ // Attempts to allocate the requested (non-zero) number of pages from the
+ // OS. Fewer pages might be allocated than requested. If it fails to
+ // allocate memory for the OS or cannot allocate a single page, this
+ // function returns an invalid page pointer (NULL). The caller must check
+ // whether the returned page is valid (by calling Page::is_valid()). It is
+ // guaranteed that allocated pages have contiguous addresses. The actual
+ // number of allocated page is returned in the output parameter
+ // allocated_pages.
+ static Page* AllocatePages(int requested_pages, int* allocated_pages,
+ PagedSpace* owner);
+
+ // Frees pages from a given page and after. If 'p' is the first page
+ // of a chunk, pages from 'p' are freed and this function returns an
+ // invalid page pointer. Otherwise, the function searches a page
+ // after 'p' that is the first page of a chunk. Pages after the
+ // found page are freed and the function returns 'p'.
+ static Page* FreePages(Page* p);
+
+ // Allocates and frees raw memory of certain size.
+ // These are just thin wrappers around OS::Allocate and OS::Free,
+ // but keep track of allocated bytes as part of heap.
+ static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+ static void FreeRawMemory(void* buf, size_t length);
+
+ // Returns the maximum available bytes of heaps.
+ static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+
+ // Links two pages.
+ static inline void SetNextPage(Page* prev, Page* next);
+
+ // Returns the next page of a given page.
+ static inline Page* GetNextPage(Page* p);
+
+ // Checks whether a page belongs to a space.
+ static inline bool IsPageInSpace(Page* p, PagedSpace* space);
+
+ // Returns the space that owns the given page.
+ static inline PagedSpace* PageOwner(Page* page);
+
+ // Finds the first/last page in the same chunk as a given page.
+ static Page* FindFirstPageInSameChunk(Page* p);
+ static Page* FindLastPageInSameChunk(Page* p);
+
+#ifdef DEBUG
+ // Reports statistic info of the space.
+ static void ReportStatistics();
+#endif
+
+ // Due to encoding limitation, we can only have 8K chunks.
+ static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
+  // With kPagesPerChunk = 64 pages of 8K each, the maximum heap size is
+  // about 8K * 64 * 8K = 4G bytes.
+ static const int kPagesPerChunk = 64;
+ static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+
+ private:
+ // Maximum space size in bytes.
+ static int capacity_;
+
+ // Allocated space size in bytes.
+ static int size_;
+
+ // The initial chunk of virtual memory.
+ static VirtualMemory* initial_chunk_;
+
+ // Allocated chunk info: chunk start address, chunk size, and owning space.
+  class ChunkInfo BASE_EMBEDDED {
+   public:
+    // A default-constructed ChunkInfo represents an unused slot.
+    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
+
+    // Records a chunk's start address, byte size, and owning space.
+    void init(Address a, size_t s, PagedSpace* o) {
+      owner_ = o;
+      size_ = s;
+      address_ = a;
+    }
+
+    // Read-only accessors.
+    Address address() { return address_; }
+    size_t size() { return size_; }
+    PagedSpace* owner() { return owner_; }
+
+   private:
+    Address address_;
+    size_t size_;
+    PagedSpace* owner_;
+  };
+
+ // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
+ static List<ChunkInfo> chunks_;
+ static List<int> free_chunk_ids_;
+ static int max_nof_chunks_;
+ static int top_;
+
+ // Push/pop a free chunk id onto/from the stack.
+ static void Push(int free_chunk_id);
+ static int Pop();
+ static bool OutOfChunkIds() { return top_ == 0; }
+
+ // Frees a chunk.
+ static void DeleteChunk(int chunk_id);
+
+ // Basic check whether a chunk id is in the valid range.
+ static inline bool IsValidChunkId(int chunk_id);
+
+ // Checks whether a chunk id identifies an allocated chunk.
+ static inline bool IsValidChunk(int chunk_id);
+
+ // Returns the chunk id that a page belongs to.
+ static inline int GetChunkId(Page* p);
+
+ // Initializes pages in a chunk. Returns the first page address.
+ // This function and GetChunkId() are provided for the mark-compact
+ // collector to rebuild page headers in the from space, which is
+ // used as a marking stack and its page headers are destroyed.
+ static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+ PagedSpace* owner);
+};
+
+
+// -----------------------------------------------------------------------------
+// Interface for heap object iterator to be implemented by all object space
+// object iterators.
+//
+// NOTE: The space specific object iterators also implement their own
+// has_next() and next() methods, which are used to avoid using virtual
+// functions when iterating a specific space.
+
+class ObjectIterator : public Malloced {
+ public:
+  virtual ~ObjectIterator() { }
+
+  // Returns whether the iterator has more objects to deliver.
+  virtual bool has_next_object() = 0;
+  // Returns the next object. Only valid when has_next_object() has
+  // returned true.
+  virtual HeapObject* next_object() = 0;
+};
+
+
+// -----------------------------------------------------------------------------
+// Space iterator for iterating over all spaces.
+//
+// For each space an object iterator is provided. The deallocation of the
+// returned object iterators is handled by the space iterator.
+
+class SpaceIterator : public Malloced {
+ public:
+  SpaceIterator();
+  virtual ~SpaceIterator();
+
+  // Returns whether there is another space to iterate over.
+  bool has_next();
+  // Returns an object iterator for the next space. Per the class comment
+  // above, the returned iterator is deallocated by this SpaceIterator.
+  ObjectIterator* next();
+
+ private:
+  // Creates the object iterator for the current space.
+  ObjectIterator* CreateIterator();
+
+  int current_space_;  // from enum AllocationSpace.
+  ObjectIterator* iterator_;  // object iterator for the current space.
+};
+
+
+// -----------------------------------------------------------------------------
+// Heap object iterator in new/old/map spaces.
+//
+// A HeapObjectIterator iterates objects from a given address to the
+// top of a space. The given address must be below the current
+// allocation pointer (space top). If the space top changes during
+// iteration (because of allocating new objects), the iterator does
+// not iterate new objects. The caller function must create a new
+// iterator starting from the old top in order to visit these new
+// objects. Heap::Scavenge() is such an example.
+
+class HeapObjectIterator: public ObjectIterator {
+ public:
+  // Creates a new object iterator in a given space. If a start
+  // address is not given, the iterator starts from the space bottom.
+  // If the size function is not given, the iterator calls the default
+  // Object::Size().
+  explicit HeapObjectIterator(PagedSpace* space);
+  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
+  HeapObjectIterator(PagedSpace* space, Address start);
+  HeapObjectIterator(PagedSpace* space,
+                     Address start,
+                     HeapObjectCallback size_func);
+
+  // Inline fast path; falls back to HasNextInNextPage() at a page
+  // boundary.
+  inline bool has_next();
+  inline HeapObject* next();
+
+  // Implementation of the ObjectIterator interface.
+  virtual bool has_next_object() { return has_next(); }
+  virtual HeapObject* next_object() { return next(); }
+
+ private:
+  Address cur_addr_;  // current iteration point
+  Address end_addr_;  // end iteration point
+  Address cur_limit_;  // current page limit
+  HeapObjectCallback size_func_;  // size function
+  Page* end_page_;  // caches the page of the end address
+
+  // Slow path of has_next, checks whether there are more objects in
+  // the next page.
+  bool HasNextInNextPage();
+
+  // Initializes fields.
+  void Initialize(Address start, Address end, HeapObjectCallback size_func);
+
+#ifdef DEBUG
+  // Verifies whether fields have valid values.
+  void Verify();
+#endif
+};
+
+
+// -----------------------------------------------------------------------------
+// A PageIterator iterates pages in a space.
+//
+// The PageIterator class provides three modes for iterating pages in a space:
+// PAGES_IN_USE iterates pages that are in use by the allocator;
+//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+// mark-compact collection;
+// ALL_PAGES iterates all pages in the space.
+
+class PageIterator BASE_EMBEDDED {
+ public:
+  // PAGES_IN_USE: pages used by the allocator; PAGES_USED_BY_MC: pages
+  // holding relocated objects during mark-compact; ALL_PAGES: every page.
+  enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES};
+
+  PageIterator(PagedSpace* space, Mode mode);
+
+  inline bool has_next();
+  inline Page* next();
+
+ private:
+  Page* cur_page_;  // next page to return
+  Page* stop_page_;  // page where to stop
+};
+
+
+// -----------------------------------------------------------------------------
+// A space has a list of pages. The next page can be accessed via
+// Page::next_page() call. The next page of the last page is an
+// invalid page pointer. A space can expand and shrink dynamically.
+
+// An abstraction of allocation and relocation pointers in a page-structured
+// space.
+struct AllocationInfo {
+  Address top;  // current allocation top
+  Address limit;  // current allocation limit; TopPageOf() maps it to a page
+};
+
+
+// An abstraction of the accounting statistics of a page-structured space.
+// The 'capacity' of a space is the number of object-area bytes (ie, not
+// including page bookkeeping structures) currently in the space. The 'size'
+// of a space is the number of allocated bytes, the 'waste' in the space is
+// the number of bytes that are not allocated and not available to
+// allocation without reorganizing the space via a GC (eg, small blocks due
+// to internal fragmentation, top of page areas in map space), and the bytes
+// 'available' is the number of unallocated bytes that are not waste. The
+// capacity is the sum of size, waste, and available.
+//
+// The stats are only set by functions that ensure they stay balanced. These
+// functions increase or decrease one of the non-capacity stats in
+// conjunction with capacity, or else they always balance increases and
+// decreases to the non-capacity stats.
+class AllocationStats BASE_EMBEDDED {
+ public:
+  AllocationStats() { Clear(); }
+
+  // Forget everything, including the capacity.
+  void Clear() {
+    waste_ = 0;
+    size_ = 0;
+    available_ = 0;
+    capacity_ = 0;
+  }
+
+  // Keep the capacity, but mark all of it as available again (nothing
+  // allocated, nothing wasted).
+  void Reset() {
+    waste_ = 0;
+    size_ = 0;
+    available_ = capacity_;
+  }
+
+  // Accessors.
+  int Capacity() { return capacity_; }
+  int Available() { return available_; }
+  int Size() { return size_; }
+  int Waste() { return waste_; }
+
+  // Capacity grows; the new bytes start out available.
+  void ExpandSpace(int size_in_bytes) {
+    available_ += size_in_bytes;
+    capacity_ += size_in_bytes;
+  }
+
+  // Capacity shrinks; the removed bytes come out of the available pool.
+  void ShrinkSpace(int size_in_bytes) {
+    available_ -= size_in_bytes;
+    capacity_ -= size_in_bytes;
+  }
+
+  // Move bytes from available to size (an allocation).
+  void AllocateBytes(int size_in_bytes) {
+    size_ += size_in_bytes;
+    available_ -= size_in_bytes;
+  }
+
+  // Move bytes from size back to available (a deallocation).
+  void DeallocateBytes(int size_in_bytes) {
+    available_ += size_in_bytes;
+    size_ -= size_in_bytes;
+  }
+
+  // Move bytes from available to waste (lost to fragmentation).
+  void WasteBytes(int size_in_bytes) {
+    waste_ += size_in_bytes;
+    available_ -= size_in_bytes;
+  }
+
+  // Move bytes from waste to size (filler objects count as allocated).
+  void FillWastedBytes(int size_in_bytes) {
+    size_ += size_in_bytes;
+    waste_ -= size_in_bytes;
+  }
+
+ private:
+  int capacity_;   // invariant: size_ + waste_ + available_
+  int available_;  // unallocated, usable bytes
+  int size_;       // allocated bytes
+  int waste_;      // unusable fragments
+};
+
+
+class PagedSpace : public Malloced {
+  friend class PageIterator;
+ public:
+  // Creates a space with a maximum capacity, and an id.
+  PagedSpace(int max_capacity, AllocationSpace id);
+
+  // Set up the space using the given address range of virtual memory (from
+  // the memory allocator's initial chunk) if possible. If the block of
+  // addresses is not big enough to contain a single page-aligned page, a
+  // fresh chunk will be allocated.
+  bool Setup(Address start, size_t size);
+
+  // Returns true if the space has been successfully set up and not
+  // subsequently torn down.
+  bool HasBeenSetup();
+
+  // Cleans up the space, frees all pages in this space except those belonging
+  // to the initial chunk, uncommits addresses in the initial chunk.
+  void TearDown();
+
+  // Checks whether an object/address is in this space.
+  inline bool Contains(Address a);
+  bool Contains(HeapObject* o) { return Contains(o->address()); }
+
+  // Finds an object that the given address falls in its body. Returns
+  // Failure::Exception() if the operation failed. The implementation
+  // iterates objects in the page containing the address, the cost is
+  // linear to the number of objects in the page. It may be slow.
+  Object* FindObject(Address addr);
+
+  // Clears remembered sets of pages in this space.
+  void ClearRSet();
+
+  // Current capacity without growing (Size() + Available() + Waste()).
+  int Capacity() { return accounting_stats_.Capacity(); }
+
+  // Available bytes without growing.
+  int Available() { return accounting_stats_.Available(); }
+
+  // Allocated bytes in this space.
+  int Size() { return accounting_stats_.Size(); }
+
+  // Wasted bytes due to fragmentation and not recoverable until the
+  // next GC of this space.
+  int Waste() { return accounting_stats_.Waste(); }
+
+  // Returns the address of the first object in this space.
+  Address bottom() { return first_page_->ObjectAreaStart(); }
+
+  // Returns the allocation pointer in this space.
+  Address top() { return allocation_info_.top; }
+
+  // Returns the identity of this space (from enum AllocationSpace).
+  AllocationSpace identity() { return identity_; }
+
+  // If 'linear_only' is true, force allocation_mode_ to
+  // LINEAR_ONLY. If 'linear_only' is false, allocation_mode_ is
+  // checked to be LINEAR_ONLY and changed to LINEAR, allowing it to
+  // alternate between LINEAR and FREE_LIST automatically.
+  void SetLinearAllocationOnly(bool linear_only);
+
+  // ---------------------------------------------------------------------------
+  // Mark-compact collection support functions
+
+  // Set the relocation point to the beginning of the space.
+  void MCResetRelocationInfo();
+
+  // Writes relocation info to the top page.
+  void MCWriteRelocationInfoToPage() {
+    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
+  }
+
+  // Computes the offset of a given address in this space to the beginning
+  // of the space.
+  int MCSpaceOffsetForAddress(Address addr);
+
+  // Releases half of unused pages.
+  void Shrink();
+
+  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
+  bool EnsureCapacity(int capacity);
+
+#ifdef DEBUG
+  void CheckLinearAllocationOnly() { CHECK(allocation_mode_ == LINEAR_ONLY); }
+
+  // Print meta info and objects in this space.
+  void Print();
+
+  // Report code object related statistics
+  void CollectCodeStatistics();
+  static void ReportCodeStatistics();
+  static void ResetCodeStatistics();
+#endif
+
+ protected:
+  // In LINEAR and LINEAR_ONLY mode, allocation is from the end of the last
+  // page. In FREE_LIST mode, allocation is from a fragment list of free
+  // space at the end of recent pages. LINEAR and FREE_LIST mode alternate
+  // automatically. LINEAR_ONLY mode is sticky until converted to LINEAR by
+  // an API call.
+  enum AllocationMode { LINEAR_ONLY, LINEAR, FREE_LIST };
+
+  // Maximum capacity of this space.
+  int max_capacity_;
+
+  // Accounting information for this space.
+  AllocationStats accounting_stats_;
+
+  // The first page in this space.
+  Page* first_page_;
+
+  // The allocation mode.
+  AllocationMode allocation_mode_;
+
+  // Normal allocation information.
+  AllocationInfo allocation_info_;
+
+  // Relocation information during mark-compact collections.
+  AllocationInfo mc_forwarding_info_;
+
+  // Sets allocation pointer to a page bottom.
+  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
+
+  // Returns the top page specified by an allocation info structure.
+  static Page* TopPageOf(AllocationInfo alloc_info) {
+    return Page::FromAllocationTop(alloc_info.limit);
+  }
+
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate requested number of pages from OS. Newly allocated
+  // pages are appended to the last_page.
+  bool Expand(Page* last_page);
+
+#ifdef DEBUG
+  void DoPrintRSet(const char* space_name);
+#endif
+ private:
+  // Identity of this space.
+  AllocationSpace identity_;
+
+  // Returns the page of the allocation pointer.
+  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+
+  // Returns a pointer to the page of the relocation pointer.
+  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
+
+#ifdef DEBUG
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+#endif
+};
+
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+// HistogramInfo class for recording a single "bar" of a histogram. This
+// class is used for collecting statistics to print to stdout (when compiled
+// with DEBUG) or to the log file (when compiled with
+// ENABLE_LOGGING_AND_PROFILING).
+class HistogramInfo BASE_EMBEDDED {
+ public:
+  HistogramInfo() : number_(0), bytes_(0) {}
+
+  // Label for this bar (the pointer is stored, not copied).
+  const char* name() { return name_; }
+  void set_name(const char* name) { name_ = name; }
+
+  // Count of objects recorded in this bar.
+  int number() { return number_; }
+  void increment_number(int num) { number_ += num; }
+
+  // Total byte size accumulated in this bar.
+  int bytes() { return bytes_; }
+  void increment_bytes(int size) { bytes_ += size; }
+
+  // Zeroes both counters; the name survives.
+  void clear() {
+    bytes_ = 0;
+    number_ = 0;
+  }
+
+ private:
+  const char* name_;
+  int number_;
+  int bytes_;
+};
+#endif
+
+
+// -----------------------------------------------------------------------------
+// SemiSpace in young generation
+//
+// A semispace is a contiguous chunk of memory. The mark-compact collector
+// uses the memory in the from space as a marking stack when tracing live
+// objects.
+
+class SemiSpace BASE_EMBEDDED {
+ public:
+  // Creates a space in the young generation. The constructor does not
+  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
+  // memory of size 'capacity' when set up, and does not grow or shrink
+  // otherwise. In the mark-compact collector, the memory region of the from
+  // space is used as the marking stack. It requires contiguous memory
+  // addresses.
+  SemiSpace(int initial_capacity, int maximum_capacity);
+
+  // Sets up the semispace using the given chunk.
+  bool Setup(Address start, int size);
+
+  // Tear down the space. Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetup() { return start_ != NULL; }
+
+  // Double the size of the semispace by committing extra virtual memory.
+  // Assumes that the caller has checked that the semispace has not reached
+  // its maximum capacity (and thus there is space available in the reserved
+  // address range to grow).
+  bool Double();
+
+  // Returns the start address of the space.
+  Address low() { return start_; }
+  // Returns one past the end address of the space.
+  Address high() { return low() + capacity_; }
+
+  // Age mark accessors.
+  Address age_mark() { return age_mark_; }
+  void set_age_mark(Address mark) { age_mark_ = mark; }
+
+  // True if the address is in the address range of this semispace (not
+  // necessarily below the allocation pointer).
+  // NOTE(review): the uint32_t pointer casts assume a 32-bit address
+  // space -- confirm before porting to 64-bit targets.
+  bool Contains(Address a) {
+    return (reinterpret_cast<uint32_t>(a) & address_mask_)
+           == reinterpret_cast<uint32_t>(start_);
+  }
+
+  // True if the object is a heap object in the address range of this
+  // semispace (not necessarily below the allocation pointer).
+  bool Contains(Object* o) {
+    return (reinterpret_cast<uint32_t>(o) & object_mask_) == object_expected_;
+  }
+
+  // The offset of an address from the beginning of the space.
+  int SpaceOffsetForAddress(Address addr) { return addr - low(); }
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+ private:
+  // The current and maximum capacity of the space.
+  int capacity_;
+  int maximum_capacity_;
+
+  // The start address of the space.
+  Address start_;
+  // Used to govern object promotion during mark-compact collection.
+  Address age_mark_;
+
+  // Masks and comparison values to test for containment in this semispace.
+  uint32_t address_mask_;
+  uint32_t object_mask_;
+  uint32_t object_expected_;
+
+ public:
+  TRACK_MEMORY("SemiSpace")
+};
+
+
+// A SemiSpaceIterator is an ObjectIterator that iterates over the active
+// semispace of the heap's new space. It iterates over the objects in the
+// semispace from a given start address (defaulting to the bottom of the
+// semispace) to the top of the semispace. New objects allocated after the
+// iterator is created are not iterated.
+class SemiSpaceIterator : public ObjectIterator {
+ public:
+  // Create an iterator over the objects in the given space. If no start
+  // address is given, the iterator starts from the bottom of the space. If
+  // no size function is given, the iterator calls Object::Size().
+  explicit SemiSpaceIterator(NewSpace* space);
+  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+  SemiSpaceIterator(NewSpace* space, Address start);
+
+  // True while the iteration point is below the limit captured at
+  // construction time.
+  bool has_next() {return current_ < limit_; }
+
+  // Returns the object at the current point and advances past it, using
+  // size_func_ (or Object::Size()) to determine the object's extent.
+  HeapObject* next() {
+    ASSERT(has_next());
+
+    HeapObject* object = HeapObject::FromAddress(current_);
+    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
+    ASSERT_OBJECT_SIZE(size);
+
+    current_ += size;
+    return object;
+  }
+
+  // Implementation of the ObjectIterator functions.
+  virtual bool has_next_object() { return has_next(); }
+  virtual HeapObject* next_object() { return next(); }
+
+ private:
+  void Initialize(NewSpace* space, Address start, Address end,
+                  HeapObjectCallback size_func);
+
+  // The semispace.
+  SemiSpace* space_;
+  // The current iteration point.
+  Address current_;
+  // The end of iteration.
+  Address limit_;
+  // The callback function.
+  HeapObjectCallback size_func_;
+};
+
+
+// -----------------------------------------------------------------------------
+// The young generation space.
+//
+// The new space consists of a contiguous pair of semispaces. It simply
+// forwards most functions to the appropriate semispace.
+
+class NewSpace : public Malloced {
+ public:
+  // Create a new space with a given allocation capacity (ie, the capacity of
+  // *one* of the semispaces). The constructor does not allocate heap memory
+  // from the OS. When the space is set up, it is given a contiguous chunk of
+  // memory of size 2 * semispace_capacity. To support fast containment
+  // testing in the new space, the size of this chunk must be a power of two
+  // and it must be aligned to its size.
+  NewSpace(int initial_semispace_capacity, int maximum_semispace_capacity);
+
+  // Sets up the new space using the given chunk.
+  bool Setup(Address start, int size);
+
+  // Tears down the space. Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetup() {
+    return to_space_->HasBeenSetup() && from_space_->HasBeenSetup();
+  }
+
+  // Flip the pair of spaces.
+  void Flip();
+
+  // Doubles the capacity of the semispaces. Assumes that they are not at
+  // their maximum capacity. Returns a flag indicating success or failure.
+  bool Double();
+
+  // True if the address or object lies in the address range of either
+  // semispace (not necessarily below the allocation pointer).
+  // NOTE(review): the uint32_t pointer casts assume a 32-bit address
+  // space -- confirm before porting to 64-bit targets.
+  bool Contains(Address a) {
+    return (reinterpret_cast<uint32_t>(a) & address_mask_)
+        == reinterpret_cast<uint32_t>(start_);
+  }
+  bool Contains(Object* o) {
+    return (reinterpret_cast<uint32_t>(o) & object_mask_) == object_expected_;
+  }
+
+  // Return the allocated bytes in the active semispace.
+  int Size() { return top() - bottom(); }
+  // Return the current capacity of a semispace.
+  int Capacity() { return capacity_; }
+  // Return the available bytes without growing in the active semispace.
+  int Available() { return Capacity() - Size(); }
+
+  // Return the maximum capacity of a semispace.
+  int MaximumCapacity() { return maximum_capacity_; }
+
+  // Return the address of the allocation pointer in the active semispace.
+  Address top() { return allocation_info_.top; }
+  // Return the address of the first object in the active semispace.
+  Address bottom() { return to_space_->low(); }
+
+  // Get the age mark of the inactive semispace.
+  Address age_mark() { return from_space_->age_mark(); }
+  // Set the age mark in the active semispace.
+  void set_age_mark(Address mark) { to_space_->set_age_mark(mark); }
+
+  // The start address of the space and a bit mask. Anding an address in the
+  // new space with the mask will result in the start address.
+  Address start() { return start_; }
+  uint32_t mask() { return address_mask_; }
+
+  // The allocation top and limit addresses.
+  Address* allocation_top_address() { return &allocation_info_.top; }
+  Address* allocation_limit_address() { return &allocation_info_.limit; }
+
+  // Allocate the requested number of bytes in the space if possible, return a
+  // failure object if not.
+  Object* AllocateRaw(int size_in_bytes) {
+    return AllocateRawInternal(size_in_bytes, &allocation_info_);
+  }
+
+  // Allocate the requested number of bytes for relocation during mark-compact
+  // collection.
+  Object* MCAllocateRaw(int size_in_bytes) {
+    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
+  }
+
+  // Reset the allocation pointer to the beginning of the active semispace.
+  void ResetAllocationInfo();
+  // Reset the relocation pointer to the bottom of the inactive semispace in
+  // preparation for mark-compact collection.
+  void MCResetRelocationInfo();
+  // Update the allocation pointer in the active semispace after a
+  // mark-compact collection.
+  void MCCommitRelocationInfo();
+
+  // Get the extent of the inactive semispace (for use as a marking stack).
+  Address FromSpaceLow() { return from_space_->low(); }
+  Address FromSpaceHigh() { return from_space_->high(); }
+
+  // Get the extent of the active semispace (to sweep newly copied objects
+  // during a scavenge collection).
+  Address ToSpaceLow() { return to_space_->low(); }
+  Address ToSpaceHigh() { return to_space_->high(); }
+
+  // Offsets from the beginning of the semispaces.
+  int ToSpaceOffsetForAddress(Address a) {
+    return to_space_->SpaceOffsetForAddress(a);
+  }
+  int FromSpaceOffsetForAddress(Address a) {
+    return from_space_->SpaceOffsetForAddress(a);
+  }
+
+  // True if the object is a heap object in the address range of the
+  // respective semispace (not necessarily below the allocation pointer of the
+  // semispace).
+  bool ToSpaceContains(Object* o) { return to_space_->Contains(o); }
+  bool FromSpaceContains(Object* o) { return from_space_->Contains(o); }
+
+  bool ToSpaceContains(Address a) { return to_space_->Contains(a); }
+  bool FromSpaceContains(Address a) { return from_space_->Contains(a); }
+
+#ifdef DEBUG
+  // Verify the active semispace.
+  void Verify();
+  // Print the active semispace.
+  void Print() { to_space_->Print(); }
+#endif
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  // Iterates the active semispace to collect statistics.
+  void CollectStatistics();
+  // Reports previously collected statistics of the active semispace.
+  void ReportStatistics();
+  // Clears previously collected statistics.
+  void ClearHistograms();
+
+  // Record the allocation or promotion of a heap object. Note that we don't
+  // record every single allocation, but only those that happen in the
+  // to space during a scavenge GC.
+  void RecordAllocation(HeapObject* obj);
+  void RecordPromotion(HeapObject* obj);
+#endif
+
+ private:
+  // The current and maximum capacities of a semispace.
+  int capacity_;
+  int maximum_capacity_;
+
+  // The semispaces.
+  SemiSpace* to_space_;
+  SemiSpace* from_space_;
+
+  // Start address and bit mask for containment testing.
+  Address start_;
+  uint32_t address_mask_;
+  uint32_t object_mask_;
+  uint32_t object_expected_;
+
+  // Allocation pointer and limit for normal allocation and allocation during
+  // mark-compact collection.
+  AllocationInfo allocation_info_;
+  AllocationInfo mc_forwarding_info_;
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  HistogramInfo* allocated_histogram_;
+  HistogramInfo* promoted_histogram_;
+#endif
+
+  // Implementation of AllocateRaw and MCAllocateRaw.
+  inline Object* AllocateRawInternal(int size_in_bytes,
+                                     AllocationInfo* alloc_info);
+
+  friend class SemiSpaceIterator;
+
+ public:
+  TRACK_MEMORY("NewSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap. They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object). They have a size and a next pointer. The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address. This is not a cast because
+  // it does not check nor require that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  // Set the size in bytes, which can be read with HeapObject::Size(). This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(int size_in_bytes);
+
+  // Accessors for the next field (the raw address of the next free-list
+  // node, or NULL -- see the class comment above).
+  inline Address next();
+  inline void set_next(Address next);
+
+ private:
+  // Byte offset of the 'next' field within the block.
+  static const int kNextOffset = Array::kHeaderSize;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list for the old space.
+class OldSpaceFreeList BASE_EMBEDDED {
+ public:
+  explicit OldSpaceFreeList(AllocationSpace owner);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  int available() { return available_; }
+
+  // Place a node on the free list. The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list. The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block. Bookkeeping information will be written to the block,
+  // ie, its contents will be destroyed. The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // Allocate a block of size 'size_in_bytes' from the free list. The block
+  // is uninitialized. A failure is returned if no block is available. The
+  // number of bytes lost to fragmentation is returned in the output parameter
+  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
+  Object* Allocate(int size_in_bytes, int* wasted_bytes);
+
+ private:
+  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
+  // will always result in waste.)
+  static const int kMinBlockSize = Array::kHeaderSize + kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+
+  // The identity of the owning space, for building allocation Failure
+  // objects.
+  AllocationSpace owner_;
+
+  // Total available bytes in all blocks on this free list.
+  int available_;
+
+  // Blocks are put on exact free lists in an array, indexed by size in words.
+  // The available sizes are kept in an increasingly ordered list. Entries
+  // corresponding to sizes < kMinBlockSize always have an empty free list
+  // (but index kHead is used for the head of the size list).
+  struct SizeNode {
+    // Address of the head FreeListNode of the implied block size or NULL.
+    Address head_node_;
+    // Size (words) of the next larger available size if head_node_ != NULL.
+    int next_size_;
+  };
+  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
+  SizeNode free_[kFreeListsLength];
+
+  // Sentinel elements for the size list. Real elements lie strictly
+  // between kHead and kEnd.
+  static const int kHead = kMinBlockSize / kPointerSize - 1;
+  static const int kEnd = kMaxInt;
+
+  // We keep a "finger" in the size list to speed up a common pattern:
+  // repeated requests for the same or increasing sizes.
+  int finger_;
+
+  // Starting from *prev, find and return the smallest size >= index (words),
+  // or kEnd. Update *prev to be the largest size < index, or kHead.
+  int FindSize(int index, int* prev) {
+    int cur = free_[*prev].next_size_;
+    while (cur < index) {
+      *prev = cur;
+      cur = free_[cur].next_size_;
+    }
+    return cur;
+  }
+
+  // Remove an existing element from the size list.
+  void RemoveSize(int index) {
+    int prev = kHead;
+    int cur = FindSize(index, &prev);
+    ASSERT(cur == index);
+    free_[prev].next_size_ = free_[cur].next_size_;
+    finger_ = prev;
+  }
+
+  // Insert a new element into the size list.
+  void InsertSize(int index) {
+    int prev = kHead;
+    int cur = FindSize(index, &prev);
+    ASSERT(cur != index);
+    free_[prev].next_size_ = index;
+    free_[index].next_size_ = cur;
+  }
+
+  // The size list is not updated during a sequence of calls to Free, but is
+  // rebuilt before the next allocation.
+  void RebuildSizeList();
+  bool needs_rebuild_;
+
+  DISALLOW_EVIL_CONSTRUCTORS(OldSpaceFreeList);
+};
+
+
+// The free list for the map space.  All blocks have the fixed size
+// Map::kSize, so no per-size bookkeeping is needed; the list is a simple
+// singly-linked chain of map-sized blocks threaded through the blocks
+// themselves.
+class MapSpaceFreeList BASE_EMBEDDED {
+ public:
+  MapSpaceFreeList();
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  int available() { return available_; }
+
+  // Place a node on the free list.  The block starting at 'start' (assumed to
+  // have size Map::kSize) is placed on the free list.  Bookkeeping
+  // information will be written to the block, i.e., its contents will be
+  // destroyed.  The start address should be word aligned.
+  void Free(Address start);
+
+  // Allocate a map-sized block from the free list.  The block is
+  // uninitialized.  A failure is returned if no block is available.
+  Object* Allocate();
+
+ private:
+  // Available bytes on the free list.
+  int available_;
+
+  // The head of the free list (an address inside the map space) or NULL.
+  Address head_;
+
+  DISALLOW_EVIL_CONSTRUCTORS(MapSpaceFreeList);
+};
+
+
+// -----------------------------------------------------------------------------
+// Old object space (excluding map objects)
+
+class OldSpace : public PagedSpace {
+ public:
+  // Creates an old space object with a given maximum capacity.
+  // The constructor does not allocate pages from OS.
+  explicit OldSpace(int max_capacity, AllocationSpace id)
+      : PagedSpace(max_capacity, id), free_list_(id) {
+  }
+
+  // Returns maximum available bytes that the old space can have.
+  int MaxAvailable() {
+    return (MemoryAllocator::Available() / Page::kPageSize)
+        * Page::kObjectAreaSize;
+  }
+
+  // The bytes available on the free list (i.e., not above the linear
+  // allocation pointer).
+  int AvailableFree() { return free_list_.available(); }
+
+  // The top of allocation in a page in this space.  The topmost page is the
+  // one holding the linear allocation pointer; all other pages are full up
+  // to their object-area end.
+  Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
+  }
+
+  // Allocates requested bytes. May return Failure if the space is full.
+  Object* AllocateRaw(int size_in_bytes) {
+    ASSERT_OBJECT_SIZE(size_in_bytes);
+    return AllocateRawInternal(size_in_bytes, &allocation_info_);
+  }
+
+  // Allocates requested bytes for object relocation (uses the mark-compact
+  // forwarding pointer instead of the normal allocation pointer).
+  Object* MCAllocateRaw(int size_in_bytes) {
+    ASSERT_OBJECT_SIZE(size_in_bytes);
+    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
+  }
+
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste; the free list reports the number of
+  // bytes it could not use, which is charged to the waste statistics.
+  void Free(Address start, int size_in_bytes) {
+    int wasted_bytes = free_list_.Free(start, size_in_bytes);
+    accounting_stats_.DeallocateBytes(size_in_bytes);
+    accounting_stats_.WasteBytes(wasted_bytes);
+  }
+
+  // Prepare for full garbage collection.  Resets the relocation pointer and
+  // clears the free list.
+  void PrepareForMarkCompact(bool will_compact);
+
+  // Adjust the top of relocation pointer to point to the end of the object
+  // given by 'address' and 'size_in_bytes'.  Move it to the next page if
+  // necessary, ensure that it points to the address, then increment it by the
+  // size.
+  void MCAdjustRelocationEnd(Address address, int size_in_bytes);
+
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  void MCCommitRelocationInfo();
+
+#ifdef DEBUG
+  // Verify integrity of this space.
+  void Verify();
+
+  // Reports statistics for the space
+  void ReportStatistics();
+  // Dump the remembered sets in the space to stdout.
+  void PrintRSet();
+#endif
+
+ private:
+  // The space's free list.
+  OldSpaceFreeList free_list_;
+
+  // During relocation, we keep a pointer to the most recently relocated
+  // object in order to know when to move to the next page.
+  Address mc_end_of_relocation_;
+
+  // Implementation of AllocateRaw.  Allocates requested number of bytes using
+  // the given allocation information according to the space's current
+  // allocation mode.
+  Object* AllocateRawInternal(int size_in_bytes, AllocationInfo* alloc_info);
+
+  // Slow path of AllocateRaw functions.
+  Object* SlowAllocateRaw(int size_in_bytes, AllocationInfo* alloc_info);
+
+ public:
+  TRACK_MEMORY("OldSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for all map objects
+
+class MapSpace : public PagedSpace {
+ public:
+  // Creates a map space object with a maximum capacity.
+  explicit MapSpace(int max_capacity) : PagedSpace(max_capacity, MAP_SPACE) { }
+
+  // The bytes available on the free list (i.e., not above the linear
+  // allocation pointer).
+  int AvailableFree() { return free_list_.available(); }
+
+  // The top of allocation in a page in this space.  Full pages end kPageExtra
+  // bytes early, because only whole map-sized blocks are allocated.
+  Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top()
+        : page->ObjectAreaEnd() - kPageExtra;
+  }
+
+  // Allocates requested bytes. May return Failure if the space is full.
+  Object* AllocateRaw(int size_in_bytes) {
+    ASSERT_OBJECT_SIZE(size_in_bytes);
+    return AllocateRawInternal(size_in_bytes, &allocation_info_);
+  }
+
+  // Allocates requested bytes for object relocation.
+  Object* MCAllocateRaw(int size_in_bytes) {
+    ASSERT_OBJECT_SIZE(size_in_bytes);
+    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
+  }
+
+  // Give a map-sized block of memory to the space's free list.
+  void Free(Address start) {
+    free_list_.Free(start);
+    accounting_stats_.DeallocateBytes(Map::kSize);
+  }
+
+  // Given an index, returns the page address.
+  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+
+  // Prepares for a mark-compact GC.
+  void PrepareForMarkCompact(bool will_compact);
+
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  void MCCommitRelocationInfo();
+
+#ifdef DEBUG
+  // Verify integrity of this space.
+  void Verify();
+
+  // Reports statistic info of the space
+  void ReportStatistics();
+  // Dump the remembered sets in the space to stdout.
+  void PrintRSet();
+#endif
+
+  // Constants.
+  static const int kMapPageIndexBits = 10;
+  static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;
+
+  // Unusable tail of a page: the space left over after packing whole
+  // map-sized blocks into the object area.
+  static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
+
+ private:
+  // The space's free list.
+  MapSpaceFreeList free_list_;
+
+  // An array of page start address in a map space.
+  // NOTE(review): sized kMaxMapPageIndex while the name suggests valid
+  // indices run up to and including kMaxMapPageIndex — confirm whether this
+  // should be kMaxMapPageIndex + 1.
+  Address page_addresses_[kMaxMapPageIndex];
+
+  // Implementation of AllocateRaw.  Allocates requested bytes using
+  // the given allocation information.
+  Object* AllocateRawInternal(int size_in_bytes, AllocationInfo* alloc_info);
+
+  // Slow path of AllocateRaw functions.  (Parameter renamed from the
+  // misspelled 'size_int_bytes' for consistency with the rest of the class.)
+  Object* SlowAllocateRaw(int size_in_bytes, AllocationInfo* alloc_info);
+
+ public:
+  TRACK_MEMORY("MapSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
+// the large object space. A large object is allocated from OS heap with
+// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
+// A large object always starts at Page::kObjectStartOffset to a page.
+// Large objects do not move during garbage collections.
+//
+
+// A LargeObjectChunk holds exactly one large object page with exactly one
+// large object.
+class LargeObjectChunk {
+ public:
+  // Allocates a new LargeObjectChunk that contains a large object page
+  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
+  // object and possibly extra remembered set words) bytes after the object
+  // area start of that page.  The allocated chunk size is set in the output
+  // parameter chunk_size.
+  static LargeObjectChunk* New(int size_in_bytes, size_t* chunk_size);
+
+  // Interpret a raw address as a large object chunk.  The chunk header lives
+  // at the very start of the chunk's memory.
+  static LargeObjectChunk* FromAddress(Address address) {
+    return reinterpret_cast<LargeObjectChunk*>(address);
+  }
+
+  // Returns the address of this chunk.
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  // Accessors for the fields of the chunk.
+  LargeObjectChunk* next() { return next_; }
+  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
+
+  size_t size() { return size_; }
+  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
+
+  // Returns the object in this chunk.
+  inline HeapObject* GetObject();
+
+  // Given a requested size (including any extra remembered set words),
+  // returns the physical size of a chunk to be allocated.
+  static int ChunkSizeFor(int size_in_bytes);
+
+  // Given a chunk size, returns the object size it can accommodate (not
+  // including any extra remembered set words).  Used by
+  // LargeObjectSpace::Available.  Note that this can overestimate the size
+  // of object that will fit in a chunk---if the object requires extra
+  // remembered set words (e.g., for large fixed arrays), the actual object
+  // size for the chunk will be smaller than reported by this function.
+  static int ObjectSizeFor(int chunk_size) {
+    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+  }
+
+ private:
+  // A pointer to the next large object chunk in the space or NULL.
+  LargeObjectChunk* next_;
+
+  // The size of this chunk.
+  size_t size_;
+
+ public:
+  TRACK_MEMORY("LargeObjectChunk")
+};
+
+
+// Space for objects larger than the maximum page-allocated object size.
+// Each object lives alone in its own LargeObjectChunk; objects never move.
+class LargeObjectSpace {
+  friend class LargeObjectIterator;
+ public:
+  LargeObjectSpace();
+
+  // Initializes internal data structures.
+  bool Setup();
+
+  // Releases internal resources, frees objects in this space.
+  void TearDown();
+
+  // Allocates a (non-FixedArray) large object.
+  Object* AllocateRaw(int size_in_bytes);
+  // Allocates a large FixedArray (which may need extra remembered set
+  // words, hence the separate entry point).
+  Object* AllocateRawFixedArray(int size_in_bytes);
+
+  // Available bytes for objects in this space, not including any extra
+  // remembered set words.
+  int Available() {
+    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
+  }
+
+  // Total bytes allocated in this space.
+  int Size() {
+    return size_;
+  }
+
+  // Number of chunks (one object per chunk) in this space.
+  int PageCount() {
+    return page_count_;
+  }
+
+  // Finds an object for a given address, returns Failure::Exception()
+  // if it is not found.  The function iterates through all objects in this
+  // space, may be slow.
+  Object* FindObject(Address a);
+
+  // Clears remembered sets.
+  void ClearRSet();
+
+  // Iterates objects whose remembered set bits are set.
+  void IterateRSet(ObjectSlotCallback func);
+
+  // Frees unmarked objects.
+  void FreeUnmarkedObjects();
+
+  // Checks whether a heap object is in this space; O(1).
+  bool Contains(HeapObject* obj);
+
+  // Checks whether the space is empty.
+  bool IsEmpty() { return first_chunk_ == NULL; }
+
+#ifdef DEBUG
+  void Verify();
+  void Print();
+  void ReportStatistics();
+  void CollectCodeStatistics();
+  // Dump the remembered sets in the space to stdout.
+  void PrintRSet();
+#endif
+  // Checks whether an address is in the object area in this space.  It
+  // iterates all objects in the space.  May be slow.
+  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
+
+ private:
+  // The head of the linked list of large object chunks.
+  LargeObjectChunk* first_chunk_;
+  int size_;  // allocated bytes
+  int page_count_;  // number of chunks
+
+
+  // Shared implementation of AllocateRaw and AllocateRawFixedArray.
+  Object* AllocateRawInternal(int requested_size, int object_size);
+
+  // Returns the number of extra bytes (rounded up to the nearest full word)
+  // required for extra_object_bytes of extra pointers (in bytes).
+  static inline int ExtraRSetBytesFor(int extra_object_bytes);
+
+ public:
+  TRACK_MEMORY("LargeObjectSpace")
+};
+
+
+// Iterates over the one object in each chunk of a LargeObjectSpace.
+// An optional size function may be supplied for callers that need to
+// override how object sizes are computed during iteration.
+class LargeObjectIterator: public ObjectIterator {
+ public:
+  explicit LargeObjectIterator(LargeObjectSpace* space);
+  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
+
+  bool has_next() { return current_ != NULL; }
+  HeapObject* next();
+
+  // implementation of ObjectIterator.
+  virtual bool has_next_object() { return has_next(); }
+  virtual HeapObject* next_object() { return next(); }
+
+ private:
+  // The chunk whose object will be returned by the next call to next().
+  LargeObjectChunk* current_;
+  HeapObjectCallback size_func_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_SPACES_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "factory.h"
+#include "string-stream.h"
+
+namespace v8 { namespace internal {
+
+// Upper bound on the number of distinct heap objects remembered in the
+// mentioned-object cache while dumping objects.
+static const int kMentionedObjectCacheMaxSize = 256;
+// Lazily created cache of heap objects already printed; entries are
+// referenced as #N# in the output.  NOTE(review): holds raw heap pointers —
+// presumably only valid while no GC runs during printing; confirm.
+static List<HeapObject*, PreallocatedStorage>* debug_object_cache = NULL;
+// The security token most recently printed, used to emit the token only
+// when it changes between frames.
+static Object* current_security_token = NULL;
+
+
+char* HeapStringAllocator::allocate(unsigned bytes) {
+  // Remember the allocation so the destructor can release it later.
+  return space_ = NewArray<char>(bytes);
+}
+
+
+NoAllocationStringAllocator::NoAllocationStringAllocator(unsigned bytes) {
+  // Grab the entire buffer up front; grow() never allocates again.
+  space_ = NewArray<char>(bytes);
+  size_ = bytes;
+}
+
+
+// Append a single character, keeping the buffer NUL-terminated.  Returns
+// false once the stream has been truncated; nothing further is written.
+bool StringStream::Put(char c) {
+  if (space() == 0) return false;
+  // Need room for both the character and the terminating '\0'.
+  if (length_ >= capacity_ - 1) {
+    unsigned new_capacity = capacity_;
+    char* new_buffer = allocator_->grow(&new_capacity);
+    if (new_capacity > capacity_) {
+      capacity_ = new_capacity;
+      buffer_ = new_buffer;
+    } else {
+      // Indicate truncation with dots.  After this, space() is 0 and every
+      // later Put returns false immediately.
+      memset(cursor(), '.', space());
+      length_ = capacity_;
+      buffer_[length_ - 2] = '\n';
+      buffer_[length_ - 1] = '\0';
+      return false;
+    }
+  }
+  buffer_[length_] = c;
+  buffer_[length_ + 1] = '\0';
+  length_++;
+  return true;
+}
+
+
+// A control character is one that configures a format element. For
+// instance, in %.5s, .5 are control characters.
+static bool IsControlChar(char c) {
+  // Digits, '.', and '-' may appear between '%' and the conversion type.
+  return ('0' <= c && c <= '9') || c == '.' || c == '-';
+}
+
+
+// Append 'format' to the stream, expanding printf-like directives using the
+// typed arguments in 'elms'.  Supported types: %s (C string), %o (object),
+// and the numeric family %i %d %u %x %c %p (all taken as int).
+void StringStream::Add(const char* format, Vector<FmtElm> elms) {
+  // If we already ran out of space then return immediately.
+  if (space() == 0)
+    return;
+  int offset = 0;
+  int elm = 0;
+  while (format[offset] != '\0') {
+    // Plain characters — and '%' directives once the argument vector is
+    // exhausted — are copied through verbatim.
+    if (format[offset] != '%' || elm == elms.length()) {
+      Put(format[offset]);
+      offset++;
+      continue;
+    }
+    // Read this formatting directive into a temporary buffer
+    const int kTempSize = 24;
+    char temp_buffer[kTempSize];
+    // Wrap temp buffer in a vector to get bounds checking in debug
+    // mode
+    Vector<char> temp(temp_buffer, kTempSize);
+    int format_length = 0;
+    // Skip over the whole control character sequence until the
+    // format element type
+    temp[format_length++] = format[offset++];
+    // '\0' is not a control character so we don't have to
+    // explicitly check for the end of the string.
+    // NOTE(review): a directive with more than ~21 control characters would
+    // overrun temp_buffer; only the debug-mode Vector bounds check guards it.
+    while (IsControlChar(format[offset]))
+      temp[format_length++] = format[offset++];
+    char type = format[offset];
+    if (type == '\0') return;  // Format string ended inside a directive.
+    temp[format_length++] = type;
+    temp[format_length] = '\0';
+    offset++;
+    FmtElm current = elms[elm++];
+    switch (type) {
+      case 's': {
+        ASSERT_EQ(FmtElm::C_STR, current.type_);
+        const char* value = current.data_.u_c_str_;
+        Add(value);
+        break;
+      }
+      case 'o': {
+        ASSERT_EQ(FmtElm::OBJ, current.type_);
+        Object* obj = current.data_.u_obj_;
+        PrintObject(obj);
+        break;
+      }
+      case 'i': case 'd': case 'u': case 'x': case 'c': case 'p': {
+        // Forward the captured directive text to the C library; the
+        // argument is always taken from the int member of the union.
+        int value = current.data_.u_int_;
+        char formatted[kTempSize];
+#ifdef WIN32
+        // This is not my idea of a good time.
+        _snprintf(formatted, kTempSize, temp.start(), value);
+#else
+        snprintf(formatted, kTempSize, temp.start(), value);
+#endif
+        Add(formatted);
+        break;
+      }
+      default:
+        // Any other directive type is unsupported here.
+        UNREACHABLE();
+        break;
+    }
+  }
+
+  // Verify that the buffer is 0-terminated and doesn't contain any
+  // other 0-characters.
+  ASSERT(buffer_[length_] == '\0');
+  ASSERT(strlen(buffer_) == length_);
+}
+
+
+// Print the short form of an object; large heap objects are additionally
+// registered in (or referenced from) the mentioned-object cache so a later
+// PrintMentionedObjectCache can expand them.
+void StringStream::PrintObject(Object* o) {
+  o->ShortPrint(this);
+  // Short/medium strings, numbers, and oddballs are fully described by
+  // their short form; no cache entry is needed.
+  if (o->IsString()) {
+    if (String::cast(o)->length() <= String::kMaxMediumStringSize) {
+      return;
+    }
+  } else if (o->IsNumber() || o->IsOddball()) {
+    return;
+  }
+  if (o->IsHeapObject()) {
+    // Already-seen objects are referenced by their cache index as #N#.
+    for (int i = 0; i < debug_object_cache->length(); i++) {
+      if ((*debug_object_cache)[i] == o) {
+        Add("#%d#", i);
+        return;
+      }
+    }
+    if (debug_object_cache->length() < kMentionedObjectCacheMaxSize) {
+      Add("#%d#", debug_object_cache->length());
+      debug_object_cache->Add(HeapObject::cast(o));
+    } else {
+      // Cache full: fall back to the raw address.
+      Add("@%p", o);
+    }
+  }
+}
+
+
+// Append a format string that carries no format elements.
+void StringStream::Add(const char* format) {
+  Add(format, Vector<FmtElm>::empty());
+}
+
+
+void StringStream::Add(const char* format, FmtElm arg0) {
+  // Package the single argument as a vector for the general Add.
+  FmtElm args[] = { arg0 };
+  Add(format, Vector<FmtElm>(args, 1));
+}
+
+
+void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1) {
+  // Package both arguments as a vector for the general Add.
+  FmtElm args[] = { arg0, arg1 };
+  Add(format, Vector<FmtElm>(args, 2));
+}
+
+
+void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
+                       FmtElm arg2) {
+  // Package the three arguments as a vector for the general Add.
+  FmtElm args[] = { arg0, arg1, arg2 };
+  Add(format, Vector<FmtElm>(args, 3));
+}
+
+
+SmartPointer<char> StringStream::ToCString() {
+  // Copy the contents into a fresh, exactly-sized, NUL-terminated string
+  // owned by the returned smart pointer.
+  char* copy = NewArray<char>(length_ + 1);
+  memcpy(copy, buffer_, length_);
+  copy[length_] = '\0';
+  return SmartPointer<char>(copy);
+}
+
+
+// Send the accumulated contents to the logging subsystem as a StackDump
+// string event.
+void StringStream::Log() {
+  LOG(StringEvent("StackDump", buffer_));
+}
+
+
+void StringStream::OutputToStdOut() {
+  // Dump the output to stdout, but make sure to break it up into
+  // manageable chunks to avoid losing parts of the output in the OS
+  // printing code. This is a problem on Windows in particular; see
+  // the VPrint() function implementations in platform-win32.cc.
+  unsigned position = 0;
+  while (position + 2048 < length_) {
+    unsigned boundary = position + 2048;
+    // Temporarily terminate the chunk in place, print it, then restore.
+    char saved = buffer_[boundary];
+    buffer_[boundary] = '\0';
+    internal::PrintF("%s", &buffer_[position]);
+    buffer_[boundary] = saved;
+    position = boundary;
+  }
+  // Print the remaining tail (always at most 2048 characters).
+  internal::PrintF("%s", &buffer_[position]);
+}
+
+
+// Materialize the accumulated contents as a heap-allocated V8 string.
+Handle<String> StringStream::ToString() {
+  return Factory::NewStringFromUtf8(Vector<const char>(buffer_, length_));
+}
+
+
+void StringStream::ClearMentionedObjectCache() {
+  // Create the cache lazily on first use, then empty it.
+  if (debug_object_cache == NULL) {
+    debug_object_cache = new List<HeapObject*, PreallocatedStorage>(0);
+  }
+  debug_object_cache->Clear();
+  // Forget the last security token so the next dump prints it afresh.
+  current_security_token = NULL;
+}
+
+
+#ifdef DEBUG
+// True when the mentioned-object cache holds no entries.  Assumes the cache
+// has already been created by ClearMentionedObjectCache.
+bool StringStream::IsMentionedObjectCacheClear() {
+  return (debug_object_cache->length() == 0);
+}
+#endif
+
+
+// Append the whole string; returns false if output was truncated.
+bool StringStream::Put(String* str) {
+  return Put(str, 0, str->length());
+}
+
+
+// Append characters [start, end) of str, replacing anything outside the
+// printable ASCII range with '?'.  Returns false on truncation.
+bool StringStream::Put(String* str, int start, int end) {
+  StringInputBuffer name_buffer(str);
+  name_buffer.Seek(start);
+  for (int i = start; i < end && name_buffer.has_more(); i++) {
+    int c = name_buffer.GetNext();
+    // Printable ASCII is [32, 127); everything else becomes '?'.
+    if (c < 32 || c >= 127) c = '?';
+    if (!Put(c)) return false;  // Output was truncated.
+  }
+  return true;
+}
+
+
+// Print a property or function name.  Empty string names print as an
+// anonymous placeholder; non-string names use the generic object format.
+void StringStream::PrintName(Object* name) {
+  if (!name->IsString()) {
+    Add("%o", name);
+    return;
+  }
+  String* str = String::cast(name);
+  if (str->length() > 0) {
+    Put(str);
+  } else {
+    Add("/* anonymous */");
+  }
+}
+
+
+// Print the in-object (FIELD) properties of js_object, one "key: value"
+// per line, using the descriptor array of its map.  Validates the map
+// first since this runs during debugging/dumping on possibly bad objects.
+void StringStream::PrintUsingMap(JSObject* js_object) {
+  Map* map = js_object->map();
+  if (!Heap::Contains(map) ||
+      !map->IsHeapObject() ||
+      !map->IsMap()) {
+    Add("<Invalid map>\n");
+    return;
+  }
+  for (DescriptorReader r(map->instance_descriptors()); !r.eos(); r.advance()) {
+    switch (r.type()) {
+      case FIELD: {
+        Object* key = r.GetKey();
+        if (key->IsString() || key->IsNumber()) {
+          // Right-align the key in an 18-character column.
+          int len = 3;
+          if (key->IsString()) {
+            len = String::cast(key)->length();
+          }
+          for (; len < 18; len++)
+            Put(' ');
+          if (key->IsString()) {
+            Put(String::cast(key));
+          } else {
+            // NOTE(review): ShortPrint() here takes no stream argument, so
+            // the key presumably goes to stdout, not this stream — confirm.
+            key->ShortPrint();
+          }
+          Add(": ");
+          Object* value = js_object->properties()->get(r.GetFieldIndex());
+          Add("%o\n", value);
+        }
+      }
+      break;
+      default:
+        break;
+    }
+  }
+}
+
+
+// Print at most the first 10 non-hole elements of the array, one indented
+// "index: value" per line, followed by an ellipsis when elements remain.
+void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
+  for (unsigned int i = 0; i < 10 && i < limit; i++) {
+    Object* element = array->get(i);
+    if (element != Heap::the_hole_value()) {
+      for (int len = 1; len < 18; len++)
+        Put(' ');
+      // Reuse the element fetched above instead of reading the array again.
+      Add("%d: %o\n", i, element);
+    }
+  }
+  // NOTE(review): the ellipsis is printed whenever limit >= 10, i.e. also
+  // when exactly 10 elements were shown — presumably intentional; confirm.
+  if (limit >= 10) {
+    Add("                  ...\n");
+  }
+}
+
+
+// Print at most the first 10 bytes of the array, one per line, showing the
+// decimal and hex value plus a printable/escape hint where applicable.
+void StringStream::PrintByteArray(ByteArray* byte_array) {
+  unsigned int limit = byte_array->length();
+  for (unsigned int i = 0; i < 10 && i < limit; i++) {
+    byte b = byte_array->get(i);
+    Add(" %d: %3d 0x%02x", i, b, b);
+    if (b >= ' ' && b <= '~') {
+      Add(" '%c'", b);
+    } else if (b == '\n') {
+      // NOTE(review): these literals contain real newline/CR characters, so
+      // the closing quote lands on the next line in the output — presumably
+      // they were meant to be the escaped forms "'\\n'" / "'\\r'"; confirm.
+      Add(" '\n'");
+    } else if (b == '\r') {
+      Add(" '\r'");
+    } else if (b >= 1 && b <= 26) {
+      // Control codes 1..26 print as caret notation, e.g. 1 -> ^A.
+      Add(" ^%c", b + 'A' - 1);
+    }
+    Add("\n");
+  }
+  if (limit >= 10) {
+    Add("             ...\n");
+  }
+}
+
+
+// Expand every object recorded in the mentioned-object cache: its #N#
+// index, address, short form, and a type-specific detail dump.  Note that
+// printing an entry may append further objects to the cache, which this
+// loop then also visits (length() is re-read each iteration).
+void StringStream::PrintMentionedObjectCache() {
+  Add("==== Key         ============================================\n\n");
+  for (int i = 0; i < debug_object_cache->length(); i++) {
+    HeapObject* printee = (*debug_object_cache)[i];
+    Add(" #%d# %p: ", i, printee);
+    printee->ShortPrint(this);
+    Add("\n");
+    if (printee->IsJSObject()) {
+      if (printee->IsJSValue()) {
+        Add("           value(): %o\n", JSValue::cast(printee)->value());
+      }
+      PrintUsingMap(JSObject::cast(printee));
+      if (printee->IsJSArray()) {
+        JSArray* array = JSArray::cast(printee);
+        if (array->HasFastElements()) {
+          // Clamp the printed element count to the array's JS length.
+          unsigned int limit = FixedArray::cast(array->elements())->length();
+          unsigned int length =
+            static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
+          if (length < limit) limit = length;
+          PrintFixedArray(FixedArray::cast(array->elements()), limit);
+        }
+      }
+    } else if (printee->IsByteArray()) {
+      PrintByteArray(ByteArray::cast(printee));
+    } else if (printee->IsFixedArray()) {
+      unsigned int limit = FixedArray::cast(printee)->length();
+      PrintFixedArray(FixedArray::cast(printee), limit);
+    }
+  }
+}
+
+
+// If f is a valid JSFunction whose global object carries a security token
+// different from the last one printed, emit the new token and remember it.
+// Every lookup is defensively validated because this runs during stack
+// dumps on a possibly corrupt heap.
+void StringStream::PrintSecurityTokenIfChanged(Object* f) {
+  if (!f->IsHeapObject() || !Heap::Contains(HeapObject::cast(f))) {
+    return;
+  }
+  Map* map = HeapObject::cast(f)->map();
+  if (!map->IsHeapObject() ||
+      !Heap::Contains(map) ||
+      !map->IsMap() ||
+      !f->IsJSFunction()) {
+    return;
+  }
+
+  JSFunction* fun = JSFunction::cast(f);
+  // The context is read unchecked first and only trusted after it has been
+  // verified to be a context inside the heap.
+  Object* perhaps_context = fun->unchecked_context();
+  if (perhaps_context->IsHeapObject() &&
+      Heap::Contains(HeapObject::cast(perhaps_context)) &&
+      perhaps_context->IsContext()) {
+    Context* context = fun->context();
+    if (!Heap::Contains(context)) {
+      Add("(Function context is outside heap)\n");
+      return;
+    }
+    GlobalObject* global = context->global();
+    if (!Heap::Contains(global)) {
+      Add("(Function context global is outside heap)\n");
+      return;
+    }
+    if (global->IsJSGlobalObject()) {
+      Object* token = JSGlobalObject::cast(global)->security_token();
+      // Only print the token when it differs from the previous frame's.
+      if (token != current_security_token) {
+        Add("Security context: %o\n", token);
+        current_security_token = token;
+      }
+    } else {
+      Add("(No security context)\n");
+    }
+  } else {
+    Add("(Function context is corrupt)\n");
+  }
+}
+
+
+// Print the function slot of a stack frame.  For a resolved JSFunction the
+// name (via the receiver's prototype chain) is printed and its Code object
+// is returned through *code; for a symbol (unresolved/megamorphic call)
+// just the name is printed.  Anything else produces a diagnostic comment.
+void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
+  if (f->IsHeapObject() &&
+      Heap::Contains(HeapObject::cast(f)) &&
+      Heap::Contains(HeapObject::cast(f)->map()) &&
+      HeapObject::cast(f)->map()->IsMap()) {
+    if (f->IsJSFunction()) {
+      JSFunction* fun = JSFunction::cast(f);
+      // Common case: on-stack function present and resolved.
+      PrintPrototype(fun, receiver);
+      *code = fun->code();
+    } else if (f->IsSymbol()) {
+      // Unresolved and megamorphic calls: Instead of the function
+      // we have the function name on the stack.
+      PrintName(f);
+      Add("/* unresolved */ ");
+    } else {
+      // Unless this is the frame of a built-in function, we should always have
+      // the callee function or name on the stack. If we don't, we have a
+      // problem or a change of the stack frame layout.
+      Add("%o", f);
+      Add("/* warning: no JSFunction object or function name found */ ");
+    }
+    /* } else if (is_trampoline()) {
+       Print("trampoline ");
+    */
+  } else {
+    // Diagnose exactly which validity check failed, most specific first.
+    if (!f->IsHeapObject()) {
+      Add("/* warning: 'function' was not a heap object */ ");
+      return;
+    }
+    if (!Heap::Contains(HeapObject::cast(f))) {
+      Add("/* warning: 'function' was not on the heap */ ");
+      return;
+    }
+    if (!Heap::Contains(HeapObject::cast(f)->map())) {
+      Add("/* warning: function's map was not on the heap */ ");
+      return;
+    }
+    if (!HeapObject::cast(f)->map()->IsMap()) {
+      Add("/* warning: function's map was not a valid map */ ");
+      return;
+    }
+    Add("/* warning: Invalid JSFunction object found */ ");
+  }
+}
+
+
+// Print the name under which 'fun' is reachable from 'receiver': walk the
+// receiver's prototype chain looking up the function by reverse lookup.
+// If the found key differs from the function's own name, the own name is
+// appended as "(aka ...)".
+void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
+  Object* name = fun->shared()->name();
+  bool print_name = false;
+  for (Object* p = receiver; p != Heap::null_value(); p = p->GetPrototype()) {
+    if (p->IsJSObject()) {
+      Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
+      if (key != Heap::undefined_value()) {
+        // A differing lookup key means the own name is an alias worth
+        // printing; an empty own name suppresses the alias note.
+        if (!name->IsString() ||
+            !key->IsString() ||
+            !String::cast(name)->Equals(String::cast(key))) {
+          print_name = true;
+        }
+        if (name->IsString() && String::cast(name)->length() == 0) {
+          print_name = false;
+        }
+        name = key;
+      }
+    } else {
+      print_name = true;
+    }
+  }
+  PrintName(name);
+  // Also known as - if the name in the function doesn't match the name under
+  // which it was looked up.
+  if (print_name) {
+    Add("(aka ");
+    PrintName(fun->shared()->name());
+    Put(')');
+  }
+}
+
+
+char* HeapStringAllocator::grow(unsigned* bytes) {
+  unsigned requested = *bytes * 2;
+  // On size overflow or allocation failure keep the old buffer and leave
+  // *bytes untouched; the caller detects this by the unchanged size.
+  if (requested <= *bytes) {
+    return space_;
+  }
+  char* fresh = NewArray<char>(requested);
+  if (fresh == NULL) {
+    return space_;
+  }
+  // Copy the old contents before releasing the old buffer.
+  memcpy(fresh, space_, *bytes);
+  DeleteArray(space_);
+  *bytes = requested;
+  space_ = fresh;
+  return fresh;
+}
+
+
+char* NoAllocationStringAllocator::grow(unsigned* bytes) {
+  // "Grow" by doubling the reported size, clamped to the preallocated
+  // buffer; the buffer itself never changes.
+  unsigned doubled = *bytes * 2;
+  *bytes = (doubled > size_) ? size_ : doubled;
+  return space_;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STRING_STREAM_H_
+#define V8_STRING_STREAM_H_
+
+namespace v8 { namespace internal {
+
+
+// Abstract interface for the character buffers backing a StringStream.
+class StringAllocator {
+ public:
+  virtual ~StringAllocator() {}
+  // Allocate a number of bytes.
+  virtual char* allocate(unsigned bytes) = 0;
+  // Allocate a larger number of bytes and copy the old buffer to the new one.
+  // bytes is an input and output parameter passing the old size of the buffer
+  // and returning the new size.  If allocation fails then we return the old
+  // buffer and do not increase the size.
+  virtual char* grow(unsigned* bytes) = 0;
+};
+
+
+// Normal allocator uses new[] and delete[].  The most recent buffer handed
+// out by allocate()/grow() is owned here and freed by the destructor.
+class HeapStringAllocator: public StringAllocator {
+ public:
+  ~HeapStringAllocator() { DeleteArray(space_); }
+  char* allocate(unsigned bytes);
+  char* grow(unsigned* bytes);
+ private:
+  // The current buffer; replaced on grow(), released on destruction.
+  char* space_;
+};
+
+
+// Allocator for use when no new c++ heap allocation is allowed.
+// Allocates all space up front and does no allocation while building
+// message.  NOTE(review): there is no destructor, so the up-front buffer is
+// never freed — presumably acceptable for its crash-dump use; confirm.
+class NoAllocationStringAllocator: public StringAllocator {
+ public:
+  explicit NoAllocationStringAllocator(unsigned bytes);
+  // allocate() ignores the requested size and hands out the fixed buffer.
+  char* allocate(unsigned bytes) { return space_; }
+  char* grow(unsigned* bytes);
+ private:
+  // Size of the preallocated buffer; grow() never reports more than this.
+  unsigned size_;
+  char* space_;
+};
+
+
+// A tagged format argument for StringStream::Add.  The constructors are
+// deliberately implicit so callers can pass ints, C strings, objects, and
+// handles directly in the Add(...) calls.
+class FmtElm {
+ public:
+  FmtElm(int value) : type_(INT) { data_.u_int_ = value; }
+  FmtElm(const char* value) : type_(C_STR) { data_.u_c_str_ = value; }
+  FmtElm(Object* value) : type_(OBJ) { data_.u_obj_ = value; }
+  FmtElm(Handle<Object> value) : type_(HANDLE) { data_.u_handle_ = value.location(); }
+  // NOTE(review): reinterpret_cast<int> truncates pointer values on 64-bit
+  // targets — presumably this code only targets 32-bit; confirm.
+  FmtElm(void* value) : type_(INT) { data_.u_int_ = reinterpret_cast<int>(value); }
+ private:
+  friend class StringStream;
+  enum Type { INT, C_STR, OBJ, HANDLE };
+  // Discriminator for the union below.
+  Type type_;
+  union {
+    int u_int_;
+    const char* u_c_str_;
+    Object* u_obj_;
+    Object** u_handle_;
+  } data_;
+};
+
+
+// A growable, always NUL-terminated character stream with printf-like
+// formatting, used mainly for debug and stack-trace output.  Storage is
+// provided by a StringAllocator, which also owns the buffer's lifetime.
+class StringStream {
+ public:
+  explicit StringStream(StringAllocator* allocator):
+    allocator_(allocator),
+    capacity_(kInitialCapacity),
+    length_(0),
+    buffer_(allocator_->allocate(kInitialCapacity)) {
+    buffer_[0] = 0;
+  }
+
+  // The buffer is owned by the allocator, so nothing to release here.
+  ~StringStream() {
+  }
+
+  // The Put/Add family returns/accumulates output; Put returns false once
+  // the stream has been truncated.
+  bool Put(char c);
+  bool Put(String* str);
+  bool Put(String* str, int start, int end);
+  void Add(const char* format, Vector<FmtElm> elms);
+  void Add(const char* format);
+  void Add(const char* format, FmtElm arg0);
+  void Add(const char* format, FmtElm arg0, FmtElm arg1);
+  void Add(const char* format, FmtElm arg0, FmtElm arg1, FmtElm arg2);
+
+  // Getting the message out.
+  void OutputToStdOut();
+  void Log();
+  Handle<String> ToString();
+  SmartPointer<char> ToCString();
+
+  // Object printing support.
+  void PrintName(Object* o);
+  void PrintFixedArray(FixedArray* array, unsigned int limit);
+  void PrintByteArray(ByteArray* ba);
+  void PrintUsingMap(JSObject* js_object);
+  void PrintPrototype(JSFunction* fun, Object* receiver);
+  void PrintSecurityTokenIfChanged(Object* function);
+  // NOTE: Returns the code in the output parameter.
+  void PrintFunction(Object* function, Object* receiver, Code** code);
+
+  // Reset the stream (keeps the current buffer and capacity).
+  void Reset() {
+    length_ = 0;
+    buffer_[0] = 0;
+  }
+
+  // Mentioned object cache support.
+  void PrintMentionedObjectCache();
+  static void ClearMentionedObjectCache();
+#ifdef DEBUG
+  static bool IsMentionedObjectCacheClear();
+#endif
+
+
+  static const int kInitialCapacity = 16;
+
+ private:
+  void PrintObject(Object* obj);
+
+  StringAllocator* allocator_;
+  unsigned capacity_;
+  unsigned length_;  // does not include terminating 0-character
+  char* buffer_;
+
+  // Bytes still writable (including the slot for the trailing '\0').
+  int space() const { return capacity_ - length_; }
+  // First unwritten position in the buffer.
+  char* cursor() const { return buffer_ + length_; }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_STRING_STREAM_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// const $String = global.String;
+
+
+// Set the String function and constructor.
+// NOTE: identifiers beginning with '%' are calls into the V8 runtime
+// (resolved by the natives parser); this file is not plain ECMAScript.
+%SetCode($String, function(x) {
+ var value = %_ArgumentsLength() == 0 ? '' : ToString(x);
+ // 'new String(x)' wraps the value in a String object;
+ // calling 'String(x)' just converts (ECMA-262 15.5.1 / 15.5.2).
+ if (%IsConstructCall(this)) {
+ %_SetValueOf(this, value);
+ } else {
+ return value;
+ }
+});
+
+%FunctionSetPrototype($String, new $String());
+
+%AddProperty($String.prototype, "constructor", $String, DONT_ENUM);
+
+%AddProperty($String.prototype, "valueOf", function() {
+ if (!IS_STRING(this) && %ClassOf(this) !== 'String')
+ throw new $TypeError('String.prototype.valueOf is not generic');
+ return %_ValueOf(this);
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "toString", $String.prototype.valueOf, DONT_ENUM);
+
+// ECMA-262 section 15.5.4.5
+// Out-of-range index handling is delegated to the runtime call —
+// presumably it returns NaN per spec; confirm in the runtime.
+%AddProperty($String.prototype, "charCodeAt", function(pos) {
+ var subject = ToString(this);
+ var index = TO_INTEGER(pos);
+ return %StringCharCodeAt(subject, index);
+}, DONT_ENUM);
+
+
+// ECMA-262, section 15.5.4.6
+// Collects the receiver plus all arguments as strings and joins them
+// in a single pass rather than repeated '+' concatenation.
+%AddProperty($String.prototype, "concat", function() {
+ var len = %_ArgumentsLength();
+ var parts = new $Array(len + 1);
+ parts[0] = ToString(this);
+ for (var i = 0; i < len; i++)
+ parts[i + 1] = ToString(%_Arguments(i));
+ return parts.join('');
+}, DONT_ENUM);
+
+// Match ES3 and Safari
+%FunctionSetLength($String.prototype.concat, 1);
+
+
+// SubString is an internal function that returns the sub string of 'string'.
+// If resulting string is of length 1, we use the one character cache
+// otherwise we call the runtime system.
+// NOTE(review): no bounds checks here — callers are expected to pass
+// 0 <= start <= end <= string.length.
+function SubString(string, start, end) {
+ // Use the one character string cache.
+ if (start + 1 == end) return %CharFromCode(%StringCharCodeAt(string, start));
+ return %StringSlice(string, start, end);
+}
+
+
+// ECMA-262, section 15.5.4.11
+// For a string search pattern only the FIRST occurrence is replaced
+// (a single %StringIndexOf probe, no loop); regexp patterns are
+// delegated to the helpers below.
+%AddProperty($String.prototype, "replace", function (search, replace) {
+ var subject = ToString(this);
+
+ // Delegate to one of the regular expression variants if necessary.
+ if (IS_REGEXP(search)) {
+ if (IS_FUNCTION(replace)) {
+ return StringReplaceRegExpWithFunction(subject, search, replace);
+ } else {
+ return StringReplaceRegExp(subject, search, replace);
+ }
+ }
+
+ // Convert the search argument to a string and search for it.
+ search = ToString(search);
+ var start = %StringIndexOf(subject, search, 0);
+ if (start < 0) return subject;
+ var end = start + search.length;
+
+ var builder = new StringBuilder();
+ // prefix
+ builder.add(SubString(subject, 0, start));
+
+ // Compute the string to replace with.
+ if (IS_FUNCTION(replace)) {
+ builder.add(replace.call(null, search, start, subject));
+ } else {
+ // [start, end] acts as a one-match capture list for $-expansion.
+ ExpandReplacement(ToString(replace), subject, [ start, end ], builder);
+ }
+
+ // suffix
+ builder.add(SubString(subject, end, subject.length));
+
+ return builder.generate();
+}, DONT_ENUM);
+
+
+// Helper function for regular expressions in String.prototype.replace.
+// Replaces every match (global regexp) or the single first match with
+// the $-expanded replacement string.
+function StringReplaceRegExp(subject, regexp, replace) {
+ // Compute an array of matches; each match is really a list of
+ // captures - pairs of (start, end) indexes into the subject string.
+ var matches;
+ if (regexp.global) {
+ matches = DoRegExpExecGlobal(regexp, subject);
+ if (matches.length == 0) return subject;
+ } else {
+ var captures = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(captures)) return subject;
+ matches = [ captures ];
+ }
+
+ // Determine the number of matches.
+ var length = matches.length;
+
+ // Build the resulting string of subject slices and replacements.
+ var result = new StringBuilder();
+ var previous = 0;
+ // The caller of StringReplaceRegExp must ensure that replace is not a
+ // function.
+ replace = ToString(replace);
+ for (var i = 0; i < length; i++) {
+ var captures = matches[i];
+ // Unmatched slice before this match, then the expanded replacement.
+ result.add(SubString(subject, previous, captures[0]));
+ ExpandReplacement(replace, subject, captures, result);
+ previous = captures[1]; // continue after match
+ }
+ result.add(SubString(subject, previous, subject.length));
+ return result.generate();
+};
+
+
+// Expand the $-expressions in the string and return a new string with
+// the result.
+// Supported forms: $$ (literal $), $& (match), $` (prefix), $' (suffix)
+// and $n / $nn capture references. An unrecognized $-sequence is
+// emitted literally.
+function ExpandReplacement(string, subject, captures, builder) {
+ var next = %StringIndexOf(string, '$', 0);
+ if (next < 0) {
+ // Fast path: no $-expressions at all.
+ builder.add(string);
+ return;
+ }
+
+ // Compute the number of captures; see ECMA-262, 15.5.4.11, p. 102.
+ var m = captures.length >> 1; // includes the match
+
+ if (next > 0) builder.add(SubString(string, 0, next));
+ var length = string.length;
+
+ while (true) {
+ var expansion = '$';
+ var position = next + 1;
+ if (position < length) {
+ var peek = %StringCharCodeAt(string, position);
+ if (peek == 36) { // $$
+ ++position;
+ } else if (peek == 38) { // $& - match
+ ++position;
+ expansion = SubString(subject, captures[0], captures[1]);
+ } else if (peek == 96) { // $` - prefix
+ ++position;
+ expansion = SubString(subject, 0, captures[0]);
+ } else if (peek == 39) { // $' - suffix
+ ++position;
+ expansion = SubString(subject, captures[1], subject.length);
+ } else if (peek >= 48 && peek <= 57) { // $n, 0 <= n <= 9
+ ++position;
+ var n = peek - 48;
+ if (position < length) {
+ peek = %StringCharCodeAt(string, position);
+ // $nn, 01 <= nn <= 99
+ if (n != 0 && peek == 48 || peek >= 49 && peek <= 57) {
+ var nn = n * 10 + (peek - 48);
+ if (nn < m) {
+ // If the two digit capture reference is within range of
+ // the captures, we use it instead of the single digit
+ // one. Otherwise, we fall back to using the single
+ // digit reference. This matches the behavior of
+ // SpiderMonkey.
+ ++position;
+ n = nn;
+ }
+ }
+ }
+ if (0 < n && n < m) {
+ expansion = CaptureString(subject, captures, n);
+ if (IS_UNDEFINED(expansion)) expansion = "";
+ } else {
+ // Because of the captures range check in the parsing of two
+ // digit capture references, we can only enter here when a
+ // single digit capture reference is outside the range of
+ // captures.
+ --position;
+ }
+ }
+ }
+
+ // Append the $ expansion and go the the next $ in the string.
+ builder.add(expansion);
+ next = %StringIndexOf(string, '$', position);
+
+ // Return if there are no more $ characters in the string. If we
+ // haven't reached the end, we need to append the suffix.
+ if (next < 0) {
+ if (position < length) {
+ builder.add(SubString(string, position, length));
+ }
+ return;
+ }
+
+ // Append substring between the previous and the next $ character.
+ builder.add(SubString(string, position, next));
+ }
+};
+
+
+// Compute the string of a given PCRE capture.
+// 'captures' is a flat array of (start, end) index pairs; index 0 is
+// the whole match, index k (k >= 1) is the k-th capture group.
+function CaptureString(string, captures, index) {
+ // Scale the index.
+ var scaled = index << 1;
+ // Compute start and end.
+ var start = captures[scaled];
+ var end = captures[scaled + 1];
+ // If either start or end is missing return undefined.
+ if (start < 0 || end < 0) return;
+ return SubString(string, start, end);
+};
+
+
+// Helper function for replacing regular expressions with the result of a
+// function application in String.prototype.replace. The function application
+// must be interleaved with the regexp matching (contrary to ECMA-262
+// 15.5.4.11) to mimic SpiderMonkey and KJS behavior when the function uses
+// the static properties of the RegExp constructor. Example:
+// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
+// should be 'abcd' and not 'dddd' (or anything else).
+function StringReplaceRegExpWithFunction(subject, regexp, replace) {
+ var result = new ReplaceResultBuilder(subject);
+ // Captures is an array of pairs of (start, end) indices for the match and
+ // any captured substrings.
+ var captures = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(captures)) return subject;
+
+ // There's at least one match. If the regexp is global, we have to loop
+ // over all matches. The loop is not in C++ code here like the one in
+ // RegExp.prototype.exec, because of the interleaved function application.
+ // Unfortunately, that means this code is nearly duplicated, here and in
+ // jsregexp.cc.
+ if (regexp.global) {
+ var previous = 0;
+ do {
+ // Unmatched slice before the match, then the function's result.
+ result.addSpecialSlice(previous, captures[0]);
+ result.add(ApplyReplacementFunction(replace, captures, subject));
+ // Continue with the next match.
+ previous = captures[1];
+ // Increment previous if we matched an empty string, as per ECMA-262
+ // 15.5.4.10.
+ if (captures[0] == captures[1]) previous++;
+
+ // Per ECMA-262 15.10.6.2, if the previous index is greater than the
+ // string length, there is no match
+ captures = (previous > subject.length)
+ ? null
+ : DoRegExpExec(regexp, subject, previous);
+ } while (!IS_NULL(captures));
+
+ // Tack on the final right substring after the last match, if necessary.
+ if (previous < subject.length) {
+ result.addSpecialSlice(previous, subject.length);
+ }
+ } else { // Not a global regexp, no need to loop.
+ result.addSpecialSlice(0, captures[0]);
+ result.add(ApplyReplacementFunction(replace, captures, subject));
+ result.addSpecialSlice(captures[1], subject.length);
+ }
+
+ return result.generate();
+}
+
+
+// Helper function to apply a string replacement function once.
+// The function is called as replace(match, capture1..captureN, index,
+// subject) and its result is coerced to a string.
+function ApplyReplacementFunction(replace, captures, subject) {
+ // Compute the parameter list consisting of the match, captures, index,
+ // and subject for the replace function invocation.
+ var index = captures[0];
+ // The number of captures plus one for the match.
+ var m = captures.length >> 1;
+ if (m == 1) {
+ // Fast case: no capture groups, avoid building a parameter array.
+ var s = CaptureString(subject, captures, 0);
+ return ToString(replace.call(null, s, index, subject));
+ }
+ var parameters = $Array(m + 2);
+ for (var j = 0; j < m; j++) {
+ parameters[j] = CaptureString(subject, captures, j);
+ }
+ // After the loop j == m, so index and subject land at the end.
+ parameters[j] = index;
+ parameters[j + 1] = subject;
+ return ToString(replace.apply(null, parameters));
+}
+
+
+// ECMA-262 section 15.5.4.7
+%AddProperty($String.prototype, "indexOf", function(searchString /* position */) { // length == 1
+ var str = ToString(this);
+ var searchStr = ToString(searchString);
+ var index = 0;
+ if (%_ArgumentsLength() > 1) {
+ var arg1 = %_Arguments(1); // position
+ index = TO_INTEGER(arg1);
+ }
+ // Clamp the start position to [0, str.length].
+ if (index < 0) index = 0;
+ if (index > str.length) index = str.length;
+ return %StringIndexOf(str, searchStr, index);
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.5.4.8
+%AddProperty($String.prototype, "lastIndexOf", function(searchString /* position */) { // length == 1
+ var sub = ToString(this);
+ var pat = ToString(searchString);
+ // When position is absent, use NaN so the search starts from the end.
+ // NOTE(review): %NumberNaN apparently produces NaN — the argument
+ // looks unused; confirm against the runtime implementation.
+ var index = (%_ArgumentsLength() > 1)
+ ? ToNumber(%_Arguments(1) /* position */)
+ : %NumberNaN(1);
+ var firstIndex;
+ if ($isNaN(index)) {
+ firstIndex = sub.length - pat.length;
+ } else {
+ firstIndex = TO_INTEGER(index);
+ // Clamp so the pattern still fits within the subject.
+ if (firstIndex + pat.length > sub.length) {
+ firstIndex = sub.length - pat.length;
+ }
+ }
+ return %StringLastIndexOf(sub, pat, firstIndex);
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.5.4.9
+//
+// This function is implementation specific. For now, we do not
+// do anything locale specific.
+%AddProperty($String.prototype, "localeCompare", function(other) {
+ // With no argument, compare against nothing and report equality.
+ if (%_ArgumentsLength() === 0) return 0;
+
+ var this_str = ToString(this);
+ var other_str = ToString(other);
+ return %StringLocaleCompare(this_str, other_str);
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.5.4.10
+%AddProperty($String.prototype, "match", function(regexp) {
+ if (!IS_REGEXP(regexp)) regexp = new ORIGINAL_REGEXP(regexp);
+ var subject = ToString(this);
+
+ if (!regexp.global) return regexp.exec(subject);
+ var matches = DoRegExpExecGlobal(regexp, subject);
+
+ // If the regexp did not match, return null.
+ if (matches.length == 0) return null;
+
+ // Build the result array.
+ var result = new $Array(match_string);
+ for (var i = 0; i < matches.length; ++i) {
+ var match = matches[i];
+ var match_string = subject.slice(match[0], match[1]);
+ result[i] = match_string;
+ }
+
+ return result;
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.5.4.12
+// Returns the index of the first match or -1. lastIndex is saved and
+// restored so searching never mutates the regexp's observable state.
+%AddProperty($String.prototype, "search", function(re) {
+ var regexp = new ORIGINAL_REGEXP(re);
+ var s = ToString(this);
+ var last_idx = regexp.lastIndex; // keep old lastIndex
+ regexp.lastIndex = 0; // ignore re.global property
+ var result = regexp.exec(s);
+ regexp.lastIndex = last_idx; // restore lastIndex
+ if (result == null)
+ return -1;
+ else
+ return result.index;
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.5.4.13
+// Negative indices count from the end of the string; both endpoints
+// are clamped to [0, length] and an empty string is returned when
+// start ends up at or after end.
+%AddProperty($String.prototype, "slice", function(start, end) {
+ var s = ToString(this);
+ var s_len = s.length;
+ var start_i = TO_INTEGER(start);
+ var end_i = s_len;
+ if (end !== void 0)
+ end_i = TO_INTEGER(end);
+
+ if (start_i < 0) {
+ start_i += s_len;
+ if (start_i < 0)
+ start_i = 0;
+ } else {
+ if (start_i > s_len)
+ start_i = s_len;
+ }
+
+ if (end_i < 0) {
+ end_i += s_len;
+ if (end_i < 0)
+ end_i = 0;
+ } else {
+ if (end_i > s_len)
+ end_i = s_len;
+ }
+
+ var num_c = end_i - start_i;
+ if (num_c < 0)
+ num_c = 0;
+
+ return SubString(s, start_i, start_i + num_c);
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.5.4.14
+// Splits the subject around string or regexp separators, up to 'limit'
+// elements; regexp capture groups are spliced into the result.
+%AddProperty($String.prototype, "split", function(separator, limit) {
+ var subject = ToString(this);
+ var result = [];
+ // Absent limit means effectively unlimited (2^32 - 1).
+ var lim = (limit === void 0) ? 0xffffffff : ToUint32(limit);
+
+ if (lim === 0) return result;
+
+ // ECMA-262 says that if separator is undefined, the result should
+ // be an array of size 1 containing the entire string. SpiderMonkey
+ // and KJS have this behaviour only when no separator is given. If
+ // undefined is explicitly given, they convert it to a string and
+ // use that. We do as SpiderMonkey and KJS.
+ if (%_ArgumentsLength() === 0) {
+ result[result.length] = subject;
+ return result;
+ }
+
+ var length = subject.length;
+ var currentIndex = 0;
+ var startIndex = 0;
+
+ var sep = IS_REGEXP(separator) ? separator : ToString(separator);
+
+ // Empty subject: empty result if the separator matches the empty
+ // string, otherwise a single empty element.
+ if (length === 0) {
+ if (splitMatch(sep, subject, 0, 0) != null) return result;
+ result[result.length] = subject;
+ return result;
+ }
+
+ while (true) {
+
+ if (startIndex === length) {
+ result[result.length] = subject.slice(currentIndex, length);
+ return result;
+ }
+
+ var match = splitMatch(sep, subject, currentIndex, startIndex);
+
+ if (IS_NULL(match)) {
+ result[result.length] = subject.slice(currentIndex, length);
+ return result;
+ }
+
+ var endIndex = match[0];
+
+ // We ignore a zero-length match at the currentIndex.
+ if (startIndex === endIndex && endIndex === currentIndex) {
+ startIndex++;
+ continue;
+ }
+
+ result[result.length] = match[1];
+ if (result.length === lim) return result;
+
+ // Append the separator's capture groups (match[2..]) as well.
+ for (var i = 2; i < match.length; i++) {
+ result[result.length] = match[i];
+ if (result.length === lim) return result;
+ }
+
+ startIndex = currentIndex = endIndex;
+ }
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.5.4.14
+// Helper function used by split.
+// Returns null when the separator does not match at start_index;
+// otherwise an array where element 0 is the index just past the
+// separator match, element 1 is the field preceding it (from
+// current_index), and any further elements are regexp captures.
+function splitMatch(separator, subject, current_index, start_index) {
+ if (IS_REGEXP(separator)) {
+ var ovector = DoRegExpExec(separator, subject, start_index);
+ if (ovector == null) return null;
+ var nof_results = ovector.length >> 1;
+ var result = new $Array(nof_results + 1);
+ result[0] = ovector[1];
+ result[1] = subject.slice(current_index, ovector[0]);
+ for (var i = 1; i < nof_results; i++) {
+ var matching_start = ovector[2*i];
+ var matching_end = ovector[2*i + 1];
+ // Unmatched capture groups (-1, -1) are left as holes (undefined).
+ if (matching_start != -1 && matching_end != -1) {
+ result[i + 1] = subject.slice(matching_start, matching_end);
+ }
+ }
+ return result;
+ }
+
+ var separatorIndex = subject.indexOf(separator, start_index);
+ if (separatorIndex === -1) return null;
+
+ return [ separatorIndex + separator.length, subject.slice(current_index, separatorIndex) ];
+};
+
+
+// ECMA-262 section 15.5.4.15
+// Unlike slice, substring clamps negative indices to 0 and swaps the
+// endpoints when start > end.
+%AddProperty($String.prototype, "substring", function(start, end) {
+ var s = ToString(this);
+ var s_len = s.length;
+ var start_i = TO_INTEGER(start);
+ var end_i = s_len;
+ if (!IS_UNDEFINED(end))
+ end_i = TO_INTEGER(end);
+
+ if (start_i < 0) start_i = 0;
+ if (start_i > s_len) start_i = s_len;
+ if (end_i < 0) end_i = 0;
+ if (end_i > s_len) end_i = s_len;
+
+ if (start_i > end_i) {
+ var tmp = end_i;
+ end_i = start_i;
+ start_i = tmp;
+ }
+
+ return SubString(s, start_i, end_i);
+}, DONT_ENUM);
+
+
+// This is not a part of ECMA-262.
+// substr(start, n) takes a start index (negative counts from the end)
+// and a character count, rather than an end index.
+%AddProperty($String.prototype, "substr", function(start, n) {
+ var s = ToString(this);
+ var len;
+
+ // Correct n: If not given, set to string length; if explicitly
+ // set to undefined, zero, or negative, returns empty string.
+ if (n === void 0) {
+ len = s.length;
+ } else {
+ len = TO_INTEGER(n);
+ if (len <= 0) return '';
+ }
+
+ // Correct start: If not given (or undefined), set to zero; otherwise
+ // convert to integer and handle negative case.
+ if (start === void 0) {
+ start = 0;
+ } else {
+ start = TO_INTEGER(start);
+ // If positive, and greater than or equal to the string length,
+ // return empty string.
+ if (start >= s.length) return '';
+ // If negative and absolute value is larger than the string length,
+ // use zero.
+ if (start < 0) {
+ start += s.length;
+ if (start < 0) start = 0;
+ }
+ }
+
+ var end = start + len;
+ if (end > s.length) end = s.length;
+
+ return SubString(s, start, end);
+}, DONT_ENUM);
+
+
+// ECMA-262, 15.5.4.16
+%AddProperty($String.prototype, "toLowerCase", function() {
+ return %StringToLowerCase(ToString(this));
+}, DONT_ENUM);
+
+
+// ECMA-262, 15.5.4.17
+// The locale variants share the locale-independent implementations.
+%AddProperty($String.prototype, "toLocaleLowerCase", $String.prototype.toLowerCase, DONT_ENUM);
+
+
+// ECMA-262, 15.5.4.18
+%AddProperty($String.prototype, "toUpperCase", function() {
+ return %StringToUpperCase(ToString(this));
+}, DONT_ENUM);
+
+
+// ECMA-262, 15.5.4.19
+%AddProperty($String.prototype, "toLocaleUpperCase", $String.prototype.toUpperCase, DONT_ENUM);
+
+
+// ECMA-262, section 15.5.3.2
+%AddProperty($String, "fromCharCode", function(code) {
+ var n = %_ArgumentsLength();
+ // Fast case for a single argument; the code unit is masked to 16 bits.
+ if (n == 1) return %CharFromCode(ToNumber(code) & 0xffff)
+
+ // NOTE: This is not super-efficient, but it is necessary because we
+ // want to avoid converting to numbers from within the virtual
+ // machine. Maybe we can find another way of doing this?
+ var codes = new $Array(n);
+ for (var i = 0; i < n; i++) codes[i] = ToNumber(%_Arguments(i));
+ return %StringFromCharCodeArray(codes);
+}, DONT_ENUM);
+
+
+// ECMA-262, section 15.5.4.4
+// Returns the single-character string at 'pos', or "" when the index
+// is out of range.
+function CharAt(pos) {
+ var subject = ToString(this);
+ var index = TO_INTEGER(pos);
+ if (index >= subject.length || index < 0) return "";
+ return %CharFromCode(%StringCharCodeAt(subject, index));
+};
+
+%AddProperty($String.prototype, "charAt", CharAt, DONT_ENUM);
+
+
+// Helper function for very basic XSS protection.
+function HtmlEscape(str) {
+ return ToString(str).replace(/</g, "<")
+ .replace(/>/g, ">")
+ .replace(/"/g, """)
+ .replace(/'/g, "'");
+};
+
+
+// Compatibility support for KJS.
+// Tested by mozilla/js/tests/js1_5/Regress/regress-276103.js.
+// Non-standard HTML-wrapper methods. Note that only the attribute
+// argument is passed through HtmlEscape; the receiver itself is
+// inserted unescaped, matching other engines.
+%AddProperty($String.prototype, "link", function(link) {
+ return "<a href=\"" + HtmlEscape(link) + "\">" + this + "</a>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "anchor", function(name) {
+ return "<a name=\"" + HtmlEscape(name) + "\">" + this + "</a>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "fontcolor", function(color) {
+ return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "fontsize", function(size) {
+ return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "big", function() {
+ return "<big>" + this + "</big>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "blink", function() {
+ return "<blink>" + this + "</blink>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "bold", function() {
+ return "<b>" + this + "</b>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "fixed", function() {
+ return "<tt>" + this + "</tt>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "italics", function() {
+ return "<i>" + this + "</i>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "small", function() {
+ return "<small>" + this + "</small>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "strike", function() {
+ return "<strike>" + this + "</strike>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "sub", function() {
+ return "<sub>" + this + "</sub>";
+}, DONT_ENUM);
+
+
+%AddProperty($String.prototype, "sup", function() {
+ return "<sup>" + this + "</sup>";
+}, DONT_ENUM);
+
+
+// StringBuilder support.
+// StringBuilder accumulates string parts and concatenates them in one
+// runtime call; ReplaceResultBuilder additionally supports compact
+// references to slices of a fixed 'special' string.
+
+function StringBuilder() {
+ this.elements = new $Array();
+}
+
+
+function ReplaceResultBuilder(str) {
+ this.elements = new $Array();
+ this.special_string = str;
+}
+
+
+// Shared by both builders: append a (stringified) non-empty part.
+ReplaceResultBuilder.prototype.add =
+StringBuilder.prototype.add = function(str) {
+ if (!IS_STRING(str)) str = ToString(str);
+ if (str.length > 0) {
+ var elements = this.elements;
+ elements[elements.length] = str;
+ }
+}
+
+
+// Append a slice [start, end) of the special string. Small slices are
+// encoded as a single number, (start << 11) + len, which the runtime
+// concat decodes; larger ones are materialized immediately.
+ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
+ var len = end - start;
+ if (len == 0) return;
+ var elements = this.elements;
+ if (start >= 0 && len >= 0 && start < 0x80000 && len < 0x800) {
+ elements[elements.length] = (start << 11) + len;
+ } else {
+ elements[elements.length] = SubString(this.special_string, start, end);
+ }
+}
+
+
+StringBuilder.prototype.generate = function() {
+ return %StringBuilderConcat(this.elements, "");
+}
+
+
+ReplaceResultBuilder.prototype.generate = function() {
+ return %StringBuilderConcat(this.elements, this.special_string);
+}
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+#define __ masm->
+
+
+// Emits ARM code that probes one stub-cache table (primary or
+// secondary) at the entry given by 'offset'. On a hit, control jumps
+// to the cached code object and never returns here; on a miss, it
+// falls through with 'name' and 'offset' intact.
+static void ProbeTable(MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset) {
+ ExternalReference key_offset(SCTableReference::keyReference(table));
+ ExternalReference value_offset(SCTableReference::valueReference(table));
+
+ Label miss;
+
+ // Save the offset on the stack.
+ // (offset is clobbered below while checking the flags.)
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ mov(ip, Operand(key_offset));
+ __ ldr(ip, MemOperand(ip, offset, LSL, 1));
+ __ cmp(name, Operand(ip));
+ __ b(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ mov(ip, Operand(value_offset));
+ __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+
+ // Check that the flags match what we're looking for.
+ __ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, offset, Operand(~Code::kFlagsTypeMask));
+ __ cmp(offset, Operand(flags));
+ __ b(ne, &miss);
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(ip, Operand(value_offset));
+ __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+
+ // Jump to the first instruction in the code stub.
+ __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(offset);
+
+ // Miss: Restore offset and fall through.
+ __ bind(&miss);
+ __ pop(offset);
+}
+
+
+// Emits code that looks up (receiver map, name, flags) in the stub
+// cache and tail-jumps to a cached stub on a hit. On a miss in both
+// tables, control falls through so the caller can enter the runtime.
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch) {
+ Label miss;
+
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 8.
+ ASSERT(sizeof(Entry) == 8);
+
+ // Make sure the flags does not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ // Primary hash: (map + name length) ^ flags, masked to table size.
+ __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(ip, FieldMemOperand(name, String::kLengthOffset));
+ __ add(scratch, scratch, Operand(ip));
+ __ eor(scratch, scratch, Operand(flags));
+ __ and_(scratch,
+ scratch,
+ Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(masm, flags, kPrimary, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ // Secondary hash: (primary - name + flags), masked to table size.
+ __ sub(scratch, scratch, Operand(name));
+ __ add(scratch, scratch, Operand(flags));
+ __ and_(scratch,
+ scratch,
+ Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(masm, flags, kSecondary, name, scratch);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+}
+
+
+// Emits code that loads, into 'prototype', the initial prototype of
+// the global function stored at 'index' in the global context
+// (e.g. String or Number), dereferencing one step at a time.
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ __ ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ ldr(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+#undef __
+
+#define __ masm()->
+
+
+// Compiles the lazy-compile trampoline: calls Runtime::kLazyCompile
+// for the function on the stack, then tail-calls the freshly compiled
+// code. Returns the generated Code object (via GetCodeWithFlags).
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+ HandleScope scope;
+
+ // Enter the JS frame but don't add additional arguments.
+ __ EnterJSFrame(0, 0);
+
+ // Push the function on the stack and call the runtime function.
+ __ Push(MemOperand(pp, 0));
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+
+ // Move result to r1 and restore number of arguments.
+ __ mov(r1, Operand(r0));
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
+
+ __ ExitJSFrame(DO_NOT_RETURN, 0);
+
+ // Do a tail-call of the compiled function.
+ // Skip the Code header to reach the first instruction.
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r1);
+
+ return GetCodeWithFlags(flags);
+}
+
+
+// Compiles a call stub for a function stored in an in-object property
+// field: loads the function from the holder's properties array,
+// verifies it really is a JSFunction, patches it onto the stack and
+// tail-calls its code. Any failed check jumps to the call-miss IC.
+Object* CallStubCompiler::CompileCallField(Object* object,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- r1: receiver
+ // -- lr: return address
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Do the right check and compute the holder register.
+ __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
+
+ // Get the properties array of the holder and get the function from the field.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ ldr(r3, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(r3, offset));
+
+ // Check that the function really is a function.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+ // Get the map.
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(ne, &miss);
+
+ // Patch the function on the stack; 1 ~ receiver.
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r3, MemOperand(ip, 1 * kPointerSize));
+
+ // Setup the context and jump to the call code of the function (tail call).
+ __ ldr(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ __ ldr(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(FIELD);
+}
+
+
+// ARM: Compiles a call IC stub that invokes a known constant function.
+// The receiver (r1) is validated according to |check| before jumping to
+// the cached code for |function|; any failed check branches to the
+// generic call-miss handler.  r0 holds the argument count throughout.
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- r1: receiver
+ // -- lr: return address
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ // (Skipped for NUMBER_CHECK, where a smi receiver is valid.)
+ if (check != NUMBER_CHECK) {
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+ }
+
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ // Check that the maps haven't changed.
+ __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
+ break;
+
+ case STRING_CHECK:
+ // Check that the object is a two-byte string or a symbol.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+ __ b(hs, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r2);
+ __ CheckMaps(JSObject::cast(object->GetPrototype()),
+ r2, holder, r3, r1, &miss);
+ break;
+
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &fast);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ r2);
+ __ CheckMaps(JSObject::cast(object->GetPrototype()),
+ r2, holder, r3, r1, &miss);
+ break;
+ }
+
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ cmp(r1, Operand(Factory::true_value()));
+ __ b(eq, &fast);
+ __ cmp(r1, Operand(Factory::false_value()));
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ r2);
+ __ CheckMaps(JSObject::cast(object->GetPrototype()),
+ r2, holder, r3, r1, &miss);
+ break;
+ }
+
+ case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+ __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
+ // Make sure object->elements()->map() != Heap::hash_table_map()
+ // Get the elements array of the object.
+ __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r2, Operand(Factory::hash_table_map()));
+ __ b(eq, &miss);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // Get the function and setup the context.
+ __ mov(r3, Operand(Handle<JSFunction>(function)));
+ __ ldr(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+ // Patch the function on the stack; 1 ~ receiver.
+ __ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r3, MemOperand(ip, 1 * kPointerSize));
+
+ // Jump to the cached code (tail call).
+ Handle<Code> code(function->code());
+ __ Jump(code, code_target);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION);
+}
+
+
+// ARM: Call IC stub for properties behind an interceptor.  Not yet
+// implemented (see TODO): the generated stub binds the miss label
+// immediately, so every call falls through to the generic miss handler.
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- r1: receiver
+ // -- lr: return address
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // TODO(1224669): Implement.
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR);
+}
+
+
+// ARM: Compiles a store IC stub that writes the value in r0 into the
+// |index|th slot of the receiver's properties array.  If |transition|
+// is non-NULL the receiver's map is updated first (a map transition).
+// Misses (smi receiver, changed map, failed security check) jump to the
+// generic StoreIC_Miss builtin with the name restored in r2.
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss, exit;
+
+ // Get the receiver from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the map of the receiver hasn't changed.
+ __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Handle<Map>(object->map())));
+ __ b(ne, &miss);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalObject()) {
+ __ CheckAccessGlobal(r3, r1, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());
+
+ // Get the properties array
+ __ ldr(r1, FieldMemOperand(r3, JSObject::kPropertiesOffset));
+
+ // Perform map transition for the receiver if necessary.
+ if (transition != NULL) {
+ // Update the map of the object; no write barrier updating is
+ // needed because the map is never in new space.
+ __ mov(ip, Operand(Handle<Map>(transition)));
+ __ str(ip, FieldMemOperand(r3, HeapObject::kMapOffset));
+ }
+
+ // Write to the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ str(r0, FieldMemOperand(r1, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+
+ // Update the write barrier for the array address.
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r1, r3, r2); // OK to clobber r2, since we return right after.
+
+ // Return the value (register r0).
+ __ bind(&exit);
+ __ Ret();
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ mov(r2, Operand(Handle<String>(name))); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION);
+}
+
+
+// ARM: Compiles a store IC stub for a property with an AccessorInfo
+// callback.  After validating the receiver, the stub pushes
+// (receiver, callback info, name, value) and tail-calls the C runtime
+// helper kStoreCallbackProperty to perform the actual store.
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Get the object from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Handle<Map>(object->map())));
+ __ b(ne, &miss);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalObject()) {
+ __ CheckAccessGlobal(r3, r1, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());
+
+ __ ldr(ip, MemOperand(sp)); // receiver
+ __ push(ip);
+ __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
+ __ push(ip);
+ __ push(r2); // name
+ __ push(r0); // value
+
+ // Do tail-call to the C builtin.
+ __ mov(r0, Operand(3)); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(IC_Utility(IC::kStoreCallbackProperty)));
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ mov(r2, Operand(Handle<String>(name))); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS);
+}
+
+
+// ARM: Compiles a store IC stub for a property behind an interceptor.
+// After validating the receiver, the stub pushes (receiver, name, value)
+// and tail-calls the C runtime helper kStoreInterceptorProperty.
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Get the object from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Handle<Map>(receiver->map())));
+ __ b(ne, &miss);
+
+ // Perform global security token check if needed.
+ if (receiver->IsJSGlobalObject()) {
+ __ CheckAccessGlobal(r3, r1, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(receiver->IsJSGlobalObject() || !receiver->IsAccessCheckNeeded());
+
+ __ ldr(ip, MemOperand(sp)); // receiver
+ __ push(ip);
+ __ push(r2); // name
+ __ push(r0); // value
+
+ // Do tail-call to the C builtin.
+ __ mov(r0, Operand(2)); // not counting receiver
+ ExternalReference store_interceptor =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ __ JumpToBuiltin(store_interceptor);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ mov(r2, Operand(Handle<String>(name))); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR);
+}
+
+
+// ARM: Compiles a load IC stub that reads the |index|th slot of the
+// holder's properties array into r0, after walking and validating the
+// prototype chain from |object| to |holder| via CheckMaps.
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the maps haven't changed.  |reg| ends up holding the
+ // object that actually owns the property (the holder).
+ Register reg = __ CheckMaps(object, r0, holder, r3, r1, &miss);
+
+ // Get the properties array of the holder.
+ __ ldr(r3, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Return the value from the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ ldr(r0, FieldMemOperand(r3, offset));
+ __ Ret();
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ __ ldr(r0, MemOperand(sp)); // restore receiver
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(FIELD);
+}
+
+
+// ARM: Compiles a load IC stub for a property with an AccessorInfo
+// callback.  Pushes (receiver, callback info, name, holder) and
+// tail-calls the C runtime helper kLoadCallbackProperty.
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the maps haven't changed.
+ Register reg = __ CheckMaps(object, r0, holder, r3, r1, &miss);
+
+ // Push the arguments on the JS stack of the caller.
+ __ push(r0); // receiver
+ __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data
+ __ push(ip);
+ __ push(r2); // name
+ __ push(reg); // holder
+
+ // Do tail-call to the C builtin.
+ __ mov(r0, Operand(3)); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(IC_Utility(IC::kLoadCallbackProperty)));
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS);
+}
+
+
+// ARM: Compiles a load IC stub for a constant-valued property.  After
+// validating the map chain, the known |value| is materialized directly
+// into r0 — no memory load is needed at run time.
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+ JSObject* holder,
+ Object* value) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the maps haven't changed.  The returned holder register
+ // is not needed; CheckMaps is used purely for chain validation here.
+ Register reg = __ CheckMaps(object, r0, holder, r3, r1, &miss);
+
+ // Return the constant value.
+ __ mov(r0, Operand(Handle<Object>(value)));
+ __ Ret();
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION);
+}
+
+
+// ARM: Compiles a load IC stub for a property behind an interceptor.
+// Pushes (receiver, holder, name) and tail-calls the C runtime helper
+// kLoadInterceptorProperty.
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the maps haven't changed.
+ Register reg = __ CheckMaps(object, r0, holder, r3, r1, &miss);
+
+ // Push the arguments on the JS stack of the caller.
+ __ push(r0); // receiver
+ __ push(reg); // holder
+ __ push(r2); // name
+
+ // Do tail-call to the C builtin.
+ __ mov(r0, Operand(2)); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)));
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR);
+}
+
+
+// TODO(1224671): IC stubs for keyed loads have not been implemented
+// for ARM.
+// Until then, every keyed load/store stub compiler entry point below
+// hits UNIMPLEMENTED() (a fatal abort in debug builds) and returns
+// undefined as a placeholder value.
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadShortStringLength(String* name) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadMediumStringLength(String* name) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadLongStringLength(String* name) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+// Keyed stores are likewise not yet implemented for ARM.
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ UNIMPLEMENTED();
+ return Heap::undefined_value();
+}
+
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+#define __ masm->
+
+
+// IA-32: Probes one stub-cache table (primary or secondary) for an entry
+// whose key matches |name| and whose code flags match |flags|.  On a hit,
+// jumps straight into the cached code; on a miss, restores |offset| and
+// falls through so the caller can try the next table.
+static void ProbeTable(MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset) {
+ ExternalReference key_offset(SCTableReference::keyReference(table));
+ ExternalReference value_offset(SCTableReference::valueReference(table));
+
+ Label miss;
+
+ // Save the offset on the stack (it is clobbered by the flag check below).
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+ __ j(not_equal, &miss, not_taken);
+
+ // Get the code entry from the cache.
+ __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+ // Check that the flags match what we're looking for.
+ // The type bits are masked off because the probe is type-agnostic.
+ __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsTypeMask);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+ // Jump to the first instruction in the code stub.
+ __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(Operand(offset));
+
+ // Miss: Restore offset and fall through.
+ __ bind(&miss);
+ __ pop(offset);
+}
+
+
+// IA-32: Emits the two-level stub-cache probe.  Hashes the receiver's map
+// together with the name and |flags|, probes the primary table, rehashes
+// on a primary miss and probes the secondary table.  A double miss falls
+// through to the caller's runtime path.  |scratch| is clobbered.
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch) {
+ Label miss;
+
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 8.
+ ASSERT(sizeof(Entry) == 8);
+
+ // Make sure the flags does not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Get the map of the receiver and compute the hash.
+ __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ add(scratch, FieldOperand(name, String::kLengthOffset));
+ __ xor_(scratch, flags);
+ __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+
+ // Probe the primary table.
+ ProbeTable(masm, flags, kPrimary, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ sub(scratch, Operand(name));
+ __ add(Operand(scratch), Immediate(flags));
+ __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+
+ // Probe the secondary table.
+ ProbeTable(masm, flags, kSecondary, name, scratch);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+}
+
+
+// IA-32: Loads into |prototype| the prototype object of the global
+// function stored at context slot |index| (e.g. the String or Number
+// function), reached via the current context in esi.
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ __ mov(prototype,
+ FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ __ mov(prototype, Operand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ mov(prototype,
+ FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// IA-32: Emits code that returns a JSArray's length field in eax, or
+// jumps to |miss_label| if the receiver is a smi or not a JS array.
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the object is a JS array.
+ __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(scratch, JS_ARRAY_TYPE);
+ __ j(not_equal, miss_label, not_taken);
+
+ // Load length directly from the JS array.
+ __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
+ __ ret(0);
+}
+
+
+// IA-32: Emits code that returns a short string's length as a smi in
+// eax.  The raw length is shifted out of the packed length field and
+// then smi-tagged.  Non-strings and non-short strings go to |miss_label|.
+void StubCompiler::GenerateLoadShortStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the object is a short string.
+ __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsNotStringMask | kStringSizeMask);
+ __ cmp(scratch, kStringTag | kShortStringTag);
+ __ j(not_equal, miss_label, not_taken);
+
+ // Load length directly from the string.
+ __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
+ __ shr(eax, String::kShortLengthShift);
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+}
+
+// IA-32: Emits code that returns a medium string's length as a smi in
+// eax.  Identical to the short-string variant except for the size tag
+// and length shift.  Non-strings and non-medium strings miss.
+void StubCompiler::GenerateLoadMediumStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the object is a medium string.
+ __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsNotStringMask | kStringSizeMask);
+ __ cmp(scratch, kStringTag | kMediumStringTag);
+ __ j(not_equal, miss_label, not_taken);
+
+ // Load length directly from the string.
+ __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
+ __ shr(eax, String::kMediumLengthShift);
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+}
+
+
+// IA-32: Emits code that returns a long string's length as a smi in
+// eax.  Identical to the short-string variant except for the size tag
+// and length shift.  Non-strings and non-long strings miss.
+void StubCompiler::GenerateLoadLongStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the object is a long string.
+ __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsNotStringMask | kStringSizeMask);
+ __ cmp(scratch, kStringTag | kLongStringTag);
+ __ j(not_equal, miss_label, not_taken);
+
+ // Load length directly from the string.
+ __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
+ __ shr(eax, String::kLongLengthShift);
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+}
+
+
+// IA-32: Emits code that returns a JSFunction's "prototype" property in
+// eax, handling three cases: a direct prototype, an initial map whose
+// prototype must be unwrapped, and the non-instance-prototype case where
+// the prototype comes from the initial map's constructor field.
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the receiver is a function.
+ __ mov(scratch1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(scratch2, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ __ cmp(scratch2, JS_FUNCTION_TYPE);
+ __ j(not_equal, miss_label, not_taken);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ movzx_b(scratch2, FieldOperand(scratch1, Map::kBitFieldOffset));
+ __ test(scratch2, Immediate(1 << Map::kHasNonInstancePrototype));
+ __ j(not_zero, &non_instance, not_taken);
+
+ // Get the prototype or initial map from the function.
+ __ mov(scratch1,
+ FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ __ cmp(Operand(scratch1), Immediate(Factory::the_hole_value()));
+ __ j(equal, miss_label, not_taken);
+ __ mov(eax, Operand(scratch1));
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ mov(scratch1, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(scratch2, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ __ cmp(scratch2, MAP_TYPE);
+ __ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ __ mov(eax, FieldOperand(eax, Map::kPrototypeOffset));
+
+ // All done: Return the prototype.
+ __ bind(&done);
+ __ ret(0);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ __ bind(&non_instance);
+ __ mov(eax, FieldOperand(scratch1, Map::kConstructorOffset));
+ __ ret(0);
+}
+
+
+// IA-32: Emits code that loads the |index|th properties-array slot of
+// the holder into eax and returns, after validating the map chain from
+// |object| to |holder|.  Failed checks jump to |miss_label|.
+void StubCompiler::GenerateLoadField(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ int index,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+
+ // Get the properties array of the holder.
+ __ mov(scratch1, FieldOperand(reg, JSObject::kPropertiesOffset));
+
+ // Return the value from the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ mov(eax, FieldOperand(scratch1, offset));
+ __ ret(0);
+}
+
+
+// IA-32: Emits code for loading a callback-backed property.  Rearranges
+// the caller's stack to (receiver, callback info, name, holder) beneath
+// the return address and tail-calls the C helper kLoadCallbackProperty.
+void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name,
+ Register scratch1,
+ Register scratch2,
+ AccessorInfo* callback,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+
+ // Push the arguments on the JS stack of the caller.
+ __ pop(scratch2); // remove return address
+ __ push(receiver); // receiver
+ __ push(Immediate(Handle<AccessorInfo>(callback))); // callback data
+ __ push(name); // name
+ __ push(reg); // holder
+ __ push(scratch2); // restore return address
+
+ // Do tail-call to the C builtin.
+ __ mov(eax, 3); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(IC_Utility(IC::kLoadCallbackProperty)));
+}
+
+
+// IA-32: Emits code that returns a known constant property value in eax
+// after validating the map chain from |object| to |holder|.
+void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Object* value,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the maps haven't changed.  The returned holder register
+ // is unused; CheckMaps is emitted purely for its validation checks.
+ Register reg =
+ __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+
+ // Return the constant value.
+ __ mov(eax, Handle<Object>(value));
+ __ ret(0);
+}
+
+
+// IA-32: Emits code for loading an interceptor-backed property.
+// Rearranges the caller's stack to (receiver, holder, name) beneath the
+// return address and tail-calls the C helper kLoadInterceptorProperty.
+void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+
+ // Push the arguments on the JS stack of the caller.
+ __ pop(scratch2); // remove return address
+ __ push(receiver); // receiver
+ __ push(reg); // holder
+ __ push(name); // name
+ __ push(scratch2); // restore return address
+
+ // Do tail-call to the C builtin.
+ __ mov(eax, 2); // not counting receiver
+ __ JumpToBuiltin(ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)));
+}
+
+
+// IA-32: Emits an unconditional jump to the miss builtin matching |kind|
+// (plain or keyed load IC).
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+ Code* code = NULL;
+ if (kind == Code::LOAD_IC) {
+ code = Builtins::builtin(Builtins::LoadIC_Miss);
+ } else {
+ code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ }
+
+ Handle<Code> ic(code);
+ __ jmp(ic, code_target);
+}
+
+
+// IA-32: Emits code that stores eax into the |index|th properties-array
+// slot of the receiver, optionally performing a map transition first.
+// Unlike the ARM version, the write barrier is updated unconditionally;
+// |name_reg| is repurposed to carry the stored value into RecordWrite.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ JSObject* object,
+ int index,
+ Map* transition,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the object isn't a smi.
+ __ test(receiver_reg, Immediate(kSmiTagMask));
+ __ j(zero, miss_label, not_taken);
+
+ // Check that the map of the object hasn't changed.
+ __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(object->map())));
+ __ j(not_equal, miss_label, not_taken);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalObject()) {
+ __ CheckAccessGlobal(receiver_reg, scratch, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());
+
+ // Get the properties array (optimistically).
+ __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+
+ // Perform map transition for the receiver if necessary.
+ if (transition != NULL) {
+ // Update the map of the object; no write barrier updating is
+ // needed because the map is never in new space.
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(transition)));
+ }
+
+ // Write to the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ mov(FieldOperand(scratch, offset), eax);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, Operand(eax));
+ __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+
+ // Return the value (register eax).
+ __ ret(0);
+}
+
+
+#undef __
+
+#define __ masm()->
+
+
+// TODO(1241006): Avoid having lazy compile stubs specialized by the
+// number of arguments. It is not needed anymore.
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+ HandleScope scope;
+
+ // Enter an internal frame.
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(edi);
+
+ __ push(edi); // function is also the parameter to the runtime call
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ pop(edi);
+
+ __ ExitFrame(StackFrame::INTERNAL);
+
+ // Do a tail-call of the compiled function.
+ __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(Operand(ecx));
+
+ return GetCodeWithFlags(flags);
+}
+
+
+// IA-32: Compiles a call IC stub for a function stored in an object
+// field.  Loads the function from the holder's properties array,
+// verifies it really is a JSFunction, and invokes it as a tail call.
+// Any failed check jumps to the generic call-miss handler.
+Object* CallStubCompiler::CompileCallField(Object* object,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -----------------------------------
+
+ HandleScope scope;
+ Label miss;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Do the right check and compute the holder register.
+ Register reg =
+ __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+
+ // Get the properties array of the holder and get the function from the field.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ mov(edi, FieldOperand(reg, JSObject::kPropertiesOffset));
+ __ mov(edi, FieldOperand(edi, offset));
+
+ // Check that the function really is a function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset)); // get the map
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ebx, JS_FUNCTION_TYPE);
+ __ j(not_equal, &miss, not_taken);
+
+ // Invoke the function.
+ __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ jmp(ic, code_target);
+
+ // Return the generated code.
+ return GetCode(FIELD);
+}
+
+
+// Compiles a monomorphic call IC stub for a known constant JSFunction.
+// Depending on |check| the receiver is validated as an object with the
+// expected map chain, a string/symbol, a number (smi or heap number), a
+// boolean, or a JSArray with fast (non-dictionary) elements; for the
+// primitive cases the map checks continue on the corresponding global
+// function's prototype.  On success the stub tail-calls the function's
+// cached code with the expected argument count.
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              CheckType check) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  // (For NUMBER_CHECK a smi receiver is valid and handled below.)
+  if (check != NUMBER_CHECK) {
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      // Check that the maps haven't changed.
+      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      break;
+
+    case STRING_CHECK:
+      // Check that the object is a two-byte string or a symbol.
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+      __ cmp(ecx, FIRST_NONSTRING_TYPE);
+      __ j(above_equal, &miss, not_taken);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::STRING_FUNCTION_INDEX,
+                                          ecx);
+      __ CheckMaps(JSObject::cast(object->GetPrototype()),
+                   ecx, holder, ebx, edx, &miss);
+      break;
+
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, &fast, taken);
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+      __ cmp(ecx, HEAP_NUMBER_TYPE);
+      __ j(not_equal, &miss, not_taken);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::NUMBER_FUNCTION_INDEX,
+                                          ecx);
+      __ CheckMaps(JSObject::cast(object->GetPrototype()),
+                   ecx, holder, ebx, edx, &miss);
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ cmp(edx, Factory::true_value());
+      __ j(equal, &fast, taken);
+      __ cmp(edx, Factory::false_value());
+      __ j(not_equal, &miss, not_taken);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::BOOLEAN_FUNCTION_INDEX,
+                                          ecx);
+      __ CheckMaps(JSObject::cast(object->GetPrototype()),
+                   ecx, holder, ebx, edx, &miss);
+      break;
+    }
+
+    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      // Make sure object->elements()->map() != Heap::dictionary_array_map()
+      // Get the elements array of the object.
+      __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+      // Check that the object is in fast mode (not dictionary).
+      __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+             Immediate(Factory::hash_table_map()));
+      __ j(equal, &miss, not_taken);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // Get the function and setup the context.
+  __ mov(Operand(edi), Immediate(Handle<JSFunction>(function)));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(), code_target, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, code_target);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION);
+}
+
+
+// Compiles a call IC stub for a property guarded by an interceptor.  The
+// stub validates the receiver's map chain, then calls the
+// kLoadInterceptorProperty runtime helper inside an internal frame to
+// fetch the callee, checks that the result is a JSFunction and invokes
+// it.  Any failed check falls through to the generic call-miss handler.
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that maps have not changed and compute the holder register.
+  Register reg =
+      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+
+  // Enter an internal frame.
+  __ EnterFrame(StackFrame::INTERNAL);
+
+  // Push arguments on the expression stack.
+  // NOTE(review): inside the internal frame, stack slots are addressed
+  // relative to ebp rather than esp.
+  __ push(edx);  // receiver
+  __ push(reg);  // holder
+  __ push(Operand(ebp, (argc + 3) * kPointerSize));  // name
+
+  // Perform call.
+  __ mov(Operand(eax), Immediate(2));  // 2 arguments w/o receiver
+  ExternalReference load_interceptor =
+      ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
+  __ mov(Operand(ebx), Immediate(load_interceptor));
+
+  CEntryStub stub;
+  __ CallStub(&stub);
+
+  // Move result to edi and restore receiver.
+  __ mov(Operand(edi), eax);
+  __ mov(edx, Operand(ebp, (argc + 2) * kPointerSize));  // receiver
+
+  // Exit frame.
+  __ ExitFrame(StackFrame::INTERNAL);
+
+  // Check that the function really is a function.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ cmp(ebx, JS_FUNCTION_TYPE);
+  __ j(not_equal, &miss, not_taken);
+
+  // Invoke the function.
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+
+  // Handle load cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(argc);
+  __ jmp(ic, code_target);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR);
+}
+
+
+// Compiles a monomorphic STORE_IC stub that writes |object|'s field at
+// |index|, optionally performing the given map |transition| first.  On a
+// miss the name register (trashed by GenerateStoreField) is restored and
+// control jumps to the generic StoreIC miss builtin.
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(), object, index, transition, ebx, ecx, edx, &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(Operand(ecx), Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, code_target);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION);
+}
+
+
+// Compiles a STORE_IC stub that forwards the store to a native setter
+// (AccessorInfo |callback|) via a tail call to the kStoreCallbackProperty
+// builtin.  A map check — plus a global security token check for global
+// objects — guards the fast path.
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalObject()) {
+    __ CheckAccessGlobal(ebx, edx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalObject() || !object->IsAccessCheckNeeded());
+
+  // Rebuild the argument list for the builtin below the return address.
+  __ pop(ebx);  // remove the return address
+  __ push(Operand(esp, 0));  // receiver
+  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback info
+  __ push(ecx);  // name
+  __ push(eax);  // value
+  __ push(ebx);  // restore return address
+
+  // Do tail-call to the C builtin.
+  __ mov(eax, 3);  // not counting receiver
+  __ JumpToBuiltin(ExternalReference(IC_Utility(IC::kStoreCallbackProperty)));
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(Operand(ecx), Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, code_target);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+// Compiles a STORE_IC stub for a property guarded by an interceptor.  The
+// fast path checks the receiver's map (and, for globals, the security
+// token) and then tail-calls the kStoreInterceptorProperty C builtin.
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalObject()) {
+    __ CheckAccessGlobal(ebx, edx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalObject() || !receiver->IsAccessCheckNeeded());
+
+  // Rebuild the argument list for the builtin below the return address.
+  __ pop(ebx);  // remove the return address
+  __ push(Operand(esp, 0));  // receiver
+  __ push(ecx);  // name
+  __ push(eax);  // value
+  __ push(ebx);  // restore return address
+
+  // Do tail-call to the C builtin.
+  __ mov(eax, 2);  // not counting receiver
+  ExternalReference store_interceptor =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+  __ JumpToBuiltin(store_interceptor);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(Operand(ecx), Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, code_target);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR);
+}
+
+
+// Keyed variant of the store-field stub: additionally verifies that the
+// key on the stack is the expected symbol |name| before performing the
+// field store.  The keyed_store_field counter is bumped on entry and
+// reverted on the miss path so it only counts successful fast stores.
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ IncrementCounter(&Counters::keyed_store_field, 1);
+
+  // Get the name from the stack.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  // Check that the name has not changed.
+  __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(), object, index, transition, ebx, ecx, edx, &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_store_field, 1);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, code_target);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION);
+}
+
+
+// Compiles a LOAD_IC stub that reads the property from a fixed field of
+// |holder| (via GenerateLoadField); a failed map check jumps to the
+// generic load-miss handler.
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Load the receiver from the stack.
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  GenerateLoadField(masm(), object, holder, eax, ebx, edx, index, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD);
+}
+
+
+// Compiles a LOAD_IC stub that dispatches the load to a native getter
+// (AccessorInfo |callback|) via GenerateLoadCallback; a failed check
+// jumps to the generic load-miss handler.
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Load the receiver from the stack.
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  GenerateLoadCallback(masm(), object, holder, eax, ecx, ebx,
+                       edx, callback, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+// Compiles a LOAD_IC stub that yields the constant |value| after the map
+// checks performed by GenerateLoadConstant; a failed check jumps to the
+// generic load-miss handler.
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  HandleScope scope;
+  Label miss;
+
+  // Load the receiver from the stack.
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  GenerateLoadConstant(masm(), object, holder, eax, ebx, edx, value, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION);
+}
+
+
+// Compiles a LOAD_IC stub for a property guarded by an interceptor (via
+// GenerateLoadInterceptor); a failed check jumps to the generic
+// load-miss handler.
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  // Load the receiver from the stack.
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  GenerateLoadInterceptor(masm(), receiver, holder, eax, ecx, edx, ebx, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR);
+}
+
+
+// Keyed variant of the field-load stub: verifies that the name on the
+// stack matches the expected |name| before dispatching to the generic
+// field load.  The keyed_load_field counter is bumped on entry and
+// reverted on the miss path.
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_field, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadField(masm(), receiver, holder, ecx, ebx, edx, index, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_field, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD);
+}
+
+
+// Keyed variant of the callback-load stub: verifies the name on the stack
+// matches |name| before dispatching to GenerateLoadCallback.  The
+// keyed_load_callback counter is bumped on entry and reverted on miss.
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_callback, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadCallback(masm(), receiver, holder, ecx, eax, ebx, edx,
+                       callback, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_callback, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+// Keyed variant of the constant-load stub: verifies the name on the stack
+// matches |name| before dispatching to GenerateLoadConstant.  The
+// keyed_load_constant_function counter is bumped on entry and reverted on
+// miss.
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadConstant(masm(), receiver, holder, ecx, ebx, edx, value, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION);
+}
+
+
+// Keyed variant of the interceptor-load stub: verifies the name on the
+// stack matches |name| before dispatching to GenerateLoadInterceptor.
+// The keyed_load_interceptor counter is bumped on entry and reverted on
+// miss.
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadInterceptor(masm(), receiver, holder, ecx, eax, edx, ebx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR);
+}
+
+
+
+
+// Compiles a keyed load stub for the JSArray "length" property: checks
+// the name on the stack against |name| and dispatches to
+// GenerateLoadArrayLength.  Cached under the CALLBACKS property type.
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadArrayLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+// Compiles a keyed load stub for "length" on short strings (dispatching
+// to GenerateLoadShortStringLength).  The short/medium/long variants
+// share the keyed_load_string_length counter.
+Object* KeyedLoadStubCompiler::CompileLoadShortStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadShortStringLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+// Compiles a keyed load stub for "length" on medium strings (dispatching
+// to GenerateLoadMediumStringLength); otherwise identical to the short
+// string variant above.
+Object* KeyedLoadStubCompiler::CompileLoadMediumStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadMediumStringLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+// Compiles a keyed load stub for "length" on long strings (dispatching to
+// GenerateLoadLongStringLength); otherwise identical to the short string
+// variant above.
+Object* KeyedLoadStubCompiler::CompileLoadLongStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadLongStringLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+// Compiles a keyed load stub for the "prototype" property of a function:
+// checks the name on the stack against |name| and dispatches to
+// GenerateLoadFunctionPrototype.  Cached under the CALLBACKS type.
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  HandleScope scope;
+  Label miss;
+
+  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadFunctionPrototype(masm(), ecx, edx, ebx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "arguments.h"
+#include "ic-inl.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+// -----------------------------------------------------------------------
+// StubCache implementation.
+
+
+// Static storage for the two stub cache hash tables; sizes are
+// compile-time constants asserted to be powers of two in Initialize().
+StubCache::Entry StubCache::primary_[StubCache::kPrimaryTableSize];
+StubCache::Entry StubCache::secondary_[StubCache::kSecondaryTableSize];
+
+// Initializes the stub cache.  When heap objects are being created from
+// scratch, every cache entry is reset via Clear(); otherwise the existing
+// table contents are kept.
+void StubCache::Initialize(bool create_heap_objects) {
+  ASSERT(IsPowerOf2(kPrimaryTableSize));
+  ASSERT(IsPowerOf2(kSecondaryTableSize));
+  if (!create_heap_objects) return;
+  HandleScope scope;
+  Clear();
+}
+
+
+// Installs |code| for the (name, map) pair in the megamorphic stub cache
+// and returns |code|.  If the displaced primary entry holds a real stub
+// (not the Illegal builtin), it is retired into the secondary table so a
+// recently used stub survives one collision.
+Code* StubCache::Set(String* name, Map* map, Code* code) {
+  // Get the flags from the code.
+  Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
+
+  // Validate that the name does not move on scavenge, and that we
+  // can use identity checks instead of string equality checks.
+  ASSERT(!Heap::InNewSpace(name));
+  ASSERT(name->IsSymbol());
+
+  // The state bits are not important to the hash function because
+  // the stub cache only contains monomorphic stubs. Make sure that
+  // the bits are the least significant so they will be the ones
+  // masked out.
+  ASSERT(Code::ExtractStateFromFlags(flags) == MONOMORPHIC);
+  ASSERT(Code::kFlagsStateShift == 0);
+
+  // Make sure that the code type is not included in the hash.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Compute the primary entry.
+  int primary_offset = PrimaryOffset(name, flags, map);
+  Entry* primary = entry(primary_, primary_offset);
+  Code* hit = primary->value;
+
+  // If the primary entry has useful data in it, we retire it to the
+  // secondary cache before overwriting it.
+  if (hit != Builtins::builtin(Builtins::Illegal)) {
+    Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
+    int secondary_offset =
+        SecondaryOffset(primary->key, primary_flags, primary_offset);
+    Entry* secondary = entry(secondary_, secondary_offset);
+    *secondary = *primary;
+  }
+
+  // Update primary cache.
+  primary->key = name;
+  primary->value = code;
+  return code;
+}
+
+
+// Finds (or compiles and caches) a monomorphic LOAD_IC stub that reads a
+// property from a fixed field of |holder|, then installs it in the
+// megamorphic stub cache.  Returns a Failure object if either the stub
+// compilation or the code-cache update fails, so callers can handle it
+// (e.g. collect garbage and retry).
+Object* StubCache::ComputeLoadField(String* name,
+                                    JSObject* receiver,
+                                    JSObject* holder,
+                                    int field_index) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadField(receiver, holder, field_index);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    // Propagate the failure instead of silently dropping it; this matches
+    // the ComputeKeyedLoad*/ComputeStoreField variants in this file.
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Finds (or compiles and caches) a monomorphic LOAD_IC stub that calls a
+// native getter described by |callback|, then installs it in the
+// megamorphic stub cache.  Returns a Failure object if compilation or the
+// code-cache update fails.
+Object* StubCache::ComputeLoadCallback(String* name,
+                                       JSObject* receiver,
+                                       JSObject* holder,
+                                       AccessorInfo* callback) {
+  ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadCallback(receiver, holder, callback);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    // Propagate the failure instead of silently dropping it; this matches
+    // the ComputeKeyedLoad*/ComputeStoreField variants in this file.
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Finds (or compiles and caches) a monomorphic LOAD_IC stub that yields
+// the constant |value|, then installs it in the megamorphic stub cache.
+// Returns a Failure object if compilation or the code-cache update fails.
+Object* StubCache::ComputeLoadConstant(String* name,
+                                       JSObject* receiver,
+                                       JSObject* holder,
+                                       Object* value) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadConstant(receiver, holder, value);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    // Propagate the failure instead of silently dropping it; this matches
+    // the ComputeKeyedLoad*/ComputeStoreField variants in this file.
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Finds (or compiles and caches) a monomorphic LOAD_IC stub for a
+// property guarded by an interceptor, then installs it in the megamorphic
+// stub cache.  Returns a Failure object if compilation or the code-cache
+// update fails.
+Object* StubCache::ComputeLoadInterceptor(String* name,
+                                          JSObject* receiver,
+                                          JSObject* holder) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadInterceptor(receiver, holder, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    // Propagate the failure instead of silently dropping it; this matches
+    // the ComputeKeyedLoad*/ComputeStoreField variants in this file.
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Normal-mode property loads all share the generic LoadIC_Normal builtin;
+// simply install it in the megamorphic stub cache for this (name, map).
+Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
+  return Set(name, receiver->map(), Builtins::builtin(Builtins::LoadIC_Normal));
+}
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub that reads the
+// property from a fixed field of |holder|.  Returns a Failure object if
+// compilation or the code-cache update fails.
+Object* StubCache::ComputeKeyedLoadField(String* name,
+                                         JSObject* receiver,
+                                         JSObject* holder,
+                                         int field_index) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadField(name, receiver, holder, field_index);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub that yields the
+// constant |value|.  Returns a Failure object if compilation or the
+// code-cache update fails.
+Object* StubCache::ComputeKeyedLoadConstant(String* name,
+                                            JSObject* receiver,
+                                            JSObject* holder,
+                                            Object* value) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadConstant(name, receiver, holder, value);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub for a property
+// guarded by an interceptor.  Returns a Failure object if compilation or
+// the code-cache update fails.
+Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
+                                               JSObject* receiver,
+                                               JSObject* holder) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadInterceptor(receiver, holder, name);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub that calls a native
+// getter described by |callback|.  Returns a Failure object if
+// compilation or the code-cache update fails.
+Object* StubCache::ComputeKeyedLoadCallback(String* name,
+                                            JSObject* receiver,
+                                            JSObject* holder,
+                                            AccessorInfo* callback) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadCallback(name, receiver, holder, callback);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub for the JSArray
+// "length" property.  Returns a Failure object if compilation or the
+// code-cache update fails.
+Object* StubCache::ComputeKeyedLoadArrayLength(String* name,
+                                               JSArray* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadArrayLength(name);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub for "length" on
+// short strings.  Returns a Failure object if compilation or the
+// code-cache update fails.
+Object* StubCache::ComputeKeyedLoadShortStringLength(String* name,
+                                                     String* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadShortStringLength(name);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub for "length" on
+// medium strings.  Returns a Failure object if compilation or the
+// code-cache update fails.
+Object* StubCache::ComputeKeyedLoadMediumStringLength(String* name,
+                                                      String* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadMediumStringLength(name);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+// Finds (or compiles and caches) a KEYED_LOAD_IC stub for "length" on
+// long strings.  Returns a Failure object if compilation or the
+// code-cache update fails.
+Object* StubCache::ComputeKeyedLoadLongStringLength(String* name,
+                                                    String* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  // Fast case: a matching stub is already in the map's code cache.
+  Object* probe = receiver->map()->FindInCodeCache(name, flags);
+  if (!probe->IsUndefined()) return probe;
+  // Slow case: compile a fresh stub and remember it in the code cache.
+  KeyedLoadStubCompiler compiler;
+  Object* code = compiler.CompileLoadLongStringLength(name);
+  if (code->IsFailure()) return code;
+  LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+  Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+  if (result->IsFailure()) return result;
+  return code;
+}
+
+
+// Returns a monomorphic KEYED_LOAD_IC stub that loads the "prototype"
+// property of a JSFunction receiver, compiling and caching it in the
+// receiver map's code cache on first use.
+Object* StubCache::ComputeKeyedLoadFunctionPrototype(String* name,
+ JSFunction* receiver) {
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ KeyedLoadStubCompiler compiler;
+ code = compiler.CompileLoadFunctionPrototype(name);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return code;
+}
+
+
+// Returns a monomorphic STORE_IC stub for storing into a named field of
+// the receiver.  A non-NULL transition map means the store adds the
+// property, so a MAP_TRANSITION stub is compiled instead of a plain
+// FIELD stub.  The stub is cached in the map's code cache and the
+// megamorphic dispatch table is primed via Set().
+Object* StubCache::ComputeStoreField(String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition) {
+ PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ StoreStubCompiler compiler;
+ code = compiler.CompileStoreField(receiver, field_index, transition, name);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Returns a monomorphic STORE_IC stub that stores through an API
+// accessor (AccessorInfo setter).  The setter must be non-NULL; the
+// stub is cached in the map's code cache and inserted in the dispatch
+// table via Set().
+Object* StubCache::ComputeStoreCallback(String* name,
+ JSObject* receiver,
+ AccessorInfo* callback) {
+ ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, CALLBACKS);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ StoreStubCompiler compiler;
+ code = compiler.CompileStoreCallback(receiver, callback, name);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Returns a monomorphic STORE_IC stub that stores through the
+// receiver's named-property interceptor, compiling and caching it on
+// first use and priming the dispatch table via Set().
+Object* StubCache::ComputeStoreInterceptor(String* name,
+ JSObject* receiver) {
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::STORE_IC, INTERCEPTOR);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ StoreStubCompiler compiler;
+ code = compiler.CompileStoreInterceptor(receiver, name);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Keyed-store analogue of ComputeStoreField: compiles and caches a
+// monomorphic KEYED_STORE_IC field store (or map transition) stub.
+// NOTE(review): unlike ComputeStoreField this returns the code directly
+// without calling Set() — presumably keyed stores do not use the
+// megamorphic dispatch table; confirm against the keyed-store IC path.
+Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
+ int field_index, Map* transition) {
+ PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ KeyedStoreStubCompiler compiler;
+ code = compiler.CompileStoreField(receiver, field_index, transition, name);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("KeyedStoreIC", Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return code;
+}
+
+
+// Returns a monomorphic CALL_IC stub for calling a constant function
+// found on the receiver (or its prototype chain, ending at holder).
+// The receiver check compiled into the stub depends on the receiver
+// kind; uncompiled callees yield an internal error so no cache is
+// polluted before the function is compiled.
+Object* StubCache::ComputeCallConstant(int argc,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ JSFunction* function) {
+ // Compute the check type and the map.
+ Map* map = IC::GetCodeCacheMapForObject(object);
+
+ // Compute check type based on receiver/holder.
+ StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
+ if (object->IsString()) {
+ check = StubCompiler::STRING_CHECK;
+ } else if (object->IsNumber()) {
+ check = StubCompiler::NUMBER_CHECK;
+ } else if (object->IsBoolean()) {
+ check = StubCompiler::BOOLEAN_CHECK;
+ }
+
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::CALL_IC, CONSTANT_FUNCTION, argc);
+ Object* code = map->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ // Special functions (e.g. Array builtins) may replace the callee and
+ // require a fast-elements check instead of the generic one.
+ if (object->IsJSObject()) {
+ Object* opt =
+ Top::LookupSpecialFunction(JSObject::cast(object), holder, function);
+ if (opt->IsJSFunction()) {
+ check = StubCompiler::JSARRAY_HAS_FAST_ELEMENTS_CHECK;
+ function = JSFunction::cast(opt);
+ }
+ }
+ // If the function hasn't been compiled yet, we cannot do it now
+ // because it may cause GC. To avoid this issue, we return an
+ // internal error which will make sure we do not update any
+ // caches.
+ if (!function->is_compiled()) return Failure::InternalError();
+ // Compile the stub - only create stubs for fully compiled functions.
+ CallStubCompiler compiler(argc);
+ code = compiler.CompileCallConstant(object, holder, function, check);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return Set(name, map, Code::cast(code));
+}
+
+
+// Returns a monomorphic CALL_IC stub that loads the callee from a field
+// of the receiver and then calls it, compiling and caching the stub on
+// first use and priming the dispatch table via Set().
+Object* StubCache::ComputeCallField(int argc,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ int index) {
+ // Compute the check type and the map.
+ Map* map = IC::GetCodeCacheMapForObject(object);
+
+ // TODO(1233596): We cannot do receiver map check for non-JS objects
+ // because they may be represented as immediates without a
+ // map. Instead, we check against the map in the holder.
+ if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+ object = holder;
+ }
+
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, FIELD, argc);
+ Object* code = map->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ CallStubCompiler compiler(argc);
+ code = compiler.CompileCallField(object, holder, index);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return Set(name, map, Code::cast(code));
+}
+
+
+// Returns a monomorphic CALL_IC stub that resolves the callee through
+// the holder's named-property interceptor, compiling and caching it on
+// first use and priming the dispatch table via Set().
+Object* StubCache::ComputeCallInterceptor(int argc,
+ String* name,
+ Object* object,
+ JSObject* holder) {
+ // Compute the check type and the map.
+ // If the object is a value, we use the prototype map for the cache.
+ Map* map = IC::GetCodeCacheMapForObject(object);
+
+ // TODO(1233596): We cannot do receiver map check for non-JS objects
+ // because they may be represented as immediates without a
+ // map. Instead, we check against the map in the holder.
+ if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+ object = holder;
+ }
+
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::CALL_IC, INTERCEPTOR, argc);
+ Object* code = map->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ CallStubCompiler compiler(argc);
+ code = compiler.CompileCallInterceptor(object, holder, name);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return Set(name, map, Code::cast(code));
+}
+
+
+// Fetches the shared normal call stub for this arity and associates it
+// with (name, receiver map) in the megamorphic dispatch table.
+Object* StubCache::ComputeCallNormal(int argc,
+ String* name,
+ JSObject* receiver) {
+ Object* code = ComputeCallNormal(argc);
+ if (code->IsFailure()) return code;
+ return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+// Looks up the non-monomorphic cache for a code object keyed by the
+// given flags.  Returns the cached value, or undefined when absent.
+static Object* GetProbeValue(Code::Flags flags) {
+ Dictionary* cache = Heap::non_monomorphic_cache();
+ int entry = cache->FindNumberEntry(flags);
+ return (entry == -1) ? Heap::undefined_value() : cache->ValueAt(entry);
+}
+
+
+// Probes the non-monomorphic cache for the given flags.  If no entry
+// exists yet, pre-seeds the cache with undefined and still returns the
+// (undefined) probe so the caller knows it must compile the stub; the
+// pre-seeding guarantees the later insertion in FillCache cannot fail.
+static Object* ProbeCache(Code::Flags flags) {
+ Object* probe = GetProbeValue(flags);
+ if (probe != Heap::undefined_value()) return probe;
+ // Seed the cache with an undefined value to make sure that any
+ // generated code object can always be inserted into the cache
+ // without causing allocation failures.
+ Object* result =
+ Heap::non_monomorphic_cache()->AtNumberPut(flags,
+ Heap::undefined_value());
+ if (result->IsFailure()) return result;
+ Heap::set_non_monomorphic_cache(Dictionary::cast(result));
+ return probe;
+}
+
+
+// Stores a freshly compiled stub into the slot that ProbeCache seeded.
+// Failures pass through untouched so callers can propagate them.
+static Object* FillCache(Object* code) {
+ if (code->IsCode()) {
+ int entry =
+ Heap::non_monomorphic_cache()->FindNumberEntry(
+ Code::cast(code)->flags());
+ // The entry must be present; see the comment in ProbeCache.
+ ASSERT(entry != -1);
+ ASSERT(Heap::non_monomorphic_cache()->ValueAt(entry) ==
+ Heap::undefined_value());
+ Heap::non_monomorphic_cache()->ValueAtPut(entry, code);
+ CHECK(GetProbeValue(Code::cast(code)->flags()) == code);
+ }
+ return code;
+}
+
+
+// Returns the already-compiled call-initialize stub for this arity from
+// the non-monomorphic cache.  The stub must exist (asserted).
+Code* StubCache::FindCallInitialize(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED, NORMAL, argc);
+ Object* result = ProbeCache(flags);
+ ASSERT(!result->IsUndefined());
+ // This might be called during the marking phase of the collector
+ // hence the unchecked cast.
+ return reinterpret_cast<Code*>(result);
+}
+
+
+// Returns (compiling on first use) the shared uninitialized-state call
+// stub for the given argument count.
+Object* StubCache::ComputeCallInitialize(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(compiler.CompileCallInitialize(flags));
+}
+
+
+// Returns (compiling on first use) the shared premonomorphic-state call
+// stub for the given argument count.
+Object* StubCache::ComputeCallPreMonomorphic(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, PREMONOMORPHIC, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(compiler.CompileCallPreMonomorphic(flags));
+}
+
+
+// Returns (compiling on first use) the shared normal-state call stub
+// for the given argument count.
+Object* StubCache::ComputeCallNormal(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(compiler.CompileCallNormal(flags));
+}
+
+
+// Returns (compiling on first use) the shared megamorphic-state call
+// stub for the given argument count.
+Object* StubCache::ComputeCallMegamorphic(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, MEGAMORPHIC, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(compiler.CompileCallMegamorphic(flags));
+}
+
+
+// Returns (compiling on first use) the shared call-miss stub for the
+// given argument count.  Note: the flags use kind Code::STUB rather
+// than Code::CALL_IC, unlike the other ComputeCall* variants above.
+Object* StubCache::ComputeCallMiss(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::STUB, MEGAMORPHIC, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(compiler.CompileCallMiss(flags));
+}
+
+
+// Returns (compiling on first use) the shared debug-break call stub for
+// the given argument count.
+Object* StubCache::ComputeCallDebugBreak(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, DEBUG_BREAK, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(compiler.CompileCallDebugBreak(flags));
+}
+
+
+// Returns (compiling on first use) the shared debugger prepare-step-in
+// call stub for the given argument count.
+Object* StubCache::ComputeCallDebugPrepareStepIn(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, DEBUG_PREPARE_STEP_IN, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(compiler.CompileCallDebugPrepareStepIn(flags));
+}
+
+
+// Returns (compiling on first use) the shared lazy-compile trampoline
+// stub for the given argument count; logs creation only on first
+// compile since the cached path returns early.
+Object* StubCache::ComputeLazyCompile(int argc) {
+ Code::Flags flags =
+ Code::ComputeFlags(Code::STUB, UNINITIALIZED, NORMAL, argc);
+ Object* probe = ProbeCache(flags);
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ Object* result = FillCache(compiler.CompileLazyCompile(flags));
+ if (result->IsCode()) {
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("LazyCompile", code, code->arguments_count()));
+ }
+ return result;
+}
+
+
+// Resets every entry of both dispatch tables to the empty-string key
+// and the Illegal builtin, i.e. a guaranteed cache miss.
+void StubCache::Clear() {
+ String* empty_key = Heap::empty_string();
+ Code* illegal = Builtins::builtin(Builtins::Illegal);
+ for (int i = 0; i < kPrimaryTableSize; i++) {
+ primary_[i].key = empty_key;
+ primary_[i].value = illegal;
+ }
+ for (int i = 0; i < kSecondaryTableSize; i++) {
+ secondary_[i].key = empty_key;
+ secondary_[i].value = illegal;
+ }
+}
+
+
+// ------------------------------------------------------------------------
+// StubCompiler implementation.
+
+
+// Support function for computing call IC miss stubs.  Handle-based
+// wrapper: retries StubCache::ComputeCallMiss on allocation failure via
+// CALL_HEAP_FUNCTION and returns the result as a Handle<Code>.
+Handle<Code> ComputeCallMiss(int argc) {
+ CALL_HEAP_FUNCTION(StubCache::ComputeCallMiss(argc), Code);
+}
+
+
+
+// Runtime entry used by CALLBACKS load stubs: invokes the embedder's
+// AccessorGetter from the AccessorInfo in args[1] with the receiver
+// (args[0]), property name (args[2]) and holder (args[3]).  An empty
+// API result maps to undefined.
+Object* LoadCallbackProperty(Arguments args) {
+ Handle<JSObject> recv = args.at<JSObject>(0);
+ AccessorInfo* callback = AccessorInfo::cast(args[1]);
+ v8::AccessorGetter fun =
+ FUNCTION_CAST<v8::AccessorGetter>(
+ v8::ToCData<Address>(callback->getter()));
+ ASSERT(fun != NULL);
+ Handle<String> name = args.at<String>(2);
+ Handle<JSObject> holder = args.at<JSObject>(3);
+ HandleScope scope;
+ Handle<Object> data(callback->data());
+ LOG(ApiNamedPropertyAccess("load", *recv, *name));
+ // NOTE: If we can align the structure of an AccessorInfo with the
+ // locations of the arguments to this function maybe we don't have
+ // to explicitly create the structure but can just pass a pointer
+ // into the stack.
+ v8::AccessorInfo info(
+ v8::Utils::ToLocal(recv),
+ v8::Utils::ToLocal(data),
+ v8::Utils::ToLocal(holder));
+ v8::Handle<v8::Value> result;
+ {
+ // Leaving JavaScript.
+ VMState state(OTHER);
+ result = fun(v8::Utils::ToLocal(name), info);
+ }
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ if (result.IsEmpty()) {
+ return Heap::undefined_value();
+ } else {
+ return *v8::Utils::OpenHandle(*result);
+ }
+}
+
+
+// Runtime entry used by CALLBACKS store stubs: invokes the embedder's
+// AccessorSetter from the AccessorInfo in args[1] with the receiver
+// (args[0]), property name (args[2]) and value (args[3]); returns the
+// stored value.  Note the receiver doubles as the holder in the info.
+Object* StoreCallbackProperty(Arguments args) {
+ Handle<JSObject> recv = args.at<JSObject>(0);
+ AccessorInfo* callback = AccessorInfo::cast(args[1]);
+ v8::AccessorSetter fun =
+ FUNCTION_CAST<v8::AccessorSetter>(
+ v8::ToCData<Address>(callback->setter()));
+ ASSERT(fun != NULL);
+ Handle<String> name = args.at<String>(2);
+ Handle<Object> value = args.at<Object>(3);
+ HandleScope scope;
+ Handle<Object> data(callback->data());
+ LOG(ApiNamedPropertyAccess("store", *recv, *name));
+ v8::AccessorInfo info(
+ v8::Utils::ToLocal(recv),
+ v8::Utils::ToLocal(data),
+ v8::Utils::ToLocal(recv));
+ {
+ // Leaving JavaScript.
+ VMState state(OTHER);
+ fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ }
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ return *value;
+}
+
+
+// Runtime entry used by INTERCEPTOR load stubs: performs the load
+// through the holder's named interceptor, then decides how to report an
+// absent property (undefined for call ICs and non-contextual loads, a
+// ReferenceError for contextual loads).
+Object* LoadInterceptorProperty(Arguments args) {
+ HandleScope scope;
+ Handle<JSObject> recv = args.at<JSObject>(0);
+ Handle<JSObject> holder = args.at<JSObject>(1);
+ Handle<String> name = args.at<String>(2);
+ ASSERT(holder->HasNamedInterceptor());
+ PropertyAttributes attr = NONE;
+ Handle<Object> result = GetPropertyWithInterceptor(recv, holder, name, &attr);
+
+ // GetPropertyWithInterceptor already converts a scheduled exception
+ // to a pending one if any. Don't use RETURN_IF_SCHEDULED_EXCEPTION() here.
+
+ // Make sure to propagate exceptions.
+ if (result.is_null()) {
+ // Failure::Exception is converted to a null handle in the
+ // handle-based methods such as SetProperty. We therefore need
+ // to convert null handles back to exceptions.
+ ASSERT(Top::has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // If the property is present, return it.
+ if (attr != ABSENT) return *result;
+
+ // If the top frame is an internal frame, this is really a call
+ // IC. In this case, we simply return the undefined result which
+ // will lead to an exception when trying to invoke the result as a
+ // function.
+ StackFrameIterator it;
+ it.Advance(); // skip exit frame
+ if (it.frame()->is_internal()) return *result;
+
+ // If the load is non-contextual, just return the undefined result.
+ // Note that both keyed and non-keyed loads may end up here, so we
+ // can't use either LoadIC or KeyedLoadIC constructors.
+ IC ic(IC::NO_EXTRA_FRAME);
+ ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
+ if (!ic.is_contextual()) return *result;
+
+ // Throw a reference error.
+ Handle<Object> error =
+ Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
+ return Top::Throw(*error);
+}
+
+
+// Runtime entry used by INTERCEPTOR store stubs: performs the store
+// through the receiver's named interceptor and translates the null
+// handle produced by a failed handle-based call back into an exception.
+Object* StoreInterceptorProperty(Arguments args) {
+ HandleScope scope;
+ Handle<JSObject> recv = args.at<JSObject>(0);
+ Handle<String> name = args.at<String>(1);
+ Handle<Object> value = args.at<Object>(2);
+ ASSERT(recv->HasNamedInterceptor());
+ PropertyAttributes attr = NONE;
+ Handle<Object> result = SetPropertyWithInterceptor(recv, name, value, attr);
+
+ // SetPropertyWithInterceptor already converts a scheduled exception
+ // to a pending one if any. Don't use RETURN_IF_SCHEDULED_EXCEPTION() here.
+
+ // Make sure to propagate exceptions.
+ if (result.is_null()) {
+ // Failure::Exception is converted to a null handle in the
+ // handle-based methods such as SetProperty. We therefore need
+ // to convert null handles back to exceptions.
+ ASSERT(Top::has_pending_exception());
+ return Failure::Exception();
+ }
+ return *result;
+}
+
+
+// Assembles the uninitialized-state call IC stub for the arity encoded
+// in the flags, bumps the creation counter and logs the code object.
+Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
+ HandleScope scope;
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ CallIC::GenerateInitialize(masm(), argc);
+ Object* result = GetCodeWithFlags(flags);
+ if (!result->IsFailure()) {
+ Counters::call_initialize_stubs.Increment();
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("CallInitialize", code, code->arguments_count()));
+ }
+ return result;
+}
+
+
+// Assembles the premonomorphic-state call IC stub.
+// NOTE(review): emits CallIC::GenerateInitialize, the same code as the
+// initialize stub — presumably the two states intentionally share the
+// generated code and differ only in the flags; confirm.
+Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
+ HandleScope scope;
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ CallIC::GenerateInitialize(masm(), argc);
+ Object* result = GetCodeWithFlags(flags);
+ if (!result->IsFailure()) {
+ Counters::call_premonomorphic_stubs.Increment();
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("CallPreMonomorphic", code, code->arguments_count()));
+ }
+ return result;
+}
+
+
+// Assembles the normal-state call IC stub for the arity encoded in the
+// flags, bumps the creation counter and logs the code object.
+Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
+ HandleScope scope;
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ CallIC::GenerateNormal(masm(), argc);
+ Object* result = GetCodeWithFlags(flags);
+ if (!result->IsFailure()) {
+ Counters::call_normal_stubs.Increment();
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("CallNormal", code, code->arguments_count()));
+ }
+ return result;
+}
+
+
+// Assembles the megamorphic-state call IC stub for the arity encoded in
+// the flags, bumps the creation counter and logs the code object.
+Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
+ HandleScope scope;
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ CallIC::GenerateMegamorphic(masm(), argc);
+ Object* result = GetCodeWithFlags(flags);
+ if (!result->IsFailure()) {
+ Counters::call_megamorphic_stubs.Increment();
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("CallMegamorphic", code, code->arguments_count()));
+ }
+ return result;
+}
+
+
+// Assembles the call-miss stub for the arity encoded in the flags.
+// NOTE(review): this increments call_megamorphic_stubs rather than a
+// dedicated miss counter — every sibling compiler bumps its own
+// counter, so this may be a copy-paste slip or a deliberate grouping;
+// confirm before changing (a miss counter may not exist yet).
+Object* StubCompiler::CompileCallMiss(Code::Flags flags) {
+ HandleScope scope;
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ CallIC::GenerateMiss(masm(), argc);
+ Object* result = GetCodeWithFlags(flags);
+ if (!result->IsFailure()) {
+ Counters::call_megamorphic_stubs.Increment();
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("CallMiss", code, code->arguments_count()));
+ }
+ return result;
+}
+
+
+// Assembles the debugger break stub for call ICs and logs the resulting
+// code object.
+Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
+ HandleScope scope;
+ Builtins::Generate_CallIC_DebugBreak(masm());
+ Object* result = GetCodeWithFlags(flags);
+ if (!result->IsFailure()) {
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("CallDebugBreak", code, code->arguments_count()));
+ }
+ return result;
+}
+
+
+// Assembles the debugger prepare-step-in stub for call ICs and logs the
+// resulting code object.
+Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
+ HandleScope scope;
+ // Use the same code for the step-in preparations as we do for
+ // the miss case.
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ CallIC::GenerateMiss(masm(), argc);
+ Object* result = GetCodeWithFlags(flags);
+ if (!result->IsFailure()) {
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent("CallDebugPrepareStepIn", code,
+ code->arguments_count()));
+ }
+ return result;
+}
+
+
+DECLARE_bool(print_code_stubs);
+// Finalizes the code accumulated in the stub compiler's assembler into
+// a heap-allocated Code object carrying the given flags.  May return a
+// Failure if code-space allocation fails.  Debug builds can dump the
+// stub when --print_code_stubs is set.
+Object* StubCompiler::GetCodeWithFlags(Code::Flags flags) {
+ CodeDesc desc;
+ masm_.GetCode(&desc);
+ Object* result = Heap::CreateCode(desc, NULL, flags);
+#ifdef DEBUG
+ if (FLAG_print_code_stubs && !result->IsFailure()) {
+ Code::cast(result)->Print();
+ }
+#endif
+ return result;
+}
+
+
+// Finalizes a load stub with monomorphic LOAD_IC flags for the given
+// property type.
+Object* LoadStubCompiler::GetCode(PropertyType type) {
+ return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::LOAD_IC, type));
+}
+
+
+// Finalizes a keyed-load stub with monomorphic KEYED_LOAD_IC flags for
+// the given property type.
+Object* KeyedLoadStubCompiler::GetCode(PropertyType type) {
+ return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
+ type));
+}
+
+
+// Finalizes a store stub with monomorphic STORE_IC flags for the given
+// property type.
+Object* StoreStubCompiler::GetCode(PropertyType type) {
+ return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::STORE_IC, type));
+}
+
+
+// Finalizes a keyed-store stub with monomorphic KEYED_STORE_IC flags
+// for the given property type.
+Object* KeyedStoreStubCompiler::GetCode(PropertyType type) {
+ return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC,
+ type));
+}
+
+
+// Finalizes a call stub with monomorphic CALL_IC flags for the given
+// property type and the argument count this compiler was created with.
+Object* CallStubCompiler::GetCode(PropertyType type) {
+ int argc = arguments_.immediate();
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, type, argc);
+ return GetCodeWithFlags(flags);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STUB_CACHE_H_
+#define V8_STUB_CACHE_H_
+
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+
+// The stub cache is used for megamorphic calls and property accesses.
+// It maps (map, name, type)->Code*
+
+// The design of the table uses the inline cache stubs used for
+// monomorphic calls. The beauty of this is that we do not have to
+// invalidate the cache whenever a prototype map is changed. The stub
+// validates the map chain as in the monomorphic case.
+
+class SCTableReference;
+
+class StubCache : public AllStatic {
+ public:
+ // One slot of a dispatch table: a property name paired with the
+ // compiled stub for that (name, map) combination.
+ struct Entry {
+ String* key;
+ Code* value;
+ };
+
+
+ static void Initialize(bool create_heap_objects);
+
+ // Computes the right stub matching. Inserts the result in the
+ // cache before returning. This might compile a stub if needed.
+ static Object* ComputeLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
+
+ static Object* ComputeLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback);
+
+ static Object* ComputeLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value);
+
+ static Object* ComputeLoadInterceptor(String* name,
+ JSObject* receiver,
+ JSObject* holder);
+
+ static Object* ComputeLoadNormal(String* name, JSObject* receiver);
+
+
+ // --- Keyed load stub computation. ---
+
+ static Object* ComputeKeyedLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
+
+ static Object* ComputeKeyedLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback);
+
+ static Object* ComputeKeyedLoadConstant(String* name, JSObject* receiver,
+ JSObject* holder, Object* value);
+
+ static Object* ComputeKeyedLoadInterceptor(String* name,
+ JSObject* receiver,
+ JSObject* holder);
+
+ static Object* ComputeKeyedLoadArrayLength(String* name, JSArray* receiver);
+
+ static Object* ComputeKeyedLoadShortStringLength(String* name,
+ String* receiver);
+
+ static Object* ComputeKeyedLoadMediumStringLength(String* name,
+ String* receiver);
+
+ static Object* ComputeKeyedLoadLongStringLength(String* name,
+ String* receiver);
+
+ static Object* ComputeKeyedLoadFunctionPrototype(String* name,
+ JSFunction* receiver);
+
+ // --- Store stub computation. ---
+
+ static Object* ComputeStoreField(String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition = NULL);
+
+ static Object* ComputeStoreCallback(String* name,
+ JSObject* receiver,
+ AccessorInfo* callback);
+
+ static Object* ComputeStoreInterceptor(String* name, JSObject* receiver);
+
+ // --- Keyed store stub computation. ---
+
+ static Object* ComputeKeyedStoreField(String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition = NULL);
+
+ // --- Monomorphic call stub computation. ---
+
+ static Object* ComputeCallField(int argc,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ int index);
+
+ static Object* ComputeCallConstant(int argc,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ JSFunction* function);
+
+ static Object* ComputeCallNormal(int argc, String* name, JSObject* receiver);
+
+ static Object* ComputeCallInterceptor(int argc,
+ String* name,
+ Object* object,
+ JSObject* holder);
+
+ // --- Shared (non-monomorphic) call stubs, keyed only by arity. ---
+
+ static Object* ComputeCallInitialize(int argc);
+ static Object* ComputeCallPreMonomorphic(int argc);
+ static Object* ComputeCallNormal(int argc);
+ static Object* ComputeCallMegamorphic(int argc);
+ static Object* ComputeCallMiss(int argc);
+
+ // Finds the Code object stored in the Heap::non_monomorphic_cache().
+ static Code* FindCallInitialize(int argc);
+
+ static Object* ComputeCallDebugBreak(int argc);
+ static Object* ComputeCallDebugPrepareStepIn(int argc);
+
+ static Object* ComputeLazyCompile(int argc);
+
+
+ // Update cache for entry hash(name, map).
+ static Code* Set(String* name, Map* map, Code* code);
+
+ // Clear the lookup table (@ mark compact collection).
+ static void Clear();
+
+ // Functions for generating stubs at startup.
+ static void GenerateMiss(MacroAssembler* masm);
+
+ // Generate code for probing the stub cache table.
+ static void GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch);
+
+ enum Table {
+ kPrimary,
+ kSecondary
+ };
+
+ private:
+ friend class SCTableReference;
+ static const int kPrimaryTableSize = 2048;
+ static const int kSecondaryTableSize = 512;
+ static Entry primary_[];
+ static Entry secondary_[];
+
+ // Computes the hashed offsets for primary and secondary caches.
+ static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+ // Compute the hash of the name (use entire length field).
+ uint32_t name_hash = name->length_field();
+ ASSERT(name_hash & String::kHashComputedMask);
+ // Base the offset on a simple combination of name, flags, and map.
+ uint32_t key = (reinterpret_cast<uint32_t>(map) + name_hash) ^ flags;
+ return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ }
+
+ static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
+ // Use the seed from the primary cache in the secondary cache.
+ uint32_t key = seed - reinterpret_cast<uint32_t>(name) + flags;
+ return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
+ }
+
+ // Compute the entry for a given offset in exactly the same way as
+ // is done in generated code. This makes it a lot easier to avoid
+ // making mistakes in the hashed offset computations.
+ static Entry* entry(Entry* table, int offset) {
+ return reinterpret_cast<Entry*>(
+ reinterpret_cast<Address>(table) + (offset << 1));
+ }
+};
+
+
+// Wraps the address of the key or value column of one of the stub
+// cache's dispatch tables, for use when emitting probe code.
+class SCTableReference {
+ public:
+ static SCTableReference keyReference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->key));
+ }
+
+
+ static SCTableReference valueReference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->value));
+ }
+
+ Address address() const { return address_; }
+
+ private:
+ explicit SCTableReference(Address address) : address_(address) {}
+
+ // Returns the first entry of the selected dispatch table.
+ static StubCache::Entry* first_entry(StubCache::Table table) {
+ switch (table) {
+ case StubCache::kPrimary: return StubCache::primary_;
+ case StubCache::kSecondary: return StubCache::secondary_;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
+ Address address_;
+};
+
+// ------------------------------------------------------------------------
+
+
+// Support functions for IC stubs for callbacks (runtime entries
+// called from generated CALLBACKS stubs).
+Object* LoadCallbackProperty(Arguments args);
+Object* StoreCallbackProperty(Arguments args);
+
+
+// Support functions for IC stubs for interceptors (runtime entries
+// called from generated INTERCEPTOR stubs).
+Object* LoadInterceptorProperty(Arguments args);
+Object* StoreInterceptorProperty(Arguments args);
+Object* CallInterceptorProperty(Arguments args);
+
+
+// Support function for computing call IC miss stubs.
+Handle<Code> ComputeCallMiss(int argc);
+
+
+// The stub compiler compiles stubs for the stub cache.
+class StubCompiler BASE_EMBEDDED {
+ public:
+ enum CheckType {
+ RECEIVER_MAP_CHECK,
+ STRING_CHECK,
+ NUMBER_CHECK,
+ BOOLEAN_CHECK,
+ JSARRAY_HAS_FAST_ELEMENTS_CHECK
+ };
+
+ StubCompiler() : masm_(NULL, 256) { }
+
+ Object* CompileCallInitialize(Code::Flags flags);
+ Object* CompileCallPreMonomorphic(Code::Flags flags);
+ Object* CompileCallNormal(Code::Flags flags);
+ Object* CompileCallMegamorphic(Code::Flags flags);
+ Object* CompileCallMiss(Code::Flags flags);
+ Object* CompileCallDebugBreak(Code::Flags flags);
+ Object* CompileCallDebugPrepareStepIn(Code::Flags flags);
+ Object* CompileLazyCompile(Code::Flags flags);
+
+ // Static functions for generating parts of stubs.
+ static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype);
+ static void GenerateLoadField(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ int index,
+ Label* miss_label);
+ static void GenerateLoadCallback(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name,
+ Register scratch1,
+ Register scratch2,
+ AccessorInfo* callback,
+ Label* miss_label);
+ static void GenerateLoadConstant(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Object* value,
+ Label* miss_label);
+ static void GenerateLoadInterceptor(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
+ static void GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label);
+ static void GenerateLoadShortStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label);
+ static void GenerateLoadMediumStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label);
+ static void GenerateLoadLongStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label);
+ static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
+ static void GenerateStoreField(MacroAssembler* masm,
+ JSObject* object,
+ int index,
+ Map* transition,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch,
+ Label* miss_label);
+ static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
+
+ protected:
+ Object* GetCodeWithFlags(Code::Flags flags);
+
+ MacroAssembler* masm() { return &masm_; }
+
+ private:
+ MacroAssembler masm_;
+};
+
+
+// Compiler for named-property load IC stubs. Each Compile* method emits
+// machine code that loads a property of the given kind from |object|
+// (walking the prototype chain up to |holder|) and returns the resulting
+// Code object, or a failure.
+class LoadStubCompiler: public StubCompiler {
+ public:
+ // Load the fast-case field stored at |index| in the object.
+ Object* CompileLoadField(JSObject* object, JSObject* holder, int index);
+ // Load a property backed by an AccessorInfo callback.
+ Object* CompileLoadCallback(JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback);
+ // Load a property whose value is the compile-time constant |value|.
+ Object* CompileLoadConstant(JSObject* object,
+ JSObject* holder,
+ Object* value);
+ // Load a property through the holder's interceptor.
+ Object* CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name);
+
+ private:
+ // Wraps the assembled code in a Code object tagged as a LOAD_IC stub.
+ Object* GetCode(PropertyType);
+};
+
+
+// Compiler for keyed (indexed, obj[key]) load IC stubs. Mirrors
+// LoadStubCompiler but also takes the property |name| so the generated
+// stub can verify the key before performing the specialized load.
+class KeyedLoadStubCompiler: public StubCompiler {
+ public:
+ Object* CompileLoadField(String* name,
+ JSObject* object,
+ JSObject* holder,
+ int index);
+ Object* CompileLoadCallback(String* name,
+ JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback);
+ Object* CompileLoadConstant(String* name,
+ JSObject* object,
+ JSObject* holder,
+ Object* value);
+ Object* CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name);
+ // Specialized loads for well-known properties ("length", "prototype").
+ // The short/medium/long variants correspond to the three string length
+ // encodings used by the heap's string representations.
+ Object* CompileLoadArrayLength(String* name);
+ Object* CompileLoadShortStringLength(String* name);
+ Object* CompileLoadMediumStringLength(String* name);
+ Object* CompileLoadLongStringLength(String* name);
+ Object* CompileLoadFunctionPrototype(String* name);
+
+ private:
+ // Wraps the assembled code in a Code object tagged as a KEYED_LOAD_IC stub.
+ Object* GetCode(PropertyType);
+};
+
+
+// Compiler for named-property store IC stubs.
+class StoreStubCompiler: public StubCompiler {
+ public:
+ // Store into the fast-case field at |index|; |transition|, when non-NULL,
+ // is the map the object moves to as part of the store.
+ Object* CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name);
+ // Store through an AccessorInfo callback.
+ Object* CompileStoreCallback(JSObject* object,
+ AccessorInfo* callbacks,
+ String* name);
+ // Store through the object's interceptor.
+ Object* CompileStoreInterceptor(JSObject* object, String* name);
+
+ private:
+ // Wraps the assembled code in a Code object tagged as a STORE_IC stub.
+ Object* GetCode(PropertyType type);
+};
+
+
+// Compiler for keyed (indexed) store IC stubs; only field stores are
+// specialized for the keyed case.
+class KeyedStoreStubCompiler: public StubCompiler {
+ public:
+ // Store into the fast-case field at |index|; |transition|, when non-NULL,
+ // is the map the object moves to as part of the store.
+ Object* CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name);
+
+ private:
+ // Wraps the assembled code in a Code object tagged as a KEYED_STORE_IC stub.
+ Object* GetCode(PropertyType type);
+};
+
+
+// Compiler for call IC stubs, specialized on the callee kind and on the
+// call's argument count (fixed at construction time).
+class CallStubCompiler: public StubCompiler {
+ public:
+ explicit CallStubCompiler(int argc) : arguments_(argc) { }
+
+ // Call a function stored in the fast-case field at |index|.
+ Object* CompileCallField(Object* object, JSObject* holder, int index);
+ // Call the known constant |function|; |check| selects the receiver
+ // type check the stub performs before taking the fast path.
+ Object* CompileCallConstant(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ CheckType check);
+ // Call a function found through the holder's interceptor.
+ Object* CompileCallInterceptor(Object* object,
+ JSObject* holder,
+ String* name);
+
+ private:
+ // Argument count the generated stub is specialized for.
+ const ParameterCount arguments_;
+
+ const ParameterCount& arguments() { return arguments_; }
+
+ // Wraps the assembled code in a Code object tagged as a CALL_IC stub.
+ Object* GetCode(PropertyType type);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_STUB_CACHE_H_
--- /dev/null
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
+
+/* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to "."). */
+
+/* On a machine with IEEE extended-precision registers, it is
+ * necessary to specify double-precision (53-bit) rounding precision
+ * before invoking strtod or dtoa. If the machine uses (the equivalent
+ * of) Intel 80x87 arithmetic, the call
+ * _control87(PC_53, MCW_PC);
+ * does this with many compilers. Whether this or another call is
+ * appropriate depends on the compiler; for this to work, it may be
+ * necessary to #include "float.h" or another system-dependent header
+ * file.
+ */
+
+/* strtod for IEEE-, VAX-, and IBM-arithmetic machines.
+ *
+ * This strtod returns a nearest machine number to the input decimal
+ * string (or sets errno to ERANGE). With IEEE arithmetic, ties are
+ * broken by the IEEE round-even rule. Otherwise ties are broken by
+ * biased rounding (add half and chop).
+ *
+ * Inspired loosely by William D. Clinger's paper "How to Read Floating
+ * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ *
+ * 1. We only require IEEE, IBM, or VAX double-precision
+ * arithmetic (not IEEE double-extended).
+ * 2. We get by with floating-point arithmetic in a case that
+ * Clinger missed -- when we're computing d * 10^n
+ * for a small integer d and the integer n is not too
+ * much larger than 22 (the maximum integer k for which
+ * we can represent 10^k exactly), we may be able to
+ * compute (d*10^k) * 10^(e-k) with just one roundoff.
+ * 3. Rather than a bit-at-a-time adjustment of the binary
+ * result in the hard case, we use floating-point
+ * arithmetic to determine the adjustment to within
+ * one bit; only in really hard cases do we need to
+ * compute a second residual.
+ * 4. Because of 3., we don't need a large table of powers of 10
+ * for ten-to-e (just some small tables, e.g. of 10^k
+ * for 0 <= k <= 22).
+ */
+
+/*
+ * #define IEEE_8087 for IEEE-arithmetic machines where the least
+ * significant byte has the lowest address.
+ * #define IEEE_MC68k for IEEE-arithmetic machines where the most
+ * significant byte has the lowest address.
+ * #define Long int on machines with 32-bit ints and 64-bit longs.
+ * #define IBM for IBM mainframe-style floating-point arithmetic.
+ * #define VAX for VAX-style floating-point arithmetic (D_floating).
+ * #define No_leftright to omit left-right logic in fast floating-point
+ * computation of dtoa.
+ * #define Honor_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
+ * and strtod and dtoa should round accordingly.
+ * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
+ * and Honor_FLT_ROUNDS is not #defined.
+ * #define RND_PRODQUOT to use rnd_prod and rnd_quot (assembly routines
+ * that use extended-precision instructions to compute rounded
+ * products and quotients) with IBM.
+ * #define ROUND_BIASED for IEEE-format with biased rounding.
+ * #define Inaccurate_Divide for IEEE-format with correctly rounded
+ * products but inaccurate quotients, e.g., for Intel i860.
+ * #define NO_LONG_LONG on machines that do not have a "long long"
+ * integer type (of >= 64 bits). On such machines, you can
+ * #define Just_16 to store 16 bits per 32-bit Long when doing
+ * high-precision integer arithmetic. Whether this speeds things
+ * up or slows things down depends on the machine and the number
+ * being converted. If long long is available and the name is
+ * something other than "long long", #define Llong to be the name,
+ * and if "unsigned Llong" does not work as an unsigned version of
+ * Llong, #define ULLong to be the corresponding unsigned type.
+ * #define KR_headers for old-style C function headers.
+ * #define Bad_float_h if your system lacks a float.h or if it does not
+ * define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP,
+ * FLT_RADIX, FLT_ROUNDS, and DBL_MAX.
+ * #define MALLOC your_malloc, where your_malloc(n) acts like malloc(n)
+ * if memory is available and otherwise does something you deem
+ * appropriate. If MALLOC is undefined, malloc will be invoked
+ * directly -- and assumed always to succeed.
+ * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making
+ * memory allocations from a private pool of memory when possible.
+ * When used, the private pool is PRIVATE_MEM bytes long: 2304 bytes,
+ * unless #defined to be a different length. This default length
+ * suffices to get rid of MALLOC calls except for unusual cases,
+ * such as decimal-to-binary conversion of a very long string of
+ * digits. The longest string dtoa can return is about 751 bytes
+ * long. For conversions by strtod of strings of 800 digits and
+ * all dtoa conversions in single-threaded executions with 8-byte
+ * pointers, PRIVATE_MEM >= 7400 appears to suffice; with 4-byte
+ * pointers, PRIVATE_MEM >= 7112 appears adequate.
+ * #define INFNAN_CHECK on IEEE systems to cause strtod to check for
+ * Infinity and NaN (case insensitively). On some systems (e.g.,
+ * some HP systems), it may be necessary to #define NAN_WORD0
+ * appropriately -- to the most significant word of a quiet NaN.
+ * (On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.)
+ * When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined,
+ * strtod also accepts (case insensitively) strings of the form
+ * NaN(x), where x is a string of hexadecimal digits and spaces;
+ * if there is only one string of hexadecimal digits, it is taken
+ * for the 52 fraction bits of the resulting NaN; if there are two
+ * or more strings of hex digits, the first is for the high 20 bits,
+ * the second and subsequent for the low 32 bits, with intervening
+ * white space ignored; but if this results in none of the 52
+ * fraction bits being on (an IEEE Infinity symbol), then NAN_WORD0
+ * and NAN_WORD1 are used instead.
+ * #define MULTIPLE_THREADS if the system offers preemptively scheduled
+ * multiple threads. In this case, you must provide (or suitably
+ * #define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed
+ * by FREE_DTOA_LOCK(n) for n = 0 or 1. (The second lock, accessed
+ * in pow5mult, ensures lazy evaluation of only one copy of high
+ * powers of 5; omitting this lock would introduce a small
+ * probability of wasting memory, but would otherwise be harmless.)
+ * You must also invoke freedtoa(s) to free the value s returned by
+ * dtoa. You may do so whether or not MULTIPLE_THREADS is #defined.
+ * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that
+ * avoids underflows on inputs whose result does not underflow.
+ * If you #define NO_IEEE_Scale on a machine that uses IEEE-format
+ * floating-point numbers and flushes underflows to zero rather
+ * than implementing gradual underflow, then you must also #define
+ * Sudden_Underflow.
+ * #define YES_ALIAS to permit aliasing certain double values with
+ * arrays of ULongs. This leads to slightly better code with
+ * some compilers and was always used prior to 19990916, but it
+ * is not strictly legal and can cause trouble with aggressively
+ * optimizing compilers (e.g., gcc 2.95.1 under -O2).
+ * #define USE_LOCALE to use the current locale's decimal_point value.
+ * #define SET_INEXACT if IEEE arithmetic is being used and extra
+ * computation should be done to set the inexact flag when the
+ * result is inexact and avoid setting inexact when the result
+ * is exact. In this case, dtoa.c must be compiled in
+ * an environment, perhaps provided by #include "dtoa.c" in a
+ * suitable wrapper, that defines two functions,
+ * int get_inexact(void);
+ * void clear_inexact(void);
+ * such that get_inexact() returns a nonzero value if the
+ * inexact bit is already set, and clear_inexact() sets the
+ * inexact bit to 0. When SET_INEXACT is #defined, strtod
+ * also does extra computations to set the underflow and overflow
+ * flags when appropriate (i.e., when the result is tiny and
+ * inexact or when it is a numeric value rounded to +-infinity).
+ * #define NO_ERRNO if strtod should not assign errno = ERANGE when
+ * the result overflows to +-Infinity or underflows to 0.
+ */
+
+#ifndef Long
+#define Long long
+#endif
+#ifndef ULong
+typedef unsigned Long ULong;
+#endif
+
+#ifdef DEBUG
+#include "stdio.h"
+#define Bug(x) {fprintf(stderr, "%s\n", x); exit(1);}
+#endif
+
+#include "stdlib.h"
+#include "string.h"
+
+#ifdef USE_LOCALE
+#include "locale.h"
+#endif
+
+#ifdef MALLOC
+#ifdef KR_headers
+extern char *MALLOC();
+#else
+extern void *MALLOC(size_t);
+#endif
+#else
+#define MALLOC malloc
+#endif
+
+#ifndef Omit_Private_Memory
+#ifndef PRIVATE_MEM
+#define PRIVATE_MEM 2304
+#endif
+#define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
+static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+#endif
+
+#undef IEEE_Arith
+#undef Avoid_Underflow
+#ifdef IEEE_MC68k
+#define IEEE_Arith
+#endif
+#ifdef IEEE_8087
+#define IEEE_Arith
+#endif
+
+#include "errno.h"
+
+#ifdef Bad_float_h
+
+#ifdef IEEE_Arith
+#define DBL_DIG 15
+#define DBL_MAX_10_EXP 308
+#define DBL_MAX_EXP 1024
+#define FLT_RADIX 2
+#endif /*IEEE_Arith*/
+
+#ifdef IBM
+#define DBL_DIG 16
+#define DBL_MAX_10_EXP 75
+#define DBL_MAX_EXP 63
+#define FLT_RADIX 16
+#define DBL_MAX 7.2370055773322621e+75
+#endif
+
+#ifdef VAX
+#define DBL_DIG 16
+#define DBL_MAX_10_EXP 38
+#define DBL_MAX_EXP 127
+#define FLT_RADIX 2
+#define DBL_MAX 1.7014118346046923e+38
+#endif
+
+#ifndef LONG_MAX
+#define LONG_MAX 2147483647
+#endif
+
+#else /* ifndef Bad_float_h */
+#include "float.h"
+#endif /* Bad_float_h */
+
+#ifndef __MATH_H__
+#include "math.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CONST
+#ifdef KR_headers
+#define CONST /* blank */
+#else
+#define CONST const
+#endif
+#endif
+
+#if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(VAX) + defined(IBM) != 1
+Exactly one of IEEE_8087, IEEE_MC68k, VAX, or IBM should be defined.
+#endif
+
+typedef union { double d; ULong L[2]; } U;
+
+#ifdef YES_ALIAS
+#define dval(x) x
+#ifdef IEEE_8087
+#define word0(x) ((ULong *)&x)[1]
+#define word1(x) ((ULong *)&x)[0]
+#else
+#define word0(x) ((ULong *)&x)[0]
+#define word1(x) ((ULong *)&x)[1]
+#endif
+#else
+#ifdef IEEE_8087
+#define word0(x) ((U*)&x)->L[1]
+#define word1(x) ((U*)&x)->L[0]
+#else
+#define word0(x) ((U*)&x)->L[0]
+#define word1(x) ((U*)&x)->L[1]
+#endif
+#define dval(x) ((U*)&x)->d
+#endif
+
+/* The following definition of Storeinc is appropriate for MIPS processors.
+ * An alternative that might be better on some machines is
+ * #define Storeinc(a,b,c) (*a++ = b << 16 | c & 0xffff)
+ */
+#if defined(IEEE_8087) + defined(VAX)
+#define Storeinc(a,b,c) (((unsigned short *)a)[1] = (unsigned short)b, \
+((unsigned short *)a)[0] = (unsigned short)c, a++)
+#else
+#define Storeinc(a,b,c) (((unsigned short *)a)[0] = (unsigned short)b, \
+((unsigned short *)a)[1] = (unsigned short)c, a++)
+#endif
+
+/* #define P DBL_MANT_DIG */
+/* Ten_pmax = floor(P*log(2)/log(5)) */
+/* Bletch = (highest power of 2 < DBL_MAX_10_EXP) / 16 */
+/* Quick_max = floor((P-1)*log(FLT_RADIX)/log(10) - 1) */
+/* Int_max = floor(P*log(FLT_RADIX)/log(10) - 1) */
+
+#ifdef IEEE_Arith
+#define Exp_shift 20
+#define Exp_shift1 20
+#define Exp_msk1 0x100000
+#define Exp_msk11 0x100000
+#define Exp_mask 0x7ff00000
+#define P 53
+#define Bias 1023
+#define Emin (-1022)
+#define Exp_1 0x3ff00000
+#define Exp_11 0x3ff00000
+#define Ebits 11
+#define Frac_mask 0xfffff
+#define Frac_mask1 0xfffff
+#define Ten_pmax 22
+#define Bletch 0x10
+#define Bndry_mask 0xfffff
+#define Bndry_mask1 0xfffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 1
+#define Tiny0 0
+#define Tiny1 1
+#define Quick_max 14
+#define Int_max 14
+#ifndef NO_IEEE_Scale
+#define Avoid_Underflow
+#ifdef Flush_Denorm /* debugging option */
+#undef Sudden_Underflow
+#endif
+#endif
+
+#ifndef Flt_Rounds
+#ifdef FLT_ROUNDS
+#define Flt_Rounds FLT_ROUNDS
+#else
+#define Flt_Rounds 1
+#endif
+#endif /*Flt_Rounds*/
+
+#ifdef Honor_FLT_ROUNDS
+#define Rounding rounding
+#undef Check_FLT_ROUNDS
+#define Check_FLT_ROUNDS
+#else
+#define Rounding Flt_Rounds
+#endif
+
+#else /* ifndef IEEE_Arith */
+#undef Check_FLT_ROUNDS
+#undef Honor_FLT_ROUNDS
+#undef SET_INEXACT
+#undef Sudden_Underflow
+#define Sudden_Underflow
+#ifdef IBM
+#undef Flt_Rounds
+#define Flt_Rounds 0
+#define Exp_shift 24
+#define Exp_shift1 24
+#define Exp_msk1 0x1000000
+#define Exp_msk11 0x1000000
+#define Exp_mask 0x7f000000
+#define P 14
+#define Bias 65
+#define Exp_1 0x41000000
+#define Exp_11 0x41000000
+#define Ebits 8 /* exponent has 7 bits, but 8 is the right value in b2d */
+#define Frac_mask 0xffffff
+#define Frac_mask1 0xffffff
+#define Bletch 4
+#define Ten_pmax 22
+#define Bndry_mask 0xefffff
+#define Bndry_mask1 0xffffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 4
+#define Tiny0 0x100000
+#define Tiny1 0
+#define Quick_max 14
+#define Int_max 15
+#else /* VAX */
+#undef Flt_Rounds
+#define Flt_Rounds 1
+#define Exp_shift 23
+#define Exp_shift1 7
+#define Exp_msk1 0x80
+#define Exp_msk11 0x800000
+#define Exp_mask 0x7f80
+#define P 56
+#define Bias 129
+#define Exp_1 0x40800000
+#define Exp_11 0x4080
+#define Ebits 8
+#define Frac_mask 0x7fffff
+#define Frac_mask1 0xffff007f
+#define Ten_pmax 24
+#define Bletch 2
+#define Bndry_mask 0xffff007f
+#define Bndry_mask1 0xffff007f
+#define LSB 0x10000
+#define Sign_bit 0x8000
+#define Log2P 1
+#define Tiny0 0x80
+#define Tiny1 0
+#define Quick_max 15
+#define Int_max 15
+#endif /* IBM, VAX */
+#endif /* IEEE_Arith */
+
+#ifndef IEEE_Arith
+#define ROUND_BIASED
+#endif
+
+#ifdef RND_PRODQUOT
+#define rounded_product(a,b) a = rnd_prod(a, b)
+#define rounded_quotient(a,b) a = rnd_quot(a, b)
+#ifdef KR_headers
+extern double rnd_prod(), rnd_quot();
+#else
+extern double rnd_prod(double, double), rnd_quot(double, double);
+#endif
+#else
+#define rounded_product(a,b) a *= b
+#define rounded_quotient(a,b) a /= b
+#endif
+
+#define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1))
+#define Big1 0xffffffff
+
+#ifndef Pack_32
+#define Pack_32
+#endif
+
+#ifdef KR_headers
+#define FFFFFFFF ((((unsigned long)0xffff)<<16)|(unsigned long)0xffff)
+#else
+#define FFFFFFFF 0xffffffffUL
+#endif
+
+#ifdef NO_LONG_LONG
+#undef ULLong
+#ifdef Just_16
+#undef Pack_32
+/* When Pack_32 is not defined, we store 16 bits per 32-bit Long.
+ * This makes some inner loops simpler and sometimes saves work
+ * during multiplications, but it often seems to make things slightly
+ * slower. Hence the default is now to store 32 bits per Long.
+ */
+#endif
+#else /* long long available */
+#ifndef Llong
+#define Llong long long
+#endif
+#ifndef ULLong
+#define ULLong unsigned Llong
+#endif
+#endif /* NO_LONG_LONG */
+
+#ifndef MULTIPLE_THREADS
+#define ACQUIRE_DTOA_LOCK(n) /*nothing*/
+#define FREE_DTOA_LOCK(n) /*nothing*/
+#endif
+
+#define Kmax 15
+
+#ifdef __cplusplus
+extern "C" double strtod(const char *s00, char **se);
+extern "C" char *dtoa(double d, int mode, int ndigits,
+ int *decpt, int *sign, char **rve);
+#endif
+
+/* Arbitrary-precision unsigned integer used for exact decimal/binary
+ * conversion. x[] holds wds significant words, least significant first;
+ * the allocation belongs to size class k (capacity maxwds = 1 << k words).
+ * sign records the sign of a subtraction result (see diff()); next links
+ * entries on the per-size-class freelist. */
+ struct
+Bigint {
+ struct Bigint *next;
+ int k, maxwds, sign, wds;
+ ULong x[1];
+ };
+
+ typedef struct Bigint Bigint;
+
+ /* Freelist of recycled Bigints, one bucket per size class 0..Kmax. */
+ static Bigint *freelist[Kmax+1];
+
+/* Allocate a Bigint of size class k (capacity 1 << k words). A recycled
+ * block from freelist[k] is reused when available; otherwise storage is
+ * carved from the static private_mem pool, falling back to MALLOC when
+ * the pool is exhausted. Freelist access is guarded by lock 0. */
+ static Bigint *
+Balloc
+#ifdef KR_headers
+ (k) int k;
+#else
+ (int k)
+#endif
+{
+ int x;
+ Bigint *rv;
+#ifndef Omit_Private_Memory
+ unsigned int len;
+#endif
+
+ ACQUIRE_DTOA_LOCK(0);
+ if ((rv = freelist[k])) {
+ freelist[k] = rv->next;
+ }
+ else {
+ x = 1 << k;
+#ifdef Omit_Private_Memory
+ rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong));
+#else
+ /* Size in doubles, rounded up, to keep pmem_next double-aligned. */
+ len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
+ /sizeof(double);
+ if (pmem_next - private_mem + len <= PRIVATE_mem) {
+ rv = (Bigint*)pmem_next;
+ pmem_next += len;
+ }
+ else
+ rv = (Bigint*)MALLOC(len*sizeof(double));
+#endif
+ rv->k = k;
+ rv->maxwds = x;
+ }
+ FREE_DTOA_LOCK(0);
+ rv->sign = rv->wds = 0;
+ return rv;
+ }
+
+/* Release v back onto the freelist bucket for its size class; memory is
+ * recycled within dtoa, never returned to the system allocator. A NULL v
+ * is a no-op. */
+ static void
+Bfree
+#ifdef KR_headers
+ (v) Bigint *v;
+#else
+ (Bigint *v)
+#endif
+{
+ if (v) {
+ ACQUIRE_DTOA_LOCK(0);
+ v->next = freelist[v->k];
+ freelist[v->k] = v;
+ FREE_DTOA_LOCK(0);
+ }
+ }
+
+/* Copy the value of Bigint y into x: the sign and wds fields plus the
+ * wds significant words of x[]. x must have capacity >= y->wds. */
+#define Bcopy(x,y) memcpy((char *)&x->sign, (char *)&y->sign, \
+y->wds*sizeof(Long) + 2*sizeof(int))
+
+/* Compute b = b*m + a in place, growing (reallocating) b by one size
+ * class if a final carry does not fit. Returns b, which may have moved.
+ * NOTE(review): the Pack_32 fallback splits m across 16-bit halves, so m
+ * is presumably assumed to fit in 16 bits there -- callers pass m <= 10
+ * or small powers of 5. */
+ static Bigint *
+multadd
+#ifdef KR_headers
+ (b, m, a) Bigint *b; int m, a;
+#else
+ (Bigint *b, int m, int a) /* multiply by m and add a */
+#endif
+{
+ int i, wds;
+#ifdef ULLong
+ ULong *x;
+ ULLong carry, y;
+#else
+ ULong carry, *x, y;
+#ifdef Pack_32
+ ULong xi, z;
+#endif
+#endif
+ Bigint *b1;
+
+ wds = b->wds;
+ x = b->x;
+ i = 0;
+ carry = a;
+ /* One pass over the words, propagating the multiply carry. */
+ do {
+#ifdef ULLong
+ y = *x * (ULLong)m + carry;
+ carry = y >> 32;
+ *x++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+ xi = *x;
+ y = (xi & 0xffff) * m + carry;
+ z = (xi >> 16) * m + (y >> 16);
+ carry = z >> 16;
+ *x++ = (z << 16) + (y & 0xffff);
+#else
+ y = *x * m + carry;
+ carry = y >> 16;
+ *x++ = y & 0xffff;
+#endif
+#endif
+ }
+ while(++i < wds);
+ if (carry) {
+ /* Need one more word; grow to the next size class if full. */
+ if (wds >= b->maxwds) {
+ b1 = Balloc(b->k+1);
+ Bcopy(b1, b);
+ Bfree(b);
+ b = b1;
+ }
+ b->x[wds++] = carry;
+ b->wds = wds;
+ }
+ return b;
+ }
+
+/* Convert a decimal digit string to a Bigint. s points at the digits,
+ * nd is the total digit count, nd0 the number of digits before the
+ * decimal point, and y9 the value of the first (up to) 9 digits, already
+ * accumulated by the caller. The remaining digits are folded in with
+ * repeated b = b*10 + digit. When nd0 > 9 the scan resumes at s+9 and
+ * skips the decimal point (s++); otherwise the first 9 digits plus the
+ * point occupy 10 characters, hence s += 10. */
+ static Bigint *
+s2b
+#ifdef KR_headers
+ (s, nd0, nd, y9) CONST char *s; int nd0, nd; ULong y9;
+#else
+ (CONST char *s, int nd0, int nd, ULong y9)
+#endif
+{
+ Bigint *b;
+ int i, k;
+ Long x, y;
+
+ /* Pick a size class big enough for ceil(nd/9) 32-bit words. */
+ x = (nd + 8) / 9;
+ for(k = 0, y = 1; x > y; y <<= 1, k++) ;
+#ifdef Pack_32
+ b = Balloc(k);
+ b->x[0] = y9;
+ b->wds = 1;
+#else
+ b = Balloc(k+1);
+ b->x[0] = y9 & 0xffff;
+ b->wds = (b->x[1] = y9 >> 16) ? 2 : 1;
+#endif
+
+ i = 9;
+ if (9 < nd0) {
+ s += 9;
+ do b = multadd(b, 10, *s++ - '0');
+ while(++i < nd0);
+ s++;
+ }
+ else
+ s += 10;
+ for(; i < nd; i++)
+ b = multadd(b, 10, *s++ - '0');
+ return b;
+ }
+
+/* Count the leading (high-order) zero bits of x by binary search over
+ * half-words, bytes, nibbles and pairs. Returns 32 when x == 0. */
+ static int
+hi0bits
+#ifdef KR_headers
+ (x) register ULong x;
+#else
+ (register ULong x)
+#endif
+{
+ register int k = 0;
+
+ if (!(x & 0xffff0000)) {
+ k = 16;
+ x <<= 16;
+ }
+ if (!(x & 0xff000000)) {
+ k += 8;
+ x <<= 8;
+ }
+ if (!(x & 0xf0000000)) {
+ k += 4;
+ x <<= 4;
+ }
+ if (!(x & 0xc0000000)) {
+ k += 2;
+ x <<= 2;
+ }
+ if (!(x & 0x80000000)) {
+ k++;
+ if (!(x & 0x40000000))
+ return 32;
+ }
+ return k;
+ }
+
+/* Count the trailing (low-order) zero bits of *y and shift them out,
+ * storing the shifted value back through y. Returns 32 when *y == 0
+ * (in which case *y is left unwritten). The x & 7 fast path handles the
+ * common cases of 0, 1 or 2 trailing zeros without the binary search. */
+ static int
+lo0bits
+#ifdef KR_headers
+ (y) ULong *y;
+#else
+ (ULong *y)
+#endif
+{
+ register int k;
+ register ULong x = *y;
+
+ if (x & 7) {
+ if (x & 1)
+ return 0;
+ if (x & 2) {
+ *y = x >> 1;
+ return 1;
+ }
+ *y = x >> 2;
+ return 2;
+ }
+ k = 0;
+ if (!(x & 0xffff)) {
+ k = 16;
+ x >>= 16;
+ }
+ if (!(x & 0xff)) {
+ k += 8;
+ x >>= 8;
+ }
+ if (!(x & 0xf)) {
+ k += 4;
+ x >>= 4;
+ }
+ if (!(x & 0x3)) {
+ k += 2;
+ x >>= 2;
+ }
+ if (!(x & 1)) {
+ k++;
+ x >>= 1;
+ if (!x)
+ return 32;
+ }
+ *y = x;
+ return k;
+ }
+
+/* Construct a one-word Bigint holding the value i. */
+ static Bigint *
+i2b
+#ifdef KR_headers
+ (i) int i;
+#else
+ (int i)
+#endif
+{
+ Bigint *b;
+
+ b = Balloc(1);
+ b->x[0] = i;
+ b->wds = 1;
+ return b;
+ }
+
+/* Return a newly allocated Bigint holding a * b, using schoolbook
+ * multiplication. The operands are swapped so the outer loop runs over
+ * the shorter one. With ULLong available, full 32x32->64 products are
+ * used; otherwise the Pack_32 path multiplies 16-bit halves and stitches
+ * word pairs together with Storeinc. Neither operand is freed. */
+ static Bigint *
+mult
+#ifdef KR_headers
+ (a, b) Bigint *a, *b;
+#else
+ (Bigint *a, Bigint *b)
+#endif
+{
+ Bigint *c;
+ int k, wa, wb, wc;
+ ULong *x, *xa, *xae, *xb, *xbe, *xc, *xc0;
+ ULong y;
+#ifdef ULLong
+ ULLong carry, z;
+#else
+ ULong carry, z;
+#ifdef Pack_32
+ ULong z2;
+#endif
+#endif
+
+ /* Ensure a is the longer operand. */
+ if (a->wds < b->wds) {
+ c = a;
+ a = b;
+ b = c;
+ }
+ k = a->k;
+ wa = a->wds;
+ wb = b->wds;
+ wc = wa + wb;
+ if (wc > a->maxwds)
+ k++;
+ c = Balloc(k);
+ /* Zero the result words before accumulating partial products. */
+ for(x = c->x, xa = x + wc; x < xa; x++)
+ *x = 0;
+ xa = a->x;
+ xae = xa + wa;
+ xb = b->x;
+ xbe = xb + wb;
+ xc0 = c->x;
+#ifdef ULLong
+ for(; xb < xbe; xc0++) {
+ if ((y = *xb++)) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * (ULLong)y + *xc + carry;
+ carry = z >> 32;
+ *xc++ = z & FFFFFFFF;
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ }
+#else
+#ifdef Pack_32
+ for(; xb < xbe; xb++, xc0++) {
+ if (y = *xb & 0xffff) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = (*x & 0xffff) * y + (*xc & 0xffff) + carry;
+ carry = z >> 16;
+ z2 = (*x++ >> 16) * y + (*xc >> 16) + carry;
+ carry = z2 >> 16;
+ Storeinc(xc, z2, z);
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ if (y = *xb >> 16) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ z2 = *xc;
+ do {
+ z = (*x & 0xffff) * y + (*xc >> 16) + carry;
+ carry = z >> 16;
+ Storeinc(xc, z, z2);
+ z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry;
+ carry = z2 >> 16;
+ }
+ while(x < xae);
+ *xc = z2;
+ }
+ }
+#else
+ for(; xb < xbe; xc0++) {
+ if (y = *xb++) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * y + *xc + carry;
+ carry = z >> 16;
+ *xc++ = z & 0xffff;
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ }
+#endif
+#endif
+ /* Trim high-order zero words from the result. */
+ for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ;
+ c->wds = wc;
+ return c;
+ }
+
+ /* Head of a lazily grown, shared chain of powers 625^(2^i), used by
+ * pow5mult below. Guarded by lock 1 under MULTIPLE_THREADS. */
+ static Bigint *p5s;
+
+/* Return b * 5^k, freeing b. The low two bits of k are handled with a
+ * small multiplier table (5, 25, 125); the rest is done by binary
+ * exponentiation against the cached p5s chain, squaring (and caching)
+ * 625^(2^i) entries on demand. */
+ static Bigint *
+pow5mult
+#ifdef KR_headers
+ (b, k) Bigint *b; int k;
+#else
+ (Bigint *b, int k)
+#endif
+{
+ Bigint *b1, *p5, *p51;
+ int i;
+ static int p05[3] = { 5, 25, 125 };
+
+ if ((i = k & 3))
+ b = multadd(b, p05[i-1], 0);
+
+ if (!(k >>= 2))
+ return b;
+ if (!(p5 = p5s)) {
+ /* first time */
+#ifdef MULTIPLE_THREADS
+ /* Re-check under the lock: another thread may have won the race. */
+ ACQUIRE_DTOA_LOCK(1);
+ if (!(p5 = p5s)) {
+ p5 = p5s = i2b(625);
+ p5->next = 0;
+ }
+ FREE_DTOA_LOCK(1);
+#else
+ p5 = p5s = i2b(625);
+ p5->next = 0;
+#endif
+ }
+ for(;;) {
+ if (k & 1) {
+ b1 = mult(b, p5);
+ Bfree(b);
+ b = b1;
+ }
+ if (!(k >>= 1))
+ break;
+ if (!(p51 = p5->next)) {
+#ifdef MULTIPLE_THREADS
+ ACQUIRE_DTOA_LOCK(1);
+ if (!(p51 = p5->next)) {
+ p51 = p5->next = mult(p5,p5);
+ p51->next = 0;
+ }
+ FREE_DTOA_LOCK(1);
+#else
+ p51 = p5->next = mult(p5,p5);
+ p51->next = 0;
+#endif
+ }
+ p5 = p51;
+ }
+ return b;
+ }
+
+/* Return b << k as a freshly allocated Bigint; b is freed. Whole words
+ * (k >> 5, or k >> 4 without Pack_32) are handled by zero-prefixing;
+ * the remaining bit shift is applied by carrying bits between adjacent
+ * words. */
+ static Bigint *
+lshift
+#ifdef KR_headers
+ (b, k) Bigint *b; int k;
+#else
+ (Bigint *b, int k)
+#endif
+{
+ int i, k1, n, n1;
+ Bigint *b1;
+ ULong *x, *x1, *xe, z;
+
+#ifdef Pack_32
+ n = k >> 5;
+#else
+ n = k >> 4;
+#endif
+ k1 = b->k;
+ n1 = n + b->wds + 1;
+ /* Grow the size class until n1 words fit. */
+ for(i = b->maxwds; n1 > i; i <<= 1)
+ k1++;
+ b1 = Balloc(k1);
+ x1 = b1->x;
+ /* Low n words of the result are zero. */
+ for(i = 0; i < n; i++)
+ *x1++ = 0;
+ x = b->x;
+ xe = x + b->wds;
+#ifdef Pack_32
+ if (k &= 0x1f) {
+ k1 = 32 - k;
+ z = 0;
+ do {
+ *x1++ = *x << k | z;
+ z = *x++ >> k1;
+ }
+ while(x < xe);
+ if ((*x1 = z))
+ ++n1;
+ }
+#else
+ if (k &= 0xf) {
+ k1 = 16 - k;
+ z = 0;
+ do {
+ *x1++ = *x << k & 0xffff | z;
+ z = *x++ >> k1;
+ }
+ while(x < xe);
+ if (*x1 = z)
+ ++n1;
+ }
+#endif
+ else do
+ *x1++ = *x++;
+ while(x < xe);
+ b1->wds = n1 - 1;
+ Bfree(b);
+ return b1;
+ }
+
+/* Three-way magnitude comparison of a and b: returns a negative value,
+ * zero, or a positive value as a < b, a == b, or a > b. Relies on both
+ * operands being normalized (no high-order zero words); first compares
+ * word counts, then scans words from most significant down. */
+ static int
+cmp
+#ifdef KR_headers
+ (a, b) Bigint *a, *b;
+#else
+ (Bigint *a, Bigint *b)
+#endif
+{
+ ULong *xa, *xa0, *xb, *xb0;
+ int i, j;
+
+ i = a->wds;
+ j = b->wds;
+#ifdef DEBUG
+ if (i > 1 && !a->x[i-1])
+ Bug("cmp called with a->x[a->wds-1] == 0");
+ if (j > 1 && !b->x[j-1])
+ Bug("cmp called with b->x[b->wds-1] == 0");
+#endif
+ if (i -= j)
+ return i;
+ xa0 = a->x;
+ xa = xa0 + j;
+ xb0 = b->x;
+ xb = xb0 + j;
+ for(;;) {
+ if (*--xa != *--xb)
+ return *xa < *xb ? -1 : 1;
+ if (xa <= xa0)
+ break;
+ }
+ return 0;
+ }
+
+/* Return |a - b| as a new Bigint; the result's sign field is set to 1
+ * when a < b (so callers can recover the signed difference). The larger
+ * operand is subtracted from with schoolbook borrow propagation; neither
+ * input is freed. */
+ static Bigint *
+diff
+#ifdef KR_headers
+ (a, b) Bigint *a, *b;
+#else
+ (Bigint *a, Bigint *b)
+#endif
+{
+ Bigint *c;
+ int i, wa, wb;
+ ULong *xa, *xae, *xb, *xbe, *xc;
+#ifdef ULLong
+ ULLong borrow, y;
+#else
+ ULong borrow, y;
+#ifdef Pack_32
+ ULong z;
+#endif
+#endif
+
+ i = cmp(a,b);
+ if (!i) {
+ /* Equal magnitudes: return zero. */
+ c = Balloc(0);
+ c->wds = 1;
+ c->x[0] = 0;
+ return c;
+ }
+ if (i < 0) {
+ /* Swap so a >= b; remember the result is negative. */
+ c = a;
+ a = b;
+ b = c;
+ i = 1;
+ }
+ else
+ i = 0;
+ c = Balloc(a->k);
+ c->sign = i;
+ wa = a->wds;
+ xa = a->x;
+ xae = xa + wa;
+ wb = b->wds;
+ xb = b->x;
+ xbe = xb + wb;
+ xc = c->x;
+ borrow = 0;
+#ifdef ULLong
+ do {
+ y = (ULLong)*xa++ - *xb++ - borrow;
+ borrow = y >> 32 & (ULong)1;
+ *xc++ = y & FFFFFFFF;
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = *xa++ - borrow;
+ borrow = y >> 32 & (ULong)1;
+ *xc++ = y & FFFFFFFF;
+ }
+#else
+#ifdef Pack_32
+ do {
+ y = (*xa & 0xffff) - (*xb & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - (*xb++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = (*xa & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+#else
+ do {
+ y = *xa++ - *xb++ - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *xc++ = y & 0xffff;
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = *xa++ - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *xc++ = y & 0xffff;
+ }
+#endif
+#endif
+ /* Trim high-order zero words left by cancellation. */
+ while(!*--xc)
+ wa--;
+ c->wds = wa;
+ return c;
+ }
+
+/* Return the unit in the last place of x: a double whose value is the
+ * gap between x and its neighbor of next-larger magnitude. The exponent
+ * field is dropped by P-1 positions; when underflow is neither avoided
+ * nor sudden, a denormal result is built bit by bit instead. */
+ static double
+ulp
+#ifdef KR_headers
+ (x) double x;
+#else
+ (double x)
+#endif
+{
+ register Long L;
+ double a;
+
+ L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+ if (L > 0) {
+#endif
+#endif
+#ifdef IBM
+ L |= Exp_msk1 >> 4;
+#endif
+ word0(a) = L;
+ word1(a) = 0;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+ }
+ else {
+ /* Exponent underflowed: construct the denormal ulp directly. */
+ L = -L >> Exp_shift;
+ if (L < Exp_shift) {
+ word0(a) = 0x80000 >> L;
+ word1(a) = 0;
+ }
+ else {
+ word0(a) = 0;
+ L -= Exp_shift;
+ word1(a) = L >= 31 ? 1 : 1 << 31 - L;
+ }
+ }
+#endif
+#endif
+ return dval(a);
+ }
+
+/* Convert the most significant bits of Bigint a into a double whose
+ * exponent field is forced to Exp_1 (a value in [1, 2)); *e receives
+ * 32 minus the count of leading zero bits in a's top word, so the caller
+ * can reconstruct the true binary exponent from a->wds and *e. a must
+ * be nonzero (checked under DEBUG). */
+ static double
+b2d
+#ifdef KR_headers
+ (a, e) Bigint *a; int *e;
+#else
+ (Bigint *a, int *e)
+#endif
+{
+ ULong *xa, *xa0, w, y, z;
+ int k;
+ double d;
+#ifdef VAX
+ ULong d0, d1;
+#else
+#define d0 word0(d)
+#define d1 word1(d)
+#endif
+
+ xa0 = a->x;
+ xa = xa0 + a->wds;
+ y = *--xa;
+#ifdef DEBUG
+ if (!y) Bug("zero y in b2d");
+#endif
+ k = hi0bits(y);
+ *e = 32 - k;
+#ifdef Pack_32
+ if (k < Ebits) {
+ /* The top word alone overfills the 20-bit high fraction field. */
+ d0 = Exp_1 | (y >> (Ebits - k));
+ w = xa > xa0 ? *--xa : 0;
+ d1 = (y << ((32-Ebits) + k)) | (w >> (Ebits - k));
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ if (k -= Ebits) {
+ d0 = Exp_1 | (y << k) | (z >> (32 - k));
+ y = xa > xa0 ? *--xa : 0;
+ d1 = (z << k) | (y >> (32 - k));
+ }
+ else {
+ d0 = Exp_1 | y;
+ d1 = z;
+ }
+#else
+ if (k < Ebits + 16) {
+ z = xa > xa0 ? *--xa : 0;
+ d0 = Exp_1 | (y << (k - Ebits)) | (z >> (Ebits + 16 - k));
+ w = xa > xa0 ? *--xa : 0;
+ y = xa > xa0 ? *--xa : 0;
+ d1 = (z << (k + 16 - Ebits)) | (w << (k - Ebits)) | (y >> (16 + Ebits - k));
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ w = xa > xa0 ? *--xa : 0;
+ k -= Ebits + 16;
+ d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k;
+ y = xa > xa0 ? *--xa : 0;
+ d1 = w << k + 16 | y << k;
+#endif
+ ret_d:
+#ifdef VAX
+ /* VAX D_floating keeps its words half-swapped relative to IEEE. */
+ word0(d) = d0 >> 16 | d0 << 16;
+ word1(d) = d1 >> 16 | d1 << 16;
+#else
+#undef d0
+#undef d1
+#endif
+ return dval(d);
+ }
+
+/* Decompose double d into an exact Bigint b and exponent *e such that
+ * d == b * 2^(*e), with b's trailing zero bits stripped (lo0bits).
+ * *bits receives the number of significant bits remaining in b. The
+ * sign bit of d is ignored; denormals take the !de path unless
+ * Sudden_Underflow is defined. */
+ static Bigint *
+d2b
+#ifdef KR_headers
+ (d, e, bits) double d; int *e, *bits;
+#else
+ (double d, int *e, int *bits)
+#endif
+{
+ Bigint *b;
+ int de, k;
+ ULong *x, y, z;
+#ifndef Sudden_Underflow
+ int i;
+#endif
+#ifdef VAX
+ ULong d0, d1;
+ d0 = word0(d) >> 16 | word0(d) << 16;
+ d1 = word1(d) >> 16 | word1(d) << 16;
+#else
+#define d0 word0(d)
+#define d1 word1(d)
+#endif
+
+#ifdef Pack_32
+ b = Balloc(1);
+#else
+ b = Balloc(2);
+#endif
+ x = b->x;
+
+ z = d0 & Frac_mask;
+ d0 &= 0x7fffffff; /* clear sign bit, which we ignore */
+#ifdef Sudden_Underflow
+ de = (int)(d0 >> Exp_shift);
+#ifndef IBM
+ z |= Exp_msk11;
+#endif
+#else
+ /* Normal numbers carry an implicit leading 1 bit; denormals do not. */
+ if ((de = (int)(d0 >> Exp_shift)))
+ z |= Exp_msk1;
+#endif
+#ifdef Pack_32
+ if ((y = d1)) {
+ if ((k = lo0bits(&y))) {
+ x[0] = y | (z << (32 - k));
+ z >>= k;
+ }
+ else
+ x[0] = y;
+#ifndef Sudden_Underflow
+ i =
+#endif
+ b->wds = (x[1] = z) ? 2 : 1;
+ }
+ else {
+ /* This assertion fails for "1e-500" and other very
+ * small numbers. It provides the right result (0)
+ * though. This assert has also been removed from KJS's
+ * version of dtoa.c.
+ *
+ * #ifdef DEBUG
+ * if (!z) Bug("zero z in b2d");
+ * #endif
+ */
+ k = lo0bits(&z);
+ x[0] = z;
+#ifndef Sudden_Underflow
+ i =
+#endif
+ b->wds = 1;
+ k += 32;
+ }
+#else
+ if (y = d1) {
+ if (k = lo0bits(&y))
+ if (k >= 16) {
+ x[0] = y | z << 32 - k & 0xffff;
+ x[1] = z >> k - 16 & 0xffff;
+ x[2] = z >> k;
+ i = 2;
+ }
+ else {
+ x[0] = y & 0xffff;
+ x[1] = y >> 16 | z << 16 - k & 0xffff;
+ x[2] = z >> k & 0xffff;
+ x[3] = z >> k+16;
+ i = 3;
+ }
+ else {
+ x[0] = y & 0xffff;
+ x[1] = y >> 16;
+ x[2] = z & 0xffff;
+ x[3] = z >> 16;
+ i = 3;
+ }
+ }
+ else {
+#ifdef DEBUG
+ if (!z)
+ Bug("Zero passed to d2b");
+#endif
+ k = lo0bits(&z);
+ if (k >= 16) {
+ x[0] = z;
+ i = 0;
+ }
+ else {
+ x[0] = z & 0xffff;
+ x[1] = z >> 16;
+ i = 1;
+ }
+ k += 32;
+ }
+ while(!x[i])
+ --i;
+ b->wds = i + 1;
+#endif
+#ifndef Sudden_Underflow
+ if (de) {
+#endif
+#ifdef IBM
+ *e = (de - Bias - (P-1) << 2) + k;
+ *bits = 4*P + 8 - k - hi0bits(word0(d) & Frac_mask);
+#else
+ *e = de - Bias - (P-1) + k;
+ *bits = P - k;
+#endif
+#ifndef Sudden_Underflow
+ }
+ else {
+ /* Denormal: exponent is fixed at Emin, only the stored bits count. */
+ *e = de - Bias - (P-1) + 1 + k;
+#ifdef Pack_32
+ *bits = 32*i - hi0bits(x[i-1]);
+#else
+ *bits = (i+2)*16 - hi0bits(x[i]);
+#endif
+ }
+#endif
+ return b;
+ }
+#undef d0
+#undef d1
+
+/* ratio: return a double approximation of a/b for Bigints a and b.
+ * Converts each to a double with b2d, then applies the word-count
+ * difference to the binary exponent so the magnitudes are comparable. */
+ static double
+ratio
+#ifdef KR_headers
+ (a, b) Bigint *a, *b;
+#else
+ (Bigint *a, Bigint *b)
+#endif
+{
+ double da, db;
+ int k, ka, kb;
+
+ dval(da) = b2d(a, &ka);
+ dval(db) = b2d(b, &kb);
+#ifdef Pack_32
+ k = ka - kb + 32*(a->wds - b->wds);
+#else
+ k = ka - kb + 16*(a->wds - b->wds);
+#endif
+#ifdef IBM
+ /* IBM hex floating point: exponent steps are factors of 16,
+ * so handle the remaining factor of 2..8 by multiplication. */
+ if (k > 0) {
+ word0(da) += (k >> 2)*Exp_msk1;
+ if (k &= 3)
+ dval(da) *= 1 << k;
+ }
+ else {
+ k = -k;
+ word0(db) += (k >> 2)*Exp_msk1;
+ if (k &= 3)
+ dval(db) *= 1 << k;
+ }
+#else
+ /* Scale the larger-exponent operand by adding k to its
+ * binary exponent field directly. */
+ if (k > 0)
+ word0(da) += k*Exp_msk1;
+ else {
+ k = -k;
+ word0(db) += k*Exp_msk1;
+ }
+#endif
+ return dval(da) / dval(db);
+ }
+
+/* Powers of ten exactly representable in a double (10^0 .. 10^22 for
+ * IEEE; VAX D-floating has more mantissa bits, so two more entries). */
+ static CONST double
+tens[] = {
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22
+#ifdef VAX
+ , 1e23, 1e24
+#endif
+ };
+
+/* bigtens[j] = 10^(16*2^j) and tinytens[j] = 10^-(16*2^j): used to
+ * scale by large powers of ten with O(log k) multiplications.  The
+ * table length (n_bigtens) depends on the arithmetic's exponent range. */
+ static CONST double
+#ifdef IEEE_Arith
+bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
+#ifdef Avoid_Underflow
+ 9007199254740992.*9007199254740992.e-256
+ /* = 2^106 * 1e-53 */
+#else
+ 1e-256
+#endif
+ };
+/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* flag unnecessarily. It leads to a song and dance at the end of strtod. */
+#define Scale_Bit 0x10
+#define n_bigtens 5
+#else
+#ifdef IBM
+bigtens[] = { 1e16, 1e32, 1e64 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64 };
+#define n_bigtens 3
+#else
+bigtens[] = { 1e16, 1e32 };
+static CONST double tinytens[] = { 1e-16, 1e-32 };
+#define n_bigtens 2
+#endif
+#endif
+
+#ifdef INFNAN_CHECK
+
+#ifndef NAN_WORD0
+#define NAN_WORD0 0x7ff80000
+#endif
+
+#ifndef NAN_WORD1
+#define NAN_WORD1 0
+#endif
+
+ static int
+match
+#ifdef KR_headers
+ (sp, t) char **sp, *t;
+#else
+ (CONST char **sp, char *t)
+#endif
+{
+ int c, d;
+ CONST char *s = *sp;
+
+ while(d = *t++) {
+ if ((c = *++s) >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+ if (c != d)
+ return 0;
+ }
+ *sp = s + 1;
+ return 1;
+ }
+
+#ifndef No_Hex_NaN
+/* hexnan: parse an optional "(hexdigits)" payload after "NaN" and fold
+ * it into the NaN at *rvp.  Accumulates up to 64 bits of hex digits in
+ * x[0]:x[1]; whitespace inside the parentheses restarts accumulation.
+ * On a well-formed "(...)" form, *sp is advanced past the ')'; any
+ * other input leaves *sp (and *rvp) untouched. */
+ static void
+hexnan
+#ifdef KR_headers
+ (rvp, sp) double *rvp; CONST char **sp;
+#else
+ (double *rvp, CONST char **sp)
+#endif
+{
+ ULong c, x[2];
+ CONST char *s;
+ int havedig, udx0, xshift;
+
+ x[0] = x[1] = 0;
+ havedig = xshift = 0;
+ udx0 = 1;
+ s = *sp;
+ while(c = *(CONST unsigned char*)++s) {
+ /* map hex digit characters to their numeric value */
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'a' && c <= 'f')
+ c += 10 - 'a';
+ else if (c >= 'A' && c <= 'F')
+ c += 10 - 'A';
+ else if (c <= ' ') {
+ /* whitespace after digits: remember to shift the
+ * accumulated words before the next digit */
+ if (udx0 && havedig) {
+ udx0 = 0;
+ xshift = 1;
+ }
+ continue;
+ }
+ else if (/*(*/ c == ')' && havedig) {
+ *sp = s + 1;
+ break;
+ }
+ else
+ return; /* invalid form: don't change *sp */
+ havedig = 1;
+ if (xshift) {
+ xshift = 0;
+ x[0] = x[1];
+ x[1] = 0;
+ }
+ if (udx0)
+ x[0] = (x[0] << 4) | (x[1] >> 28);
+ x[1] = (x[1] << 4) | c;
+ }
+ /* only install a payload if some bits survive masking to the
+ * 52-bit NaN significand */
+ if ((x[0] &= 0xfffff) || x[1]) {
+ word0(*rvp) = Exp_mask | x[0];
+ word1(*rvp) = x[1];
+ }
+ }
+#endif /*No_Hex_NaN*/
+#endif /* INFNAN_CHECK */
+
+/* strtod: convert the decimal string s00 to the nearest double,
+ * correctly rounded.  If se is non-null, *se receives a pointer to the
+ * first unconsumed character.  Strategy: parse sign/digits/exponent,
+ * build a fast floating-point estimate, then (when the input has too
+ * many digits or the estimate may be off) refine the estimate with
+ * exact Bigint arithmetic until it is within half an ulp. */
+ double
+strtod
+#ifdef KR_headers
+ (s00, se) CONST char *s00; char **se;
+#else
+ (CONST char *s00, char **se)
+#endif
+{
+#ifdef Avoid_Underflow
+ int scale;
+#endif
+ int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
+ e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
+ CONST char *s, *s0, *s1;
+ double aadj, aadj1, adj, rv, rv0;
+ Long L;
+ ULong y, z;
+ Bigint *bb, *bb1, *bd, *bd0, *bs, *delta;
+#ifdef SET_INEXACT
+ int inexact, oldinexact;
+#endif
+#ifdef Honor_FLT_ROUNDS
+ int rounding;
+#endif
+#ifdef USE_LOCALE
+ CONST char *s2;
+#endif
+
+ /* Skip leading whitespace and an optional sign. */
+ sign = nz0 = nz = 0;
+ dval(rv) = 0.;
+ for(s = s00;;s++) switch(*s) {
+ case '-':
+ sign = 1;
+ /* no break */
+ case '+':
+ if (*++s)
+ goto break2;
+ /* no break */
+ case 0:
+ goto ret0;
+ case '\t':
+ case '\n':
+ case '\v':
+ case '\f':
+ case '\r':
+ case ' ':
+ continue;
+ default:
+ goto break2;
+ }
+ break2:
+ if (*s == '0') {
+ nz0 = 1;
+ while(*++s == '0') ;
+ if (!*s)
+ goto ret;
+ }
+ /* Accumulate up to 16 leading digits into y (first 9) and z. */
+ s0 = s;
+ y = z = 0;
+ for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
+ if (nd < 9)
+ y = 10*y + c - '0';
+ else if (nd < 16)
+ z = 10*z + c - '0';
+ nd0 = nd;
+#ifdef USE_LOCALE
+ s1 = localeconv()->decimal_point;
+ if (c == *s1) {
+ c = '.';
+ if (*++s1) {
+ s2 = s;
+ for(;;) {
+ if (*++s2 != *s1) {
+ c = 0;
+ break;
+ }
+ if (!*++s1) {
+ s = s2;
+ break;
+ }
+ }
+ }
+ }
+#endif
+ /* Fractional digits: nf counts digits after the point, nz counts
+ * a pending run of zeros not yet folded into y/z. */
+ if (c == '.') {
+ c = *++s;
+ if (!nd) {
+ for(; c == '0'; c = *++s)
+ nz++;
+ if (c > '0' && c <= '9') {
+ s0 = s;
+ nf += nz;
+ nz = 0;
+ goto have_dig;
+ }
+ goto dig_done;
+ }
+ for(; c >= '0' && c <= '9'; c = *++s) {
+ have_dig:
+ nz++;
+ if (c -= '0') {
+ nf += nz;
+ for(i = 1; i < nz; i++)
+ if (nd++ < 9)
+ y *= 10;
+ else if (nd <= DBL_DIG + 1)
+ z *= 10;
+ if (nd++ < 9)
+ y = 10*y + c;
+ else if (nd <= DBL_DIG + 1)
+ z = 10*z + c;
+ nz = 0;
+ }
+ }
+ }
+ dig_done:
+ /* Optional exponent part. */
+ e = 0;
+ if (c == 'e' || c == 'E') {
+ if (!nd && !nz && !nz0) {
+ goto ret0;
+ }
+ s00 = s;
+ esign = 0;
+ switch(c = *++s) {
+ case '-':
+ esign = 1;
+ case '+':
+ c = *++s;
+ }
+ if (c >= '0' && c <= '9') {
+ while(c == '0')
+ c = *++s;
+ if (c > '0' && c <= '9') {
+ L = c - '0';
+ s1 = s;
+ while((c = *++s) >= '0' && c <= '9')
+ L = 10*L + c - '0';
+ if (s - s1 > 8 || L > 19999)
+ /* Avoid confusion from exponents
+ * so large that e might overflow.
+ */
+ e = 19999; /* safe for 16 bit ints */
+ else
+ e = (int)L;
+ if (esign)
+ e = -e;
+ }
+ else
+ e = 0;
+ }
+ else
+ s = s00;
+ }
+ if (!nd) {
+ if (!nz && !nz0) {
+#ifdef INFNAN_CHECK
+ /* Check for Nan and Infinity */
+ switch(c) {
+ case 'i':
+ case 'I':
+ if (match(&s,"nf")) {
+ --s;
+ if (!match(&s,"inity"))
+ ++s;
+ word0(rv) = 0x7ff00000;
+ word1(rv) = 0;
+ goto ret;
+ }
+ break;
+ case 'n':
+ case 'N':
+ if (match(&s, "an")) {
+ word0(rv) = NAN_WORD0;
+ word1(rv) = NAN_WORD1;
+#ifndef No_Hex_NaN
+ if (*s == '(') /*)*/
+ hexnan(&rv, &s);
+#endif
+ goto ret;
+ }
+ }
+#endif /* INFNAN_CHECK */
+ ret0:
+ s = s00;
+ sign = 0;
+ }
+ goto ret;
+ }
+ e1 = e -= nf;
+
+ /* Now we have nd0 digits, starting at s0, followed by a
+ * decimal point, followed by nd-nd0 digits. The number we're
+ * after is the integer represented by those digits times
+ * 10**e */
+
+ if (!nd0)
+ nd0 = nd;
+ k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1;
+ dval(rv) = y;
+ if (k > 9) {
+#ifdef SET_INEXACT
+ if (k > DBL_DIG)
+ oldinexact = get_inexact();
+#endif
+ dval(rv) = tens[k - 9] * dval(rv) + z;
+ }
+ bd0 = 0;
+ /* Fast path: with few enough digits and a small exponent, one
+ * exactly-representable multiply or divide is correctly rounded. */
+ if (nd <= DBL_DIG
+#ifndef RND_PRODQUOT
+#ifndef Honor_FLT_ROUNDS
+ && Flt_Rounds == 1
+#endif
+#endif
+ ) {
+ if (!e)
+ goto ret;
+ if (e > 0) {
+ if (e <= Ten_pmax) {
+#ifdef VAX
+ goto vax_ovfl_check;
+#else
+#ifdef Honor_FLT_ROUNDS
+ /* round correctly FLT_ROUNDS = 2 or 3 */
+ if (sign) {
+ rv = -rv;
+ sign = 0;
+ }
+#endif
+ /* rv = */ rounded_product(dval(rv), tens[e]);
+ goto ret;
+#endif
+ }
+ i = DBL_DIG - nd;
+ if (e <= Ten_pmax + i) {
+ /* A fancier test would sometimes let us do
+ * this for larger i values.
+ */
+#ifdef Honor_FLT_ROUNDS
+ /* round correctly FLT_ROUNDS = 2 or 3 */
+ if (sign) {
+ rv = -rv;
+ sign = 0;
+ }
+#endif
+ e -= i;
+ dval(rv) *= tens[i];
+#ifdef VAX
+ /* VAX exponent range is so narrow we must
+ * worry about overflow here...
+ */
+ vax_ovfl_check:
+ word0(rv) -= P*Exp_msk1;
+ /* rv = */ rounded_product(dval(rv), tens[e]);
+ if ((word0(rv) & Exp_mask)
+ > Exp_msk1*(DBL_MAX_EXP+Bias-1-P))
+ goto ovfl;
+ word0(rv) += P*Exp_msk1;
+#else
+ /* rv = */ rounded_product(dval(rv), tens[e]);
+#endif
+ goto ret;
+ }
+ }
+#ifndef Inaccurate_Divide
+ else if (e >= -Ten_pmax) {
+#ifdef Honor_FLT_ROUNDS
+ /* round correctly FLT_ROUNDS = 2 or 3 */
+ if (sign) {
+ rv = -rv;
+ sign = 0;
+ }
+#endif
+ /* rv = */ rounded_quotient(dval(rv), tens[-e]);
+ goto ret;
+ }
+#endif
+ }
+ e1 += nd - k;
+
+#ifdef IEEE_Arith
+#ifdef SET_INEXACT
+ inexact = 1;
+ if (k <= DBL_DIG)
+ oldinexact = get_inexact();
+#endif
+#ifdef Avoid_Underflow
+ scale = 0;
+#endif
+#ifdef Honor_FLT_ROUNDS
+ if ((rounding = Flt_Rounds) >= 2) {
+ if (sign)
+ rounding = rounding == 2 ? 0 : 2;
+ else
+ if (rounding != 2)
+ rounding = 0;
+ }
+#endif
+#endif /*IEEE_Arith*/
+
+ /* Get starting approximation = rv * 10**e1 */
+
+ if (e1 > 0) {
+ if ((i = e1 & 15))
+ dval(rv) *= tens[i];
+ if (e1 &= ~15) {
+ if (e1 > DBL_MAX_10_EXP) {
+ ovfl:
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
+ /* Can't trust HUGE_VAL */
+#ifdef IEEE_Arith
+#ifdef Honor_FLT_ROUNDS
+ switch(rounding) {
+ case 0: /* toward 0 */
+ case 3: /* toward -infinity */
+ word0(rv) = Big0;
+ word1(rv) = Big1;
+ break;
+ default:
+ word0(rv) = Exp_mask;
+ word1(rv) = 0;
+ }
+#else /*Honor_FLT_ROUNDS*/
+ word0(rv) = Exp_mask;
+ word1(rv) = 0;
+#endif /*Honor_FLT_ROUNDS*/
+#ifdef SET_INEXACT
+ /* set overflow bit */
+ dval(rv0) = 1e300;
+ dval(rv0) *= dval(rv0);
+#endif
+#else /*IEEE_Arith*/
+ word0(rv) = Big0;
+ word1(rv) = Big1;
+#endif /*IEEE_Arith*/
+ if (bd0)
+ goto retfree;
+ goto ret;
+ }
+ e1 >>= 4;
+ for(j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ dval(rv) *= bigtens[j];
+ /* The last multiplication could overflow. */
+ word0(rv) -= P*Exp_msk1;
+ dval(rv) *= bigtens[j];
+ if ((z = word0(rv) & Exp_mask)
+ > Exp_msk1*(DBL_MAX_EXP+Bias-P))
+ goto ovfl;
+ if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) {
+ /* set to largest number */
+ /* (Can't trust DBL_MAX) */
+ word0(rv) = Big0;
+ word1(rv) = Big1;
+ }
+ else
+ word0(rv) += P*Exp_msk1;
+ }
+ }
+ else if (e1 < 0) {
+ e1 = -e1;
+ if ((i = e1 & 15))
+ dval(rv) /= tens[i];
+ if (e1 >>= 4) {
+ if (e1 >= 1 << n_bigtens)
+ goto undfl;
+#ifdef Avoid_Underflow
+ if (e1 & Scale_Bit)
+ scale = 2*P;
+ for(j = 0; e1 > 0; j++, e1 >>= 1)
+ if (e1 & 1)
+ dval(rv) *= tinytens[j];
+ if (scale && (j = 2*P + 1 - ((word0(rv) & Exp_mask)
+ >> Exp_shift)) > 0) {
+ /* scaled rv is denormal; zap j low bits */
+ if (j >= 32) {
+ word1(rv) = 0;
+ if (j >= 53)
+ word0(rv) = (P+2)*Exp_msk1;
+ else
+ word0(rv) &= 0xffffffff << (j-32);
+ }
+ else
+ word1(rv) &= 0xffffffff << j;
+ }
+#else
+ for(j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ dval(rv) *= tinytens[j];
+ /* The last multiplication could underflow. */
+ dval(rv0) = dval(rv);
+ dval(rv) *= tinytens[j];
+ if (!dval(rv)) {
+ dval(rv) = 2.*dval(rv0);
+ dval(rv) *= tinytens[j];
+#endif
+ if (!dval(rv)) {
+ undfl:
+ dval(rv) = 0.;
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
+ if (bd0)
+ goto retfree;
+ goto ret;
+ }
+#ifndef Avoid_Underflow
+ word0(rv) = Tiny0;
+ word1(rv) = Tiny1;
+ /* The refinement below will clean
+ * this approximation up.
+ */
+ }
+#endif
+ }
+ }
+
+ /* Now the hard part -- adjusting rv to the correct value.*/
+
+ /* Put digits into bd: true value = bd * 10^e */
+
+ bd0 = s2b(s0, nd0, nd, y);
+
+ /* Correction loop: compare the exact input (bd * 10^e) against the
+ * current approximation (bb * 2^bbe) using common scale factors;
+ * delta is the exact error, bs half an ulp.  Adjust rv until the
+ * error is below half an ulp (or exactly half, rounded to even). */
+ for(;;) {
+ bd = Balloc(bd0->k);
+ Bcopy(bd, bd0);
+ bb = d2b(dval(rv), &bbe, &bbbits); /* rv = bb * 2^bbe */
+ bs = i2b(1);
+
+ if (e >= 0) {
+ bb2 = bb5 = 0;
+ bd2 = bd5 = e;
+ }
+ else {
+ bb2 = bb5 = -e;
+ bd2 = bd5 = 0;
+ }
+ if (bbe >= 0)
+ bb2 += bbe;
+ else
+ bd2 -= bbe;
+ bs2 = bb2;
+#ifdef Honor_FLT_ROUNDS
+ if (rounding != 1)
+ bs2++;
+#endif
+#ifdef Avoid_Underflow
+ j = bbe - scale;
+ i = j + bbbits - 1; /* logb(rv) */
+ if (i < Emin) /* denormal */
+ j += P - Emin;
+ else
+ j = P + 1 - bbbits;
+#else /*Avoid_Underflow*/
+#ifdef Sudden_Underflow
+#ifdef IBM
+ j = 1 + 4*P - 3 - bbbits + ((bbe + bbbits - 1) & 3);
+#else
+ j = P + 1 - bbbits;
+#endif
+#else /*Sudden_Underflow*/
+ j = bbe;
+ i = j + bbbits - 1; /* logb(rv) */
+ if (i < Emin) /* denormal */
+ j += P - Emin;
+ else
+ j = P + 1 - bbbits;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+ bb2 += j;
+ bd2 += j;
+#ifdef Avoid_Underflow
+ bd2 += scale;
+#endif
+ i = bb2 < bd2 ? bb2 : bd2;
+ if (i > bs2)
+ i = bs2;
+ if (i > 0) {
+ bb2 -= i;
+ bd2 -= i;
+ bs2 -= i;
+ }
+ if (bb5 > 0) {
+ bs = pow5mult(bs, bb5);
+ bb1 = mult(bs, bb);
+ Bfree(bb);
+ bb = bb1;
+ }
+ if (bb2 > 0)
+ bb = lshift(bb, bb2);
+ if (bd5 > 0)
+ bd = pow5mult(bd, bd5);
+ if (bd2 > 0)
+ bd = lshift(bd, bd2);
+ if (bs2 > 0)
+ bs = lshift(bs, bs2);
+ delta = diff(bb, bd);
+ dsign = delta->sign;
+ delta->sign = 0;
+ i = cmp(delta, bs);
+#ifdef Honor_FLT_ROUNDS
+ if (rounding != 1) {
+ if (i < 0) {
+ /* Error is less than an ulp */
+ if (!delta->x[0] && delta->wds <= 1) {
+ /* exact */
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ break;
+ }
+ if (rounding) {
+ if (dsign) {
+ adj = 1.;
+ goto apply_adj;
+ }
+ }
+ else if (!dsign) {
+ adj = -1.;
+ if (!word1(rv)
+ && !(word0(rv) & Frac_mask)) {
+ y = word0(rv) & Exp_mask;
+#ifdef Avoid_Underflow
+ if (!scale || y > 2*P*Exp_msk1)
+#else
+ if (y)
+#endif
+ {
+ delta = lshift(delta,Log2P);
+ if (cmp(delta, bs) <= 0)
+ adj = -0.5;
+ }
+ }
+ apply_adj:
+#ifdef Avoid_Underflow
+ if (scale && (y = word0(rv) & Exp_mask)
+ <= 2*P*Exp_msk1)
+ word0(adj) += (2*P+1)*Exp_msk1 - y;
+#else
+#ifdef Sudden_Underflow
+ if ((word0(rv) & Exp_mask) <=
+ P*Exp_msk1) {
+ word0(rv) += P*Exp_msk1;
+ dval(rv) += adj*ulp(dval(rv));
+ word0(rv) -= P*Exp_msk1;
+ }
+ else
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+ dval(rv) += adj*ulp(dval(rv));
+ }
+ break;
+ }
+ adj = ratio(delta, bs);
+ if (adj < 1.)
+ adj = 1.;
+ if (adj <= 0x7ffffffe) {
+ /* adj = rounding ? ceil(adj) : floor(adj); */
+ y = adj;
+ if (y != adj) {
+ if (!((rounding>>1) ^ dsign))
+ y++;
+ adj = y;
+ }
+ }
+#ifdef Avoid_Underflow
+ if (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1)
+ word0(adj) += (2*P+1)*Exp_msk1 - y;
+#else
+#ifdef Sudden_Underflow
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
+ word0(rv) += P*Exp_msk1;
+ adj *= ulp(dval(rv));
+ if (dsign)
+ dval(rv) += adj;
+ else
+ dval(rv) -= adj;
+ word0(rv) -= P*Exp_msk1;
+ goto cont;
+ }
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+ adj *= ulp(dval(rv));
+ if (dsign)
+ dval(rv) += adj;
+ else
+ dval(rv) -= adj;
+ goto cont;
+ }
+#endif /*Honor_FLT_ROUNDS*/
+
+ if (i < 0) {
+ /* Error is less than half an ulp -- check for
+ * special case of mantissa a power of two.
+ */
+ if (dsign || word1(rv) || word0(rv) & Bndry_mask
+#ifdef IEEE_Arith
+#ifdef Avoid_Underflow
+ || (word0(rv) & Exp_mask) <= (2*P+1)*Exp_msk1
+#else
+ || (word0(rv) & Exp_mask) <= Exp_msk1
+#endif
+#endif
+ ) {
+#ifdef SET_INEXACT
+ if (!delta->x[0] && delta->wds <= 1)
+ inexact = 0;
+#endif
+ break;
+ }
+ if (!delta->x[0] && delta->wds <= 1) {
+ /* exact result */
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ break;
+ }
+ delta = lshift(delta,Log2P);
+ if (cmp(delta, bs) > 0)
+ goto drop_down;
+ break;
+ }
+ if (i == 0) {
+ /* exactly half-way between */
+ if (dsign) {
+ if ((word0(rv) & Bndry_mask1) == Bndry_mask1
+ && word1(rv) == (
+#ifdef Avoid_Underflow
+ (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1)
+ ? (0xffffffff & (0xffffffff << (2*P+1-(y>>Exp_shift)))) :
+#endif
+ 0xffffffff)) {
+ /*boundary case -- increment exponent*/
+ word0(rv) = (word0(rv) & Exp_mask)
+ + Exp_msk1
+#ifdef IBM
+ | Exp_msk1 >> 4
+#endif
+ ;
+ word1(rv) = 0;
+#ifdef Avoid_Underflow
+ dsign = 0;
+#endif
+ break;
+ }
+ }
+ else if (!(word0(rv) & Bndry_mask) && !word1(rv)) {
+ drop_down:
+ /* boundary case -- decrement exponent */
+#ifdef Sudden_Underflow /*{{*/
+ L = word0(rv) & Exp_mask;
+#ifdef IBM
+ if (L < Exp_msk1)
+#else
+#ifdef Avoid_Underflow
+ if (L <= (scale ? (2*P+1)*Exp_msk1 : Exp_msk1))
+#else
+ if (L <= Exp_msk1)
+#endif /*Avoid_Underflow*/
+#endif /*IBM*/
+ goto undfl;
+ L -= Exp_msk1;
+#else /*Sudden_Underflow}{*/
+#ifdef Avoid_Underflow
+ if (scale) {
+ L = word0(rv) & Exp_mask;
+ if (L <= (2*P+1)*Exp_msk1) {
+ if (L > (P+2)*Exp_msk1)
+ /* round even ==> */
+ /* accept rv */
+ break;
+ /* rv = smallest denormal */
+ goto undfl;
+ }
+ }
+#endif /*Avoid_Underflow*/
+ L = (word0(rv) & Exp_mask) - Exp_msk1;
+#endif /*Sudden_Underflow}}*/
+ word0(rv) = L | Bndry_mask1;
+ word1(rv) = 0xffffffff;
+#ifdef IBM
+ goto cont;
+#else
+ break;
+#endif
+ }
+#ifndef ROUND_BIASED
+ if (!(word1(rv) & LSB))
+ break;
+#endif
+ if (dsign)
+ dval(rv) += ulp(dval(rv));
+#ifndef ROUND_BIASED
+ else {
+ dval(rv) -= ulp(dval(rv));
+#ifndef Sudden_Underflow
+ if (!dval(rv))
+ goto undfl;
+#endif
+ }
+#ifdef Avoid_Underflow
+ dsign = 1 - dsign;
+#endif
+#endif
+ break;
+ }
+ if ((aadj = ratio(delta, bs)) <= 2.) {
+ if (dsign)
+ aadj = aadj1 = 1.;
+ else if (word1(rv) || word0(rv) & Bndry_mask) {
+#ifndef Sudden_Underflow
+ if (word1(rv) == Tiny1 && !word0(rv))
+ goto undfl;
+#endif
+ aadj = 1.;
+ aadj1 = -1.;
+ }
+ else {
+ /* special case -- power of FLT_RADIX to be */
+ /* rounded down... */
+
+ if (aadj < 2./FLT_RADIX)
+ aadj = 1./FLT_RADIX;
+ else
+ aadj *= 0.5;
+ aadj1 = -aadj;
+ }
+ }
+ else {
+ aadj *= 0.5;
+ aadj1 = dsign ? aadj : -aadj;
+#ifdef Check_FLT_ROUNDS
+ switch(Rounding) {
+ case 2: /* towards +infinity */
+ aadj1 -= 0.5;
+ break;
+ case 0: /* towards 0 */
+ case 3: /* towards -infinity */
+ aadj1 += 0.5;
+ }
+#else
+ if (Flt_Rounds == 0)
+ aadj1 += 0.5;
+#endif /*Check_FLT_ROUNDS*/
+ }
+ y = word0(rv) & Exp_mask;
+
+ /* Check for overflow */
+
+ if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) {
+ dval(rv0) = dval(rv);
+ word0(rv) -= P*Exp_msk1;
+ adj = aadj1 * ulp(dval(rv));
+ dval(rv) += adj;
+ if ((word0(rv) & Exp_mask) >=
+ Exp_msk1*(DBL_MAX_EXP+Bias-P)) {
+ if (word0(rv0) == Big0 && word1(rv0) == Big1)
+ goto ovfl;
+ word0(rv) = Big0;
+ word1(rv) = Big1;
+ goto cont;
+ }
+ else
+ word0(rv) += P*Exp_msk1;
+ }
+ else {
+#ifdef Avoid_Underflow
+ if (scale && y <= 2*P*Exp_msk1) {
+ if (aadj <= 0x7fffffff) {
+ if ((z = aadj) <= 0)
+ z = 1;
+ aadj = z;
+ aadj1 = dsign ? aadj : -aadj;
+ }
+ word0(aadj1) += (2*P+1)*Exp_msk1 - y;
+ }
+ adj = aadj1 * ulp(dval(rv));
+ dval(rv) += adj;
+#else
+#ifdef Sudden_Underflow
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
+ dval(rv0) = dval(rv);
+ word0(rv) += P*Exp_msk1;
+ adj = aadj1 * ulp(dval(rv));
+ dval(rv) += adj;
+#ifdef IBM
+ if ((word0(rv) & Exp_mask) < P*Exp_msk1)
+#else
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1)
+#endif
+ {
+ if (word0(rv0) == Tiny0
+ && word1(rv0) == Tiny1)
+ goto undfl;
+ word0(rv) = Tiny0;
+ word1(rv) = Tiny1;
+ goto cont;
+ }
+ else
+ word0(rv) -= P*Exp_msk1;
+ }
+ else {
+ adj = aadj1 * ulp(dval(rv));
+ dval(rv) += adj;
+ }
+#else /*Sudden_Underflow*/
+ /* Compute adj so that the IEEE rounding rules will
+ * correctly round rv + adj in some half-way cases.
+ * If rv * ulp(rv) is denormalized (i.e.,
+ * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid
+ * trouble from bits lost to denormalization;
+ * example: 1.2e-307 .
+ */
+ if (y <= (P-1)*Exp_msk1 && aadj > 1.) {
+ aadj1 = (double)(int)(aadj + 0.5);
+ if (!dsign)
+ aadj1 = -aadj1;
+ }
+ adj = aadj1 * ulp(dval(rv));
+ dval(rv) += adj;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+ }
+ z = word0(rv) & Exp_mask;
+#ifndef SET_INEXACT
+#ifdef Avoid_Underflow
+ if (!scale)
+#endif
+ if (y == z) {
+ /* Can we stop now? */
+ L = (Long)aadj;
+ aadj -= L;
+ /* The tolerances below are conservative. */
+ if (dsign || word1(rv) || word0(rv) & Bndry_mask) {
+ if (aadj < .4999999 || aadj > .5000001)
+ break;
+ }
+ else if (aadj < .4999999/FLT_RADIX)
+ break;
+ }
+#endif
+ cont:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(delta);
+ }
+#ifdef SET_INEXACT
+ if (inexact) {
+ if (!oldinexact) {
+ word0(rv0) = Exp_1 + (70 << Exp_shift);
+ word1(rv0) = 0;
+ dval(rv0) += 1.;
+ }
+ }
+ else if (!oldinexact)
+ clear_inexact();
+#endif
+#ifdef Avoid_Underflow
+ /* Undo the 2^(2*P) scaling applied earlier to dodge underflow. */
+ if (scale) {
+ word0(rv0) = Exp_1 - 2*P*Exp_msk1;
+ word1(rv0) = 0;
+ dval(rv) *= dval(rv0);
+#ifndef NO_ERRNO
+ /* try to avoid the bug of testing an 8087 register value */
+ if (word0(rv) == 0 && word1(rv) == 0)
+ errno = ERANGE;
+#endif
+ }
+#endif /* Avoid_Underflow */
+#ifdef SET_INEXACT
+ if (inexact && !(word0(rv) & Exp_mask)) {
+ /* set underflow bit */
+ dval(rv0) = 1e-300;
+ dval(rv0) *= dval(rv0);
+ }
+#endif
+ retfree:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(bd0);
+ Bfree(delta);
+ ret:
+ if (se)
+ *se = (char *)s;
+ return sign ? -dval(rv) : dval(rv);
+ }
+
+/* quorem: divide b by S in place, assuming the true quotient is a
+ * single decimal digit (0..9).  Returns that quotient digit and leaves
+ * the remainder in b.  Used by the digit-generation loop of dtoa. */
+ static int
+quorem
+#ifdef KR_headers
+ (b, S) Bigint *b, *S;
+#else
+ (Bigint *b, Bigint *S)
+#endif
+{
+ int n;
+ ULong *bx, *bxe, q, *sx, *sxe;
+#ifdef ULLong
+ ULLong borrow, carry, y, ys;
+#else
+ ULong borrow, carry, y, ys;
+#ifdef Pack_32
+ ULong si, z, zs;
+#endif
+#endif
+
+ n = S->wds;
+#ifdef DEBUG
+ /*debug*/ if (b->wds > n)
+ /*debug*/ Bug("oversize b in quorem");
+#endif
+ if (b->wds < n)
+ return 0;
+ sx = S->x;
+ sxe = sx + --n;
+ bx = b->x;
+ bxe = bx + n;
+ q = *bxe / (*sxe + 1); /* ensure q <= true quotient */
+#ifdef DEBUG
+ /*debug*/ if (q > 9)
+ /*debug*/ Bug("oversized quotient in quorem");
+#endif
+ /* First pass: b -= q*S, word by word with carry/borrow. */
+ if (q) {
+ borrow = 0;
+ carry = 0;
+ do {
+#ifdef ULLong
+ ys = *sx++ * (ULLong)q + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & FFFFFFFF) - borrow;
+ borrow = y >> 32 & (ULong)1;
+ *bx++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+ si = *sx++;
+ ys = (si & 0xffff) * q + carry;
+ zs = (si >> 16) * q + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#else
+ ys = *sx++ * q + carry;
+ carry = ys >> 16;
+ y = *bx - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *bx++ = y & 0xffff;
+#endif
+#endif
+ }
+ while(sx <= sxe);
+ if (!*bxe) {
+ bx = b->x;
+ while(--bxe > bx && !*bxe)
+ --n;
+ b->wds = n;
+ }
+ }
+ /* q underestimated by at most 1: if b >= S still, subtract S once
+ * more and bump the quotient. */
+ if (cmp(b, S) >= 0) {
+ q++;
+ borrow = 0;
+ carry = 0;
+ bx = b->x;
+ sx = S->x;
+ do {
+#ifdef ULLong
+ ys = *sx++ + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & FFFFFFFF) - borrow;
+ borrow = y >> 32 & (ULong)1;
+ *bx++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+ si = *sx++;
+ ys = (si & 0xffff) + carry;
+ zs = (si >> 16) + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#else
+ ys = *sx++ + carry;
+ carry = ys >> 16;
+ y = *bx - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *bx++ = y & 0xffff;
+#endif
+#endif
+ }
+ while(sx <= sxe);
+ bx = b->x;
+ bxe = bx + n;
+ if (!*bxe) {
+ while(--bxe > bx && !*bxe)
+ --n;
+ b->wds = n;
+ }
+ }
+ return q;
+ }
+
+#ifndef MULTIPLE_THREADS
+ /* Last string returned by dtoa; freed implicitly on the next call
+ * when the caller does not use freedtoa.  Not thread-safe. */
+ static char *dtoa_result;
+#endif
+
+/* rv_alloc: allocate a result buffer of at least i bytes from the
+ * Bigint freelist allocator.  The chosen freelist index k is stashed in
+ * the word before the returned pointer so freedtoa can return the block
+ * to the right freelist. */
+ static char *
+#ifdef KR_headers
+rv_alloc(i) int i;
+#else
+rv_alloc(int i)
+#endif
+{
+ int j, k, *r;
+
+ j = sizeof(ULong);
+ /* find the smallest k with 2^k ULongs of Bigint payload >= i bytes */
+ for(k = 0;
+ sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= i;
+ j <<= 1)
+ k++;
+ r = (int*)Balloc(k);
+ *r = k;
+ return
+#ifndef MULTIPLE_THREADS
+ dtoa_result =
+#endif
+ (char *)(r+1);
+ }
+
+/* nrv_alloc: allocate an n-byte result buffer, copy the NUL-terminated
+ * string s into it, and (if rve is non-null) set *rve to the copied
+ * terminator.  Returns the new buffer. */
+ static char *
+#ifdef KR_headers
+nrv_alloc(s, rve, n) char *s, **rve; int n;
+#else
+nrv_alloc(const char *s, char **rve, int n)
+#endif
+{
+ char *rv, *t;
+
+ t = rv = rv_alloc(n);
+ while ((*t = *s++)) t++;
+ if (rve)
+ *rve = t;
+ return rv;
+ }
+
+/* freedtoa(s) must be used to free values s returned by dtoa
+ * when MULTIPLE_THREADS is #defined. It should be used in all cases,
+ * but for consistency with earlier versions of dtoa, it is optional
+ * when MULTIPLE_THREADS is not defined.
+ */
+
+/* freedtoa: release a string allocated by rv_alloc/dtoa.  Reconstructs
+ * the Bigint header from the freelist index stored just before s (see
+ * rv_alloc) and hands the block back to Bfree. */
+ void
+#ifdef KR_headers
+freedtoa(s) char *s;
+#else
+freedtoa(char *s)
+#endif
+{
+ Bigint *b = (Bigint *)((int *)s - 1);
+ b->maxwds = 1 << (b->k = *(int*)b);
+ Bfree(b);
+#ifndef MULTIPLE_THREADS
+ /* forget the cached pointer so the next dtoa call won't double-free */
+ if (s == dtoa_result)
+ dtoa_result = 0;
+#endif
+ }
+
+/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string.
+ *
+ * Inspired by "How to Print Floating-Point Numbers Accurately" by
+ * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 112-126].
+ *
+ * Modifications:
+ * 1. Rather than iterating, we use a simple numeric overestimate
+ * to determine k = floor(log10(d)). We scale relevant
+ * quantities using O(log2(k)) rather than O(k) multiplications.
+ * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't
+ * try to generate digits strictly left to right. Instead, we
+ * compute with fewer bits and propagate the carry if necessary
+ * when rounding the final digit up. This is often faster.
+ * 3. Under the assumption that input will be rounded nearest,
+ * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22.
+ * That is, we allow equality in stopping tests when the
+ * round-nearest rule will give the same floating-point value
+ * as would satisfaction of the stopping test with strict
+ * inequality.
+ * 4. We remove common factors of powers of 2 from relevant
+ * quantities.
+ * 5. When converting floating-point integers less than 1e16,
+ * we use floating-point arithmetic rather than resorting
+ * to multiple-precision integers.
+ * 6. When asked to produce fewer than 15 digits, we first try
+ * to get by with floating-point arithmetic; we resort to
+ * multiple-precision integer arithmetic only if we cannot
+ * guarantee that the floating-point calculation has given
+ * the correctly rounded result. For k requested digits and
+ * "uniformly" distributed input, the probability is
+ * something like 10^(k-15) that we must resort to the Long
+ * calculation.
+ */
+
+ char *
+dtoa
+#ifdef KR_headers
+ (d, mode, ndigits, decpt, sign, rve)
+ double d; int mode, ndigits, *decpt, *sign; char **rve;
+#else
+ (double d, int mode, int ndigits, int *decpt, int *sign, char **rve)
+#endif
+{
+ /* Arguments ndigits, decpt, sign are similar to those
+ of ecvt and fcvt; trailing zeros are suppressed from
+ the returned string. If not null, *rve is set to point
+ to the end of the return value. If d is +-Infinity or NaN,
+ then *decpt is set to 9999.
+
+ mode:
+ 0 ==> shortest string that yields d when read in
+ and rounded to nearest.
+ 1 ==> like 0, but with Steele & White stopping rule;
+ e.g. with IEEE P754 arithmetic , mode 0 gives
+ 1e23 whereas mode 1 gives 9.999999999999999e22.
+ 2 ==> max(1,ndigits) significant digits. This gives a
+ return value similar to that of ecvt, except
+ that trailing zeros are suppressed.
+ 3 ==> through ndigits past the decimal point. This
+ gives a return value similar to that from fcvt,
+ except that trailing zeros are suppressed, and
+ ndigits can be negative.
+ 4,5 ==> similar to 2 and 3, respectively, but (in
+ round-nearest mode) with the tests of mode 0 to
+ possibly return a shorter string that rounds to d.
+ With IEEE arithmetic and compilation with
+ -DHonor_FLT_ROUNDS, modes 4 and 5 behave the same
+ as modes 2 and 3 when FLT_ROUNDS != 1.
+ 6-9 ==> Debugging modes similar to mode - 4: don't try
+ fast floating-point estimate (if applicable).
+
+ Values of mode other than 0-9 are treated as mode 0.
+
+ Sufficient space is allocated to the return value
+ to hold the suppressed trailing zeros.
+ */
+
+ int bbits, b2, b5, be, dig, i, ieps, ilim, ilim0, ilim1,
+ j, j1, k, k0, k_check, leftright, m2, m5, s2, s5,
+ spec_case, try_quick, bias_round_up;
+ Long L;
+#ifndef Sudden_Underflow
+ int denorm;
+ ULong x;
+#endif
+ Bigint *b, *b1, *delta, *mlo, *mhi, *S;
+ double d2, ds, eps;
+ char *s, *s0;
+#ifdef Honor_FLT_ROUNDS
+ int rounding;
+#endif
+#ifdef SET_INEXACT
+ int inexact, oldinexact;
+#endif
+
+ /* In mode 2 and 3 we bias rounding up when there are ties. */
+ bias_round_up = mode == 2 || mode == 3;
+
+ ilim = ilim1 = 0; /* to avoid Google3 compiler warnings */
+
+#ifndef MULTIPLE_THREADS
+ if (dtoa_result) {
+ freedtoa(dtoa_result);
+ dtoa_result = 0;
+ }
+#endif
+
+ if (word0(d) & Sign_bit) {
+ /* set sign for everything, including 0's and NaNs */
+ *sign = 1;
+ word0(d) &= ~Sign_bit; /* clear sign bit */
+ }
+ else
+ *sign = 0;
+
+#if defined(IEEE_Arith) + defined(VAX)
+#ifdef IEEE_Arith
+ if ((word0(d) & Exp_mask) == Exp_mask)
+#else
+ if (word0(d) == 0x8000)
+#endif
+ {
+ /* Infinity or NaN */
+ *decpt = 9999;
+#ifdef IEEE_Arith
+ if (!word1(d) && !(word0(d) & 0xfffff))
+ return nrv_alloc("Infinity", rve, 8);
+#endif
+ return nrv_alloc("NaN", rve, 3);
+ }
+#endif
+#ifdef IBM
+ dval(d) += 0; /* normalize */
+#endif
+ if (!dval(d)) {
+ *decpt = 1;
+ return nrv_alloc("0", rve, 1);
+ }
+
+#ifdef SET_INEXACT
+ try_quick = oldinexact = get_inexact();
+ inexact = 1;
+#endif
+#ifdef Honor_FLT_ROUNDS
+ if ((rounding = Flt_Rounds) >= 2) {
+ if (*sign)
+ rounding = rounding == 2 ? 0 : 2;
+ else
+ if (rounding != 2)
+ rounding = 0;
+ }
+#endif
+
+ b = d2b(dval(d), &be, &bbits);
+#ifdef Sudden_Underflow
+ i = (int)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1));
+#else
+ if ((i = (int)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1)))) {
+#endif
+ dval(d2) = dval(d);
+ word0(d2) &= Frac_mask1;
+ word0(d2) |= Exp_11;
+#ifdef IBM
+ if (j = 11 - hi0bits(word0(d2) & Frac_mask))
+ dval(d2) /= 1 << j;
+#endif
+
+ /* log(x) ~=~ log(1.5) + (x-1.5)/1.5
+ * log10(x) = log(x) / log(10)
+ * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10))
+ * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2)
+ *
+ * This suggests computing an approximation k to log10(d) by
+ *
+ * k = (i - Bias)*0.301029995663981
+ * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 );
+ *
+ * We want k to be too large rather than too small.
+ * The error in the first-order Taylor series approximation
+ * is in our favor, so we just round up the constant enough
+ * to compensate for any error in the multiplication of
+ * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077,
+ * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14,
+ * adding 1e-13 to the constant term more than suffices.
+ * Hence we adjust the constant term to 0.1760912590558.
+ * (We could get a more accurate k by invoking log10,
+ * but this is probably not worthwhile.)
+ */
+
+ i -= Bias;
+#ifdef IBM
+ i <<= 2;
+ i += j;
+#endif
+#ifndef Sudden_Underflow
+ denorm = 0;
+ }
+ else {
+ /* d is denormalized */
+
+ i = bbits + be + (Bias + (P-1) - 1);
+ x = i > 32 ? (word0(d) << (64 - i)) | (word1(d) >> (i - 32))
+ : word1(d) << (32 - i);
+ dval(d2) = x;
+ word0(d2) -= 31*Exp_msk1; /* adjust exponent */
+ i -= (Bias + (P-1) - 1) + 1;
+ denorm = 1;
+ }
+#endif
+ ds = (dval(d2)-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981;
+ k = (int)ds;
+ if (ds < 0. && ds != k)
+ k--; /* want k = floor(ds) */
+ k_check = 1;
+ if (k >= 0 && k <= Ten_pmax) {
+ if (dval(d) < tens[k])
+ k--;
+ k_check = 0;
+ }
+ j = bbits - i - 1;
+ if (j >= 0) {
+ b2 = 0;
+ s2 = j;
+ }
+ else {
+ b2 = -j;
+ s2 = 0;
+ }
+ if (k >= 0) {
+ b5 = 0;
+ s5 = k;
+ s2 += k;
+ }
+ else {
+ b2 -= k;
+ b5 = -k;
+ s5 = 0;
+ }
+ if (mode < 0 || mode > 9)
+ mode = 0;
+
+#ifndef SET_INEXACT
+#ifdef Check_FLT_ROUNDS
+ try_quick = Rounding == 1;
+#else
+ try_quick = 1;
+#endif
+#endif /*SET_INEXACT*/
+
+ if (mode > 5) {
+ mode -= 4;
+ try_quick = 0;
+ }
+ leftright = 1;
+ switch(mode) {
+ case 0:
+ case 1:
+ ilim = ilim1 = -1;
+ i = 18;
+ ndigits = 0;
+ break;
+ case 2:
+ leftright = 0;
+ /* no break */
+ case 4:
+ if (ndigits <= 0)
+ ndigits = 1;
+ ilim = ilim1 = i = ndigits;
+ break;
+ case 3:
+ leftright = 0;
+ /* no break */
+ case 5:
+ i = ndigits + k + 1;
+ ilim = i;
+ ilim1 = i - 1;
+ if (i <= 0)
+ i = 1;
+ }
+ s = s0 = rv_alloc(i);
+
+#ifdef Honor_FLT_ROUNDS
+ if (mode > 1 && rounding != 1)
+ leftright = 0;
+#endif
+
+ if (ilim >= 0 && ilim <= Quick_max && try_quick) {
+
+ /* Try to get by with floating-point arithmetic. */
+
+ i = 0;
+ dval(d2) = dval(d);
+ k0 = k;
+ ilim0 = ilim;
+ ieps = 2; /* conservative */
+ if (k > 0) {
+ ds = tens[k&0xf];
+ j = k >> 4;
+ if (j & Bletch) {
+ /* prevent overflows */
+ j &= Bletch - 1;
+ dval(d) /= bigtens[n_bigtens-1];
+ ieps++;
+ }
+ for(; j; j >>= 1, i++)
+ if (j & 1) {
+ ieps++;
+ ds *= bigtens[i];
+ }
+ dval(d) /= ds;
+ }
+ else if ((j1 = -k)) {
+ dval(d) *= tens[j1 & 0xf];
+ for(j = j1 >> 4; j; j >>= 1, i++)
+ if (j & 1) {
+ ieps++;
+ dval(d) *= bigtens[i];
+ }
+ }
+ if (k_check && dval(d) < 1. && ilim > 0) {
+ if (ilim1 <= 0)
+ goto fast_failed;
+ ilim = ilim1;
+ k--;
+ dval(d) *= 10.;
+ ieps++;
+ }
+ dval(eps) = ieps*dval(d) + 7.;
+ word0(eps) -= (P-1)*Exp_msk1;
+ if (ilim == 0) {
+ S = mhi = 0;
+ dval(d) -= 5.;
+ if (dval(d) > dval(eps))
+ goto one_digit;
+ if (dval(d) < -dval(eps))
+ goto no_digits;
+ goto fast_failed;
+ }
+#ifndef No_leftright
+ if (leftright) {
+ /* Use Steele & White method of only
+ * generating digits needed.
+ */
+ dval(eps) = 0.5/tens[ilim-1] - dval(eps);
+ for(i = 0;;) {
+ L = dval(d);
+ dval(d) -= L;
+ *s++ = '0' + (int)L;
+ if (dval(d) < dval(eps))
+ goto ret1;
+ if (1. - dval(d) < dval(eps))
+ goto bump_up;
+ if (++i >= ilim)
+ break;
+ dval(eps) *= 10.;
+ dval(d) *= 10.;
+ }
+ }
+ else {
+#endif
+ /* Generate ilim digits, then fix them up. */
+ dval(eps) *= tens[ilim-1];
+ for(i = 1;; i++, dval(d) *= 10.) {
+ L = (Long)(dval(d));
+ if (!(dval(d) -= L))
+ ilim = i;
+ *s++ = '0' + (int)L;
+ if (i == ilim) {
+ if (dval(d) > 0.5 + dval(eps))
+ goto bump_up;
+ else if (dval(d) < 0.5 - dval(eps)) {
+ while(*--s == '0');
+ s++;
+ goto ret1;
+ }
+ break;
+ }
+ }
+#ifndef No_leftright
+ }
+#endif
+ fast_failed:
+ s = s0;
+ dval(d) = dval(d2);
+ k = k0;
+ ilim = ilim0;
+ }
+
+ /* Do we have a "small" integer? */
+
+ if (be >= 0 && k <= Int_max) {
+ /* Yes. */
+ ds = tens[k];
+ if (ndigits < 0 && ilim <= 0) {
+ S = mhi = 0;
+ if (ilim < 0 || dval(d) < 5*ds || ((dval(d) == 5*ds) && !bias_round_up))
+ goto no_digits;
+ goto one_digit;
+ }
+
+ /* Limit looping by the number of digits to produce.
+ * Firefox had a crash bug because some plugins reduce
+ * the precision of double arithmetic. With reduced
+ * precision "dval(d) -= L*ds" might be imprecise and
+ * d might not become zero and the loop might not
+ * terminate.
+ *
+ * See https://bugzilla.mozilla.org/show_bug.cgi?id=358569
+ */
+ for(i = 1; i <= k+1; i++, dval(d) *= 10.) {
+ L = (Long)(dval(d) / ds);
+ dval(d) -= L*ds;
+#ifdef Check_FLT_ROUNDS
+ /* If FLT_ROUNDS == 2, L will usually be high by 1 */
+ if (dval(d) < 0) {
+ L--;
+ dval(d) += ds;
+ }
+#endif
+ *s++ = '0' + (int)L;
+ if (!dval(d)) {
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ break;
+ }
+ if (i == ilim) {
+#ifdef Honor_FLT_ROUNDS
+ if (mode > 1)
+ switch(rounding) {
+ case 0: goto ret1;
+ case 2: goto bump_up;
+ }
+#endif
+ dval(d) += dval(d);
+ if (dval(d) > ds || (dval(d) == ds && ((L & 1) || bias_round_up))) {
+ bump_up:
+ while(*--s == '9')
+ if (s == s0) {
+ k++;
+ *s = '0';
+ break;
+ }
+ ++*s++;
+ }
+ break;
+ }
+ }
+ goto ret1;
+ }
+
+ m2 = b2;
+ m5 = b5;
+ mhi = mlo = 0;
+ if (leftright) {
+ i =
+#ifndef Sudden_Underflow
+ denorm ? be + (Bias + (P-1) - 1 + 1) :
+#endif
+#ifdef IBM
+ 1 + 4*P - 3 - bbits + ((bbits + be - 1) & 3);
+#else
+ 1 + P - bbits;
+#endif
+ b2 += i;
+ s2 += i;
+ mhi = i2b(1);
+ }
+ if (m2 > 0 && s2 > 0) {
+ i = m2 < s2 ? m2 : s2;
+ b2 -= i;
+ m2 -= i;
+ s2 -= i;
+ }
+ if (b5 > 0) {
+ if (leftright) {
+ if (m5 > 0) {
+ mhi = pow5mult(mhi, m5);
+ b1 = mult(mhi, b);
+ Bfree(b);
+ b = b1;
+ }
+ if ((j = b5 - m5))
+ b = pow5mult(b, j);
+ }
+ else
+ b = pow5mult(b, b5);
+ }
+ S = i2b(1);
+ if (s5 > 0)
+ S = pow5mult(S, s5);
+
+ /* Check for special case that d is a normalized power of 2. */
+
+ spec_case = 0;
+ if ((mode < 2 || leftright)
+#ifdef Honor_FLT_ROUNDS
+ && rounding == 1
+#endif
+ ) {
+ if (!word1(d) && !(word0(d) & Bndry_mask)
+#ifndef Sudden_Underflow
+ && word0(d) & (Exp_mask & ~Exp_msk1)
+#endif
+ ) {
+ /* The special case */
+ b2 += Log2P;
+ s2 += Log2P;
+ spec_case = 1;
+ }
+ }
+
+ /* Arrange for convenient computation of quotients:
+ * shift left if necessary so divisor has 4 leading 0 bits.
+ *
+ * Perhaps we should just compute leading 28 bits of S once
+ * and for all and pass them and a shift to quorem, so it
+ * can do shifts and ors to compute the numerator for q.
+ */
+#ifdef Pack_32
+ if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f))
+ i = 32 - i;
+#else
+ if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0xf))
+ i = 16 - i;
+#endif
+ if (i > 4) {
+ i -= 4;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ }
+ else if (i < 4) {
+ i += 28;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ }
+ if (b2 > 0)
+ b = lshift(b, b2);
+ if (s2 > 0)
+ S = lshift(S, s2);
+ if (k_check) {
+ if (cmp(b,S) < 0) {
+ k--;
+ b = multadd(b, 10, 0); /* we botched the k estimate */
+ if (leftright)
+ mhi = multadd(mhi, 10, 0);
+ ilim = ilim1;
+ }
+ }
+ if (ilim <= 0 && (mode == 3 || mode == 5)) {
+ S = multadd(S, 5, 0);
+ if (ilim < 0 || cmp(b, S) < 0 || ((cmp(b, S) == 0) && !bias_round_up)) {
+ /* no digits, fcvt style */
+ no_digits:
+ k = -1 - ndigits;
+ goto ret;
+ }
+ one_digit:
+ *s++ = '1';
+ k++;
+ goto ret;
+ }
+ if (leftright) {
+ if (m2 > 0)
+ mhi = lshift(mhi, m2);
+
+ /* Compute mlo -- check for special case
+ * that d is a normalized power of 2.
+ */
+
+ mlo = mhi;
+ if (spec_case) {
+ mhi = Balloc(mhi->k);
+ Bcopy(mhi, mlo);
+ mhi = lshift(mhi, Log2P);
+ }
+
+ for(i = 1;;i++) {
+ dig = quorem(b,S) + '0';
+ /* Do we yet have the shortest decimal string
+ * that will round to d?
+ */
+ j = cmp(b, mlo);
+ delta = diff(S, mhi);
+ j1 = delta->sign ? 1 : cmp(b, delta);
+ Bfree(delta);
+#ifndef ROUND_BIASED
+ if (j1 == 0 && mode != 1 && !(word1(d) & 1)
+#ifdef Honor_FLT_ROUNDS
+ && rounding >= 1
+#endif
+ ) {
+ if (dig == '9')
+ goto round_9_up;
+ if (j > 0)
+ dig++;
+#ifdef SET_INEXACT
+ else if (!b->x[0] && b->wds <= 1)
+ inexact = 0;
+#endif
+ *s++ = dig;
+ goto ret;
+ }
+#endif
+ if (j < 0 || (j == 0 && mode != 1
+#ifndef ROUND_BIASED
+ && !(word1(d) & 1)
+#endif
+ )) {
+ if (!b->x[0] && b->wds <= 1) {
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ goto accept_dig;
+ }
+#ifdef Honor_FLT_ROUNDS
+ if (mode > 1)
+ switch(rounding) {
+ case 0: goto accept_dig;
+ case 2: goto keep_dig;
+ }
+#endif /*Honor_FLT_ROUNDS*/
+ if (j1 > 0) {
+ b = lshift(b, 1);
+ j1 = cmp(b, S);
+ if ((j1 > 0 || (j1 == 0 && ((dig & 1) || bias_round_up)))
+ && dig++ == '9')
+ goto round_9_up;
+ }
+ accept_dig:
+ *s++ = dig;
+ goto ret;
+ }
+ if (j1 > 0) {
+#ifdef Honor_FLT_ROUNDS
+ if (!rounding)
+ goto accept_dig;
+#endif
+ if (dig == '9') { /* possible if i == 1 */
+ round_9_up:
+ *s++ = '9';
+ goto roundoff;
+ }
+ *s++ = dig + 1;
+ goto ret;
+ }
+#ifdef Honor_FLT_ROUNDS
+ keep_dig:
+#endif
+ *s++ = dig;
+ if (i == ilim)
+ break;
+ b = multadd(b, 10, 0);
+ if (mlo == mhi)
+ mlo = mhi = multadd(mhi, 10, 0);
+ else {
+ mlo = multadd(mlo, 10, 0);
+ mhi = multadd(mhi, 10, 0);
+ }
+ }
+ }
+ else
+ for(i = 1;; i++) {
+ *s++ = dig = quorem(b,S) + '0';
+ if (!b->x[0] && b->wds <= 1) {
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ goto ret;
+ }
+ if (i >= ilim)
+ break;
+ b = multadd(b, 10, 0);
+ }
+
+ /* Round off last digit */
+
+#ifdef Honor_FLT_ROUNDS
+ switch(rounding) {
+ case 0: goto trimzeros;
+ case 2: goto roundoff;
+ }
+#endif
+ b = lshift(b, 1);
+ j = cmp(b, S);
+ if (j > 0 || (j == 0 && ((dig & 1) || bias_round_up))) {
+ roundoff:
+ while(*--s == '9')
+ if (s == s0) {
+ k++;
+ *s++ = '1';
+ goto ret;
+ }
+ ++*s++;
+ }
+ else {
+/* trimzeros: (never used) */
+ while(*--s == '0');
+ s++;
+ }
+ ret:
+ Bfree(S);
+ if (mhi) {
+ if (mlo && mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ }
+ ret1:
+#ifdef SET_INEXACT
+ if (inexact) {
+ if (!oldinexact) {
+ word0(d) = Exp_1 + (70 << Exp_shift);
+ word1(d) = 0;
+ dval(d) += 1.;
+ }
+ }
+ else if (!oldinexact)
+ clear_inexact();
+#endif
+ Bfree(b);
+ *s = 0;
+ *decpt = k + 1;
+ if (rve)
+ *rve = s;
+ return s0;
+ }
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_ASCIICType_h
+#define WTF_ASCIICType_h
+
+// The behavior of many of the functions in the <ctype.h> header is dependent
+// on the current locale. But in the WebKit project, all uses of those functions
+// are in code processing something that's not locale-specific. These equivalents
+// for some of the <ctype.h> functions are named more explicitly, not dependent
+// on the C library locale, and we should also optimize them as needed.
+
+// All functions return false or leave the character unchanged if passed a character
+// that is outside the range 0-7F. So they can be used on Unicode strings or
+// characters if the intent is to do processing only if the character is ASCII.
+
+    // True when c is an ASCII letter ('A'-'Z' or 'a'-'z'); locale-independent.
+    // Setting bit 0x20 folds an ASCII upper-case letter onto its lower-case
+    // form, so a single range check covers both cases.
+    inline bool isASCIIAlpha(char c) { int folded = c | 0x20; return folded >= 'a' && folded <= 'z'; }
+    inline bool isASCIIAlpha(unsigned short c) { int folded = c | 0x20; return folded >= 'a' && folded <= 'z'; }
+    inline bool isASCIIAlpha(int c) { int folded = c | 0x20; return folded >= 'a' && folded <= 'z'; }
+
+    // True when c is an ASCII digit or letter.
+    // Fix: parenthesize the &&/|| mix explicitly. The original relied on
+    // '&&' binding tighter than '||' — correct, but it draws -Wparentheses
+    // warnings and is easy to misread; behavior is unchanged.
+    inline bool isASCIIAlphanumeric(char c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); }
+    inline bool isASCIIAlphanumeric(unsigned short c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); }
+    inline bool isASCIIAlphanumeric(int c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); }
+
+    // True when c is an ASCII digit '0'-'9'.
+    // NOTE(review): bitwise '&' (not '&&') looks deliberate — both operands
+    // are side-effect-free comparisons, so '&' yields the same result while
+    // allowing branch-free code generation; do not "fix" it to '&&'.
+    inline bool isASCIIDigit(char c) { return (c >= '0') & (c <= '9'); }
+    inline bool isASCIIDigit(unsigned short c) { return (c >= '0') & (c <= '9'); }
+    inline bool isASCIIDigit(int c) { return (c >= '0') & (c <= '9'); }
+
+    // True when c is an ASCII hexadecimal digit: '0'-'9', 'a'-'f', or 'A'-'F'
+    // (the '| 0x20' folds upper case to lower case before the range test).
+    // Fix: parenthesize the &&/|| mix explicitly. The original relied on
+    // operator precedence — correct, but warning-prone and easy to misread;
+    // behavior is unchanged.
+    inline bool isASCIIHexDigit(char c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); }
+    inline bool isASCIIHexDigit(unsigned short c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); }
+    inline bool isASCIIHexDigit(int c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); }
+
+    // True when c is an ASCII lower-case letter 'a'-'z'.
+    inline bool isASCIILower(char c) { return 'a' <= c && c <= 'z'; }
+    inline bool isASCIILower(unsigned short c) { return 'a' <= c && c <= 'z'; }
+    inline bool isASCIILower(int c) { return 'a' <= c && c <= 'z'; }
+
+    // True when c is an ASCII upper-case letter 'A'-'Z'.
+    inline bool isASCIIUpper(char c) { return 'A' <= c && c <= 'Z'; }
+    inline bool isASCIIUpper(unsigned short c) { return 'A' <= c && c <= 'Z'; }
+    inline bool isASCIIUpper(int c) { return 'A' <= c && c <= 'Z'; }
+
+ /*
+ Statistics from a run of Apple's page load test for callers of isASCIISpace:
+
+ character count
+ --------- -----
+ non-spaces 689383
+ 20 space 294720
+ 0A \n 89059
+ 09 \t 28320
+ 0D \r 0
+ 0C \f 0
+ 0B \v 0
+ */
+    // True for ASCII whitespace: space, \t (0x9), \n, \v, \f, \r (0xD).
+    // The guard clause keeps the original's cheap rejection of everything
+    // above ' ' — per the statistics above, most inputs are non-spaces.
+    inline bool isASCIISpace(char c) { if (c > ' ') return false; return c == ' ' || (c >= 0x9 && c <= 0xD); }
+    inline bool isASCIISpace(unsigned short c) { if (c > ' ') return false; return c == ' ' || (c >= 0x9 && c <= 0xD); }
+    inline bool isASCIISpace(int c) { if (c > ' ') return false; return c == ' ' || (c >= 0x9 && c <= 0xD); }
+
+    // Returns the lower-case form of an ASCII upper-case letter; every other
+    // value (digits, punctuation, non-ASCII) is returned unchanged.
+    // Setting bit 0x20 is exactly the upper-to-lower shift for ASCII.
+    inline char toASCIILower(char c) { return (c >= 'A' && c <= 'Z') ? static_cast<char>(c | 0x20) : c; }
+    inline unsigned short toASCIILower(unsigned short c) { return (c >= 'A' && c <= 'Z') ? static_cast<unsigned short>(c | 0x20) : c; }
+    inline int toASCIILower(int c) { return (c >= 'A' && c <= 'Z') ? (c | 0x20) : c; }
+
+    // Returns the upper-case form of an ASCII lower-case letter; every other
+    // value is returned unchanged. Clearing bit 0x20 is exactly the
+    // lower-to-upper shift for ASCII.
+    inline char toASCIIUpper(char c) { return (c >= 'a' && c <= 'z') ? static_cast<char>(c & ~0x20) : c; }
+    inline unsigned short toASCIIUpper(unsigned short c) { return (c >= 'a' && c <= 'z') ? static_cast<unsigned short>(c & ~0x20) : c; }
+    inline int toASCIIUpper(int c) { return (c >= 'a' && c <= 'z') ? (c & ~0x20) : c; }
+
+    // Returns the numeric value (0-15) of an ASCII hex digit.
+    // '0'-'9' sort below 'A', so they take the c - '0' branch. For letters,
+    // c - 'A' + 10 handles upper case directly, and the final "& 0xF" folds
+    // lower case onto the same values (since 'a' - 'A' == 0x20, the low
+    // nibble of the sum is identical for both cases).
+    // NOTE(review): ASSERT is a project macro not defined in this header —
+    // presumably supplied by the including translation unit; verify.
+    inline int toASCIIHexValue(char c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; }
+    inline int toASCIIHexValue(unsigned short c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; }
+    inline int toASCIIHexValue(int c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; }
+
+#endif
--- /dev/null
+Originally written by: Philip Hazel
+Email local part: ph10
+Email domain: cam.ac.uk
+
+University of Cambridge Computing Service,
+Cambridge, England. Phone: +44 1223 334714.
+
+Copyright (c) 1997-2005 University of Cambridge. All rights reserved.
+
+Adapted for JavaScriptCore and WebKit by Apple Inc.
+
+Copyright (c) 2005, 2006, 2007 Apple Inc. All rights reserved.
--- /dev/null
+PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed.
+
+Copyright (c) 1997-2005 University of Cambridge. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Apple
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+-----------------------------------------------------------------------------
+The following license text is extracted from the header of the file
+ASCIICType.h and applies only to that file.
+-----------------------------------------------------------------------------
+
+Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------------------------------------------------------------------
+The following license text is from the file COPYING and applies to the other
+source files in this directory.
+-----------------------------------------------------------------------------
+
+PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed.
+
+Copyright (c) 1997-2005 University of Cambridge. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Apple
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+-----------------------------------------------------------------------------
+The following copyright lines are found in individual files other than
+ASCIICType.h
+-----------------------------------------------------------------------------
+
+
+Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+Copyright (C) 2002, 2004, 2006, 2007, 2008 Apple Inc. All rights reserved.
+Copyright (C) 2007 Eric Seidel <eric@webkit.org>
+Copyright (c) 1997-2005 University of Cambridge
+Copyright (c) 1997-2005 University of Cambridge. All rights reserved.
+Copyright (c) 1997-2006 University of Cambridge
+Copyright (c) 2005, 2006, 2007 Apple Inc. All rights reserved.
--- /dev/null
+
+/* On Unix-like systems config.in is converted by "configure" into config.h.
+Some other environments also support the use of "configure". PCRE is written in
+Standard C, but there are a few non-standard things it can cope with, allowing
+it to run on SunOS4 and other "close to standard" systems.
+
+On a non-Unix-like system you should just copy this file into config.h, and set
+up the macros the way you need them. You should normally change the definitions
+of HAVE_STRERROR and HAVE_MEMMOVE to 1. Unfortunately, because of the way
+autoconf works, these cannot be made the defaults. If your system has bcopy()
+and not memmove(), change the definition of HAVE_BCOPY instead of HAVE_MEMMOVE.
+If your system has neither bcopy() nor memmove(), leave them both as 0; an
+emulation function will be used. */
+
+/* If you are compiling for a system that uses EBCDIC instead of ASCII
+character codes, define this macro as 1. On systems that can use "configure",
+this can be done via --enable-ebcdic. */
+
+#ifndef EBCDIC
+#define EBCDIC 0
+#endif
+
+/* If you are compiling for a system other than a Unix-like system or Win32,
+and it needs some magic to be inserted before the definition of a function that
+is exported by the library, define this macro to contain the relevant magic. If
+you do not define this macro, it defaults to "extern" for a C compiler and
+"extern C" for a C++ compiler on non-Win32 systems. This macro appears at the
+start of every exported function that is part of the external API. It does not
+appear on functions that are "external" in the C sense, but which are internal
+to the library. */
+
+/* #define PCRE_DATA_SCOPE */
+
+/* Define the following macro to empty if the "const" keyword does not work. */
+
+#undef const
+
+/* Define the following macro to "unsigned" if <stddef.h> does not define
+size_t. */
+
+#undef size_t
+
+/* The following two definitions are mainly for the benefit of SunOS4, which
+does not have the strerror() or memmove() functions that should be present in
+all Standard C libraries. The macros HAVE_STRERROR and HAVE_MEMMOVE should
+normally be defined with the value 1 for other systems, but unfortunately we
+cannot make this the default because "configure" files generated by autoconf
+will only change 0 to 1; they won't change 1 to 0 if the functions are not
+found. */
+
+#define HAVE_STRERROR 1
+#define HAVE_MEMMOVE 1
+
+/* There are some non-Unix-like systems that don't even have bcopy(). If this
+macro is false, an emulation is used. If HAVE_MEMMOVE is set to 1, the value of
+HAVE_BCOPY is not relevant. */
+
+#define HAVE_BCOPY 0
+
+/* The value of NEWLINE determines the newline character. The default is to
+leave it up to the compiler, but some sites want to force a particular value.
+On Unix-like systems, "configure" can be used to override this default. */
+
+#ifndef NEWLINE
+#define NEWLINE '\n'
+#endif
+
+/* The value of LINK_SIZE determines the number of bytes used to store links as
+offsets within the compiled regex. The default is 2, which allows for compiled
+patterns up to 64K long. This covers the vast majority of cases. However, PCRE
+can also be compiled to use 3 or 4 bytes instead. This allows for longer
+patterns in extreme cases. On systems that support it, "configure" can be used
+to override this default. */
+
+#ifndef LINK_SIZE
+#define LINK_SIZE 2
+#endif
+
+/* When calling PCRE via the POSIX interface, additional working storage is
+required for holding the pointers to capturing substrings because PCRE requires
+three integers per substring, whereas the POSIX interface provides only two. If
+the number of expected substrings is small, the wrapper function uses space on
+the stack, because this is faster than using malloc() for each call. The
+threshold above which the stack is no longer used is defined by POSIX_MALLOC_
+THRESHOLD. On systems that support it, "configure" can be used to override this
+default. */
+
+#ifndef POSIX_MALLOC_THRESHOLD
+#define POSIX_MALLOC_THRESHOLD 10
+#endif
+
+/* PCRE uses recursive function calls to handle backtracking while matching.
+This can sometimes be a problem on systems that have stacks of limited size.
+Define NO_RECURSE to get a version that doesn't use recursion in the match()
+function; instead it creates its own stack by steam using pcre_recurse_malloc()
+to obtain memory from the heap. For more detail, see the comments and other
+stuff just above the match() function. On systems that support it, "configure"
+can be used to set this in the Makefile (use --disable-stack-for-recursion). */
+
+/* #define NO_RECURSE */
+
+/* The value of MATCH_LIMIT determines the default number of times the internal
+match() function can be called during a single execution of pcre_exec(). There
+is a runtime interface for setting a different limit. The limit exists in order
+to catch runaway regular expressions that take for ever to determine that they
+do not match. The default is set very large so that it does not accidentally
+catch legitimate cases. On systems that support it, "configure" can be used to
+override this default. */
+
+#ifndef MATCH_LIMIT
+#define MATCH_LIMIT 10000000
+#endif
+
+/* The above limit applies to all calls of match(), whether or not they
+increase the recursion depth. In some environments it is desirable to limit the
+depth of recursive calls of match() more strictly, in order to restrict the
+maximum amount of stack (or heap, if NO_RECURSE is defined) that is used. The
+value of MATCH_LIMIT_RECURSION applies only to recursive calls of match(). To
+have any useful effect, it must be less than the value of MATCH_LIMIT. There is
+a runtime method for setting a different limit. On systems that support it,
+"configure" can be used to override this default. */
+
+#ifndef MATCH_LIMIT_RECURSION
+#define MATCH_LIMIT_RECURSION MATCH_LIMIT
+#endif
+
+/* These three limits are parameterized just in case anybody ever wants to
+change them. Care must be taken if they are increased, because they guard
+against integer overflow caused by enormously large patterns. */
+
+#ifndef MAX_NAME_SIZE
+#define MAX_NAME_SIZE 32
+#endif
+
+#ifndef MAX_NAME_COUNT
+#define MAX_NAME_COUNT 10000
+#endif
+
+#ifndef MAX_DUPLENGTH
+#define MAX_DUPLENGTH 30000
+#endif
+
+/* End */
--- /dev/null
+/* This is the public header file for JavaScriptCore's variant of the PCRE
+library. While this library started out as a copy of PCRE, many of the
+features of PCRE have been removed. This library now supports only the
+regular expression features required by the JavaScript language
+specification, and has only the functions needed by JavaScriptCore and the
+rest of WebKit.
+
+ Copyright (c) 1997-2005 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+// FIXME: This file needs to be renamed to JSRegExp.h; it's no longer PCRE.
+
+#ifndef JSRegExp_h
+#define JSRegExp_h
+
+#include "../../../public/v8.h"
+
+typedef uint16_t UChar;  /* UTF-16 code unit, as used throughout this API. */
+
+/* Opaque handle to a compiled regular expression. */
+struct JSRegExp;
+
+enum JSRegExpIgnoreCaseOption { JSRegExpDoNotIgnoreCase, JSRegExpIgnoreCase };
+enum JSRegExpMultilineOption { JSRegExpSingleLine, JSRegExpMultiline };
+
+/* jsRegExpExecute error codes */
+const int JSRegExpErrorNoMatch = -1;   /* the pattern did not match */
+const int JSRegExpErrorHitLimit = -2;  /* a matching limit was reached */
+const int JSRegExpErrorNoMemory = -3;  /* memory allocation failed */
+const int JSRegExpErrorInternal = -4;  /* unexpected internal failure */
+
+/* Allocator hooks supplied by the embedder to jsRegExpCompile. */
+typedef void* malloc_t(size_t size);
+typedef void free_t(void* address);
+
+/* Compiles |pattern| (|patternLength| UChars) with the given case/multiline
+   options, using the supplied allocator hooks. NOTE(review): presumably
+   returns NULL on failure with *errorMessage describing the problem, and
+   fills *numSubpatterns on success — confirm against the implementation. */
+JSRegExp* jsRegExpCompile(const UChar* pattern, int patternLength,
+                          JSRegExpIgnoreCaseOption, JSRegExpMultilineOption,
+                          unsigned* numSubpatterns, const char** errorMessage,
+                          malloc_t* allocate_function, free_t* free_function);
+
+/* Runs a compiled expression against |subject| starting at |startOffset|;
+   match positions are written to |offsetsVector|. A negative return is one
+   of the JSRegExpError* codes above. */
+int jsRegExpExecute(const JSRegExp*,
+                    const UChar* subject, int subjectLength, int startOffset,
+                    int* offsetsVector, int offsetsVectorLength);
+
+/* Releases a compiled expression obtained from jsRegExpCompile. */
+void jsRegExpFree(JSRegExp*);
+
+#endif
--- /dev/null
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* This file is automatically written by the dftables auxiliary
+program. If you edit it by hand, you might like to edit the Makefile to
+prevent its ever being regenerated.
+
+This file contains the default tables for characters with codes less than
+128 (ASCII characters). These tables are used when no external tables are
+passed to PCRE. */
+
+/* Layout of the 480 bytes (offsets into the array):
+     0..127   lower-casing table
+   128..255   case-flipping table
+   256..351   three 32-byte class bit maps (space, digit, word)
+   352..479   per-character type flags (see the flag key below)
+   Machine-generated by dftables — do not hand-edit the data itself. */
+const unsigned char kjs_pcre_default_tables[480] = {
+
+/* This table is a lower casing table. */
+
+  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+  0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+  0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+  0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+  0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+  0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+  0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+  0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+  0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+  0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+  0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+  0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+  0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+  0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+  0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+
+/* This table is a case flipping table. */
+
+  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+  0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+  0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+  0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+  0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+  0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+  0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+  0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+  0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+  0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+  0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+  0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+  0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+  0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+  0x58, 0x59, 0x5A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+
+/* This table contains bit maps for various character classes.
+Each map is 32 bytes long and the bits run from the least
+significant end of each byte. The classes are: space, digit, word. */
+
+  0x00, 0x3E, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03,
+  0xFE, 0xFF, 0xFF, 0x87, 0xFE, 0xFF, 0xFF, 0x07,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+/* This table identifies various classes of character by individual bits:
+  0x01   white space character
+  0x08   hexadecimal digit
+  0x10   alphanumeric or '_'
+*/
+
+/* Note: 0x18 below marks characters that are both hex digits and
+   alphanumeric (0-9, A-F, a-f). */
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*   0-  7 */
+  0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /*   8- 15 */
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*  16- 23 */
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*  24- 31 */
+  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*    - '  */
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*  ( - /  */
+  0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, /*  0 - 7  */
+  0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*  8 - ?  */
+  0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x10, /*  @ - G  */
+  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, /*  H - O  */
+  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, /*  P - W  */
+  0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10, /*  X - _  */
+  0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x10, /*  ` - g  */
+  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, /*  h - o  */
+  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, /*  p - w  */
+  0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00};/* x -127 */
+
+
+/* End of chartables.c */
--- /dev/null
+/* This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed. This library now supports only the regular expression features
+required by the JavaScript language specification, and has only the functions
+needed by JavaScriptCore and the rest of WebKit.
+
+ Originally written by Philip Hazel
+ Copyright (c) 1997-2006 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+ Copyright (C) 2007 Eric Seidel <eric@webkit.org>
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* This module contains the external function jsRegExpExecute(), along with
+supporting internal functions that are not used by other modules. */
+
+#include "config.h"
+
+#include "pcre_internal.h"
+
+#include <string.h>
+#include "ASCIICType.h"
+
+/* Negative values for the firstchar and reqchar variables */
+
+#define REQ_UNSET (-2)
+#define REQ_NONE (-1)
+
+/*************************************************
+* Code parameters and static tables *
+*************************************************/
+
+/* Maximum number of items on the nested bracket stacks at compile time. This
+applies to the nesting of all kinds of parentheses. It does not limit
+un-nested, non-capturing parentheses. This number can be made bigger if
+necessary - it is used to dimension one int and one unsigned char vector at
+compile time. */
+
+#define BRASTACK_SIZE 200
+
+/* Table for handling escaped characters in the range '0'-'z'. Positive returns
+are simple data values; negative values are for special things like \d and so
+on. Zero means further processing is needed (for things like \x), or the escape
+is invalid. */
+
/* Indexed by (character - '0').  A positive entry is the literal character the
escape denotes (punctuation in this range simply escapes to itself); a negative
entry is the negation of one of the ESC_xx special codes (\b, \B, \d, \D, \s,
\S, \w, \W); a zero entry means the escape either needs further processing in
checkEscape() (\x, \u, \c, octal digits) or is not special.  The bare 7 is the
ASCII BEL character, for \a. */
static const short escapes[] = {
    0, 0, 0, 0, 0, 0, 0, 0,                    /* 0 - 7 */
    0, 0, ':', ';', '<', '=', '>', '?',        /* 8 - ? */
    '@', 0, -ESC_B, 0, -ESC_D, 0, 0, 0,        /* @ - G */
    0, 0, 0, 0, 0, 0, 0, 0,                    /* H - O */
    0, 0, 0, -ESC_S, 0, 0, 0, -ESC_W,          /* P - W */
    0, 0, 0, '[', '\\', ']', '^', '_',         /* X - _ */
    '`', 7, -ESC_b, 0, -ESC_d, 0, '\f', 0,     /* ` - g */
    0, 0, 0, 0, 0, 0, '\n', 0,                 /* h - o */
    0, 0, '\r', -ESC_s, '\t', 0, '\v', -ESC_w, /* p - w */
    0, 0, 0                                    /* x - z */
};
+
+/* Error code numbers. They are given names so that they can more easily be
+tracked. */
+
/* ERR0 means "no error": compile code tests the error-code variable for
non-zero to detect failure.  For n >= 1, ERRn selects the n-th NUL-terminated
message in the errorTexts blob inside errorText(). */
enum ErrorCode {
    ERR0, ERR1, ERR2, ERR3, ERR4, ERR5, ERR6, ERR7, ERR8, ERR9,
    ERR10, ERR11, ERR12, ERR13, ERR14, ERR15, ERR16, ERR17
};
+
+/* The texts of compile-time error messages. These are "char *" because they
+are passed to the outside world. */
+
+static const char* errorText(ErrorCode code)
+{
+ static const char errorTexts[] =
+ /* 1 */
+ "\\ at end of pattern\0"
+ "\\c at end of pattern\0"
+ "character value in \\x{...} sequence is too large\0"
+ "numbers out of order in {} quantifier\0"
+ /* 5 */
+ "number too big in {} quantifier\0"
+ "missing terminating ] for character class\0"
+ "internal error: code overflow\0"
+ "range out of order in character class\0"
+ "nothing to repeat\0"
+ /* 10 */
+ "unmatched parentheses\0"
+ "internal error: unexpected repeat\0"
+ "unrecognized character after (?\0"
+ "failed to get memory\0"
+ "missing )\0"
+ /* 15 */
+ "reference to non-existent subpattern\0"
+ "regular expression too large\0"
+ "parentheses nested too deeply"
+ ;
+
+ int i = code;
+ const char* text = errorTexts;
+ while (i > 1)
+ i -= !*text++;
+ return text;
+}
+
+/* Structure for passing "static" information around between the functions
+doing the compiling. */
+
/* "Static" state threaded through the compile functions. */
struct CompileData {
    CompileData()
        : top_backref(0)
        , backrefMap(0)
        , req_varyopt(0)
        , needOuterBracket(false)
        , numCapturingBrackets(0)
    {
    }

    int top_backref;         /* Maximum back reference */
    unsigned backrefMap;     /* Bitmap of low back refs */
    int req_varyopt;         /* "After variable item" flag for reqbyte */
    bool needOuterBracket;
    int numCapturingBrackets;
};
+
+/* Definitions to allow mutual recursion */
+
+static bool compileBracket(int, int*, unsigned char**, const UChar**, const UChar*, ErrorCode*, int, int*, int*, CompileData&);
+static bool bracketIsAnchored(const unsigned char* code);
+static bool bracketNeedsLineStart(const unsigned char* code, unsigned captureMap, unsigned backrefMap);
+static int bracketFindFirstAssertedCharacter(const unsigned char* code, bool inassert);
+
+/*************************************************
+* Handle escapes *
+*************************************************/
+
+/* This function is called when a \ has been encountered. It either returns a
+positive value for a simple escape such as \n, or a negative value which
+encodes one of the more complicated things such as \d. When UTF-8 is enabled,
+a positive value greater than 255 may be returned. On entry, ptr is pointing at
+the \. On exit, it is on the final character of the escape sequence.
+
+Arguments:
+ ptrptr points to the pattern position pointer
+ errorcodeptr points to the errorcode variable
+ bracount number of previous extracting brackets
+ options the options bits
+ isclass true if inside a character class
+
+Returns: zero or positive => a data character
+ negative => a special escape sequence
+ on error, errorptr is set
+*/
+
+static int checkEscape(const UChar** ptrptr, const UChar* patternEnd, ErrorCode* errorcodeptr, int bracount, bool isclass)
+{
+ const UChar* ptr = *ptrptr + 1;
+
+ /* If backslash is at the end of the pattern, it's an error. */
+ if (ptr == patternEnd) {
+ *errorcodeptr = ERR1;
+ *ptrptr = ptr;
+ return 0;
+ }
+
+ int c = *ptr;
+
+ /* Non-alphamerics are literals. For digits or letters, do an initial lookup in
+ a table. A non-zero result is something that can be returned immediately.
+ Otherwise further processing may be required. */
+
+ if (c < '0' || c > 'z') { /* Not alphameric */
+ } else if (int escapeValue = escapes[c - '0']) {
+ c = escapeValue;
+ if (isclass) {
+ if (-c == ESC_b)
+ c = '\b'; /* \b is backslash in a class */
+ else if (-c == ESC_B)
+ c = 'B'; /* and \B is a capital B in a class (in browsers event though ECMAScript 15.10.2.19 says it raises an error) */
+ }
+ /* Escapes that need further processing, or are illegal. */
+
+ } else {
+ switch (c) {
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ /* Escape sequences starting with a non-zero digit are backreferences,
+ unless there are insufficient brackets, in which case they are octal
+ escape sequences. Those sequences end on the first non-octal character
+ or when we overflow 0-255, whichever comes first. */
+
+ if (!isclass) {
+ const UChar* oldptr = ptr;
+ c -= '0';
+ while ((ptr + 1 < patternEnd) && isASCIIDigit(ptr[1]) && c <= bracount)
+ c = c * 10 + *(++ptr) - '0';
+ if (c <= bracount) {
+ c = -(ESC_REF + c);
+ break;
+ }
+ ptr = oldptr; /* Put the pointer back and fall through */
+ }
+
+ /* Handle an octal number following \. If the first digit is 8 or 9,
+ this is not octal. */
+
+ if ((c = *ptr) >= '8')
+ break;
+
+ /* \0 always starts an octal number, but we may drop through to here with a
+ larger first octal digit. */
+
+ case '0': {
+ c -= '0';
+ int i;
+ for (i = 1; i <= 2; ++i) {
+ if (ptr + i >= patternEnd || ptr[i] < '0' || ptr[i] > '7')
+ break;
+ int cc = c * 8 + ptr[i] - '0';
+ if (cc > 255)
+ break;
+ c = cc;
+ }
+ ptr += i - 1;
+ break;
+ }
+
+ case 'x': {
+ c = 0;
+ int i;
+ for (i = 1; i <= 2; ++i) {
+ if (ptr + i >= patternEnd || !isASCIIHexDigit(ptr[i])) {
+ c = 'x';
+ i = 1;
+ break;
+ }
+ int cc = ptr[i];
+ if (cc >= 'a')
+ cc -= 32; /* Convert to upper case */
+ c = c * 16 + cc - ((cc < 'A') ? '0' : ('A' - 10));
+ }
+ ptr += i - 1;
+ break;
+ }
+
+ case 'u': {
+ c = 0;
+ int i;
+ for (i = 1; i <= 4; ++i) {
+ if (ptr + i >= patternEnd || !isASCIIHexDigit(ptr[i])) {
+ c = 'u';
+ i = 1;
+ break;
+ }
+ int cc = ptr[i];
+ if (cc >= 'a')
+ cc -= 32; /* Convert to upper case */
+ c = c * 16 + cc - ((cc < 'A') ? '0' : ('A' - 10));
+ }
+ ptr += i - 1;
+ break;
+ }
+
+ case 'c':
+ if (++ptr == patternEnd) {
+ *errorcodeptr = ERR2;
+ return 0;
+ }
+ c = *ptr;
+
+ /* A letter is upper-cased; then the 0x40 bit is flipped. This coding
+ is ASCII-specific, but then the whole concept of \cx is ASCII-specific. */
+ c = toASCIIUpper(c) ^ 0x40;
+ break;
+ }
+ }
+
+ *ptrptr = ptr;
+ return c;
+}
+
+/*************************************************
+* Check for counted repeat *
+*************************************************/
+
+/* This function is called when a '{' is encountered in a place where it might
+start a quantifier. It looks ahead to see if it really is a quantifier or not.
+It is only a quantifier if it is one of the forms {ddd} {ddd,} or {ddd,ddd}
+where the ddds are digits.
+
+Arguments:
+ p pointer to the first char after '{'
+
+Returns: true or false
+*/
+
+static bool isCountedRepeat(const UChar* p, const UChar* patternEnd)
+{
+ if (p >= patternEnd || !isASCIIDigit(*p))
+ return false;
+ p++;
+ while (p < patternEnd && isASCIIDigit(*p))
+ p++;
+ if (p < patternEnd && *p == '}')
+ return true;
+
+ if (p >= patternEnd || *p++ != ',')
+ return false;
+ if (p < patternEnd && *p == '}')
+ return true;
+
+ if (p >= patternEnd || !isASCIIDigit(*p))
+ return false;
+ p++;
+ while (p < patternEnd && isASCIIDigit(*p))
+ p++;
+
+ return (p < patternEnd && *p == '}');
+}
+
+/*************************************************
+* Read repeat counts *
+*************************************************/
+
+/* Read an item of the form {n,m} and return the values. This is called only
+after isCountedRepeat() has confirmed that a repeat-count quantifier exists,
+so the syntax is guaranteed to be correct, but we need to check the values.
+
+Arguments:
+ p pointer to first char after '{'
+ minp pointer to int for min
+ maxp pointer to int for max
+ returned as -1 if no max
+ errorcodeptr points to error code variable
+
+Returns: pointer to '}' on success;
+ current ptr on error, with errorcodeptr set non-zero
+*/
+
+static const UChar* readRepeatCounts(const UChar* p, int* minp, int* maxp, ErrorCode* errorcodeptr)
+{
+ int min = 0;
+ int max = -1;
+
+ /* Read the minimum value and do a paranoid check: a negative value indicates
+ an integer overflow. */
+
+ while (isASCIIDigit(*p))
+ min = min * 10 + *p++ - '0';
+ if (min < 0 || min > 65535) {
+ *errorcodeptr = ERR5;
+ return p;
+ }
+
+ /* Read the maximum value if there is one, and again do a paranoid on its size.
+ Also, max must not be less than min. */
+
+ if (*p == '}')
+ max = min;
+ else {
+ if (*(++p) != '}') {
+ max = 0;
+ while (isASCIIDigit(*p))
+ max = max * 10 + *p++ - '0';
+ if (max < 0 || max > 65535) {
+ *errorcodeptr = ERR5;
+ return p;
+ }
+ if (max < min) {
+ *errorcodeptr = ERR4;
+ return p;
+ }
+ }
+ }
+
+ /* Fill in the required variables, and pass back the pointer to the terminating
+ '}'. */
+
+ *minp = min;
+ *maxp = max;
+ return p;
+}
+
+/*************************************************
+* Find first significant op code *
+*************************************************/
+
+/* This is called by several functions that scan a compiled expression looking
+for a fixed first character, or an anchoring op code etc. It skips over things
+that do not influence this.
+
+Arguments:
+ code pointer to the start of the group
+Returns: pointer to the first significant opcode
+*/
+
+static const unsigned char* firstSignificantOpcode(const unsigned char* code)
+{
+ while (*code == OP_BRANUMBER)
+ code += 3;
+ return code;
+}
+
+static const unsigned char* firstSignificantOpcodeSkippingAssertions(const unsigned char* code)
+{
+ while (true) {
+ switch (*code) {
+ case OP_ASSERT_NOT:
+ advanceToEndOfBracket(code);
+ code += 1 + LINK_SIZE;
+ break;
+ case OP_WORD_BOUNDARY:
+ case OP_NOT_WORD_BOUNDARY:
+ ++code;
+ break;
+ case OP_BRANUMBER:
+ code += 3;
+ break;
+ default:
+ return code;
+ }
+ }
+}
+
+/*************************************************
+* Get othercase range *
+*************************************************/
+
+/* This function is passed the start and end of a class range, in UTF-8 mode
+with UCP support. It searches up the characters, looking for internal ranges of
+characters in the "other" case. Each call returns the next one, updating the
+start address.
+
+Arguments:
+ cptr points to starting character value; updated
+ d end value
+ ocptr where to put start of othercase range
+ odptr where to put end of othercase range
+
+Yield: true when range returned; false when no more
+*/
+
+static bool getOthercaseRange(int* cptr, int d, int* ocptr, int* odptr)
+{
+ int c, othercase = 0;
+
+ for (c = *cptr; c <= d; c++) {
+ if ((othercase = kjs_pcre_ucp_othercase(c)) >= 0)
+ break;
+ }
+
+ if (c > d)
+ return false;
+
+ *ocptr = othercase;
+ int next = othercase + 1;
+
+ for (++c; c <= d; c++) {
+ if (kjs_pcre_ucp_othercase(c) != next)
+ break;
+ next++;
+ }
+
+ *odptr = next - 1;
+ *cptr = c;
+
+ return true;
+}
+
+/*************************************************
+ * Convert character value to UTF-8 *
+ *************************************************/
+
+/* This function takes an integer value in the range 0 - 0x7fffffff
+ and encodes it as a UTF-8 character in 0 to 6 bytes.
+
+ Arguments:
+ cvalue the character value
+ buffer pointer to buffer for result - at least 6 bytes long
+
+ Returns: number of characters placed in the buffer
+ */
+
+static int encodeUTF8(int cvalue, unsigned char *buffer)
+{
+ int i;
+ for (i = 0; i < kjs_pcre_utf8_table1_size; i++)
+ if (cvalue <= kjs_pcre_utf8_table1[i])
+ break;
+ buffer += i;
+ for (int j = i; j > 0; j--) {
+ *buffer-- = 0x80 | (cvalue & 0x3f);
+ cvalue >>= 6;
+ }
+ *buffer = kjs_pcre_utf8_table2[i] | cvalue;
+ return i + 1;
+}
+
+/*************************************************
+* Compile one branch *
+*************************************************/
+
+/* Scan the pattern, compiling it into the code vector.
+
+Arguments:
+ options the option bits
+ brackets points to number of extracting brackets used
+ codeptr points to the pointer to the current code point
+ ptrptr points to the current pattern pointer
+ errorcodeptr points to error code variable
+ firstbyteptr set to initial literal character, or < 0 (REQ_UNSET, REQ_NONE)
+ reqbyteptr set to the last literal character required, else < 0
+ cd contains pointers to tables etc.
+
+Returns: true on success
+ false, with *errorcodeptr set non-zero on error
+*/
+
+static inline bool safelyCheckNextChar(const UChar* ptr, const UChar* patternEnd, UChar expected)
+{
+ return ((ptr + 1 < patternEnd) && ptr[1] == expected);
+}
+
+static bool
+compileBranch(int options, int* brackets, unsigned char** codeptr,
+ const UChar** ptrptr, const UChar* patternEnd, ErrorCode* errorcodeptr, int *firstbyteptr,
+ int* reqbyteptr, CompileData& cd)
+{
+ int repeat_type, op_type;
+ int repeat_min = 0, repeat_max = 0; /* To please picky compilers */
+ int bravalue = 0;
+ int reqvary, tempreqvary;
+ int c;
+ unsigned char* code = *codeptr;
+ unsigned char* tempcode;
+ bool groupsetfirstbyte = false;
+ const UChar* ptr = *ptrptr;
+ const UChar* tempptr;
+ unsigned char* previous = NULL;
+ unsigned char classbits[32];
+
+ bool class_utf8;
+ unsigned char* class_utf8data;
+ unsigned char utf8_char[6];
+
+ /* Initialize no first byte, no required byte. REQ_UNSET means "no char
+ matching encountered yet". It gets changed to REQ_NONE if we hit something that
+ matches a non-fixed char first char; reqbyte just remains unset if we never
+ find one.
+
+ When we hit a repeat whose minimum is zero, we may have to adjust these values
+ to take the zero repeat into account. This is implemented by setting them to
+ zerofirstbyte and zeroreqbyte when such a repeat is encountered. The individual
+ item types that can be repeated set these backoff variables appropriately. */
+
+ int firstbyte = REQ_UNSET;
+ int reqbyte = REQ_UNSET;
+ int zeroreqbyte = REQ_UNSET;
+ int zerofirstbyte = REQ_UNSET;
+
+ /* The variable req_caseopt contains either the REQ_IGNORE_CASE value or zero,
+ according to the current setting of the ignores-case flag. REQ_IGNORE_CASE is a bit
+ value > 255. It is added into the firstbyte or reqbyte variables to record the
+ case status of the value. This is used only for ASCII characters. */
+
+ int req_caseopt = (options & IgnoreCaseOption) ? REQ_IGNORE_CASE : 0;
+
+ /* Switch on next character until the end of the branch */
+
+ for (;; ptr++) {
+ bool negate_class;
+ bool should_flip_negation; /* If a negative special such as \S is used, we should negate the whole class to properly support Unicode. */
+ int class_charcount;
+ int class_lastchar;
+ int skipbytes;
+ int subreqbyte;
+ int subfirstbyte;
+ int mclength;
+ unsigned char mcbuffer[8];
+
+ /* Next byte in the pattern */
+
+ c = ptr < patternEnd ? *ptr : 0;
+
+ /* Fill in length of a previous callout, except when the next thing is
+ a quantifier. */
+
+ bool is_quantifier = c == '*' || c == '+' || c == '?' || (c == '{' && isCountedRepeat(ptr + 1, patternEnd));
+
+ switch (c) {
+ /* The branch terminates at end of string, |, or ). */
+
+ case 0:
+ if (ptr < patternEnd)
+ goto NORMAL_CHAR;
+ // End of string; fall through
+ case '|':
+ case ')':
+ *firstbyteptr = firstbyte;
+ *reqbyteptr = reqbyte;
+ *codeptr = code;
+ *ptrptr = ptr;
+ return true;
+
+ /* Handle single-character metacharacters. In multiline mode, ^ disables
+ the setting of any following char as a first character. */
+
+ case '^':
+ if (options & MatchAcrossMultipleLinesOption) {
+ if (firstbyte == REQ_UNSET)
+ firstbyte = REQ_NONE;
+ *code++ = OP_BOL;
+ } else
+ *code++ = OP_CIRC;
+ previous = NULL;
+ break;
+
+ case '$':
+ previous = NULL;
+ if (options & MatchAcrossMultipleLinesOption)
+ *code++ = OP_EOL;
+ else
+ *code++ = OP_DOLL;
+ break;
+
+ /* There can never be a first char if '.' is first, whatever happens about
+ repeats. The value of reqbyte doesn't change either. */
+
+ case '.':
+ if (firstbyte == REQ_UNSET)
+ firstbyte = REQ_NONE;
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+ previous = code;
+ *code++ = OP_NOT_NEWLINE;
+ break;
+
+ /* Character classes. If the included characters are all < 256, we build a
+ 32-byte bitmap of the permitted characters, except in the special case
+ where there is only one such character. For negated classes, we build the
+ map as usual, then invert it at the end. However, we use a different opcode
+ so that data characters > 255 can be handled correctly.
+
+ If the class contains characters outside the 0-255 range, a different
+ opcode is compiled. It may optionally have a bit map for characters < 256,
    but those above are explicitly listed afterwards. A flag byte tells
+ whether the bitmap is present, and whether this is a negated class or not.
+ */
+
+ case '[': {
+ previous = code;
+ should_flip_negation = false;
+
+ /* PCRE supports POSIX class stuff inside a class. Perl gives an error if
+ they are encountered at the top level, so we'll do that too. */
+
+ /* If the first character is '^', set the negation flag and skip it. */
+
+ if (ptr + 1 >= patternEnd) {
+ *errorcodeptr = ERR6;
+ return false;
+ }
+
+ if (ptr[1] == '^') {
+ negate_class = true;
+ ++ptr;
+ } else
+ negate_class = false;
+
+ /* Keep a count of chars with values < 256 so that we can optimize the case
+ of just a single character (as long as it's < 256). For higher valued UTF-8
+ characters, we don't yet do any optimization. */
+
+ class_charcount = 0;
+ class_lastchar = -1;
+
+ class_utf8 = false; /* No chars >= 256 */
+ class_utf8data = code + LINK_SIZE + 34; /* For UTF-8 items */
+
+ /* Initialize the 32-char bit map to all zeros. We have to build the
+ map in a temporary bit of store, in case the class contains only 1
+ character (< 256), because in that case the compiled code doesn't use the
+ bit map. */
+
+ memset(classbits, 0, 32 * sizeof(unsigned char));
+
+ /* Process characters until ] is reached. The first pass
+ through the regex checked the overall syntax, so we don't need to be very
+ strict here. At the start of the loop, c contains the first byte of the
+ character. */
+
+ while ((++ptr < patternEnd) && (c = *ptr) != ']') {
+ /* Backslash may introduce a single character, or it may introduce one
+ of the specials, which just set a flag. Escaped items are checked for
+ validity in the pre-compiling pass. The sequence \b is a special case.
+ Inside a class (and only there) it is treated as backspace. Elsewhere
+ it marks a word boundary. Other escapes have preset maps ready to
+ or into the one we are building. We assume they have more than one
+ character in them, so set class_charcount bigger than one. */
+
+ if (c == '\\') {
+ c = checkEscape(&ptr, patternEnd, errorcodeptr, cd.numCapturingBrackets, true);
+ if (c < 0) {
+ class_charcount += 2; /* Greater than 1 is what matters */
+ switch (-c) {
+ case ESC_d:
+ for (c = 0; c < 32; c++)
+ classbits[c] |= classBitmapForChar(c + cbit_digit);
+ continue;
+
+ case ESC_D:
+ should_flip_negation = true;
+ for (c = 0; c < 32; c++)
+ classbits[c] |= ~classBitmapForChar(c + cbit_digit);
+ continue;
+
+ case ESC_w:
+ for (c = 0; c < 32; c++)
+ classbits[c] |= classBitmapForChar(c + cbit_word);
+ continue;
+
+ case ESC_W:
+ should_flip_negation = true;
+ for (c = 0; c < 32; c++)
+ classbits[c] |= ~classBitmapForChar(c + cbit_word);
+ continue;
+
+ case ESC_s:
+ for (c = 0; c < 32; c++)
+ classbits[c] |= classBitmapForChar(c + cbit_space);
+ continue;
+
+ case ESC_S:
+ should_flip_negation = true;
+ for (c = 0; c < 32; c++)
+ classbits[c] |= ~classBitmapForChar(c + cbit_space);
+ continue;
+
+ /* Unrecognized escapes are faulted if PCRE is running in its
+ strict mode. By default, for compatibility with Perl, they are
+ treated as literals. */
+
+ default:
+ c = *ptr; /* The final character */
+ class_charcount -= 2; /* Undo the default count from above */
+ }
+ }
+
+ /* Fall through if we have a single character (c >= 0). This may be
+ > 256 in UTF-8 mode. */
+
+ } /* End of backslash handling */
+
+ /* A single character may be followed by '-' to form a range. However,
+ Perl does not permit ']' to be the end of the range. A '-' character
+ here is treated as a literal. */
+
+ if ((ptr + 2 < patternEnd) && ptr[1] == '-' && ptr[2] != ']') {
+ ptr += 2;
+
+ int d = *ptr;
+
+ /* The second part of a range can be a single-character escape, but
+ not any of the other escapes. Perl 5.6 treats a hyphen as a literal
+ in such circumstances. */
+
+ if (d == '\\') {
+ const UChar* oldptr = ptr;
+ d = checkEscape(&ptr, patternEnd, errorcodeptr, cd.numCapturingBrackets, true);
+
+ /* \X is literal X; any other special means the '-' was literal */
+ if (d < 0) {
+ ptr = oldptr - 2;
+ goto LONE_SINGLE_CHARACTER; /* A few lines below */
+ }
+ }
+
+ /* The check that the two values are in the correct order happens in
+ the pre-pass. Optimize one-character ranges */
+
+ if (d == c)
+ goto LONE_SINGLE_CHARACTER; /* A few lines below */
+
+ /* In UTF-8 mode, if the upper limit is > 255, or > 127 for caseless
+ matching, we have to use an XCLASS with extra data items. Caseless
+ matching for characters > 127 is available only if UCP support is
+ available. */
+
+ if ((d > 255 || ((options & IgnoreCaseOption) && d > 127))) {
+ class_utf8 = true;
+
+ /* With UCP support, we can find the other case equivalents of
+ the relevant characters. There may be several ranges. Optimize how
+ they fit with the basic range. */
+
+ if (options & IgnoreCaseOption) {
+ int occ, ocd;
+ int cc = c;
+ int origd = d;
+ while (getOthercaseRange(&cc, origd, &occ, &ocd)) {
+ if (occ >= c && ocd <= d)
+ continue; /* Skip embedded ranges */
+
+ if (occ < c && ocd >= c - 1) /* Extend the basic range */
+ { /* if there is overlap, */
+ c = occ; /* noting that if occ < c */
+ continue; /* we can't have ocd > d */
+ } /* because a subrange is */
+ if (ocd > d && occ <= d + 1) /* always shorter than */
+ { /* the basic range. */
+ d = ocd;
+ continue;
+ }
+
+ if (occ == ocd)
+ *class_utf8data++ = XCL_SINGLE;
+ else {
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += encodeUTF8(occ, class_utf8data);
+ }
+ class_utf8data += encodeUTF8(ocd, class_utf8data);
+ }
+ }
+
+ /* Now record the original range, possibly modified for UCP caseless
+ overlapping ranges. */
+
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += encodeUTF8(c, class_utf8data);
+ class_utf8data += encodeUTF8(d, class_utf8data);
+
+ /* With UCP support, we are done. Without UCP support, there is no
+ caseless matching for UTF-8 characters > 127; we can use the bit map
+ for the smaller ones. */
+
+ continue; /* With next character in the class */
+ }
+
+ /* We use the bit map for all cases when not in UTF-8 mode; else
+ ranges that lie entirely within 0-127 when there is UCP support; else
+ for partial ranges without UCP support. */
+
+ for (; c <= d; c++) {
+ classbits[c/8] |= (1 << (c&7));
+ if (options & IgnoreCaseOption) {
+ int uc = flipCase(c);
+ classbits[uc/8] |= (1 << (uc&7));
+ }
+ class_charcount++; /* in case a one-char range */
+ class_lastchar = c;
+ }
+
+ continue; /* Go get the next char in the class */
+ }
+
+ /* Handle a lone single character - we can get here for a normal
+ non-escape char, or after \ that introduces a single character or for an
+ apparent range that isn't. */
+
+ LONE_SINGLE_CHARACTER:
+
+ /* Handle a character that cannot go in the bit map */
+
+ if ((c > 255 || ((options & IgnoreCaseOption) && c > 127))) {
+ class_utf8 = true;
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += encodeUTF8(c, class_utf8data);
+
+ if (options & IgnoreCaseOption) {
+ int othercase;
+ if ((othercase = kjs_pcre_ucp_othercase(c)) >= 0) {
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += encodeUTF8(othercase, class_utf8data);
+ }
+ }
+ } else {
+ /* Handle a single-byte character */
+ classbits[c/8] |= (1 << (c&7));
+ if (options & IgnoreCaseOption) {
+ c = flipCase(c);
+ classbits[c/8] |= (1 << (c&7));
+ }
+ class_charcount++;
+ class_lastchar = c;
+ }
+ }
+
+ /* If class_charcount is 1, we saw precisely one character whose value is
+ less than 256. In non-UTF-8 mode we can always optimize. In UTF-8 mode, we
+ can optimize the negative case only if there were no characters >= 128
+ because OP_NOT and the related opcodes like OP_NOTSTAR operate on
+ single-bytes only. This is an historical hangover. Maybe one day we can
+ tidy these opcodes to handle multi-byte characters.
+
+ The optimization throws away the bit map. We turn the item into a
+ 1-character OP_CHAR[NC] if it's positive, or OP_NOT if it's negative. Note
+ that OP_NOT does not support multibyte characters. In the positive case, it
+ can cause firstbyte to be set. Otherwise, there can be no first char if
+ this item is first, whatever repeat count may follow. In the case of
+ reqbyte, save the previous value for reinstating. */
+
+ if (class_charcount == 1 && (!class_utf8 && (!negate_class || class_lastchar < 128))) {
+ zeroreqbyte = reqbyte;
+
+ /* The OP_NOT opcode works on one-byte characters only. */
+
+ if (negate_class) {
+ if (firstbyte == REQ_UNSET)
+ firstbyte = REQ_NONE;
+ zerofirstbyte = firstbyte;
+ *code++ = OP_NOT;
+ *code++ = class_lastchar;
+ break;
+ }
+
+ /* For a single, positive character, get the value into c, and
+ then we can handle this with the normal one-character code. */
+
+ c = class_lastchar;
+ goto NORMAL_CHAR;
+ } /* End of 1-char optimization */
+
+ /* The general case - not the one-char optimization. If this is the first
+ thing in the branch, there can be no first char setting, whatever the
+ repeat count. Any reqbyte setting must remain unchanged after any kind of
+ repeat. */
+
+ if (firstbyte == REQ_UNSET) firstbyte = REQ_NONE;
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+
+ /* If there are characters with values > 255, we have to compile an
+ extended class, with its own opcode. If there are no characters < 256,
+ we can omit the bitmap. */
+
+ if (class_utf8 && !should_flip_negation) {
+ *class_utf8data++ = XCL_END; /* Marks the end of extra data */
+ *code++ = OP_XCLASS;
+ code += LINK_SIZE;
+ *code = negate_class? XCL_NOT : 0;
+
+ /* If the map is required, install it, and move on to the end of
+ the extra data */
+
+ if (class_charcount > 0) {
+ *code++ |= XCL_MAP;
+ memcpy(code, classbits, 32);
+ code = class_utf8data;
+ }
+
+ /* If the map is not required, slide down the extra data. */
+
+ else {
+ int len = class_utf8data - (code + 33);
+ memmove(code + 1, code + 33, len);
+ code += len + 1;
+ }
+
+ /* Now fill in the complete length of the item */
+
+ putLinkValue(previous + 1, code - previous);
+ break; /* End of class handling */
+ }
+
+ /* If there are no characters > 255, negate the 32-byte map if necessary,
+ and copy it into the code vector. If this is the first thing in the branch,
+ there can be no first char setting, whatever the repeat count. Any reqbyte
+ setting must remain unchanged after any kind of repeat. */
+
+ *code++ = (negate_class == should_flip_negation) ? OP_CLASS : OP_NCLASS;
+ if (negate_class)
+ for (c = 0; c < 32; c++)
+ code[c] = ~classbits[c];
+ else
+ memcpy(code, classbits, 32);
+ code += 32;
+ break;
+ }
+
+ /* Various kinds of repeat; '{' is not necessarily a quantifier, but this
+ has been tested above. */
+
+ case '{':
+ if (!is_quantifier)
+ goto NORMAL_CHAR;
+ ptr = readRepeatCounts(ptr + 1, &repeat_min, &repeat_max, errorcodeptr);
+ if (*errorcodeptr)
+ goto FAILED;
+ goto REPEAT;
+
+ case '*':
+ repeat_min = 0;
+ repeat_max = -1;
+ goto REPEAT;
+
+ case '+':
+ repeat_min = 1;
+ repeat_max = -1;
+ goto REPEAT;
+
+ case '?':
+ repeat_min = 0;
+ repeat_max = 1;
+
+ REPEAT:
+ if (!previous) {
+ *errorcodeptr = ERR9;
+ goto FAILED;
+ }
+
+ if (repeat_min == 0) {
+ firstbyte = zerofirstbyte; /* Adjust for zero repeat */
+ reqbyte = zeroreqbyte; /* Ditto */
+ }
+
+ /* Remember whether this is a variable length repeat */
+
+ reqvary = (repeat_min == repeat_max) ? 0 : REQ_VARY;
+
+ op_type = 0; /* Default single-char op codes */
+
+ /* Save start of previous item, in case we have to move it up to make space
+ for an inserted OP_ONCE for the additional '+' extension. */
+ /* FIXME: Probably don't need this because we don't use OP_ONCE. */
+
+ tempcode = previous;
+
+ /* If the next character is '+', we have a possessive quantifier. This
+ implies greediness, whatever the setting of the PCRE_UNGREEDY option.
+ If the next character is '?' this is a minimizing repeat, by default,
+ but if PCRE_UNGREEDY is set, it works the other way round. We change the
+ repeat type to the non-default. */
+
+ if (safelyCheckNextChar(ptr, patternEnd, '?')) {
+ repeat_type = 1;
+ ptr++;
+ } else
+ repeat_type = 0;
+
+ /* If previous was a character match, abolish the item and generate a
+    repeat item instead. If a char item has a minimum of more than one, ensure
+ that it is set in reqbyte - it might not be if a sequence such as x{3} is
+ the first thing in a branch because the x will have gone into firstbyte
+ instead. */
+
+ if (*previous == OP_CHAR || *previous == OP_CHAR_IGNORING_CASE) {
+ /* Deal with UTF-8 characters that take up more than one byte. It's
+ easier to write this out separately than try to macrify it. Use c to
+ hold the length of the character in bytes, plus 0x80 to flag that it's a
+ length rather than a small character. */
+
+ if (code[-1] & 0x80) {
+ unsigned char *lastchar = code - 1;
+ while((*lastchar & 0xc0) == 0x80)
+ lastchar--;
+ c = code - lastchar; /* Length of UTF-8 character */
+ memcpy(utf8_char, lastchar, c); /* Save the char */
+ c |= 0x80; /* Flag c as a length */
+ }
+ else {
+ c = code[-1];
+ if (repeat_min > 1)
+ reqbyte = c | req_caseopt | cd.req_varyopt;
+ }
+
+ goto OUTPUT_SINGLE_REPEAT; /* Code shared with single character types */
+ }
+
+ else if (*previous == OP_ASCII_CHAR || *previous == OP_ASCII_LETTER_IGNORING_CASE) {
+ c = previous[1];
+ if (repeat_min > 1)
+ reqbyte = c | req_caseopt | cd.req_varyopt;
+ goto OUTPUT_SINGLE_REPEAT;
+ }
+
+ /* If previous was a single negated character ([^a] or similar), we use
+ one of the special opcodes, replacing it. The code is shared with single-
+    character repeats by setting op_type to add a suitable offset into
+ repeat_type. OP_NOT is currently used only for single-byte chars. */
+
+ else if (*previous == OP_NOT) {
+ op_type = OP_NOTSTAR - OP_STAR; /* Use "not" opcodes */
+ c = previous[1];
+ goto OUTPUT_SINGLE_REPEAT;
+ }
+
+ /* If previous was a character type match (\d or similar), abolish it and
+ create a suitable repeat item. The code is shared with single-character
+ repeats by setting op_type to add a suitable offset into repeat_type. */
+
+ else if (*previous <= OP_NOT_NEWLINE) {
+ op_type = OP_TYPESTAR - OP_STAR; /* Use type opcodes */
+ c = *previous;
+
+ OUTPUT_SINGLE_REPEAT:
+ int prop_type = -1;
+ int prop_value = -1;
+
+ unsigned char* oldcode = code;
+ code = previous; /* Usually overwrite previous item */
+
+ /* If the maximum is zero then the minimum must also be zero; Perl allows
+ this case, so we do too - by simply omitting the item altogether. */
+
+ if (repeat_max == 0)
+ goto END_REPEAT;
+
+ /* Combine the op_type with the repeat_type */
+
+ repeat_type += op_type;
+
+ /* A minimum of zero is handled either as the special case * or ?, or as
+ an UPTO, with the maximum given. */
+
+ if (repeat_min == 0) {
+ if (repeat_max == -1)
+ *code++ = OP_STAR + repeat_type;
+ else if (repeat_max == 1)
+ *code++ = OP_QUERY + repeat_type;
+ else {
+ *code++ = OP_UPTO + repeat_type;
+ put2ByteValueAndAdvance(code, repeat_max);
+ }
+ }
+
+ /* A repeat minimum of 1 is optimized into some special cases. If the
+ maximum is unlimited, we use OP_PLUS. Otherwise, the original item it
+ left in place and, if the maximum is greater than 1, we use OP_UPTO with
+ one less than the maximum. */
+
+ else if (repeat_min == 1) {
+ if (repeat_max == -1)
+ *code++ = OP_PLUS + repeat_type;
+ else {
+ code = oldcode; /* leave previous item in place */
+ if (repeat_max == 1)
+ goto END_REPEAT;
+ *code++ = OP_UPTO + repeat_type;
+ put2ByteValueAndAdvance(code, repeat_max - 1);
+ }
+ }
+
+ /* The case {n,n} is just an EXACT, while the general case {n,m} is
+ handled as an EXACT followed by an UPTO. */
+
+ else {
+ *code++ = OP_EXACT + op_type; /* NB EXACT doesn't have repeat_type */
+ put2ByteValueAndAdvance(code, repeat_min);
+
+ /* If the maximum is unlimited, insert an OP_STAR. Before doing so,
+ we have to insert the character for the previous code. For a repeated
+ Unicode property match, there are two extra bytes that define the
+ required property. In UTF-8 mode, long characters have their length in
+ c, with the 0x80 bit as a flag. */
+
+ if (repeat_max < 0) {
+ if (c >= 128) {
+ memcpy(code, utf8_char, c & 7);
+ code += c & 7;
+ } else {
+ *code++ = c;
+ if (prop_type >= 0) {
+ *code++ = prop_type;
+ *code++ = prop_value;
+ }
+ }
+ *code++ = OP_STAR + repeat_type;
+ }
+
+ /* Else insert an UPTO if the max is greater than the min, again
+ preceded by the character, for the previously inserted code. */
+
+ else if (repeat_max != repeat_min) {
+ if (c >= 128) {
+ memcpy(code, utf8_char, c & 7);
+ code += c & 7;
+ } else
+ *code++ = c;
+ if (prop_type >= 0) {
+ *code++ = prop_type;
+ *code++ = prop_value;
+ }
+ repeat_max -= repeat_min;
+ *code++ = OP_UPTO + repeat_type;
+ put2ByteValueAndAdvance(code, repeat_max);
+ }
+ }
+
+ /* The character or character type itself comes last in all cases. */
+
+ if (c >= 128) {
+ memcpy(code, utf8_char, c & 7);
+ code += c & 7;
+ } else
+ *code++ = c;
+
+ /* For a repeated Unicode property match, there are two extra bytes that
+ define the required property. */
+
+ if (prop_type >= 0) {
+ *code++ = prop_type;
+ *code++ = prop_value;
+ }
+ }
+
+ /* If previous was a character class or a back reference, we put the repeat
+ stuff after it, but just skip the item if the repeat was {0,0}. */
+
+ else if (*previous == OP_CLASS ||
+ *previous == OP_NCLASS ||
+ *previous == OP_XCLASS ||
+ *previous == OP_REF)
+ {
+ if (repeat_max == 0) {
+ code = previous;
+ goto END_REPEAT;
+ }
+
+ if (repeat_min == 0 && repeat_max == -1)
+ *code++ = OP_CRSTAR + repeat_type;
+ else if (repeat_min == 1 && repeat_max == -1)
+ *code++ = OP_CRPLUS + repeat_type;
+ else if (repeat_min == 0 && repeat_max == 1)
+ *code++ = OP_CRQUERY + repeat_type;
+ else {
+ *code++ = OP_CRRANGE + repeat_type;
+ put2ByteValueAndAdvance(code, repeat_min);
+ if (repeat_max == -1)
+ repeat_max = 0; /* 2-byte encoding for max */
+ put2ByteValueAndAdvance(code, repeat_max);
+ }
+ }
+
+ /* If previous was a bracket group, we may have to replicate it in certain
+ cases. */
+
+ else if (*previous >= OP_BRA) {
+ int ketoffset = 0;
+ int len = code - previous;
+ unsigned char* bralink = NULL;
+
+ /* If the maximum repeat count is unlimited, find the end of the bracket
+ by scanning through from the start, and compute the offset back to it
+ from the current code pointer. There may be an OP_OPT setting following
+ the final KET, so we can't find the end just by going back from the code
+ pointer. */
+
+ if (repeat_max == -1) {
+ const unsigned char* ket = previous;
+ advanceToEndOfBracket(ket);
+ ketoffset = code - ket;
+ }
+
+ /* The case of a zero minimum is special because of the need to stick
+ OP_BRAZERO in front of it, and because the group appears once in the
+ data, whereas in other cases it appears the minimum number of times. For
+ this reason, it is simplest to treat this case separately, as otherwise
+ the code gets far too messy. There are several special subcases when the
+ minimum is zero. */
+
+ if (repeat_min == 0) {
+ /* If the maximum is also zero, we just omit the group from the output
+ altogether. */
+
+ if (repeat_max == 0) {
+ code = previous;
+ goto END_REPEAT;
+ }
+
+ /* If the maximum is 1 or unlimited, we just have to stick in the
+ BRAZERO and do no more at this point. However, we do need to adjust
+ any OP_RECURSE calls inside the group that refer to the group itself or
+ any internal group, because the offset is from the start of the whole
+ regex. Temporarily terminate the pattern while doing this. */
+
+ if (repeat_max <= 1) {
+ *code = OP_END;
+ memmove(previous+1, previous, len);
+ code++;
+ *previous++ = OP_BRAZERO + repeat_type;
+ }
+
+ /* If the maximum is greater than 1 and limited, we have to replicate
+ in a nested fashion, sticking OP_BRAZERO before each set of brackets.
+ The first one has to be handled carefully because it's the original
+ copy, which has to be moved up. The remainder can be handled by code
+ that is common with the non-zero minimum case below. We have to
+ adjust the value of repeat_max, since one less copy is required. */
+
+ else {
+ *code = OP_END;
+ memmove(previous + 2 + LINK_SIZE, previous, len);
+ code += 2 + LINK_SIZE;
+ *previous++ = OP_BRAZERO + repeat_type;
+ *previous++ = OP_BRA;
+
+ /* We chain together the bracket offset fields that have to be
+ filled in later when the ends of the brackets are reached. */
+
+ int offset = (!bralink) ? 0 : previous - bralink;
+ bralink = previous;
+ putLinkValueAllowZeroAndAdvance(previous, offset);
+ }
+
+ repeat_max--;
+ }
+
+ /* If the minimum is greater than zero, replicate the group as many
+ times as necessary, and adjust the maximum to the number of subsequent
+ copies that we need. If we set a first char from the group, and didn't
+ set a required char, copy the latter from the former. */
+
+ else {
+ if (repeat_min > 1) {
+ if (groupsetfirstbyte && reqbyte < 0)
+ reqbyte = firstbyte;
+ for (int i = 1; i < repeat_min; i++) {
+ memcpy(code, previous, len);
+ code += len;
+ }
+ }
+ if (repeat_max > 0)
+ repeat_max -= repeat_min;
+ }
+
+ /* This code is common to both the zero and non-zero minimum cases. If
+ the maximum is limited, it replicates the group in a nested fashion,
+ remembering the bracket starts on a stack. In the case of a zero minimum,
+ the first one was set up above. In all cases the repeat_max now specifies
+ the number of additional copies needed. */
+
+ if (repeat_max >= 0) {
+ for (int i = repeat_max - 1; i >= 0; i--) {
+ *code++ = OP_BRAZERO + repeat_type;
+
+ /* All but the final copy start a new nesting, maintaining the
+ chain of brackets outstanding. */
+
+ if (i != 0) {
+ *code++ = OP_BRA;
+ int offset = (!bralink) ? 0 : code - bralink;
+ bralink = code;
+ putLinkValueAllowZeroAndAdvance(code, offset);
+ }
+
+ memcpy(code, previous, len);
+ code += len;
+ }
+
+ /* Now chain through the pending brackets, and fill in their length
+ fields (which are holding the chain links pro tem). */
+
+ while (bralink) {
+ int offset = code - bralink + 1;
+ unsigned char* bra = code - offset;
+ int oldlinkoffset = getLinkValueAllowZero(bra + 1);
+ bralink = (!oldlinkoffset) ? 0 : bralink - oldlinkoffset;
+ *code++ = OP_KET;
+ putLinkValueAndAdvance(code, offset);
+ putLinkValue(bra + 1, offset);
+ }
+ }
+
+ /* If the maximum is unlimited, set a repeater in the final copy. We
+ can't just offset backwards from the current code point, because we
+ don't know if there's been an options resetting after the ket. The
+ correct offset was computed above. */
+
+ else
+ code[-ketoffset] = OP_KETRMAX + repeat_type;
+ }
+
+ /* Else there's some kind of shambles */
+
+ else {
+ *errorcodeptr = ERR11;
+ goto FAILED;
+ }
+
+    /* In all cases we no longer have a previous item. We also set the
+ "follows varying string" flag for subsequently encountered reqbytes if
+ it isn't already set and we have just passed a varying length item. */
+
+ END_REPEAT:
+ previous = NULL;
+ cd.req_varyopt |= reqvary;
+ break;
+
+ /* Start of nested bracket sub-expression, or comment or lookahead or
+ lookbehind or option setting or condition. First deal with special things
+ that can come after a bracket; all are introduced by ?, and the appearance
+ of any of them means that this is not a referencing group. They were
+ checked for validity in the first pass over the string, so we don't have to
+ check for syntax errors here. */
+
+ case '(':
+ skipbytes = 0;
+
+ if (*(++ptr) == '?') {
+ switch (*(++ptr)) {
+ case ':': /* Non-extracting bracket */
+ bravalue = OP_BRA;
+ ptr++;
+ break;
+
+ case '=': /* Positive lookahead */
+ bravalue = OP_ASSERT;
+ ptr++;
+ break;
+
+ case '!': /* Negative lookahead */
+ bravalue = OP_ASSERT_NOT;
+ ptr++;
+ break;
+
+ /* Character after (? not specially recognized */
+
+ default:
+ *errorcodeptr = ERR12;
+ goto FAILED;
+ }
+ }
+
+ /* Else we have a referencing group; adjust the opcode. If the bracket
+ number is greater than EXTRACT_BASIC_MAX, we set the opcode one higher, and
+ arrange for the true number to follow later, in an OP_BRANUMBER item. */
+
+ else {
+ if (++(*brackets) > EXTRACT_BASIC_MAX) {
+ bravalue = OP_BRA + EXTRACT_BASIC_MAX + 1;
+ code[1 + LINK_SIZE] = OP_BRANUMBER;
+ put2ByteValue(code + 2 + LINK_SIZE, *brackets);
+ skipbytes = 3;
+ }
+ else
+ bravalue = OP_BRA + *brackets;
+ }
+
+ /* Process nested bracketed re. Assertions may not be repeated, but other
+ kinds can be. We copy code into a non-variable in order to be able
+ to pass its address because some compilers complain otherwise. Pass in a
+ new setting for the ims options if they have changed. */
+
+ previous = (bravalue >= OP_BRAZERO) ? code : 0;
+ *code = bravalue;
+ tempcode = code;
+ tempreqvary = cd.req_varyopt; /* Save value before bracket */
+
+ if (!compileBracket(
+ options,
+ brackets, /* Extracting bracket count */
+ &tempcode, /* Where to put code (updated) */
+ &ptr, /* Input pointer (updated) */
+ patternEnd,
+ errorcodeptr, /* Where to put an error message */
+ skipbytes, /* Skip over OP_BRANUMBER */
+ &subfirstbyte, /* For possible first char */
+ &subreqbyte, /* For possible last char */
+ cd)) /* Tables block */
+ goto FAILED;
+
+ /* At the end of compiling, code is still pointing to the start of the
+ group, while tempcode has been updated to point past the end of the group
+ and any option resetting that may follow it. The pattern pointer (ptr)
+ is on the bracket. */
+
+ /* Handle updating of the required and first characters. Update for normal
+ brackets of all kinds, and conditions with two branches (see code above).
+ If the bracket is followed by a quantifier with zero repeat, we have to
+ back off. Hence the definition of zeroreqbyte and zerofirstbyte outside the
+ main loop so that they can be accessed for the back off. */
+
+ zeroreqbyte = reqbyte;
+ zerofirstbyte = firstbyte;
+ groupsetfirstbyte = false;
+
+ if (bravalue >= OP_BRA) {
+ /* If we have not yet set a firstbyte in this branch, take it from the
+ subpattern, remembering that it was set here so that a repeat of more
+ than one can replicate it as reqbyte if necessary. If the subpattern has
+ no firstbyte, set "none" for the whole branch. In both cases, a zero
+ repeat forces firstbyte to "none". */
+
+ if (firstbyte == REQ_UNSET) {
+ if (subfirstbyte >= 0) {
+ firstbyte = subfirstbyte;
+ groupsetfirstbyte = true;
+ }
+ else
+ firstbyte = REQ_NONE;
+ zerofirstbyte = REQ_NONE;
+ }
+
+ /* If firstbyte was previously set, convert the subpattern's firstbyte
+ into reqbyte if there wasn't one, using the vary flag that was in
+ existence beforehand. */
+
+ else if (subfirstbyte >= 0 && subreqbyte < 0)
+ subreqbyte = subfirstbyte | tempreqvary;
+
+ /* If the subpattern set a required byte (or set a first byte that isn't
+ really the first byte - see above), set it. */
+
+ if (subreqbyte >= 0)
+ reqbyte = subreqbyte;
+ }
+
+ /* For a forward assertion, we take the reqbyte, if set. This can be
+ helpful if the pattern that follows the assertion doesn't set a different
+ char. For example, it's useful for /(?=abcde).+/. We can't set firstbyte
+ for an assertion, however because it leads to incorrect effect for patterns
+ such as /(?=a)a.+/ when the "real" "a" would then become a reqbyte instead
+ of a firstbyte. This is overcome by a scan at the end if there's no
+ firstbyte, looking for an asserted first char. */
+
+ else if (bravalue == OP_ASSERT && subreqbyte >= 0)
+ reqbyte = subreqbyte;
+
+ /* Now update the main code pointer to the end of the group. */
+
+ code = tempcode;
+
+ /* Error if hit end of pattern */
+
+ if (ptr >= patternEnd || *ptr != ')') {
+ *errorcodeptr = ERR14;
+ goto FAILED;
+ }
+ break;
+
+ /* Check \ for being a real metacharacter; if not, fall through and handle
+ it as a data character at the start of a string. Escape items are checked
+ for validity in the pre-compiling pass. */
+
+ case '\\':
+ tempptr = ptr;
+ c = checkEscape(&ptr, patternEnd, errorcodeptr, cd.numCapturingBrackets, false);
+
+ /* Handle metacharacters introduced by \. For ones like \d, the ESC_ values
+ are arranged to be the negation of the corresponding OP_values. For the
+ back references, the values are ESC_REF plus the reference number. Only
+ back references and those types that consume a character may be repeated.
+ We can test for values between ESC_b and ESC_w for the latter; this may
+ have to change if any new ones are ever created. */
+
+ if (c < 0) {
+ /* For metasequences that actually match a character, we disable the
+ setting of a first character if it hasn't already been set. */
+
+ if (firstbyte == REQ_UNSET && -c > ESC_b && -c <= ESC_w)
+ firstbyte = REQ_NONE;
+
+ /* Set values to reset to if this is followed by a zero repeat. */
+
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+
+ /* Back references are handled specially */
+
+ if (-c >= ESC_REF) {
+ int number = -c - ESC_REF;
+ previous = code;
+ *code++ = OP_REF;
+ put2ByteValueAndAdvance(code, number);
+ }
+
+ /* For the rest, we can obtain the OP value by negating the escape
+ value */
+
+ else {
+ previous = (-c > ESC_b && -c <= ESC_w) ? code : NULL;
+ *code++ = -c;
+ }
+ continue;
+ }
+
+ /* Fall through. */
+
+ /* Handle a literal character. It is guaranteed not to be whitespace or #
+ when the extended flag is set. If we are in UTF-8 mode, it may be a
+ multi-byte literal character. */
+
+ default:
+ NORMAL_CHAR:
+
+ previous = code;
+
+ if (c < 128) {
+ mclength = 1;
+ mcbuffer[0] = c;
+
+ if ((options & IgnoreCaseOption) && (c | 0x20) >= 'a' && (c | 0x20) <= 'z') {
+ *code++ = OP_ASCII_LETTER_IGNORING_CASE;
+ *code++ = c | 0x20;
+ } else {
+ *code++ = OP_ASCII_CHAR;
+ *code++ = c;
+ }
+ } else {
+ mclength = encodeUTF8(c, mcbuffer);
+
+ *code++ = (options & IgnoreCaseOption) ? OP_CHAR_IGNORING_CASE : OP_CHAR;
+ for (c = 0; c < mclength; c++)
+ *code++ = mcbuffer[c];
+ }
+
+ /* Set the first and required bytes appropriately. If no previous first
+ byte, set it from this character, but revert to none on a zero repeat.
+ Otherwise, leave the firstbyte value alone, and don't change it on a zero
+ repeat. */
+
+ if (firstbyte == REQ_UNSET) {
+ zerofirstbyte = REQ_NONE;
+ zeroreqbyte = reqbyte;
+
+ /* If the character is more than one byte long, we can set firstbyte
+ only if it is not to be matched caselessly. */
+
+ if (mclength == 1 || req_caseopt == 0) {
+ firstbyte = mcbuffer[0] | req_caseopt;
+ if (mclength != 1)
+ reqbyte = code[-1] | cd.req_varyopt;
+ }
+ else
+ firstbyte = reqbyte = REQ_NONE;
+ }
+
+ /* firstbyte was previously set; we can set reqbyte only the length is
+ 1 or the matching is caseful. */
+
+ else {
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+ if (mclength == 1 || req_caseopt == 0)
+ reqbyte = code[-1] | req_caseopt | cd.req_varyopt;
+ }
+
+ break; /* End of literal character handling */
+ }
+ } /* end of big loop */
+
+ /* Control never reaches here by falling through, only by a goto for all the
+ error states. Pass back the position in the pattern so that it can be displayed
+ to the user for diagnosing the error. */
+
+FAILED:
+ *ptrptr = ptr;
+ return false;
+}
+
+/*************************************************
+* Compile sequence of alternatives *
+*************************************************/
+
+/* On entry, ptr is pointing past the bracket character, but on return
+it points to the closing bracket, or vertical bar, or end of string.
+The code variable is pointing at the byte into which the BRA operator has been
+stored. If the ims options are changed at the start (for a (?ims: group) or
+during any branch, we need to insert an OP_OPT item at the start of every
+following branch to ensure they get set correctly at run time, and also pass
+the new options into every subsequent branch compile.
+
+Argument:
+ options option bits, including any changes for this subpattern
+ brackets -> int containing the number of extracting brackets used
+ codeptr -> the address of the current code pointer
+ ptrptr -> the address of the current pattern pointer
+ errorcodeptr -> pointer to error code variable
+ skipbytes skip this many bytes at start (for OP_BRANUMBER)
+ firstbyteptr place to put the first required character, or a negative number
+ reqbyteptr place to put the last required character, or a negative number
+ cd points to the data block with tables pointers etc.
+
+Returns: true on success
+*/
+
+static bool
+compileBracket(int options, int* brackets, unsigned char** codeptr,
+    const UChar** ptrptr, const UChar* patternEnd, ErrorCode* errorcodeptr, int skipbytes,
+    int* firstbyteptr, int* reqbyteptr, CompileData& cd)
+{
+    const UChar* ptr = *ptrptr;             /* current read position in the pattern */
+    unsigned char* code = *codeptr;         /* current write position in the output */
+    unsigned char* last_branch = code;      /* start of the most recently begun branch */
+    unsigned char* start_bracket = code;    /* start of the whole group (the BRA byte) */
+    int firstbyte = REQ_UNSET;              /* first/required byte accumulated across */
+    int reqbyte = REQ_UNSET;                /* all alternatives of this group */
+
+    /* Offset is set zero to mark that this bracket is still open */
+
+    putLinkValueAllowZero(code + 1, 0);
+    code += 1 + LINK_SIZE + skipbytes;
+
+    /* Loop for each alternative branch */
+
+    while (true) {
+        /* Now compile the branch */
+
+        int branchfirstbyte;
+        int branchreqbyte;
+        if (!compileBranch(options, brackets, &code, &ptr, patternEnd, errorcodeptr,
+                           &branchfirstbyte, &branchreqbyte, cd)) {
+            *ptrptr = ptr;  /* pass back the position for error reporting */
+            return false;
+        }
+
+        /* If this is the first branch, the firstbyte and reqbyte values for the
+        branch become the values for the regex. */
+
+        if (*last_branch != OP_ALT) {
+            firstbyte = branchfirstbyte;
+            reqbyte = branchreqbyte;
+        }
+
+        /* If this is not the first branch, the first char and reqbyte have to
+        match the values from all the previous branches, except that if the previous
+        value for reqbyte didn't have REQ_VARY set, it can still match, and we set
+        REQ_VARY for the regex. */
+
+        else {
+            /* If we previously had a firstbyte, but it doesn't match the new branch,
+            we have to abandon the firstbyte for the regex, but if there was previously
+            no reqbyte, it takes on the value of the old firstbyte. */
+
+            if (firstbyte >= 0 && firstbyte != branchfirstbyte) {
+                if (reqbyte < 0)
+                    reqbyte = firstbyte;
+                firstbyte = REQ_NONE;
+            }
+
+            /* If we (now or from before) have no firstbyte, a firstbyte from the
+            branch becomes a reqbyte if there isn't a branch reqbyte. */
+
+            if (firstbyte < 0 && branchfirstbyte >= 0 && branchreqbyte < 0)
+                branchreqbyte = branchfirstbyte;
+
+            /* Now ensure that the reqbytes match */
+
+            if ((reqbyte & ~REQ_VARY) != (branchreqbyte & ~REQ_VARY))
+                reqbyte = REQ_NONE;
+            else
+                reqbyte |= branchreqbyte; /* To "or" REQ_VARY */
+        }
+
+        /* Reached end of expression, either ')' or end of pattern. Go back through
+        the alternative branches and reverse the chain of offsets, with the field in
+        the BRA item now becoming an offset to the first alternative. If there are
+        no alternatives, it points to the end of the group. The length in the
+        terminating ket is always the length of the whole bracketed item. If any of
+        the ims options were changed inside the group, compile a resetting op-code
+        following, except at the very end of the pattern. Return leaving the pointer
+        at the terminating char. */
+
+        if (ptr >= patternEnd || *ptr != '|') {
+            int length = code - last_branch;
+            do {
+                /* Each link field currently holds the distance back to the previous
+                branch; replace it with the forward length, walking back-to-front. */
+                int prev_length = getLinkValueAllowZero(last_branch + 1);
+                putLinkValue(last_branch + 1, length);
+                length = prev_length;
+                last_branch -= length;
+            } while (length > 0);
+
+            /* Fill in the ket */
+
+            *code = OP_KET;
+            putLinkValue(code + 1, code - start_bracket);
+            code += 1 + LINK_SIZE;
+
+            /* Set values to pass back */
+
+            *codeptr = code;
+            *ptrptr = ptr;
+            *firstbyteptr = firstbyte;
+            *reqbyteptr = reqbyte;
+            return true;
+        }
+
+        /* Another branch follows; insert an "or" node. Its length field points back
+        to the previous branch while the bracket remains open. At the end the chain
+        is reversed. It's done like this so that the start of the bracket has a
+        zero offset until it is closed, making it possible to detect recursion. */
+
+        *code = OP_ALT;
+        putLinkValue(code + 1, code - last_branch);
+        last_branch = code;
+        code += 1 + LINK_SIZE;
+        ptr++;  /* step over the '|' */
+    }
+    ASSERT_NOT_REACHED();  /* the loop only exits via the returns above */
+}
+
+/*************************************************
+* Check for anchored expression *
+*************************************************/
+
+/* Try to find out if this is an anchored regular expression. Consider each
+alternative branch. If they all start OP_CIRC, or with a bracket
+all of whose alternatives start OP_CIRC (recurse ad lib), then
+it's anchored.
+
+Argument:
+  code        points to start of expression (the bracket)
+
+Returns: true if the expression is anchored
+*/
+
+static bool branchIsAnchored(const unsigned char* code)
+{
+    /* Look at the first significant opcode of this branch. */
+    const unsigned char* significant = firstSignificantOpcode(code);
+    int opcode = *significant;
+
+    /* A group (or lookahead) anchors the branch only if the whole group is anchored. */
+    if (opcode == OP_ASSERT || opcode >= OP_BRA)
+        return bracketIsAnchored(significant);
+
+    /* Otherwise only an explicit ^ does. */
+    return opcode == OP_CIRC;
+}
+
+static bool bracketIsAnchored(const unsigned char* code)
+{
+    /* Walk the chain of alternatives; every one must be anchored. */
+    while (true) {
+        if (!branchIsAnchored(code + 1 + LINK_SIZE))
+            return false;
+        code += getLinkValue(code + 1);
+        if (*code != OP_ALT)
+            return true;    /* no further alternative: all branches passed */
+    }
+}
+
+/*************************************************
+* Check for starting with ^ or .* *
+*************************************************/
+
+/* This is called to find out if every branch starts with ^ or .* so that
+"first char" processing can be done to speed things up in multiline
+matching and for non-DOTALL patterns that start with .* (which must start at
+the beginning or after \n)
+
+Except when the .* appears inside capturing parentheses, and there is a
+subsequent back reference to those parentheses. By keeping a bitmap of the
+first 31 back references, we can catch some of the more common cases more
+precisely; all the greater back references share a single bit.
+
+Arguments:
+ code points to start of expression (the bracket)
+ captureMap a bitmap of which brackets we are inside while testing; this
+ handles up to substring 31; all brackets after that share
+ the zero bit
+ backrefMap the back reference bitmap
+*/
+
+static bool branchNeedsLineStart(const unsigned char* code, unsigned captureMap, unsigned backrefMap)
+{
+    /* Examine the first significant opcode of this branch. */
+    const unsigned char* opcodePtr = firstSignificantOpcode(code);
+    int opcode = *opcodePtr;
+
+    /* A capturing bracket: fold its capture number into the bitmap (captures
+    beyond 31 all share bit zero) and test the nested group. */
+    if (opcode > OP_BRA) {
+        int captureNumber = opcode - OP_BRA;
+        if (captureNumber > EXTRACT_BASIC_MAX)
+            captureNumber = get2ByteValue(opcodePtr + 2 + LINK_SIZE);
+        int mask = (captureNumber < 32) ? (1 << captureNumber) : 1;
+        return bracketNeedsLineStart(opcodePtr, captureMap | mask, backrefMap);
+    }
+
+    /* A non-capturing group or lookahead: recurse without touching the bitmap. */
+    if (opcode == OP_BRA || opcode == OP_ASSERT)
+        return bracketNeedsLineStart(opcodePtr, captureMap, backrefMap);
+
+    /* .* counts as "start at start or after \n" only when no enclosing capture
+    is the target of a back reference. */
+    if (opcode == OP_TYPESTAR || opcode == OP_TYPEMINSTAR)
+        return opcodePtr[1] == OP_NOT_NEWLINE && !(captureMap & backrefMap);
+
+    /* Otherwise only an explicit anchor qualifies. */
+    return opcode == OP_CIRC || opcode == OP_BOL;
+}
+
+static bool bracketNeedsLineStart(const unsigned char* code, unsigned captureMap, unsigned backrefMap)
+{
+    /* Every alternative of the group must itself need a line start. */
+    while (true) {
+        if (!branchNeedsLineStart(code + 1 + LINK_SIZE, captureMap, backrefMap))
+            return false;
+        code += getLinkValue(code + 1);
+        if (*code != OP_ALT)
+            return true;    /* end of the alternative chain */
+    }
+}
+
+/*************************************************
+* Check for asserted fixed first char *
+*************************************************/
+
+/* During compilation, the "first char" settings from forward assertions are
+discarded, because they can cause conflicts with actual literals that follow.
+However, if we end up without a first char setting for an unanchored pattern,
+it is worth scanning the regex to see if there is an initial asserted first
+char. If all branches start with the same asserted char, or with a bracket all
+of whose alternatives start with the same asserted char (recurse ad lib), then
+we return that char, otherwise -1.
+
+Arguments:
+ code points to start of expression (the bracket)
+ options pointer to the options (used to check casing changes)
+ inassert true if in an assertion
+
+Returns: -1 or the fixed first char
+*/
+
+static int branchFindFirstAssertedCharacter(const unsigned char* code, bool inassert)
+{
+    const unsigned char* scode = firstSignificantOpcodeSkippingAssertions(code);
+    int op = *scode;
+
+    /* All bracket opcodes are >= OP_BRA; a plain group recurses without
+    entering assertion mode, a lookahead recurses with it. */
+    if (op >= OP_BRA)
+        return bracketFindFirstAssertedCharacter(scode, false);
+    if (op == OP_ASSERT)
+        return bracketFindFirstAssertedCharacter(scode, true);
+
+    /* OP_EXACT carries a two-byte repeat count before its character. */
+    if (op == OP_EXACT)
+        scode += 2;
+
+    /* Only items that match a definite first character can contribute, and
+    only when we are inside an assertion. */
+    if (op == OP_EXACT
+            || op == OP_CHAR
+            || op == OP_CHAR_IGNORING_CASE
+            || op == OP_ASCII_CHAR
+            || op == OP_ASCII_LETTER_IGNORING_CASE
+            || op == OP_PLUS
+            || op == OP_MINPLUS)
+        return inassert ? scode[1] : -1;
+
+    return -1;
+}
+
+static int bracketFindFirstAssertedCharacter(const unsigned char* code, bool inassert)
+{
+    /* Every alternative must assert the same first character, else report -1. */
+    int assertedChar = -1;
+    while (true) {
+        int branchChar = branchFindFirstAssertedCharacter(code + 1 + LINK_SIZE, inassert);
+        if (branchChar < 0)
+            return -1;
+        if (assertedChar < 0)
+            assertedChar = branchChar;      /* first branch fixes the candidate */
+        else if (assertedChar != branchChar)
+            return -1;                      /* branches disagree */
+        code += getLinkValue(code + 1);
+        if (*code != OP_ALT)
+            return assertedChar;
+    }
+}
+
+static inline int multiplyWithOverflowCheck(int factor1, int factor2)
+{
+    /* Zero times anything is zero and can never overflow. */
+    if (!factor1 || !factor2)
+        return 0;
+    /* Report -1 if the product would exceed the pattern-size ceiling. */
+    return (factor1 > MAX_PATTERN_SIZE / factor2) ? -1 : factor1 * factor2;
+}
+
/*************************************************
*  Estimate the length of a compiled pattern     *
*************************************************/

/* Scan the whole pattern without generating code, computing an upper bound on
the number of bytes the compiled form will need. Overestimates are acceptable;
the real compile is later checked against this length.

Arguments:
  pattern        the pattern (UTF-16)
  patternLength  number of UChars in the pattern
  ignoreCase     true for caseless compilation
  cd             compile data block; numCapturingBrackets is read when
                 classifying escapes (via checkEscape) and is set to the
                 capturing-bracket count found by this pass
  errorcode      set to a non-zero error code on failure

Returns: the estimated length in bytes, or -1 on error with errorcode set
*/

static int calculateCompiledPatternLength(const UChar* pattern, int patternLength, JSRegExpIgnoreCaseOption ignoreCase,
    CompileData& cd, ErrorCode& errorcode)
{
    /* Make a pass over the pattern to compute the
    amount of store required to hold the compiled code. This does not have to be
    perfect as long as errors are overestimates. */

    if (patternLength > MAX_PATTERN_SIZE) {
        errorcode = ERR16;
        return -1;
    }

    int length = 1 + LINK_SIZE; /* For initial BRA plus length */
    int branch_extra = 0;
    int lastitemlength = 0;
    unsigned brastackptr = 0;
    int brastack[BRASTACK_SIZE];          /* Stack of bracket start offsets, for repeat replication */
    unsigned char bralenstack[BRASTACK_SIZE]; /* Saved branch_extra value per open bracket */
    int bracount = 0;

    const UChar* ptr = (const UChar*)(pattern - 1);
    const UChar* patternEnd = (const UChar*)(pattern + patternLength);

    while (++ptr < patternEnd) {
        int minRepeats = 0, maxRepeats = 0;
        int c = *ptr;

        switch (c) {
            /* A backslashed item may be an escaped data character or it may be a
            character type. */

            case '\\':
                c = checkEscape(&ptr, patternEnd, &errorcode, cd.numCapturingBrackets, false);
                if (errorcode != 0)
                    return -1;

                lastitemlength = 1; /* Default length of last item for repeats */

                if (c >= 0) { /* Data character */
                    length += 2; /* For a one-byte character */

                    if (c > 127) {
                        int i;
                        for (i = 0; i < kjs_pcre_utf8_table1_size; i++)
                            if (c <= kjs_pcre_utf8_table1[i]) break;
                        length += i;
                        lastitemlength += i;
                    }

                    continue;
                }

                /* Other escapes need one byte */

                length++;

                /* A back reference needs an additional 2 bytes, plus either one or 5
                bytes for a repeat. We also need to keep the value of the highest
                back reference. */

                if (c <= -ESC_REF) {
                    int refnum = -c - ESC_REF;
                    cd.backrefMap |= (refnum < 32) ? (1 << refnum) : 1;
                    if (refnum > cd.top_backref)
                        cd.top_backref = refnum;
                    length += 2; /* For single back reference */
                    if (safelyCheckNextChar(ptr, patternEnd, '{') && isCountedRepeat(ptr + 2, patternEnd)) {
                        ptr = readRepeatCounts(ptr + 2, &minRepeats, &maxRepeats, &errorcode);
                        if (errorcode)
                            return -1;
                        if ((minRepeats == 0 && (maxRepeats == 1 || maxRepeats == -1)) ||
                            (minRepeats == 1 && maxRepeats == -1))
                            length++;
                        else
                            length += 5;
                        if (safelyCheckNextChar(ptr, patternEnd, '?'))
                            ptr++;
                    }
                }
                continue;

            case '^': /* Single-byte metacharacters */
            case '.':
            case '$':
                length++;
                lastitemlength = 1;
                continue;

            case '*': /* These repeats won't be after brackets; */
            case '+': /* those are handled separately */
            case '?':
                length++;
                goto POSSESSIVE;

            /* This covers the cases of braced repeats after a single char, metachar,
            class, or back reference. */

            case '{':
                if (!isCountedRepeat(ptr + 1, patternEnd))
                    goto NORMAL_CHAR;
                ptr = readRepeatCounts(ptr + 1, &minRepeats, &maxRepeats, &errorcode);
                if (errorcode != 0)
                    return -1;

                /* These special cases just insert one extra opcode */

                if ((minRepeats == 0 && (maxRepeats == 1 || maxRepeats == -1)) ||
                    (minRepeats == 1 && maxRepeats == -1))
                    length++;

                /* These cases might insert additional copies of a preceding character. */

                else {
                    if (minRepeats != 1) {
                        length -= lastitemlength; /* Uncount the original char or metachar */
                        if (minRepeats > 0)
                            length += 3 + lastitemlength;
                    }
                    length += lastitemlength + ((maxRepeats > 0) ? 3 : 1);
                }

                if (safelyCheckNextChar(ptr, patternEnd, '?'))
                    ptr++; /* Needs no extra length */

            POSSESSIVE: /* Test for possessive quantifier */
                if (safelyCheckNextChar(ptr, patternEnd, '+')) {
                    ptr++;
                    length += 2 + 2 * LINK_SIZE; /* Allow for atomic brackets */
                }
                continue;

            /* An alternation contains an offset to the next branch or ket. If any ims
            options changed in the previous branch(es), and/or if we are in a
            lookbehind assertion, extra space will be needed at the start of the
            branch. This is handled by branch_extra. */

            case '|':
                if (brastackptr == 0)
                    cd.needOuterBracket = true;
                length += 1 + LINK_SIZE + branch_extra;
                continue;

            /* A character class uses 33 characters provided that all the character
            values are less than 256. Otherwise, it uses a bit map for low valued
            characters, and individual items for others. Don't worry about character
            types that aren't allowed in classes - they'll get picked up during the
            compile. A character class that contains only one single-byte character
            uses 2 or 3 bytes, depending on whether it is negated or not. Notice this
            where we can. (In UTF-8 mode we can do this only for chars < 128.) */

            case '[': {
                int class_optcount;
                if (*(++ptr) == '^') {
                    class_optcount = 10; /* Greater than one */
                    ptr++;
                }
                else
                    class_optcount = 0;

                bool class_utf8 = false;

                for (; ptr < patternEnd && *ptr != ']'; ++ptr) {
                    /* Check for escapes */

                    if (*ptr == '\\') {
                        c = checkEscape(&ptr, patternEnd, &errorcode, cd.numCapturingBrackets, true);
                        if (errorcode != 0)
                            return -1;

                        /* Handle escapes that turn into characters */

                        if (c >= 0)
                            goto NON_SPECIAL_CHARACTER;

                        /* Escapes that are meta-things. The normal ones just affect the
                        bit map, but Unicode properties require an XCLASS extended item. */

                        else
                            class_optcount = 10; /* \d, \s etc; make sure > 1 */
                    }

                    /* Anything else increments the possible optimization count. We have to
                    detect ranges here so that we can compute the number of extra ranges for
                    caseless wide characters when UCP support is available. If there are wide
                    characters, we are going to have to use an XCLASS, even for single
                    characters. */

                    else {
                        c = *ptr;

                        /* Come here from handling \ above when it escapes to a char value */

                        NON_SPECIAL_CHARACTER:
                        class_optcount++;

                        int d = -1;
                        if (safelyCheckNextChar(ptr, patternEnd, '-')) {
                            UChar const *hyptr = ptr++;
                            if (safelyCheckNextChar(ptr, patternEnd, '\\')) {
                                ptr++;
                                d = checkEscape(&ptr, patternEnd, &errorcode, cd.numCapturingBrackets, true);
                                if (errorcode != 0)
                                    return -1;
                            }
                            else if ((ptr + 1 < patternEnd) && ptr[1] != ']')
                                d = *++ptr;
                            if (d < 0)
                                ptr = hyptr; /* go back to hyphen as data */
                        }

                        /* If d >= 0 we have a range. In UTF-8 mode, if the end is > 255, or >
                        127 for caseless matching, we will need to use an XCLASS. */

                        if (d >= 0) {
                            class_optcount = 10; /* Ensure > 1 */
                            if (d < c) {
                                errorcode = ERR8;
                                return -1;
                            }

                            if ((d > 255 || (ignoreCase && d > 127))) {
                                unsigned char buffer[6];
                                if (!class_utf8) /* Allow for XCLASS overhead */
                                {
                                    class_utf8 = true;
                                    length += LINK_SIZE + 2;
                                }

                                /* If we have UCP support, find out how many extra ranges are
                                needed to map the other case of characters within this range. We
                                have to mimic the range optimization here, because extending the
                                range upwards might push d over a boundary that makes it use
                                another byte in the UTF-8 representation. */

                                if (ignoreCase) {
                                    int occ, ocd;
                                    int cc = c;
                                    int origd = d;
                                    while (getOthercaseRange(&cc, origd, &occ, &ocd)) {
                                        if (occ >= c && ocd <= d)
                                            continue; /* Skip embedded */

                                        if (occ < c && ocd >= c - 1) /* Extend the basic range */
                                        { /* if there is overlap, */
                                            c = occ; /* noting that if occ < c */
                                            continue; /* we can't have ocd > d */
                                        } /* because a subrange is */
                                        if (ocd > d && occ <= d + 1) /* always shorter than */
                                        { /* the basic range. */
                                            d = ocd;
                                            continue;
                                        }

                                        /* An extra item is needed */

                                        length += 1 + encodeUTF8(occ, buffer) +
                                            ((occ == ocd) ? 0 : encodeUTF8(ocd, buffer));
                                    }
                                }

                                /* The length of the (possibly extended) range */

                                length += 1 + encodeUTF8(c, buffer) + encodeUTF8(d, buffer);
                            }

                        }

                        /* We have a single character. There is nothing to be done unless we
                        are in UTF-8 mode. If the char is > 255, or 127 when caseless, we must
                        allow for an XCL_SINGLE item, doubled for caselessness if there is UCP
                        support. */

                        else {
                            if ((c > 255 || (ignoreCase && c > 127))) {
                                unsigned char buffer[6];
                                class_optcount = 10; /* Ensure > 1 */
                                if (!class_utf8) /* Allow for XCLASS overhead */
                                {
                                    class_utf8 = true;
                                    length += LINK_SIZE + 2;
                                }
                                length += (ignoreCase ? 2 : 1) * (1 + encodeUTF8(c, buffer));
                            }
                        }
                    }
                }

                if (ptr >= patternEnd) { /* Missing terminating ']' */
                    errorcode = ERR6;
                    return -1;
                }

                /* We can optimize when there was only one optimizable character.
                Note that this does not detect the case of a negated single character.
                In that case we do an incorrect length computation, but it's not a serious
                problem because the computed length is too large rather than too small. */

                if (class_optcount == 1)
                    goto NORMAL_CHAR;

                /* Here, we handle repeats for the class opcodes. */
                {
                    length += 33;

                    /* A repeat needs either 1 or 5 bytes. If it is a possessive quantifier,
                    we also need extra for wrapping the whole thing in a sub-pattern. */

                    if (safelyCheckNextChar(ptr, patternEnd, '{') && isCountedRepeat(ptr + 2, patternEnd)) {
                        ptr = readRepeatCounts(ptr + 2, &minRepeats, &maxRepeats, &errorcode);
                        if (errorcode != 0)
                            return -1;
                        if ((minRepeats == 0 && (maxRepeats == 1 || maxRepeats == -1)) ||
                            (minRepeats == 1 && maxRepeats == -1))
                            length++;
                        else
                            length += 5;
                        if (safelyCheckNextChar(ptr, patternEnd, '+')) {
                            ptr++;
                            length += 2 + 2 * LINK_SIZE;
                        } else if (safelyCheckNextChar(ptr, patternEnd, '?'))
                            ptr++;
                    }
                }
                continue;
            }

            /* Brackets may be genuine groups or special things */

            case '(': {
                int branch_newextra = 0;
                int bracket_length = 1 + LINK_SIZE;
                bool capturing = false;

                /* Handle special forms of bracket, which all start (? */

                if (safelyCheckNextChar(ptr, patternEnd, '?')) {
                    switch (c = (ptr + 2 < patternEnd ? ptr[2] : 0)) {
                        /* Non-referencing groups and lookaheads just move the pointer on, and
                        then behave like a non-special bracket, except that they don't increment
                        the count of extracting brackets. Ditto for the "once only" bracket,
                        which is in Perl from version 5.005. */

                        case ':':
                        case '=':
                        case '!':
                            ptr += 2;
                            break;

                        /* Else loop checking valid options until ) is met. Anything else is an
                        error. If we are without any brackets, i.e. at top level, the settings
                        act as if specified in the options, so massage the options immediately.
                        This is for backward compatibility with Perl 5.004. */

                        default:
                            errorcode = ERR12;
                            return -1;
                    }
                } else
                    capturing = 1;

                /* Capturing brackets must be counted so we can process escapes in a
                Perlish way. If the number exceeds EXTRACT_BASIC_MAX we are going to need
                an additional 3 bytes of memory per capturing bracket. */

                if (capturing) {
                    bracount++;
                    if (bracount > EXTRACT_BASIC_MAX)
                        bracket_length += 3;
                }

                /* Save length for computing whole length at end if there's a repeat that
                requires duplication of the group. Also save the current value of
                branch_extra, and start the new group with the new value. If non-zero, this
                will either be 2 for a (?imsx: group, or 3 for a lookbehind assertion. */

                if (brastackptr >= sizeof(brastack)/sizeof(int)) {
                    errorcode = ERR17;
                    return -1;
                }

                bralenstack[brastackptr] = branch_extra;
                branch_extra = branch_newextra;

                brastack[brastackptr++] = length;
                length += bracket_length;
                continue;
            }

            /* Handle ket. Look for subsequent maxRepeats/minRepeats; for certain sets of values we
            have to replicate this bracket up to that many times. If brastackptr is
            0 this is an unmatched bracket which will generate an error, but take care
            not to try to access brastack[-1] when computing the length and restoring
            the branch_extra value. */

            case ')': {
                int duplength;
                length += 1 + LINK_SIZE;
                if (brastackptr > 0) {
                    duplength = length - brastack[--brastackptr];
                    branch_extra = bralenstack[brastackptr];
                }
                else
                    duplength = 0;

                /* Leave ptr at the final char; for readRepeatCounts this happens
                automatically; for the others we need an increment. */

                if ((ptr + 1 < patternEnd) && (c = ptr[1]) == '{' && isCountedRepeat(ptr + 2, patternEnd)) {
                    ptr = readRepeatCounts(ptr + 2, &minRepeats, &maxRepeats, &errorcode);
                    if (errorcode)
                        return -1;
                } else if (c == '*') {
                    minRepeats = 0;
                    maxRepeats = -1;
                    ptr++;
                } else if (c == '+') {
                    minRepeats = 1;
                    maxRepeats = -1;
                    ptr++;
                } else if (c == '?') {
                    minRepeats = 0;
                    maxRepeats = 1;
                    ptr++;
                } else {
                    minRepeats = 1;
                    maxRepeats = 1;
                }

                /* If the minimum is zero, we have to allow for an OP_BRAZERO before the
                group, and if the maximum is greater than zero, we have to replicate
                maxval-1 times; each replication acquires an OP_BRAZERO plus a nesting
                bracket set. */

                int repeatsLength;
                if (minRepeats == 0) {
                    length++;
                    if (maxRepeats > 0) {
                        repeatsLength = multiplyWithOverflowCheck(maxRepeats - 1, duplength + 3 + 2 * LINK_SIZE);
                        if (repeatsLength < 0) {
                            errorcode = ERR16;
                            return -1;
                        }
                        length += repeatsLength;
                        if (length > MAX_PATTERN_SIZE) {
                            errorcode = ERR16;
                            return -1;
                        }
                    }
                }

                /* When the minimum is greater than zero, we have to replicate up to
                minval-1 times, with no additions required in the copies. Then, if there
                is a limited maximum we have to replicate up to maxval-1 times allowing
                for a BRAZERO item before each optional copy and nesting brackets for all
                but one of the optional copies. */

                else {
                    repeatsLength = multiplyWithOverflowCheck(minRepeats - 1, duplength);
                    if (repeatsLength < 0) {
                        errorcode = ERR16;
                        return -1;
                    }
                    length += repeatsLength;
                    if (maxRepeats > minRepeats) { /* Need this test as maxRepeats=-1 means no limit */
                        repeatsLength = multiplyWithOverflowCheck(maxRepeats - minRepeats, duplength + 3 + 2 * LINK_SIZE);
                        if (repeatsLength < 0) {
                            errorcode = ERR16;
                            return -1;
                        }
                        length += repeatsLength - (2 + 2 * LINK_SIZE);
                    }
                    if (length > MAX_PATTERN_SIZE) {
                        errorcode = ERR16;
                        return -1;
                    }
                }

                /* Allow space for once brackets for "possessive quantifier" */

                if (safelyCheckNextChar(ptr, patternEnd, '+')) {
                    ptr++;
                    length += 2 + 2 * LINK_SIZE;
                }
                continue;
            }

            /* Non-special character. It won't be space or # in extended mode, so it is
            always a genuine character. If we are in a \Q...\E sequence, check for the
            end; if not, we have a literal. */

            default:
            NORMAL_CHAR:
                length += 2; /* For a one-byte character */
                lastitemlength = 1; /* Default length of last item for repeats */

                if (c > 127) {
                    int i;
                    for (i = 0; i < kjs_pcre_utf8_table1_size; i++)
                        if (c <= kjs_pcre_utf8_table1[i])
                            break;
                    length += i;
                    lastitemlength += i;
                }

                continue;
        }
    }

    length += 2 + LINK_SIZE; /* For final KET and END */

    cd.numCapturingBrackets = bracount;
    return length;
}
+
+/*************************************************
+* Compile a Regular Expression *
+*************************************************/
+
+/* This function takes a string and returns a pointer to a block of store
holding a compiled version of the expression.

Arguments:
  pattern            the regular expression (UTF-16)
  patternLength      the number of UChars in the pattern
  ignoreCase         whether matching should be caseless
  multiline          whether ^ and $ should match at embedded newlines
  numSubpatterns     if not NULL, receives the number of capturing subpatterns
  errorptr           pointer to pointer to error text; must not be NULL
  allocate_function  allocator used for the compiled block
  free_function      deallocator, used if compilation fails after allocation

Returns: pointer to compiled data block, or NULL on error,
         with *errorptr set
+*/
+
+static inline JSRegExp* returnError(ErrorCode errorcode, const char** errorptr)
+{
+ *errorptr = errorText(errorcode);
+ return 0;
+}
+
JSRegExp* jsRegExpCompile(const UChar* pattern, int patternLength,
    JSRegExpIgnoreCaseOption ignoreCase, JSRegExpMultilineOption multiline,
    unsigned* numSubpatterns, const char** errorptr,
    malloc_t* allocate_function, free_t* free_function)
{
    /* We can't pass back an error message if errorptr is NULL; I guess the best we
    can do is just return NULL, but we can set a code value if there is a code pointer. */
    if (!errorptr)
        return 0;
    *errorptr = NULL;

    CompileData cd;

    ErrorCode errorcode = ERR0;
    /* Call this once just to count the brackets. The count is stored in
    cd.numCapturingBrackets, which the second pass hands to checkEscape()
    when classifying escapes. */
    calculateCompiledPatternLength(pattern, patternLength, ignoreCase, cd, errorcode);
    /* Call it again to compute the length. */
    int length = calculateCompiledPatternLength(pattern, patternLength, ignoreCase, cd, errorcode);
    if (errorcode)
        return returnError(errorcode, errorptr);

    if (length > MAX_PATTERN_SIZE)
        return returnError(ERR16, errorptr);

    /* The compiled code is stored immediately after the JSRegExp header. */
    size_t size = length + sizeof(JSRegExp);
    JSRegExp* re = reinterpret_cast<JSRegExp*>((*allocate_function)(size));

    if (!re)
        return returnError(ERR13, errorptr);

    re->options = (ignoreCase ? IgnoreCaseOption : 0) | (multiline ? MatchAcrossMultipleLinesOption : 0);

    /* The starting points of the name/number translation table and of the code are
    passed around in the compile data block. */

    const unsigned char* codeStart = (const unsigned char*)(re + 1);

    /* Set up a starting, non-extracting bracket, then compile the expression. On
    error, errorcode will be set non-zero, so we don't need to look at the result
    of the function here. */

    const UChar* ptr = (const UChar*)pattern;
    const UChar* patternEnd = pattern + patternLength;
    unsigned char* code = (unsigned char*)codeStart;
    int firstbyte, reqbyte;
    int bracketCount = 0;
    if (!cd.needOuterBracket)
        compileBranch(re->options, &bracketCount, &code, &ptr, patternEnd, &errorcode, &firstbyte, &reqbyte, cd);
    else {
        *code = OP_BRA;
        compileBracket(re->options, &bracketCount, &code, &ptr, patternEnd, &errorcode, 0, &firstbyte, &reqbyte, cd);
    }
    re->top_bracket = bracketCount;
    re->top_backref = cd.top_backref;

    /* If not reached end of pattern on success, there's an excess bracket. */

    if (errorcode == 0 && ptr < patternEnd)
        errorcode = ERR10;

    /* Fill in the terminating state and check for disastrous overflow, but
    if debugging, leave the test till after things are printed out. */

    *code++ = OP_END;

    ASSERT(code - codeStart <= length);
    if (code - codeStart > length)
        errorcode = ERR7;

    /* Give an error if there's back reference to a non-existent capturing
    subpattern. */

    if (re->top_backref > re->top_bracket)
        errorcode = ERR15;

    /* Failed to compile, or error while post-processing */

    if (errorcode != ERR0) {
        (*free_function)(reinterpret_cast<void*>(re));
        return returnError(errorcode, errorptr);
    }

    /* If the anchored option was not passed, set the flag if we can determine that
    the pattern is anchored by virtue of ^ characters or \A or anything else (such
    as starting with .* when DOTALL is set).

    Otherwise, if we know what the first character has to be, save it, because that
    speeds up unanchored matches no end. If not, see if we can set the
    UseMultiLineFirstByteOptimizationOption flag. This is helpful for multiline matches when all branches
    start with ^. and also when all branches start with .* for non-DOTALL matches.
    */

    if (cd.needOuterBracket ? bracketIsAnchored(codeStart) : branchIsAnchored(codeStart))
        re->options |= IsAnchoredOption;
    else {
        if (firstbyte < 0) {
            firstbyte = (cd.needOuterBracket
                ? bracketFindFirstAssertedCharacter(codeStart, false)
                : branchFindFirstAssertedCharacter(codeStart, false))
                | ((re->options & IgnoreCaseOption) ? REQ_IGNORE_CASE : 0);
        }
        if (firstbyte >= 0) {
            int ch = firstbyte & 255;
            if (ch < 127) {
                re->first_byte = ((firstbyte & REQ_IGNORE_CASE) && flipCase(ch) == ch) ? ch : firstbyte;
                re->options |= UseFirstByteOptimizationOption;
            }
        } else {
            if (cd.needOuterBracket ? bracketNeedsLineStart(codeStart, 0, cd.backrefMap) : branchNeedsLineStart(codeStart, 0, cd.backrefMap))
                re->options |= UseMultiLineFirstByteOptimizationOption;
        }
    }

    /* For an anchored pattern, we use the "required byte" only if it follows a
    variable length item in the regex. Remove the caseless flag for non-caseable
    bytes. */

    if (reqbyte >= 0 && (!(re->options & IsAnchoredOption) || (reqbyte & REQ_VARY))) {
        int ch = reqbyte & 255;
        if (ch < 127) {
            re->req_byte = ((reqbyte & REQ_IGNORE_CASE) && flipCase(ch) == ch) ? (reqbyte & ~REQ_IGNORE_CASE) : reqbyte;
            re->options |= UseRequiredByteOptimizationOption;
        }
    }

    if (numSubpatterns)
        *numSubpatterns = re->top_bracket;
    return re;
}
+
+void jsRegExpFree(JSRegExp* re, free_t* free_function)
+{
+ (*free_function)(reinterpret_cast<void*>(re));
+}
--- /dev/null
+/* This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed. This library now supports only the regular expression features
+required by the JavaScript language specification, and has only the functions
+needed by JavaScriptCore and the rest of WebKit.
+
+ Originally written by Philip Hazel
+ Copyright (c) 1997-2006 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+ Copyright (C) 2007 Eric Seidel <eric@webkit.org>
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* This module contains jsRegExpExecute(), the externally visible function
+that does pattern matching using an NFA algorithm, following the rules from
+the JavaScript specification. There are also some supporting functions. */
+
+#include "config.h"
+
+#include "pcre_internal.h"
+
+#include "ASCIICType.h"
+
+#include <ctype.h>
+#include <limits.h>
+#include <string.h> /* for memcpy */
+
+#ifdef __GNUC__
+#define USE_COMPUTED_GOTO_FOR_MATCH_RECURSION
+//#define USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP
+#endif
+
+/* Avoid warnings on Windows. */
+#undef min
+#undef max
+
+#ifndef USE_COMPUTED_GOTO_FOR_MATCH_RECURSION
+typedef int ReturnLocation;
+#else
+typedef void* ReturnLocation;
+#endif
+
+/* Structure for building a chain of data for holding the values of
+the subject pointer at the start of each bracket, used to detect when
+an empty string has been matched by a bracket to break infinite loops. */
/* One link in that chain. Nodes live inside MatchFrame::locals and are linked
   onto the frame's chain by startNewGroup(). */
struct BracketChainNode {
    BracketChainNode* previousBracket; // enclosing bracket's node, or 0 at the outermost level
    const UChar* bracketStart;         // subject position when this bracket was entered
};
+
/* One frame of the simulated recursion in match(). Real C++ recursion is not
   used; frames are pushed and popped on a MatchStack instead. */
struct MatchFrame {
    ReturnLocation returnLocation;    // where match() resumes when this frame is popped
    struct MatchFrame* previousFrame; // the frame this one returns to

    /* Function arguments that may change */
    struct {
        const UChar* subjectPtr;             // current position in the subject string
        const unsigned char* instructionPtr; // current position in the compiled pattern
        int offsetTop;                       // current top of the capture offset vector
        BracketChainNode* bracketChain;      // chain of enclosing bracket start positions
    } args;


    /* PCRE uses "fake" recursion built off of gotos, thus
    stack-based local variables are not safe to use. Instead we have to
    store local variables on the current MatchFrame. */
    struct {
        const unsigned char* data;
        const unsigned char* startOfRepeatingBracket;
        const UChar* subjectPtrAtStartOfInstruction; // Several instructions stash away a subjectPtr here for later compare
        const unsigned char* instructionPtrAtStartOfOnce;

        int repeatOthercase;

        int ctype;
        int fc;
        int fi;
        int length;
        int max;
        int number;
        int offset;
        int saveOffset1;
        int saveOffset2;
        int saveOffset3;

        BracketChainNode bracketChainNode; // storage linked into args.bracketChain by startNewGroup()
    } locals;
};
+
+/* Structure for passing "static" information around between the functions
+doing traditional NFA matching, so that they are thread-safe. */
+
struct MatchData {
    int* offsetVector;         /* Offset vector */
    int offsetEnd;             /* One past the end */
    int offsetMax;             /* The maximum usable for return data */
    bool offsetOverflow;       /* Set if too many extractions */
    const UChar* startSubject; /* Start of the subject string */
    const UChar* endSubject;   /* End of the subject string */
    const UChar* endMatchPtr;  /* Subject position at end match */
    int endOffsetTop;          /* Highwater mark at end of match */
    bool multiline;            /* Multiline matching flag */
    bool ignoreCase;           /* Caseless flag; selects the caseless path in matchRef */
};
+
/* The maximum remaining length of subject we are prepared to search for a
req_byte match. */

#define REQ_BYTE_MAX 1000

/* The below limit restricts the number of "recursive" match calls in order to
avoid spending exponential time on complex regular expressions. When exceeded,
match() aborts with JSRegExpErrorHitLimit. */

static const unsigned matchLimit = 100000;
+
+#ifdef DEBUG
+/*************************************************
+* Debugging function to print chars *
+*************************************************/
+
+/* Print a sequence of chars in printable format, stopping at the end of the
subject if requested.
+
+Arguments:
+ p points to characters
+ length number to print
+ isSubject true if printing from within md.startSubject
+ md pointer to matching data block, if isSubject is true
+*/
+
+static void pchars(const UChar* p, int length, bool isSubject, const MatchData& md)
+{
+ if (isSubject && length > md.endSubject - p)
+ length = md.endSubject - p;
+ while (length-- > 0) {
+ int c;
+ if (isprint(c = *(p++)))
+ printf("%c", c);
+ else if (c < 256)
+ printf("\\x%02x", c);
+ else
+ printf("\\x{%x}", c);
+ }
+}
+#endif
+
+/*************************************************
+* Match a back-reference *
+*************************************************/
+
+/* If a back reference hasn't been set, the length that is passed is greater
+than the number of characters left in the string, so the match fails.
+
+Arguments:
+ offset index into the offset vector
+ subjectPtr points into the subject
+ length length to be matched
+ md points to match data block
+
+Returns: true if matched
+*/
+
+static bool matchRef(int offset, const UChar* subjectPtr, int length, const MatchData& md)
+{
+ const UChar* p = md.startSubject + md.offsetVector[offset];
+
+#ifdef DEBUG
+ if (subjectPtr >= md.endSubject)
+ printf("matching subject <null>");
+ else {
+ printf("matching subject ");
+ pchars(subjectPtr, length, true, md);
+ }
+ printf(" against backref ");
+ pchars(p, length, false, md);
+ printf("\n");
+#endif
+
+ /* Always fail if not enough characters left */
+
+ if (length > md.endSubject - subjectPtr)
+ return false;
+
+ /* Separate the caselesss case for speed */
+
+ if (md.ignoreCase) {
+ while (length-- > 0) {
+ UChar c = *p++;
+ int othercase = kjs_pcre_ucp_othercase(c);
+ UChar d = *subjectPtr++;
+ if (c != d && othercase != d)
+ return false;
+ }
+ }
+ else {
+ while (length-- > 0)
+ if (*p++ != *subjectPtr++)
+ return false;
+ }
+
+ return true;
+}
+
+#ifndef USE_COMPUTED_GOTO_FOR_MATCH_RECURSION
+
+/* Use numbered labels and switch statement at the bottom of the match function. */
+
+#define RMATCH_WHERE(num) num
+#define RRETURN_LABEL RRETURN_SWITCH
+
+#else
+
+/* Use GCC's computed goto extension. */
+
+/* For one test case this is more than 40% faster than the switch statement.
+We could avoid the use of the num argument entirely by using local labels,
+but using it for the GCC case as well as the non-GCC case allows us to share
+a bit more code and notice if we use conflicting numbers.*/
+
+#define RMATCH_WHERE(num) &&RRETURN_##num
+#define RRETURN_LABEL *stack.currentFrame->returnLocation
+
+#endif
+
+#define RECURSIVE_MATCH_COMMON(num) \
+ goto RECURSE;\
+ RRETURN_##num: \
+ stack.popCurrentFrame();
+
+#define RECURSIVE_MATCH(num, ra, rb) \
+ do { \
+ stack.pushNewFrame((ra), (rb), RMATCH_WHERE(num)); \
+ RECURSIVE_MATCH_COMMON(num) \
+ } while (0)
+
+#define RECURSIVE_MATCH_STARTNG_NEW_GROUP(num, ra, rb) \
+ do { \
+ stack.pushNewFrame((ra), (rb), RMATCH_WHERE(num)); \
+ startNewGroup(stack.currentFrame); \
+ RECURSIVE_MATCH_COMMON(num) \
+ } while (0)
+
+#define RRETURN goto RRETURN_LABEL
+
+#define RRETURN_NO_MATCH do { isMatch = false; RRETURN; } while (0)
+
+/*************************************************
+* Match from current position *
+*************************************************/
+
+/* On entry instructionPtr points to the first opcode, and subjectPtr to the first character
+in the subject string, while substringStart holds the value of subjectPtr at the start of the
+last bracketed group - used for breaking infinite loops matching zero-length
+strings. This function is called recursively in many circumstances. Whenever it
+returns a negative (error) response, the outer match() call must also return the
+same response.
+
+Arguments:
+ subjectPtr pointer in subject
+ instructionPtr position in code
+ offsetTop current top pointer
+ md pointer to "static" info for the match
+
+Returns: 1 if matched ) these values are >= 0
+ 0 if failed to match )
+ a negative error value if aborted by an error condition
+ (e.g. stopped by repeated call or recursion limit)
+*/
+
static const unsigned FRAMES_ON_STACK = 16; /* Frames kept inline before spilling to the heap */

/* The stack of MatchFrames used to simulate recursion in match(). The first
   FRAMES_ON_STACK frames live in an inline array; deeper nesting allocates
   additional frames on the heap. */
struct MatchStack {
    MatchStack()
        : framesEnd(frames + FRAMES_ON_STACK)
        , currentFrame(frames)
        , size(1) // match() accesses the first frame directly, without calling pushNewFrame
    {
        ASSERT((sizeof(frames) / sizeof(frames[0])) == FRAMES_ON_STACK);
    }

    MatchFrame frames[FRAMES_ON_STACK];
    MatchFrame* framesEnd;
    MatchFrame* currentFrame;
    unsigned size; // number of live frames, including the initial one

    // True while the next frame still fits in the inline buffer.
    inline bool canUseStackBufferForNextFrame()
    {
        return size < FRAMES_ON_STACK;
    }

    // Storage for the next frame: the adjacent inline slot when available,
    // otherwise a heap allocation (deleted again by popCurrentFrame).
    inline MatchFrame* allocateNextFrame()
    {
        if (canUseStackBufferForNextFrame())
            return currentFrame + 1;
        return new MatchFrame;
    }

    // Push a frame that resumes at instructionPtr, inheriting the subject
    // position and offset top from the current frame.
    inline void pushNewFrame(const unsigned char* instructionPtr, BracketChainNode* bracketChain, ReturnLocation returnLocation)
    {
        MatchFrame* newframe = allocateNextFrame();
        newframe->previousFrame = currentFrame;

        newframe->args.subjectPtr = currentFrame->args.subjectPtr;
        newframe->args.offsetTop = currentFrame->args.offsetTop;
        newframe->args.instructionPtr = instructionPtr;
        newframe->args.bracketChain = bracketChain;
        newframe->returnLocation = returnLocation;
        size++;

        currentFrame = newframe;
    }

    // Pop the current frame; a frame beyond the inline buffer (size >
    // FRAMES_ON_STACK) was heap-allocated and is deleted here.
    inline void popCurrentFrame()
    {
        MatchFrame* oldFrame = currentFrame;
        currentFrame = currentFrame->previousFrame;
        if (size > FRAMES_ON_STACK)
            delete oldFrame;
        size--;
    }

    // Unwind everything, e.g. when aborting the match with an error.
    void popAllFrames()
    {
        while (size)
            popCurrentFrame();
    }
};
+
+static int matchError(int errorCode, MatchStack& stack)
+{
+ stack.popAllFrames();
+ return errorCode;
+}
+
+/* Get the next UTF-8 character, not advancing the pointer, incrementing length
+ if there are extra bytes. This is called when we know we are in UTF-8 mode. */
+
+static inline void getUTF8CharAndIncrementLength(int& c, const unsigned char* subjectPtr, int& len)
+{
+ c = *subjectPtr;
+ if ((c & 0xc0) == 0xc0) {
+ int gcaa = kjs_pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */
+ int gcss = 6 * gcaa;
+ c = (c & kjs_pcre_utf8_table3[gcaa]) << gcss;
+ for (int gcii = 1; gcii <= gcaa; gcii++) {
+ gcss -= 6;
+ c |= (subjectPtr[gcii] & 0x3f) << gcss;
+ }
+ len += gcaa;
+ }
+}
+
+static inline void startNewGroup(MatchFrame* currentFrame)
+{
+ /* At the start of a bracketed group, add the current subject pointer to the
+ stack of such pointers, to be re-instated at the end of the group when we hit
+ the closing ket. When match() is called in other circumstances, we don't add to
+ this stack. */
+
+ currentFrame->locals.bracketChainNode.previousBracket = currentFrame->args.bracketChain;
+ currentFrame->locals.bracketChainNode.bracketStart = currentFrame->args.subjectPtr;
+ currentFrame->args.bracketChain = ¤tFrame->locals.bracketChainNode;
+}
+
+// FIXME: "minimize" means "not greedy", we should invert the callers to ask for "greedy" to be less confusing
+/* Decode the repeat parameters for one of the six opcodes in a
+   STAR/MINSTAR/PLUS/MINPLUS/QUERY/MINQUERY family. instructionOffset is
+   relative to the family's first opcode (OP_CRSTAR, OP_STAR, OP_TYPESTAR
+   or OP_NOTSTAR); each family interleaves greedy and minimizing forms. */
+static inline void repeatInformationFromInstructionOffset(short instructionOffset, bool& minimize, int& minimumRepeats, int& maximumRepeats)
+{
+    static const char minRepeatsTable[] = { 0, 0, 1, 1, 0, 0 };
+    static const int maxRepeatsTable[] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX, 1, 1 };
+
+    ASSERT(instructionOffset >= 0);
+    ASSERT(instructionOffset <= (OP_CRMINQUERY - OP_CRSTAR));
+
+    /* Odd offsets are the minimizing (non-greedy) variants; this relies on
+       the ordering Instruction, MinimizeInstruction, Instruction2, ... */
+    minimize = (instructionOffset & 1) != 0;
+    minimumRepeats = minRepeatsTable[instructionOffset];
+    maximumRepeats = maxRepeatsTable[instructionOffset];
+}
+
+static int match(const UChar* subjectPtr, const unsigned char* instructionPtr, int offsetTop, MatchData& md)
+{
+ bool isMatch = false;
+ int min;
+ bool minimize = false; /* Initialization not really needed, but some compilers think so. */
+ unsigned matchCount = 0;
+
+ MatchStack stack;
+
+ /* The opcode jump table. */
+#ifdef USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP
+#define EMIT_JUMP_TABLE_ENTRY(opcode) &&LABEL_OP_##opcode,
+ static void* opcodeJumpTable[256] = { FOR_EACH_OPCODE(EMIT_JUMP_TABLE_ENTRY) };
+#undef EMIT_JUMP_TABLE_ENTRY
+#endif
+
+ /* One-time setup of the opcode jump table. */
+#ifdef USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP
+ for (int i = 255; !opcodeJumpTable[i]; i--)
+ opcodeJumpTable[i] = &&CAPTURING_BRACKET;
+#endif
+
+#ifdef USE_COMPUTED_GOTO_FOR_MATCH_RECURSION
+ // Shark shows this as a hot line
+ // Using a static const here makes this line disappear, but makes later access hotter (not sure why)
+ stack.currentFrame->returnLocation = &&RETURN;
+#else
+ stack.currentFrame->returnLocation = 0;
+#endif
+ stack.currentFrame->args.subjectPtr = subjectPtr;
+ stack.currentFrame->args.instructionPtr = instructionPtr;
+ stack.currentFrame->args.offsetTop = offsetTop;
+ stack.currentFrame->args.bracketChain = 0;
+ startNewGroup(stack.currentFrame);
+
+ /* This is where control jumps back to to effect "recursion" */
+
+RECURSE:
+ if (++matchCount > matchLimit)
+ return matchError(JSRegExpErrorHitLimit, stack);
+
+ /* Now start processing the operations. */
+
+#ifndef USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP
+ while (true)
+#endif
+ {
+
+#ifdef USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP
+#define BEGIN_OPCODE(opcode) LABEL_OP_##opcode
+#define NEXT_OPCODE goto *opcodeJumpTable[*stack.currentFrame->args.instructionPtr]
+#else
+#define BEGIN_OPCODE(opcode) case OP_##opcode
+#define NEXT_OPCODE continue
+#endif
+
+#ifdef USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP
+ NEXT_OPCODE;
+#else
+ switch (*stack.currentFrame->args.instructionPtr)
+#endif
+ {
+ /* Non-capturing bracket: optimized */
+
+ BEGIN_OPCODE(BRA):
+ NON_CAPTURING_BRACKET:
+ DPRINTF(("start bracket 0\n"));
+ do {
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(2, stack.currentFrame->args.instructionPtr + 1 + LINK_SIZE, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ stack.currentFrame->args.instructionPtr += getLinkValue(stack.currentFrame->args.instructionPtr + 1);
+ } while (*stack.currentFrame->args.instructionPtr == OP_ALT);
+ DPRINTF(("bracket 0 failed\n"));
+ RRETURN;
+
+ /* Skip over large extraction number data if encountered. */
+
+ BEGIN_OPCODE(BRANUMBER):
+ stack.currentFrame->args.instructionPtr += 3;
+ NEXT_OPCODE;
+
+ /* End of the pattern. */
+
+ BEGIN_OPCODE(END):
+ md.endMatchPtr = stack.currentFrame->args.subjectPtr; /* Record where we ended */
+ md.endOffsetTop = stack.currentFrame->args.offsetTop; /* and how many extracts were taken */
+ isMatch = true;
+ RRETURN;
+
+ /* Assertion brackets. Check the alternative branches in turn - the
+ matching won't pass the KET for an assertion. If any one branch matches,
+ the assertion is true. Lookbehind assertions have an OP_REVERSE item at the
+ start of each branch to move the current point backwards, so the code at
+ this level is identical to the lookahead case. */
+
+ BEGIN_OPCODE(ASSERT):
+ do {
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(6, stack.currentFrame->args.instructionPtr + 1 + LINK_SIZE, NULL);
+ if (isMatch)
+ break;
+ stack.currentFrame->args.instructionPtr += getLinkValue(stack.currentFrame->args.instructionPtr + 1);
+ } while (*stack.currentFrame->args.instructionPtr == OP_ALT);
+ if (*stack.currentFrame->args.instructionPtr == OP_KET)
+ RRETURN_NO_MATCH;
+
+ /* Continue from after the assertion, updating the offsets high water
+ mark, since extracts may have been taken during the assertion. */
+
+ advanceToEndOfBracket(stack.currentFrame->args.instructionPtr);
+ stack.currentFrame->args.instructionPtr += 1 + LINK_SIZE;
+ stack.currentFrame->args.offsetTop = md.endOffsetTop;
+ NEXT_OPCODE;
+
+ /* Negative assertion: all branches must fail to match */
+
+ BEGIN_OPCODE(ASSERT_NOT):
+ do {
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(7, stack.currentFrame->args.instructionPtr + 1 + LINK_SIZE, NULL);
+ if (isMatch)
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr += getLinkValue(stack.currentFrame->args.instructionPtr + 1);
+ } while (*stack.currentFrame->args.instructionPtr == OP_ALT);
+
+ stack.currentFrame->args.instructionPtr += 1 + LINK_SIZE;
+ NEXT_OPCODE;
+
+ /* An alternation is the end of a branch; scan along to find the end of the
+ bracketed group and go to there. */
+
+ BEGIN_OPCODE(ALT):
+ advanceToEndOfBracket(stack.currentFrame->args.instructionPtr);
+ NEXT_OPCODE;
+
+ /* BRAZERO and BRAMINZERO occur just before a bracket group, indicating
+ that it may occur zero times. It may repeat infinitely, or not at all -
+ i.e. it could be ()* or ()? in the pattern. Brackets with fixed upper
+ repeat limits are compiled as a number of copies, with the optional ones
+ preceded by BRAZERO or BRAMINZERO. */
+
+ BEGIN_OPCODE(BRAZERO): {
+ stack.currentFrame->locals.startOfRepeatingBracket = stack.currentFrame->args.instructionPtr + 1;
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(14, stack.currentFrame->locals.startOfRepeatingBracket, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ advanceToEndOfBracket(stack.currentFrame->locals.startOfRepeatingBracket);
+ stack.currentFrame->args.instructionPtr = stack.currentFrame->locals.startOfRepeatingBracket + 1 + LINK_SIZE;
+ NEXT_OPCODE;
+ }
+
+ BEGIN_OPCODE(BRAMINZERO): {
+ stack.currentFrame->locals.startOfRepeatingBracket = stack.currentFrame->args.instructionPtr + 1;
+ advanceToEndOfBracket(stack.currentFrame->locals.startOfRepeatingBracket);
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(15, stack.currentFrame->locals.startOfRepeatingBracket + 1 + LINK_SIZE, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+ }
+
+ /* End of a group, repeated or non-repeating. If we are at the end of
+ an assertion "group", stop matching and return 1, but record the
+ current high water mark for use by positive assertions. Do this also
+ for the "once" (not-backup up) groups. */
+
+ BEGIN_OPCODE(KET):
+ BEGIN_OPCODE(KETRMIN):
+ BEGIN_OPCODE(KETRMAX):
+ stack.currentFrame->locals.instructionPtrAtStartOfOnce = stack.currentFrame->args.instructionPtr - getLinkValue(stack.currentFrame->args.instructionPtr + 1);
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.bracketChain->bracketStart;
+
+ /* Back up the stack of bracket start pointers. */
+
+ stack.currentFrame->args.bracketChain = stack.currentFrame->args.bracketChain->previousBracket;
+
+ if (*stack.currentFrame->locals.instructionPtrAtStartOfOnce == OP_ASSERT || *stack.currentFrame->locals.instructionPtrAtStartOfOnce == OP_ASSERT_NOT) {
+ md.endOffsetTop = stack.currentFrame->args.offsetTop;
+ isMatch = true;
+ RRETURN;
+ }
+
+ /* In all other cases except a conditional group we have to check the
+ group number back at the start and if necessary complete handling an
+ extraction by setting the offsets and bumping the high water mark. */
+
+ stack.currentFrame->locals.number = *stack.currentFrame->locals.instructionPtrAtStartOfOnce - OP_BRA;
+
+ /* For extended extraction brackets (large number), we have to fish out
+ the number from a dummy opcode at the start. */
+
+ if (stack.currentFrame->locals.number > EXTRACT_BASIC_MAX)
+ stack.currentFrame->locals.number = get2ByteValue(stack.currentFrame->locals.instructionPtrAtStartOfOnce + 2 + LINK_SIZE);
+ stack.currentFrame->locals.offset = stack.currentFrame->locals.number << 1;
+
+#ifdef DEBUG
+ printf("end bracket %d", stack.currentFrame->locals.number);
+ printf("\n");
+#endif
+
+ /* Test for a numbered group. This includes groups called as a result
+ of recursion. Note that whole-pattern recursion is coded as a recurse
+ into group 0, so it won't be picked up here. Instead, we catch it when
+ the OP_END is reached. */
+
+ if (stack.currentFrame->locals.number > 0) {
+ if (stack.currentFrame->locals.offset >= md.offsetMax)
+ md.offsetOverflow = true;
+ else {
+ md.offsetVector[stack.currentFrame->locals.offset] =
+ md.offsetVector[md.offsetEnd - stack.currentFrame->locals.number];
+ md.offsetVector[stack.currentFrame->locals.offset+1] = stack.currentFrame->args.subjectPtr - md.startSubject;
+ if (stack.currentFrame->args.offsetTop <= stack.currentFrame->locals.offset)
+ stack.currentFrame->args.offsetTop = stack.currentFrame->locals.offset + 2;
+ }
+ }
+
+ /* For a non-repeating ket, just continue at this level. This also
+ happens for a repeating ket if no characters were matched in the group.
+ This is the forcible breaking of infinite loops as implemented in Perl
+ 5.005. If there is an options reset, it will get obeyed in the normal
+ course of events. */
+
+ if (*stack.currentFrame->args.instructionPtr == OP_KET || stack.currentFrame->args.subjectPtr == stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
+ stack.currentFrame->args.instructionPtr += 1 + LINK_SIZE;
+ NEXT_OPCODE;
+ }
+
+ /* The repeating kets try the rest of the pattern or restart from the
+ preceding bracket, in the appropriate order. */
+
+ if (*stack.currentFrame->args.instructionPtr == OP_KETRMIN) {
+ RECURSIVE_MATCH(16, stack.currentFrame->args.instructionPtr + 1 + LINK_SIZE, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(17, stack.currentFrame->locals.instructionPtrAtStartOfOnce, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ } else { /* OP_KETRMAX */
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(18, stack.currentFrame->locals.instructionPtrAtStartOfOnce, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ RECURSIVE_MATCH(19, stack.currentFrame->args.instructionPtr + 1 + LINK_SIZE, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ }
+ RRETURN;
+
+ /* Start of subject. */
+
+ BEGIN_OPCODE(CIRC):
+ if (stack.currentFrame->args.subjectPtr != md.startSubject)
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ /* After internal newline if multiline. */
+
+ BEGIN_OPCODE(BOL):
+ if (stack.currentFrame->args.subjectPtr != md.startSubject && !isNewline(stack.currentFrame->args.subjectPtr[-1]))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ /* End of subject. */
+
+ BEGIN_OPCODE(DOLL):
+ if (stack.currentFrame->args.subjectPtr < md.endSubject)
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ /* Before internal newline if multiline. */
+
+ BEGIN_OPCODE(EOL):
+ if (stack.currentFrame->args.subjectPtr < md.endSubject && !isNewline(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ /* Word boundary assertions */
+
+ BEGIN_OPCODE(NOT_WORD_BOUNDARY):
+ BEGIN_OPCODE(WORD_BOUNDARY): {
+ bool currentCharIsWordChar = false;
+ bool previousCharIsWordChar = false;
+
+ if (stack.currentFrame->args.subjectPtr > md.startSubject)
+ previousCharIsWordChar = isWordChar(stack.currentFrame->args.subjectPtr[-1]);
+ if (stack.currentFrame->args.subjectPtr < md.endSubject)
+ currentCharIsWordChar = isWordChar(*stack.currentFrame->args.subjectPtr);
+
+ /* Now see if the situation is what we want */
+ bool wordBoundaryDesired = (*stack.currentFrame->args.instructionPtr++ == OP_WORD_BOUNDARY);
+ if (wordBoundaryDesired ? currentCharIsWordChar == previousCharIsWordChar : currentCharIsWordChar != previousCharIsWordChar)
+ RRETURN_NO_MATCH;
+ NEXT_OPCODE;
+ }
+
+ /* Match a single character type; inline for speed */
+
+ BEGIN_OPCODE(NOT_NEWLINE):
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (isNewline(*stack.currentFrame->args.subjectPtr++))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ BEGIN_OPCODE(NOT_DIGIT):
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (isASCIIDigit(*stack.currentFrame->args.subjectPtr++))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ BEGIN_OPCODE(DIGIT):
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (!isASCIIDigit(*stack.currentFrame->args.subjectPtr++))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ BEGIN_OPCODE(NOT_WHITESPACE):
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (isSpaceChar(*stack.currentFrame->args.subjectPtr++))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ BEGIN_OPCODE(WHITESPACE):
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (!isSpaceChar(*stack.currentFrame->args.subjectPtr++))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ BEGIN_OPCODE(NOT_WORDCHAR):
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (isWordChar(*stack.currentFrame->args.subjectPtr++))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ BEGIN_OPCODE(WORDCHAR):
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (!isWordChar(*stack.currentFrame->args.subjectPtr++))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ NEXT_OPCODE;
+
+ /* Match a back reference, possibly repeatedly. Look past the end of the
+ item to see if there is repeat information following. The code is similar
+ to that for character classes, but repeated for efficiency. Then obey
+ similar code to character type repeats - written out again for speed.
+ However, if the referenced string is the empty string, always treat
+ it as matched, any number of times (otherwise there could be infinite
+ loops). */
+
+ BEGIN_OPCODE(REF):
+ stack.currentFrame->locals.offset = get2ByteValue(stack.currentFrame->args.instructionPtr + 1) << 1; /* Doubled ref number */
+ stack.currentFrame->args.instructionPtr += 3; /* Advance past item */
+
+ /* If the reference is unset, set the length to be longer than the amount
+ of subject left; this ensures that every attempt at a match fails. We
+ can't just fail here, because of the possibility of quantifiers with zero
+ minima. */
+
+ if (stack.currentFrame->locals.offset >= stack.currentFrame->args.offsetTop || md.offsetVector[stack.currentFrame->locals.offset] < 0)
+ stack.currentFrame->locals.length = 0;
+ else
+ stack.currentFrame->locals.length = md.offsetVector[stack.currentFrame->locals.offset+1] - md.offsetVector[stack.currentFrame->locals.offset];
+
+ /* Set up for repetition, or handle the non-repeated case */
+
+ switch (*stack.currentFrame->args.instructionPtr) {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRPLUS:
+ case OP_CRMINPLUS:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ repeatInformationFromInstructionOffset(*stack.currentFrame->args.instructionPtr++ - OP_CRSTAR, minimize, min, stack.currentFrame->locals.max);
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ minimize = (*stack.currentFrame->args.instructionPtr == OP_CRMINRANGE);
+ min = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 3);
+ if (stack.currentFrame->locals.max == 0)
+ stack.currentFrame->locals.max = INT_MAX;
+ stack.currentFrame->args.instructionPtr += 5;
+ break;
+
+ default: /* No repeat follows */
+ if (!matchRef(stack.currentFrame->locals.offset, stack.currentFrame->args.subjectPtr, stack.currentFrame->locals.length, md))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.subjectPtr += stack.currentFrame->locals.length;
+ NEXT_OPCODE;
+ }
+
+ /* If the length of the reference is zero, just continue with the
+ main loop. */
+
+ if (stack.currentFrame->locals.length == 0)
+ NEXT_OPCODE;
+
+ /* First, ensure the minimum number of matches are present. */
+
+ for (int i = 1; i <= min; i++) {
+ if (!matchRef(stack.currentFrame->locals.offset, stack.currentFrame->args.subjectPtr, stack.currentFrame->locals.length, md))
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.subjectPtr += stack.currentFrame->locals.length;
+ }
+
+ /* If min = max, continue at the same level without recursion.
+ They are not both allowed to be zero. */
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ /* If minimizing, keep trying and advancing the pointer */
+
+ if (minimize) {
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(20, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || !matchRef(stack.currentFrame->locals.offset, stack.currentFrame->args.subjectPtr, stack.currentFrame->locals.length, md))
+ RRETURN;
+ stack.currentFrame->args.subjectPtr += stack.currentFrame->locals.length;
+ }
+ /* Control never reaches here */
+ }
+
+ /* If maximizing, find the longest string and work backwards */
+
+ else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr;
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (!matchRef(stack.currentFrame->locals.offset, stack.currentFrame->args.subjectPtr, stack.currentFrame->locals.length, md))
+ break;
+ stack.currentFrame->args.subjectPtr += stack.currentFrame->locals.length;
+ }
+ while (stack.currentFrame->args.subjectPtr >= stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
+ RECURSIVE_MATCH(21, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ stack.currentFrame->args.subjectPtr -= stack.currentFrame->locals.length;
+ }
+ RRETURN_NO_MATCH;
+ }
+ /* Control never reaches here */
+
+ /* Match a bit-mapped character class, possibly repeatedly. This op code is
+ used when all the characters in the class have values in the range 0-255,
+ and either the matching is caseful, or the characters are in the range
+ 0-127 when UTF-8 processing is enabled. The only difference between
+ OP_CLASS and OP_NCLASS occurs when a data character outside the range is
+ encountered.
+
+ First, look past the end of the item to see if there is repeat information
+ following. Then obey similar code to character type repeats - written out
+ again for speed. */
+
+ BEGIN_OPCODE(NCLASS):
+ BEGIN_OPCODE(CLASS):
+ stack.currentFrame->locals.data = stack.currentFrame->args.instructionPtr + 1; /* Save for matching */
+ stack.currentFrame->args.instructionPtr += 33; /* Advance past the item */
+
+ switch (*stack.currentFrame->args.instructionPtr) {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRPLUS:
+ case OP_CRMINPLUS:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ repeatInformationFromInstructionOffset(*stack.currentFrame->args.instructionPtr++ - OP_CRSTAR, minimize, min, stack.currentFrame->locals.max);
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ minimize = (*stack.currentFrame->args.instructionPtr == OP_CRMINRANGE);
+ min = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 3);
+ if (stack.currentFrame->locals.max == 0)
+ stack.currentFrame->locals.max = INT_MAX;
+ stack.currentFrame->args.instructionPtr += 5;
+ break;
+
+ default: /* No repeat follows */
+ min = stack.currentFrame->locals.max = 1;
+ break;
+ }
+
+ /* First, ensure the minimum number of matches are present. */
+
+ for (int i = 1; i <= min; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ int c = *stack.currentFrame->args.subjectPtr++;
+ if (c > 255) {
+ if (stack.currentFrame->locals.data[-1] == OP_CLASS)
+ RRETURN_NO_MATCH;
+ } else {
+ if (!(stack.currentFrame->locals.data[c / 8] & (1 << (c & 7))))
+ RRETURN_NO_MATCH;
+ }
+ }
+
+ /* If max == min we can continue with the main loop without the
+ need to recurse. */
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ /* If minimizing, keep testing the rest of the expression and advancing
+ the pointer while it matches the class. */
+ if (minimize) {
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(22, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN;
+ int c = *stack.currentFrame->args.subjectPtr++;
+ if (c > 255) {
+ if (stack.currentFrame->locals.data[-1] == OP_CLASS)
+ RRETURN;
+ } else {
+ if ((stack.currentFrame->locals.data[c/8] & (1 << (c&7))) == 0)
+ RRETURN;
+ }
+ }
+ /* Control never reaches here */
+ }
+ /* If maximizing, find the longest possible run, then work backwards. */
+ else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr;
+
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (c > 255) {
+ if (stack.currentFrame->locals.data[-1] == OP_CLASS)
+ break;
+ } else {
+ if (!(stack.currentFrame->locals.data[c / 8] & (1 << (c & 7))))
+ break;
+ }
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ for (;;) {
+ RECURSIVE_MATCH(24, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
+ break; /* Stop if tried at original pos */
+ }
+
+ RRETURN;
+ }
+ /* Control never reaches here */
+
+ /* Match an extended character class. */
+
+ BEGIN_OPCODE(XCLASS):
+ stack.currentFrame->locals.data = stack.currentFrame->args.instructionPtr + 1 + LINK_SIZE; /* Save for matching */
+ stack.currentFrame->args.instructionPtr += getLinkValue(stack.currentFrame->args.instructionPtr + 1); /* Advance past the item */
+
+ switch (*stack.currentFrame->args.instructionPtr) {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRPLUS:
+ case OP_CRMINPLUS:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ repeatInformationFromInstructionOffset(*stack.currentFrame->args.instructionPtr++ - OP_CRSTAR, minimize, min, stack.currentFrame->locals.max);
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ minimize = (*stack.currentFrame->args.instructionPtr == OP_CRMINRANGE);
+ min = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 3);
+ if (stack.currentFrame->locals.max == 0)
+ stack.currentFrame->locals.max = INT_MAX;
+ stack.currentFrame->args.instructionPtr += 5;
+ break;
+
+ default: /* No repeat follows */
+ min = stack.currentFrame->locals.max = 1;
+ }
+
+ /* First, ensure the minimum number of matches are present. */
+
+ for (int i = 1; i <= min; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ int c = *stack.currentFrame->args.subjectPtr++;
+ if (!kjs_pcre_xclass(c, stack.currentFrame->locals.data))
+ RRETURN_NO_MATCH;
+ }
+
+ /* If max == min we can continue with the main loop without the
+ need to recurse. */
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ /* If minimizing, keep testing the rest of the expression and advancing
+ the pointer while it matches the class. */
+
+ if (minimize) {
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(26, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN;
+ int c = *stack.currentFrame->args.subjectPtr++;
+ if (!kjs_pcre_xclass(c, stack.currentFrame->locals.data))
+ RRETURN;
+ }
+ /* Control never reaches here */
+ }
+
+ /* If maximizing, find the longest possible run, then work backwards. */
+
+ else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr;
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (!kjs_pcre_xclass(c, stack.currentFrame->locals.data))
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ for(;;) {
+ RECURSIVE_MATCH(27, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
+ break; /* Stop if tried at original pos */
+ }
+ RRETURN;
+ }
+
+ /* Control never reaches here */
+
+ /* Match a single character, casefully */
+
+ BEGIN_OPCODE(CHAR):
+ stack.currentFrame->locals.length = 1;
+ stack.currentFrame->args.instructionPtr++;
+ getUTF8CharAndIncrementLength(stack.currentFrame->locals.fc, stack.currentFrame->args.instructionPtr, stack.currentFrame->locals.length);
+ stack.currentFrame->args.instructionPtr += stack.currentFrame->locals.length;
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ if (stack.currentFrame->locals.fc != *stack.currentFrame->args.subjectPtr++)
+ RRETURN_NO_MATCH;
+ NEXT_OPCODE;
+
+ /* Match a single character, caselessly */
+
+ BEGIN_OPCODE(CHAR_IGNORING_CASE): {
+ stack.currentFrame->locals.length = 1;
+ stack.currentFrame->args.instructionPtr++;
+ getUTF8CharAndIncrementLength(stack.currentFrame->locals.fc, stack.currentFrame->args.instructionPtr, stack.currentFrame->locals.length);
+ stack.currentFrame->args.instructionPtr += stack.currentFrame->locals.length;
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ int dc = *stack.currentFrame->args.subjectPtr++;
+ if (stack.currentFrame->locals.fc != dc && kjs_pcre_ucp_othercase(stack.currentFrame->locals.fc) != dc)
+ RRETURN_NO_MATCH;
+ NEXT_OPCODE;
+ }
+
+ /* Match a single ASCII character. */
+
+ BEGIN_OPCODE(ASCII_CHAR):
+ if (md.endSubject == stack.currentFrame->args.subjectPtr)
+ RRETURN_NO_MATCH;
+ if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->args.instructionPtr[1])
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ stack.currentFrame->args.instructionPtr += 2;
+ NEXT_OPCODE;
+
+ /* Match one of two cases of an ASCII letter. */
+
+ BEGIN_OPCODE(ASCII_LETTER_IGNORING_CASE):
+ if (md.endSubject == stack.currentFrame->args.subjectPtr)
+ RRETURN_NO_MATCH;
+ if ((*stack.currentFrame->args.subjectPtr | 0x20) != stack.currentFrame->args.instructionPtr[1])
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ stack.currentFrame->args.instructionPtr += 2;
+ NEXT_OPCODE;
+
+ /* Match a single character repeatedly; different opcodes share code. */
+
+ BEGIN_OPCODE(EXACT):
+ min = stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ minimize = false;
+ stack.currentFrame->args.instructionPtr += 3;
+ goto REPEATCHAR;
+
+ BEGIN_OPCODE(UPTO):
+ BEGIN_OPCODE(MINUPTO):
+ min = 0;
+ stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ minimize = *stack.currentFrame->args.instructionPtr == OP_MINUPTO;
+ stack.currentFrame->args.instructionPtr += 3;
+ goto REPEATCHAR;
+
+ BEGIN_OPCODE(STAR):
+ BEGIN_OPCODE(MINSTAR):
+ BEGIN_OPCODE(PLUS):
+ BEGIN_OPCODE(MINPLUS):
+ BEGIN_OPCODE(QUERY):
+ BEGIN_OPCODE(MINQUERY):
+ repeatInformationFromInstructionOffset(*stack.currentFrame->args.instructionPtr++ - OP_STAR, minimize, min, stack.currentFrame->locals.max);
+
+ /* Common code for all repeated single-character matches. We can give
+ up quickly if there are fewer than the minimum number of characters left in
+ the subject. */
+
+ REPEATCHAR:
+
+ stack.currentFrame->locals.length = 1;
+ getUTF8CharAndIncrementLength(stack.currentFrame->locals.fc, stack.currentFrame->args.instructionPtr, stack.currentFrame->locals.length);
+ if (min * (stack.currentFrame->locals.fc > 0xFFFF ? 2 : 1) > md.endSubject - stack.currentFrame->args.subjectPtr)
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr += stack.currentFrame->locals.length;
+
+ if (stack.currentFrame->locals.fc <= 0xFFFF) {
+ int othercase = md.ignoreCase ? kjs_pcre_ucp_othercase(stack.currentFrame->locals.fc) : -1;
+
+ for (int i = 1; i <= min; i++) {
+ if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc && *stack.currentFrame->args.subjectPtr != othercase)
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ if (minimize) {
+ stack.currentFrame->locals.repeatOthercase = othercase;
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(28, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN;
+ if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc && *stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.repeatOthercase)
+ RRETURN;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ /* Control never reaches here */
+ } else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr;
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc && *stack.currentFrame->args.subjectPtr != othercase)
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ while (stack.currentFrame->args.subjectPtr >= stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
+ RECURSIVE_MATCH(29, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ --stack.currentFrame->args.subjectPtr;
+ }
+ RRETURN_NO_MATCH;
+ }
+ /* Control never reaches here */
+ } else {
+ /* No case on surrogate pairs, so no need to bother with "othercase". */
+
+ for (int i = 1; i <= min; i++) {
+ if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc)
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.subjectPtr += 2;
+ }
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ if (minimize) {
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(30, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN;
+ if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc)
+ RRETURN;
+ stack.currentFrame->args.subjectPtr += 2;
+ }
+ /* Control never reaches here */
+ } else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr;
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr > md.endSubject - 2)
+ break;
+ if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc)
+ break;
+ stack.currentFrame->args.subjectPtr += 2;
+ }
+ while (stack.currentFrame->args.subjectPtr >= stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
+ RECURSIVE_MATCH(31, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ stack.currentFrame->args.subjectPtr -= 2;
+ }
+ RRETURN_NO_MATCH;
+ }
+ /* Control never reaches here */
+ }
+ /* Control never reaches here */
+
+ /* Match a negated single one-byte character. */
+
+ BEGIN_OPCODE(NOT): {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN_NO_MATCH;
+ stack.currentFrame->args.instructionPtr++;
+ int c = *stack.currentFrame->args.subjectPtr++;
+ if (md.ignoreCase) {
+ if (c < 128)
+ c = toLowerCase(c);
+ if (toLowerCase(*stack.currentFrame->args.instructionPtr++) == c)
+ RRETURN_NO_MATCH;
+ } else {
+ if (*stack.currentFrame->args.instructionPtr++ == c)
+ RRETURN_NO_MATCH;
+ }
+ NEXT_OPCODE;
+ }
+
+ /* Match a negated single one-byte character repeatedly. This is almost a
+ repeat of the code for a repeated single character, but I haven't found a
+ nice way of commoning these up that doesn't require a test of the
+ positive/negative option for each character match. Maybe that wouldn't add
+ very much to the time taken, but character matching *is* what this is all
+ about... */
+
+ BEGIN_OPCODE(NOTEXACT):
+ min = stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ minimize = false;
+ stack.currentFrame->args.instructionPtr += 3;
+ goto REPEATNOTCHAR;
+
+ BEGIN_OPCODE(NOTUPTO):
+ BEGIN_OPCODE(NOTMINUPTO):
+ min = 0;
+ stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ minimize = *stack.currentFrame->args.instructionPtr == OP_NOTMINUPTO;
+ stack.currentFrame->args.instructionPtr += 3;
+ goto REPEATNOTCHAR;
+
+ BEGIN_OPCODE(NOTSTAR):
+ BEGIN_OPCODE(NOTMINSTAR):
+ BEGIN_OPCODE(NOTPLUS):
+ BEGIN_OPCODE(NOTMINPLUS):
+ BEGIN_OPCODE(NOTQUERY):
+ BEGIN_OPCODE(NOTMINQUERY):
+ repeatInformationFromInstructionOffset(*stack.currentFrame->args.instructionPtr++ - OP_NOTSTAR, minimize, min, stack.currentFrame->locals.max);
+
+ /* Common code for all repeated single-byte matches. We can give up quickly
+ if there are fewer than the minimum number of bytes left in the
+ subject. */
+
+ REPEATNOTCHAR:
+ if (min > md.endSubject - stack.currentFrame->args.subjectPtr)
+ RRETURN_NO_MATCH;
+ stack.currentFrame->locals.fc = *stack.currentFrame->args.instructionPtr++;
+
+ /* The code is duplicated for the caseless and caseful cases, for speed,
+ since matching characters is likely to be quite common. First, ensure the
+ minimum number of matches are present. If min = max, continue at the same
+ level without recursing. Otherwise, if minimizing, keep trying the rest of
+ the expression and advancing one matching character if failing, up to the
+ maximum. Alternatively, if maximizing, find the maximum number of
+ characters and work backwards. */
+
+ DPRINTF(("negative matching %c{%d,%d}\n", stack.currentFrame->locals.fc, min, stack.currentFrame->locals.max));
+
+ if (md.ignoreCase) {
+ if (stack.currentFrame->locals.fc < 128)
+ stack.currentFrame->locals.fc = toLowerCase(stack.currentFrame->locals.fc);
+
+ for (int i = 1; i <= min; i++) {
+ int d = *stack.currentFrame->args.subjectPtr++;
+ if (d < 128)
+ d = toLowerCase(d);
+ if (stack.currentFrame->locals.fc == d)
+ RRETURN_NO_MATCH;
+ }
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ if (minimize) {
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(38, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ int d = *stack.currentFrame->args.subjectPtr++;
+ if (d < 128)
+ d = toLowerCase(d);
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject || stack.currentFrame->locals.fc == d)
+ RRETURN;
+ }
+ /* Control never reaches here */
+ }
+
+ /* Maximize case */
+
+ else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr;
+
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int d = *stack.currentFrame->args.subjectPtr;
+ if (d < 128)
+ d = toLowerCase(d);
+ if (stack.currentFrame->locals.fc == d)
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ for (;;) {
+ RECURSIVE_MATCH(40, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
+ break; /* Stop if tried at original pos */
+ }
+
+ RRETURN;
+ }
+ /* Control never reaches here */
+ }
+
+ /* Caseful comparisons */
+
+ else {
+ for (int i = 1; i <= min; i++) {
+ int d = *stack.currentFrame->args.subjectPtr++;
+ if (stack.currentFrame->locals.fc == d)
+ RRETURN_NO_MATCH;
+ }
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ if (minimize) {
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(42, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ int d = *stack.currentFrame->args.subjectPtr++;
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject || stack.currentFrame->locals.fc == d)
+ RRETURN;
+ }
+ /* Control never reaches here */
+ }
+
+ /* Maximize case */
+
+ else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr;
+
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int d = *stack.currentFrame->args.subjectPtr;
+ if (stack.currentFrame->locals.fc == d)
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ for (;;) {
+ RECURSIVE_MATCH(44, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
+ break; /* Stop if tried at original pos */
+ }
+
+ RRETURN;
+ }
+ }
+ /* Control never reaches here */
+
+ /* Match a single character type repeatedly; several different opcodes
+ share code. This is very similar to the code for single characters, but we
+ repeat it in the interests of efficiency. */
+
+ BEGIN_OPCODE(TYPEEXACT):
+ min = stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ minimize = true;
+ stack.currentFrame->args.instructionPtr += 3;
+ goto REPEATTYPE;
+
+ BEGIN_OPCODE(TYPEUPTO):
+ BEGIN_OPCODE(TYPEMINUPTO):
+ min = 0;
+ stack.currentFrame->locals.max = get2ByteValue(stack.currentFrame->args.instructionPtr + 1);
+ minimize = *stack.currentFrame->args.instructionPtr == OP_TYPEMINUPTO;
+ stack.currentFrame->args.instructionPtr += 3;
+ goto REPEATTYPE;
+
+ BEGIN_OPCODE(TYPESTAR):
+ BEGIN_OPCODE(TYPEMINSTAR):
+ BEGIN_OPCODE(TYPEPLUS):
+ BEGIN_OPCODE(TYPEMINPLUS):
+ BEGIN_OPCODE(TYPEQUERY):
+ BEGIN_OPCODE(TYPEMINQUERY):
+ repeatInformationFromInstructionOffset(*stack.currentFrame->args.instructionPtr++ - OP_TYPESTAR, minimize, min, stack.currentFrame->locals.max);
+
+ /* Common code for all repeated single character type matches. Note that
+ in UTF-8 mode, '.' matches a character of any length, but for the other
+ character types, the valid characters are all one-byte long. */
+
+ REPEATTYPE:
+ stack.currentFrame->locals.ctype = *stack.currentFrame->args.instructionPtr++; /* Code for the character type */
+
+ /* First, ensure the minimum number of matches are present. Use inline
+ code for maximizing the speed, and do the type test once at the start
+ (i.e. keep it out of the loop). Also we can test that there are at least
+ the minimum number of characters before we start. */
+
+ if (min > md.endSubject - stack.currentFrame->args.subjectPtr)
+ RRETURN_NO_MATCH;
+ if (min > 0) {
+ switch (stack.currentFrame->locals.ctype) {
+ case OP_NOT_NEWLINE:
+ for (int i = 1; i <= min; i++) {
+ if (isNewline(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ for (int i = 1; i <= min; i++) {
+ if (isASCIIDigit(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_DIGIT:
+ for (int i = 1; i <= min; i++) {
+ if (!isASCIIDigit(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_NOT_WHITESPACE:
+ for (int i = 1; i <= min; i++) {
+ if (isSpaceChar(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_WHITESPACE:
+ for (int i = 1; i <= min; i++) {
+ if (!isSpaceChar(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (int i = 1; i <= min; i++) {
+ if (isWordChar(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_WORDCHAR:
+ for (int i = 1; i <= min; i++) {
+ if (!isWordChar(*stack.currentFrame->args.subjectPtr))
+ RRETURN_NO_MATCH;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ return matchError(JSRegExpErrorInternal, stack);
+ } /* End switch(stack.currentFrame->locals.ctype) */
+ }
+
+ /* If min = max, continue at the same level without recursing */
+
+ if (min == stack.currentFrame->locals.max)
+ NEXT_OPCODE;
+
+ /* If minimizing, we have to test the rest of the pattern before each
+ subsequent match. */
+
+ if (minimize) {
+ for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
+ RECURSIVE_MATCH(48, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
+ RRETURN;
+
+ int c = *stack.currentFrame->args.subjectPtr++;
+ switch (stack.currentFrame->locals.ctype) {
+ case OP_NOT_NEWLINE:
+ if (isNewline(c))
+ RRETURN;
+ break;
+
+ case OP_NOT_DIGIT:
+ if (isASCIIDigit(c))
+ RRETURN;
+ break;
+
+ case OP_DIGIT:
+ if (!isASCIIDigit(c))
+ RRETURN;
+ break;
+
+ case OP_NOT_WHITESPACE:
+ if (isSpaceChar(c))
+ RRETURN;
+ break;
+
+ case OP_WHITESPACE:
+ if (!isSpaceChar(c))
+ RRETURN;
+ break;
+
+ case OP_NOT_WORDCHAR:
+ if (isWordChar(c))
+ RRETURN;
+ break;
+
+ case OP_WORDCHAR:
+ if (!isWordChar(c))
+ RRETURN;
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ return matchError(JSRegExpErrorInternal, stack);
+ }
+ }
+ /* Control never reaches here */
+ }
+
+ /* If maximizing it is worth using inline code for speed, doing the type
+ test once at the start (i.e. keep it out of the loop). */
+
+ else {
+ stack.currentFrame->locals.subjectPtrAtStartOfInstruction = stack.currentFrame->args.subjectPtr; /* Remember where we started */
+
+ switch (stack.currentFrame->locals.ctype) {
+ case OP_NOT_NEWLINE:
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject || isNewline(*stack.currentFrame->args.subjectPtr))
+ break;
+ stack.currentFrame->args.subjectPtr++;
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (isASCIIDigit(c))
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_DIGIT:
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (!isASCIIDigit(c))
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_NOT_WHITESPACE:
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (isSpaceChar(c))
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_WHITESPACE:
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (!isSpaceChar(c))
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (isWordChar(c))
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ case OP_WORDCHAR:
+ for (int i = min; i < stack.currentFrame->locals.max; i++) {
+ if (stack.currentFrame->args.subjectPtr >= md.endSubject)
+ break;
+ int c = *stack.currentFrame->args.subjectPtr;
+ if (!isWordChar(c))
+ break;
+ ++stack.currentFrame->args.subjectPtr;
+ }
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ return matchError(JSRegExpErrorInternal, stack);
+ }
+
+ /* stack.currentFrame->args.subjectPtr is now past the end of the maximum run */
+
+ for (;;) {
+ RECURSIVE_MATCH(52, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
+ break; /* Stop if tried at original pos */
+ }
+
+ /* Get here if we can't make it match with any permitted repetitions */
+
+ RRETURN;
+ }
+ /* Control never reaches here */
+
+ BEGIN_OPCODE(CRMINPLUS):
+ BEGIN_OPCODE(CRMINQUERY):
+ BEGIN_OPCODE(CRMINRANGE):
+ BEGIN_OPCODE(CRMINSTAR):
+ BEGIN_OPCODE(CRPLUS):
+ BEGIN_OPCODE(CRQUERY):
+ BEGIN_OPCODE(CRRANGE):
+ BEGIN_OPCODE(CRSTAR):
+ ASSERT_NOT_REACHED();
+ return matchError(JSRegExpErrorInternal, stack);
+
+#ifdef USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP
+ CAPTURING_BRACKET:
+#else
+ default:
+#endif
+ /* Opening capturing bracket. If there is space in the offset vector, save
+ the current subject position in the working slot at the top of the vector. We
+ mustn't change the current values of the data slot, because they may be set
+ from a previous iteration of this group, and be referred to by a reference
+ inside the group.
+
+ If the bracket fails to match, we need to restore this value and also the
+ values of the final offsets, in case they were set by a previous iteration of
+ the same bracket.
+
+ If there isn't enough space in the offset vector, treat this as if it were a
+ non-capturing bracket. Don't worry about setting the flag for the error case
+ here; that is handled in the code for KET. */
+
+ ASSERT(*stack.currentFrame->args.instructionPtr > OP_BRA);
+
+ stack.currentFrame->locals.number = *stack.currentFrame->args.instructionPtr - OP_BRA;
+
+ /* For extended extraction brackets (large number), we have to fish out the
+ number from a dummy opcode at the start. */
+
+ if (stack.currentFrame->locals.number > EXTRACT_BASIC_MAX)
+ stack.currentFrame->locals.number = get2ByteValue(stack.currentFrame->args.instructionPtr + 2 + LINK_SIZE);
+ stack.currentFrame->locals.offset = stack.currentFrame->locals.number << 1;
+
+#ifdef DEBUG
+ printf("start bracket %d subject=", stack.currentFrame->locals.number);
+ pchars(stack.currentFrame->args.subjectPtr, 16, true, md);
+ printf("\n");
+#endif
+
+ if (stack.currentFrame->locals.offset < md.offsetMax) {
+ stack.currentFrame->locals.saveOffset1 = md.offsetVector[stack.currentFrame->locals.offset];
+ stack.currentFrame->locals.saveOffset2 = md.offsetVector[stack.currentFrame->locals.offset + 1];
+ stack.currentFrame->locals.saveOffset3 = md.offsetVector[md.offsetEnd - stack.currentFrame->locals.number];
+
+ DPRINTF(("saving %d %d %d\n", stack.currentFrame->locals.saveOffset1, stack.currentFrame->locals.saveOffset2, stack.currentFrame->locals.saveOffset3));
+ md.offsetVector[md.offsetEnd - stack.currentFrame->locals.number] = stack.currentFrame->args.subjectPtr - md.startSubject;
+
+ do {
+ RECURSIVE_MATCH_STARTNG_NEW_GROUP(1, stack.currentFrame->args.instructionPtr + 1 + LINK_SIZE, stack.currentFrame->args.bracketChain);
+ if (isMatch)
+ RRETURN;
+ stack.currentFrame->args.instructionPtr += getLinkValue(stack.currentFrame->args.instructionPtr + 1);
+ } while (*stack.currentFrame->args.instructionPtr == OP_ALT);
+
+ DPRINTF(("bracket %d failed\n", stack.currentFrame->locals.number));
+
+ md.offsetVector[stack.currentFrame->locals.offset] = stack.currentFrame->locals.saveOffset1;
+ md.offsetVector[stack.currentFrame->locals.offset + 1] = stack.currentFrame->locals.saveOffset2;
+ md.offsetVector[md.offsetEnd - stack.currentFrame->locals.number] = stack.currentFrame->locals.saveOffset3;
+
+ RRETURN;
+ }
+
+ /* Insufficient room for saving captured contents */
+
+ goto NON_CAPTURING_BRACKET;
+ }
+
+ /* Do not stick any code in here without much thought; it is assumed
+ that "continue" in the code above comes out to here to repeat the main
+ loop. */
+
+ } /* End of main loop */
+
+ ASSERT_NOT_REACHED();
+
+#ifndef USE_COMPUTED_GOTO_FOR_MATCH_RECURSION
+
+RRETURN_SWITCH:
+ switch (stack.currentFrame->returnLocation) {
+ case 0: goto RETURN;
+ case 1: goto RRETURN_1;
+ case 2: goto RRETURN_2;
+ case 6: goto RRETURN_6;
+ case 7: goto RRETURN_7;
+ case 14: goto RRETURN_14;
+ case 15: goto RRETURN_15;
+ case 16: goto RRETURN_16;
+ case 17: goto RRETURN_17;
+ case 18: goto RRETURN_18;
+ case 19: goto RRETURN_19;
+ case 20: goto RRETURN_20;
+ case 21: goto RRETURN_21;
+ case 22: goto RRETURN_22;
+ case 24: goto RRETURN_24;
+ case 26: goto RRETURN_26;
+ case 27: goto RRETURN_27;
+ case 28: goto RRETURN_28;
+ case 29: goto RRETURN_29;
+ case 30: goto RRETURN_30;
+ case 31: goto RRETURN_31;
+ case 38: goto RRETURN_38;
+ case 40: goto RRETURN_40;
+ case 42: goto RRETURN_42;
+ case 44: goto RRETURN_44;
+ case 48: goto RRETURN_48;
+ case 52: goto RRETURN_52;
+ }
+
+ ASSERT_NOT_REACHED();
+ return matchError(JSRegExpErrorInternal, stack);
+
+#endif
+
+RETURN:
+ return isMatch;
+}
+
+
+/*************************************************
+* Execute a Regular Expression *
+*************************************************/
+
+/* This function applies a compiled re to a subject string and picks out
+portions of the string if it matches. Two elements in the vector are set for
+each substring: the offsets to the start and end of the substring.
+
+Arguments:
+ re points to the compiled expression
+ extra_data points to extra data or is NULL
+ subject points to the subject string
+ length length of subject string (may contain binary zeros)
+ start_offset where to start in the subject string
+ options option bits
+ offsets points to a vector of ints to be filled in with offsets
+ offsetcount the number of elements in the vector
+
+Returns: > 0 => success; value is the number of elements filled in
+ = 0 => success, but offsets is not big enough
+ -1 => failed to match
+ < -1 => some kind of unexpected problem
+*/
+
+static void tryFirstByteOptimization(const UChar*& subjectPtr, const UChar* endSubject, int first_byte, bool first_byte_caseless, bool useMultiLineFirstCharOptimization, const UChar* originalSubjectStart)
+{
+ // If first_byte is set, try scanning to the first instance of that byte
+ // no need to try and match against any earlier part of the subject string.
+ if (first_byte >= 0) {
+ UChar first_char = first_byte;
+ if (first_byte_caseless)
+ while (subjectPtr < endSubject) {
+ int c = *subjectPtr;
+ if (c > 127)
+ break;
+ if (toLowerCase(c) == first_char)
+ break;
+ subjectPtr++;
+ }
+ else {
+ while (subjectPtr < endSubject && *subjectPtr != first_char)
+ subjectPtr++;
+ }
+ } else if (useMultiLineFirstCharOptimization) {
+ /* Or to just after \n for a multiline match if possible */
+ // I'm not sure why this != originalSubjectStart check is necessary -- ecs 11/18/07
+ if (subjectPtr > originalSubjectStart) {
+ while (subjectPtr < endSubject && !isNewline(subjectPtr[-1]))
+ subjectPtr++;
+ }
+ }
+}
+
+static bool tryRequiredByteOptimization(const UChar*& subjectPtr, const UChar* endSubject, int req_byte, int req_byte2, bool req_byte_caseless, bool hasFirstByte, const UChar*& reqBytePtr)
+{
+ /* If req_byte is set, we know that that character must appear in the subject
+ for the match to succeed. If the first character is set, req_byte must be
+ later in the subject; otherwise the test starts at the match point. This
+ optimization can save a huge amount of backtracking in patterns with nested
+ unlimited repeats that aren't going to match. Writing separate code for
+ cased/caseless versions makes it go faster, as does using an autoincrement
+ and backing off on a match.
+
+ HOWEVER: when the subject string is very, very long, searching to its end can
+ take a long time, and give bad performance on quite ordinary patterns. This
+ showed up when somebody was matching /^C/ on a 32-megabyte string... so we
+ don't do this when the string is sufficiently long.
+ */
+
+ if (req_byte >= 0 && endSubject - subjectPtr < REQ_BYTE_MAX) {
+ const UChar* p = subjectPtr + (hasFirstByte ? 1 : 0);
+
+ /* We don't need to repeat the search if we haven't yet reached the
+ place we found it at last time. */
+
+ if (p > reqBytePtr) {
+ if (req_byte_caseless) {
+ while (p < endSubject) {
+ int pp = *p++;
+ if (pp == req_byte || pp == req_byte2) {
+ p--;
+ break;
+ }
+ }
+ } else {
+ while (p < endSubject) {
+ if (*p++ == req_byte) {
+ p--;
+ break;
+ }
+ }
+ }
+
+ /* If we can't find the required character, break the matching loop */
+
+ if (p >= endSubject)
+ return true;
+
+ /* If we have found the required character, save the point where we
+ found it, so that we don't search again next time round the loop if
+ the start hasn't passed this character yet. */
+
+ reqBytePtr = p;
+ }
+ }
+ return false;
+}
+
+int jsRegExpExecute(const JSRegExp* re,
+ const UChar* subject, int length, int start_offset, int* offsets,
+ int offsetcount)
+{
+ ASSERT(re);
+ ASSERT(subject);
+ ASSERT(offsetcount >= 0);
+ ASSERT(offsets || offsetcount == 0);
+
+ MatchData matchBlock;
+ matchBlock.startSubject = subject;
+ matchBlock.endSubject = matchBlock.startSubject + length;
+ const UChar* endSubject = matchBlock.endSubject;
+
+ matchBlock.multiline = (re->options & MatchAcrossMultipleLinesOption);
+ matchBlock.ignoreCase = (re->options & IgnoreCaseOption);
+
+ /* If the expression has got more back references than the offsets supplied can
+ hold, we get a temporary chunk of working store to use during the matching.
+ Otherwise, we can use the vector supplied, rounding down its size to a multiple
+ of 3. */
+
+ int ocount = offsetcount - (offsetcount % 3);
+
+ // FIXME: This is lame that we have to second-guess our caller here.
+ // The API should change to either fail-hard when we don't have enough offset space
+ // or that we shouldn't ask our callers to pre-allocate in the first place.
+ bool using_temporary_offsets = false;
+ if (re->top_backref > 0 && re->top_backref >= ocount/3) {
+ ocount = re->top_backref * 3 + 3;
+ matchBlock.offsetVector = new int[ocount];
+ if (!matchBlock.offsetVector)
+ return JSRegExpErrorNoMemory;
+ using_temporary_offsets = true;
+ } else
+ matchBlock.offsetVector = offsets;
+
+ matchBlock.offsetEnd = ocount;
+ matchBlock.offsetMax = (2*ocount)/3;
+ matchBlock.offsetOverflow = false;
+
+ /* Compute the minimum number of offsets that we need to reset each time. Doing
+ this makes a huge difference to execution time when there aren't many brackets
+ in the pattern. */
+
+ int resetcount = 2 + re->top_bracket * 2;
+ if (resetcount > offsetcount)
+ resetcount = ocount;
+
+ /* Reset the working variable associated with each extraction. These should
+ never be used unless previously set, but they get saved and restored, and so we
+ initialize them to avoid reading uninitialized locations. */
+
+ if (matchBlock.offsetVector) {
+ int* iptr = matchBlock.offsetVector + ocount;
+ int* iend = iptr - resetcount/2 + 1;
+ while (--iptr >= iend)
+ *iptr = -1;
+ }
+
+ /* Set up the first character to match, if available. The first_byte value is
+ never set for an anchored regular expression, but the anchoring may be forced
+ at run time, so we have to test for anchoring. The first char may be unset for
+ an unanchored pattern, of course. If there's no first char and the pattern was
+ studied, there may be a bitmap of possible first characters. */
+
+ bool first_byte_caseless = false;
+ int first_byte = -1;
+ if (re->options & UseFirstByteOptimizationOption) {
+ first_byte = re->first_byte & 255;
+ if ((first_byte_caseless = (re->first_byte & REQ_IGNORE_CASE)))
+ first_byte = toLowerCase(first_byte);
+ }
+
+ /* For anchored or unanchored matches, there may be a "last known required
+ character" set. */
+
+ bool req_byte_caseless = false;
+ int req_byte = -1;
+ int req_byte2 = -1;
+ if (re->options & UseRequiredByteOptimizationOption) {
+ req_byte = re->req_byte & 255; // FIXME: This optimization could be made to work for UTF16 chars as well...
+ req_byte_caseless = (re->req_byte & REQ_IGNORE_CASE);
+ req_byte2 = flipCase(req_byte);
+ }
+
+ /* Loop for handling unanchored repeated matching attempts; for anchored regexs
+ the loop runs just once. */
+
+ const UChar* startMatch = subject + start_offset;
+ const UChar* reqBytePtr = startMatch - 1;
+ bool useMultiLineFirstCharOptimization = re->options & UseMultiLineFirstByteOptimizationOption;
+
+ do {
+ /* Reset the maximum number of extractions we might see. */
+ if (matchBlock.offsetVector) {
+ int* iptr = matchBlock.offsetVector;
+ int* iend = iptr + resetcount;
+ while (iptr < iend)
+ *iptr++ = -1;
+ }
+
+ tryFirstByteOptimization(startMatch, endSubject, first_byte, first_byte_caseless, useMultiLineFirstCharOptimization, matchBlock.startSubject + start_offset);
+ if (tryRequiredByteOptimization(startMatch, endSubject, req_byte, req_byte2, req_byte_caseless, first_byte >= 0, reqBytePtr))
+ break;
+
+ /* When a match occurs, substrings will be set for all internal extractions;
+ we just need to set up the whole thing as substring 0 before returning. If
+ there were too many extractions, set the return code to zero. In the case
+ where we had to get some local store to hold offsets for backreferences, copy
+ those back references that we can. In this case there need not be overflow
+ if certain parts of the pattern were not used. */
+
+ /* The code starts after the JSRegExp block and the capture name table. */
+ const unsigned char* start_code = (const unsigned char*)(re + 1);
+
+ int returnCode = match(startMatch, start_code, 2, matchBlock);
+
+ /* When the result is no match, advance the pointer to the next character
+ and continue. */
+ if (returnCode == 0) {
+ startMatch++;
+ continue;
+ }
+
+ if (returnCode != 1) {
+ ASSERT(returnCode == JSRegExpErrorHitLimit || returnCode == JSRegExpErrorNoMemory);
+ DPRINTF((">>>> error: returning %d\n", returnCode));
+ return returnCode;
+ }
+
+ /* We have a match! Copy the offset information from temporary store if
+ necessary */
+
+ if (using_temporary_offsets) {
+ if (offsetcount >= 4) {
+ memcpy(offsets + 2, matchBlock.offsetVector + 2, (offsetcount - 2) * sizeof(int));
+ DPRINTF(("Copied offsets from temporary memory\n"));
+ }
+ if (matchBlock.endOffsetTop > offsetcount)
+ matchBlock.offsetOverflow = true;
+
+ DPRINTF(("Freeing temporary memory\n"));
+ delete [] matchBlock.offsetVector;
+ }
+
+ returnCode = matchBlock.offsetOverflow ? 0 : matchBlock.endOffsetTop / 2;
+
+ if (offsetcount < 2)
+ returnCode = 0;
+ else {
+ offsets[0] = startMatch - matchBlock.startSubject;
+ offsets[1] = matchBlock.endMatchPtr - matchBlock.startSubject;
+ }
+
+ DPRINTF((">>>> returning %d\n", returnCode));
+ return returnCode;
+ } while (!(re->options & IsAnchoredOption) && startMatch <= endSubject);
+
+ if (using_temporary_offsets) {
+ DPRINTF(("Freeing temporary memory\n"));
+ delete [] matchBlock.offsetVector;
+ }
+
+ DPRINTF((">>>> returning PCRE_ERROR_NOMATCH\n"));
+ return JSRegExpErrorNoMatch;
+}
--- /dev/null
+/* This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed. This library now supports only the regular expression features
+required by the JavaScript language specification, and has only the functions
+needed by JavaScriptCore and the rest of WebKit.
+
+ Originally written by Philip Hazel
+ Copyright (c) 1997-2006 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* This header contains definitions that are shared between the different
+modules, but which are not relevant to the exported API. This includes some
+functions whose names all begin with "_pcre_". */
+
+#ifndef PCRE_INTERNAL_H
+#define PCRE_INTERNAL_H
+
+/* Bit definitions for entries in the pcre_ctypes table. */
+
+#define ctype_space 0x01
+#define ctype_xdigit 0x08
+#define ctype_word 0x10 /* alphameric or '_' */
+
+/* Offsets for the bitmap tables in pcre_cbits. Each table contains a set
+of bits for a class map. Some classes are built by combining these tables. */
+
+#define cbit_space 0 /* \s */
+#define cbit_digit 32 /* \d */
+#define cbit_word 64 /* \w */
+#define cbit_length 96 /* Length of the cbits table */
+
+/* Offsets of the various tables from the base tables pointer, and
+total length. */
+
+#define lcc_offset 0
+#define fcc_offset 128
+#define cbits_offset 256
+#define ctypes_offset (cbits_offset + cbit_length)
+#define tables_length (ctypes_offset + 128)
+
+#ifndef DFTABLES
+
+// TODO: Hook this up to something that checks assertions.
+#define ASSERT(x) do { } while(0)
+#define ASSERT_NOT_REACHED() do {} while(0)
+
+#ifdef WIN32
+#pragma warning(disable: 4232)
+#pragma warning(disable: 4244)
+#endif
+
+#include "pcre.h"
+
+/* The value of LINK_SIZE determines the number of bytes used to store links as
+offsets within the compiled regex. The default is 2, which allows for compiled
+patterns up to 64K long. */
+
+#define LINK_SIZE 2
+
+/* Define DEBUG to get debugging output on stdout. */
+
+#if 0
+#define DEBUG
+#endif
+
+/* Use a macro for debugging printing, 'cause that eliminates the use of #ifdef
+inline, and there are *still* stupid compilers about that don't like indented
+pre-processor statements, or at least there were when I first wrote this. After
+all, it had only been about 10 years then... */
+
+#ifdef DEBUG
+#define DPRINTF(p) printf p
+#else
+#define DPRINTF(p) /*nothing*/
+#endif
+
+/* PCRE keeps offsets in its compiled code as 2-byte quantities (always stored
+in big-endian order) by default. These are used, for example, to link from the
+start of a subpattern to its alternatives and its end. The use of 2 bytes per
+offset limits the size of the compiled regex to around 64K, which is big enough
+for almost everybody. However, I received a request for an even bigger limit.
+For this reason, and also to make the code easier to maintain, the storing and
+loading of offsets from the byte string is now handled by the functions that are
+defined here. */
+
+/* PCRE uses some other 2-byte quantities that do not change when the size of
+offsets changes. These are used for repeat counts and for other things such as
+capturing parenthesis numbers in back references. */
+
+static inline void put2ByteValue(unsigned char* opcodePtr, int value)
+{
+ ASSERT(value >= 0 && value <= 0xFFFF);
+ opcodePtr[0] = value >> 8;
+ opcodePtr[1] = value;
+}
+
/* Read back a big-endian 16-bit quantity stored by put2ByteValue. */
static inline int get2ByteValue(const unsigned char* opcodePtr)
{
    const int high = opcodePtr[0];
    const int low = opcodePtr[1];
    return (high << 8) | low;
}
+
+static inline void put2ByteValueAndAdvance(unsigned char*& opcodePtr, int value)
+{
+ put2ByteValue(opcodePtr, value);
+ opcodePtr += 2;
+}
+
+static inline void putLinkValueAllowZero(unsigned char* opcodePtr, int value)
+{
+ put2ByteValue(opcodePtr, value);
+}
+
+static inline int getLinkValueAllowZero(const unsigned char* opcodePtr)
+{
+ return get2ByteValue(opcodePtr);
+}
+
+#define MAX_PATTERN_SIZE (1 << 16)
+
+static inline void putLinkValue(unsigned char* opcodePtr, int value)
+{
+ ASSERT(value);
+ putLinkValueAllowZero(opcodePtr, value);
+}
+
+static inline int getLinkValue(const unsigned char* opcodePtr)
+{
+ int value = getLinkValueAllowZero(opcodePtr);
+ ASSERT(value);
+ return value;
+}
+
+static inline void putLinkValueAndAdvance(unsigned char*& opcodePtr, int value)
+{
+ putLinkValue(opcodePtr, value);
+ opcodePtr += LINK_SIZE;
+}
+
+static inline void putLinkValueAllowZeroAndAdvance(unsigned char*& opcodePtr, int value)
+{
+ putLinkValueAllowZero(opcodePtr, value);
+ opcodePtr += LINK_SIZE;
+}
+
+// FIXME: These are really more of a "compiled regexp state" than "regexp options"
+/* Bits held in a compiled regex's options word (presumably JSRegExp::options,
+given the field names referenced in the comments below -- confirm in the
+compiler/matcher sources). The high bits record optimization state determined
+at compile time; the low bits mirror user-requested matching flags. */
+enum RegExpOptions {
+ UseFirstByteOptimizationOption = 0x40000000, /* first_byte is set */
+ UseRequiredByteOptimizationOption = 0x20000000, /* req_byte is set */
+ UseMultiLineFirstByteOptimizationOption = 0x10000000, /* start after \n for multiline */
+ IsAnchoredOption = 0x02000000, /* can't use partial with this regex */
+ IgnoreCaseOption = 0x00000001,
+ MatchAcrossMultipleLinesOption = 0x00000002
+};
+
+/* Flags added to firstbyte or reqbyte; a "non-literal" item is either a
+variable-length repeat, or anything other than literal characters. */
+
+#define REQ_IGNORE_CASE 0x0100 /* indicates should ignore case */
+#define REQ_VARY 0x0200 /* reqbyte followed non-literal item */
+
+/* Miscellaneous definitions */
+
+/* Flag bits and data types for the extended class (OP_XCLASS) for classes that
+contain UTF-8 characters with values greater than 255. */
+
+#define XCL_NOT 0x01 /* Flag: this is a negative class */
+#define XCL_MAP 0x02 /* Flag: a 32-byte map is present */
+
+#define XCL_END 0 /* Marks end of individual items */
+#define XCL_SINGLE 1 /* Single item (one multibyte char) follows */
+#define XCL_RANGE 2 /* A range (two multibyte chars) follows */
+
+/* These are escaped items that aren't just an encoding of a particular data
+value such as \n. They must have non-zero values, as check_escape() returns
+their negation. Also, they must appear in the same order as in the opcode
+definitions below, up to ESC_w. The final one must be
+ESC_REF as subsequent values are used for \1, \2, \3, etc. There are two
+tests in the code for an escape > ESC_b and <= ESC_w to
+detect the types that may be repeated. These are the types that consume
+characters. If any new escapes are put in between that don't consume a
+character, that code will have to change. */
+
+enum { ESC_B = 1, ESC_b, ESC_D, ESC_d, ESC_S, ESC_s, ESC_W, ESC_w, ESC_REF };
+
+/* Opcode table: OP_BRA must be last, as all values >= it are used for brackets
+that extract substrings. Starting from 1 (i.e. after OP_END), the values up to
+OP_EOD must correspond in order to the list of escapes immediately above.
+Note that whenever this list is updated, the two macro definitions that follow
+must also be updated to match. */
+
+#define FOR_EACH_OPCODE(macro) \
+ macro(END) \
+ \
+ macro(NOT_WORD_BOUNDARY) \
+ macro(WORD_BOUNDARY) \
+ macro(NOT_DIGIT) \
+ macro(DIGIT) \
+ macro(NOT_WHITESPACE) \
+ macro(WHITESPACE) \
+ macro(NOT_WORDCHAR) \
+ macro(WORDCHAR) \
+ \
+ macro(NOT_NEWLINE) \
+ \
+ macro(CIRC) \
+ macro(DOLL) \
+ macro(BOL) \
+ macro(EOL) \
+ macro(CHAR) \
+ macro(CHAR_IGNORING_CASE) \
+ macro(ASCII_CHAR) \
+ macro(ASCII_LETTER_IGNORING_CASE) \
+ macro(NOT) \
+ \
+ macro(STAR) \
+ macro(MINSTAR) \
+ macro(PLUS) \
+ macro(MINPLUS) \
+ macro(QUERY) \
+ macro(MINQUERY) \
+ macro(UPTO) \
+ macro(MINUPTO) \
+ macro(EXACT) \
+ \
+ macro(NOTSTAR) \
+ macro(NOTMINSTAR) \
+ macro(NOTPLUS) \
+ macro(NOTMINPLUS) \
+ macro(NOTQUERY) \
+ macro(NOTMINQUERY) \
+ macro(NOTUPTO) \
+ macro(NOTMINUPTO) \
+ macro(NOTEXACT) \
+ \
+ macro(TYPESTAR) \
+ macro(TYPEMINSTAR) \
+ macro(TYPEPLUS) \
+ macro(TYPEMINPLUS) \
+ macro(TYPEQUERY) \
+ macro(TYPEMINQUERY) \
+ macro(TYPEUPTO) \
+ macro(TYPEMINUPTO) \
+ macro(TYPEEXACT) \
+ \
+ macro(CRSTAR) \
+ macro(CRMINSTAR) \
+ macro(CRPLUS) \
+ macro(CRMINPLUS) \
+ macro(CRQUERY) \
+ macro(CRMINQUERY) \
+ macro(CRRANGE) \
+ macro(CRMINRANGE) \
+ \
+ macro(CLASS) \
+ macro(NCLASS) \
+ macro(XCLASS) \
+ \
+ macro(REF) \
+ \
+ macro(ALT) \
+ macro(KET) \
+ macro(KETRMAX) \
+ macro(KETRMIN) \
+ \
+ macro(ASSERT) \
+ macro(ASSERT_NOT) \
+ \
+ macro(BRAZERO) \
+ macro(BRAMINZERO) \
+ macro(BRANUMBER) \
+ macro(BRA)
+
+#define OPCODE_ENUM_VALUE(opcode) OP_##opcode,
+enum { FOR_EACH_OPCODE(OPCODE_ENUM_VALUE) };
+
+/* WARNING WARNING WARNING: There is an implicit assumption in pcre.c and
+study.c that all opcodes are less than 128 in value. This makes handling UTF-8
+character sequences easier. */
+
+/* The highest extraction number before we have to start using additional
+bytes. (Originally PCRE didn't have support for extraction counts higher than
+this number.) The value is limited by the number of opcodes left after OP_BRA,
+i.e. 255 - OP_BRA. We actually set it a bit lower to leave room for additional
+opcodes. */
+
+/* FIXME: Note that OP_BRA + 100 is > 128, so the two comments above
+are in conflict! */
+
+#define EXTRACT_BASIC_MAX 100
+
+/* The index of names and the
+code vector run on as long as necessary after the end. We store an explicit
+offset to the name table so that if a regex is compiled on one host, saved, and
+then run on another where the size of pointers is different, all might still
+be well. For the case of compiled-on-4 and run-on-8, we include an extra
+pointer that is always NULL.
+*/
+
+/* Header of a compiled regular expression. Per the comment above, the name
+index and the code vector run on in memory immediately after this struct. */
+struct JSRegExp {
+ unsigned options; /* RegExpOptions bits */
+
+ unsigned short top_bracket; /* presumably highest capturing bracket number -- confirm in compiler */
+ unsigned short top_backref; /* presumably highest back reference used -- confirm in compiler */
+
+ unsigned short first_byte; /* meaningful when UseFirstByteOptimizationOption is set; may carry REQ_* flags */
+ unsigned short req_byte; /* meaningful when UseRequiredByteOptimizationOption is set; may carry REQ_* flags */
+};
+
+/* Internal shared data tables. These are tables that are used by more than one
+ of the exported public functions. They have to be "external" in the C sense,
+ but are not part of the PCRE public API. The data for these tables is in the
+ pcre_tables.c module. */
+
+#define kjs_pcre_utf8_table1_size 6
+
+extern const int kjs_pcre_utf8_table1[6];
+extern const int kjs_pcre_utf8_table2[6];
+extern const int kjs_pcre_utf8_table3[6];
+extern const unsigned char kjs_pcre_utf8_table4[0x40];
+
+extern const unsigned char kjs_pcre_default_tables[tables_length];
+
+static inline unsigned char toLowerCase(unsigned char c)
+{
+ static const unsigned char* lowerCaseChars = kjs_pcre_default_tables + lcc_offset;
+ return lowerCaseChars[c];
+}
+
+static inline unsigned char flipCase(unsigned char c)
+{
+ static const unsigned char* flippedCaseChars = kjs_pcre_default_tables + fcc_offset;
+ return flippedCaseChars[c];
+}
+
+static inline unsigned char classBitmapForChar(unsigned char c)
+{
+ static const unsigned char* charClassBitmaps = kjs_pcre_default_tables + cbits_offset;
+ return charClassBitmaps[c];
+}
+
+static inline unsigned char charTypeForChar(unsigned char c)
+{
+ const unsigned char* charTypeMap = kjs_pcre_default_tables + ctypes_offset;
+ return charTypeMap[c];
+}
+
+static inline bool isWordChar(UChar c)
+{
+ return c < 128 && (charTypeForChar(c) & ctype_word);
+}
+
+static inline bool isSpaceChar(UChar c)
+{
+ return (c < 128 && (charTypeForChar(c) & ctype_space));
+}
+
+static inline bool isNewline(UChar nl)
+{
+ return (nl == 0xA || nl == 0xD || nl == 0x2028 || nl == 0x2029);
+}
+
+static inline bool isBracketStartOpcode(unsigned char opcode)
+{
+ if (opcode >= OP_BRA)
+ return true;
+ switch (opcode) {
+ case OP_ASSERT:
+ case OP_ASSERT_NOT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline void advanceToEndOfBracket(const unsigned char*& opcodePtr)
+{
+ ASSERT(isBracketStartOpcode(*opcodePtr) || *opcodePtr == OP_ALT);
+ do
+ opcodePtr += getLinkValue(opcodePtr + 1);
+ while (*opcodePtr == OP_ALT);
+}
+
+/* Internal shared functions. These are functions that are used in more
+than one of the source files. They have to have external linkage, but
+are not part of the public API and so not exported from the library. */
+
+extern int kjs_pcre_ucp_othercase(unsigned);
+extern bool kjs_pcre_xclass(int, const unsigned char*);
+
+#endif
+
+#endif
+
+/* End of pcre_internal.h */
--- /dev/null
+/* This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed. This library now supports only the regular expression features
+required by the JavaScript language specification, and has only the functions
+needed by JavaScriptCore and the rest of WebKit.
+
+ Originally written by Philip Hazel
+ Copyright (c) 1997-2006 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* This module contains some fixed tables that are used by more than one of the
+PCRE code modules. */
+
+#include "pcre_internal.h"
+
+/*************************************************
+* Tables for UTF-8 support *
+*************************************************/
+
+/* These are the breakpoints for different numbers of bytes in a UTF-8
+character. */
+
+/* Entry i is the largest code point encodable in i+1 UTF-8 bytes. */
+const int kjs_pcre_utf8_table1[6] =
+ { 0x7f, 0x7ff, 0xffff, 0x1fffff, 0x3ffffff, 0x7fffffff};
+
+/* These are the indicator bits and the mask for the data bits to set in the
+first byte of a character, indexed by the number of additional bytes. */
+
+/* table2: the lead-byte indicator bits; table3: the mask for the data bits
+of the first byte. Both are indexed by the number of additional bytes. */
+const int kjs_pcre_utf8_table2[6] = { 0, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc};
+const int kjs_pcre_utf8_table3[6] = { 0xff, 0x1f, 0x0f, 0x07, 0x03, 0x01};
+
+/* Table of the number of extra characters, indexed by the first character
+masked with 0x3f. The highest number for a valid UTF-8 character is in fact
+0x3d. */
+
+/* Count of continuation bytes, indexed by (lead byte & 0x3f); only lead
+bytes in the 0xc0..0xff range are looked up here (see the comment above). */
+const unsigned char kjs_pcre_utf8_table4[0x40] = {
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 };
+
+#include "pcre_chartables.c"
--- /dev/null
+/* This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed. This library now supports only the regular expression features
+required by the JavaScript language specification, and has only the functions
+needed by JavaScriptCore and the rest of WebKit.
+
+ Originally written by Philip Hazel
+ Copyright (c) 1997-2006 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains code for searching the table of Unicode character
+properties. */
+
+#include "pcre_internal.h"
+
+#include "ucpinternal.h" /* Internal table details */
+#include "ucptable.cpp" /* The table itself */
+
+/*************************************************
+* Search table and return other case *
+*************************************************/
+
+/* If the given character is a letter, and there is another case for the
+letter, return the other case. Otherwise, return -1.
+
+Arguments:
+ c the character value
+
+Returns: the other case or -1 if none
+*/
+
+/* Binary-search ucp_table for code point "c"; return the code point of its
+"other case" partner, or -1 if it has none (not in the table, a range entry,
+or a zero case offset). */
+int kjs_pcre_ucp_othercase(unsigned c)
+{
+ int bot = 0;
+ int top = sizeof(ucp_table) / sizeof(cnode);
+ int mid;
+
+ /* The table is searched using a binary chop. You might think that using
+ intermediate variables to hold some of the common expressions would speed
+ things up, but tests with gcc 3.4.4 on Linux showed that, on the contrary, it
+ makes things a lot slower. */
+
+ for (;;) {
+ if (top <= bot)
+ return -1; /* not in the table: no other case */
+ mid = (bot + top) >> 1;
+ if (c == (ucp_table[mid].f0 & f0_charmask))
+ break;
+ if (c < (ucp_table[mid].f0 & f0_charmask))
+ top = mid;
+ else {
+ /* A range entry also matches when c falls between its start code
+ point and start + range length (f1 holds the length for ranges). */
+ if ((ucp_table[mid].f0 & f0_rangeflag) && (c <= (ucp_table[mid].f0 & f0_charmask) + (ucp_table[mid].f1 & f1_rangemask)))
+ break;
+ bot = mid + 1;
+ }
+ }
+
+ /* Found an entry in the table. Return -1 for a range entry. Otherwise return
+ the other case if there is one, else -1. */
+
+ if (ucp_table[mid].f0 & f0_rangeflag)
+ return -1;
+
+ /* The case offset is a *signed* 16-bit value (see ucpinternal.h). If its
+ sign bit (0x8000) is set, propagate the sign into the upper bits of the
+ int (f1_caseneg is 0xffff8000) so the addition below works for negative
+ offsets. */
+ int offset = ucp_table[mid].f1 & f1_casemask;
+ if (offset & f1_caseneg)
+ offset |= f1_caseneg;
+ return !offset ? -1 : c + offset;
+}
--- /dev/null
+/* This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed. This library now supports only the regular expression features
+required by the JavaScript language specification, and has only the functions
+needed by JavaScriptCore and the rest of WebKit.
+
+ Originally written by Philip Hazel
+ Copyright (c) 1997-2006 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* This module contains an internal function that is used to match an extended
+class (one that contains characters whose values are > 255). */
+
+#include "pcre_internal.h"
+
+/*************************************************
+* Match character against an XCLASS *
+*************************************************/
+
+/* This function is called to match a character against an extended class that
+might contain values > 255.
+
+Arguments:
+ c the character
+ data points to the flag byte of the XCLASS data
+
+Returns: true if character matches, else false
+*/
+
+/* Get the next UTF-8 character, advancing the pointer. This is called when we
+ know we are in UTF-8 mode. */
+
+static inline void getUTF8CharAndAdvancePointer(int& c, const unsigned char*& subjectPtr)
+{
+ c = *subjectPtr++;
+ if ((c & 0xc0) == 0xc0) {
+ /* Multibyte lead byte: table4 gives the continuation-byte count for it. */
+ int gcaa = kjs_pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */
+ int gcss = 6 * gcaa;
+ /* Keep only the lead byte's data bits, pre-shifted to their final slot. */
+ c = (c & kjs_pcre_utf8_table3[gcaa]) << gcss;
+ while (gcaa-- > 0) {
+ gcss -= 6;
+ /* Merge the 6 data bits of each continuation byte, high bits first. */
+ c |= (*subjectPtr++ & 0x3f) << gcss;
+ }
+ }
+}
+
+/* Match code point "c" against the XCLASS data at "data" (flag byte first,
+then an optional 32-byte bitmap, then XCL_SINGLE/XCL_RANGE items terminated
+by XCL_END). Returns true on a match, honoring the XCL_NOT negation flag. */
+bool kjs_pcre_xclass(int c, const unsigned char* data)
+{
+ bool negated = (*data & XCL_NOT);
+
+ /* Character values < 256 are matched against a bitmap, if one is present. If
+ not, we still carry on, because there may be ranges that start below 256 in the
+ additional data. */
+
+ if (c < 256) {
+ if ((*data & XCL_MAP) != 0 && (data[1 + c/8] & (1 << (c&7))) != 0)
+ return !negated; /* char found */
+ }
+
+ /* First skip the bit map if present. Then match against the list of Unicode
+ properties or large chars or ranges that end with a large char. We won't ever
+ encounter XCL_PROP or XCL_NOTPROP when UCP support is not compiled. */
+
+ if ((*data++ & XCL_MAP) != 0)
+ data += 32;
+
+ /* Walk the item list; singles and range endpoints are UTF-8 encoded. */
+ int t;
+ while ((t = *data++) != XCL_END) {
+ if (t == XCL_SINGLE) {
+ int x;
+ getUTF8CharAndAdvancePointer(x, data);
+ if (c == x)
+ return !negated;
+ }
+ else if (t == XCL_RANGE) {
+ int x, y;
+ getUTF8CharAndAdvancePointer(x, data);
+ getUTF8CharAndAdvancePointer(y, data);
+ if (c >= x && c <= y)
+ return !negated;
+ }
+ }
+
+ return negated; /* char did not match */
+}
--- /dev/null
+/* This is JavaScriptCore's variant of the PCRE library. While this library
+started out as a copy of PCRE, many of the features of PCRE have been
+removed. This library now supports only the regular expression features
+required by the JavaScript language specification, and has only the functions
+needed by JavaScriptCore and the rest of WebKit.
+
+ Originally written by Philip Hazel
+ Copyright (c) 1997-2006 University of Cambridge
+ Copyright (C) 2002, 2004, 2006, 2007 Apple Inc. All rights reserved.
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/*************************************************
+* Unicode Property Table handler *
+*************************************************/
+
+/* Internal header file defining the layout of the bits in each pair of 32-bit
+words that form a data item in the table. */
+
+/* One table entry: two packed 32-bit words. The bit layout of f0 and f1 is
+given by the mask macros and the large comment below. */
+typedef struct cnode {
+ unsigned f0;
+ unsigned f1;
+} cnode;
+
+/* Things for the f0 field */
+
+#define f0_scriptmask 0xff000000 /* Mask for script field */
+#define f0_scriptshift 24 /* Shift for script value */
+#define f0_rangeflag 0x00f00000 /* Flag for a range item */
+#define f0_charmask 0x001fffff /* Mask for code point value */
+
+/* Things for the f1 field */
+
+#define f1_typemask 0xfc000000 /* Mask for char type field */
+#define f1_typeshift 26 /* Shift for the type field */
+#define f1_rangemask 0x0000ffff /* Mask for a range offset */
+#define f1_casemask 0x0000ffff /* Mask for a case offset */
+#define f1_caseneg 0xffff8000 /* Bits for negation */
+
+/* The data consists of a vector of structures of type cnode. The two unsigned
+32-bit integers are used as follows:
+
+(f0) (1) The most significant byte holds the script number. The numbers are
+ defined by the enum in ucp.h.
+
+ (2) The 0x00800000 bit is set if this entry defines a range of characters.
+ It is not set if this entry defines a single character.
+
+ (3) The 0x00600000 bits are spare.
+
+ (4) The 0x001fffff bits contain the code point. No Unicode code point will
+ ever be greater than 0x0010ffff, so this should be OK for ever.
+
+(f1) (1) The 0xfc000000 bits contain the character type number. The numbers are
+ defined by an enum in ucp.h.
+
+ (2) The 0x03ff0000 bits are spare.
+
+ (3) The 0x0000ffff bits contain EITHER the unsigned offset to the top of
+ range if this entry defines a range, OR the *signed* offset to the
+ character's "other case" partner if this entry defines a single
+ character. There is no partner if the value is zero.
+
+-------------------------------------------------------------------------------
+| script (8) |.|.|.| codepoint (21) || type (6) |.|.| spare (8) | offset (16) |
+-------------------------------------------------------------------------------
+ | | | | |
+ | | |-> spare | |-> spare
+ | | |
+ | |-> spare |-> spare
+ |
+ |-> range flag
+
+The upper/lower casing information is set only for characters that come in
+pairs. The non-one-to-one mappings in the Unicode data are ignored.
+
+When searching the data, proceed as follows:
+
+(1) Set up for a binary chop search.
+
+(2) If the top is not greater than the bottom, the character is not in the
+ table. Its type must therefore be "Cn" ("Undefined").
+
+(3) Find the middle vector element.
+
+(4) Extract the code point and compare. If equal, we are done.
+
+(5) If the test character is smaller, set the top to the current point, and
+ goto (2).
+
+(6) If the current entry defines a range, compute the last character by adding
+ the offset, and see if the test character is within the range. If it is,
+ we are done.
+
+(7) Otherwise, set the bottom to one element past the current point and goto
+ (2).
+*/
+
+/* End of ucpinternal.h */
--- /dev/null
+/* This source module is automatically generated from the Unicode
+property table. See ucpinternal.h for a description of the layout. */
+
+static const cnode ucp_table[] = {
+ { 0x09800000, 0x0000001f },
+ { 0x09000020, 0x74000000 },
+ { 0x09800021, 0x54000002 },
+ { 0x09000024, 0x5c000000 },
+ { 0x09800025, 0x54000002 },
+ { 0x09000028, 0x58000000 },
+ { 0x09000029, 0x48000000 },
+ { 0x0900002a, 0x54000000 },
+ { 0x0900002b, 0x64000000 },
+ { 0x0900002c, 0x54000000 },
+ { 0x0900002d, 0x44000000 },
+ { 0x0980002e, 0x54000001 },
+ { 0x09800030, 0x34000009 },
+ { 0x0980003a, 0x54000001 },
+ { 0x0980003c, 0x64000002 },
+ { 0x0980003f, 0x54000001 },
+ { 0x21000041, 0x24000020 },
+ { 0x21000042, 0x24000020 },
+ { 0x21000043, 0x24000020 },
+ { 0x21000044, 0x24000020 },
+ { 0x21000045, 0x24000020 },
+ { 0x21000046, 0x24000020 },
+ { 0x21000047, 0x24000020 },
+ { 0x21000048, 0x24000020 },
+ { 0x21000049, 0x24000020 },
+ { 0x2100004a, 0x24000020 },
+ { 0x2100004b, 0x24000020 },
+ { 0x2100004c, 0x24000020 },
+ { 0x2100004d, 0x24000020 },
+ { 0x2100004e, 0x24000020 },
+ { 0x2100004f, 0x24000020 },
+ { 0x21000050, 0x24000020 },
+ { 0x21000051, 0x24000020 },
+ { 0x21000052, 0x24000020 },
+ { 0x21000053, 0x24000020 },
+ { 0x21000054, 0x24000020 },
+ { 0x21000055, 0x24000020 },
+ { 0x21000056, 0x24000020 },
+ { 0x21000057, 0x24000020 },
+ { 0x21000058, 0x24000020 },
+ { 0x21000059, 0x24000020 },
+ { 0x2100005a, 0x24000020 },
+ { 0x0900005b, 0x58000000 },
+ { 0x0900005c, 0x54000000 },
+ { 0x0900005d, 0x48000000 },
+ { 0x0900005e, 0x60000000 },
+ { 0x0900005f, 0x40000000 },
+ { 0x09000060, 0x60000000 },
+ { 0x21000061, 0x1400ffe0 },
+ { 0x21000062, 0x1400ffe0 },
+ { 0x21000063, 0x1400ffe0 },
+ { 0x21000064, 0x1400ffe0 },
+ { 0x21000065, 0x1400ffe0 },
+ { 0x21000066, 0x1400ffe0 },
+ { 0x21000067, 0x1400ffe0 },
+ { 0x21000068, 0x1400ffe0 },
+ { 0x21000069, 0x1400ffe0 },
+ { 0x2100006a, 0x1400ffe0 },
+ { 0x2100006b, 0x1400ffe0 },
+ { 0x2100006c, 0x1400ffe0 },
+ { 0x2100006d, 0x1400ffe0 },
+ { 0x2100006e, 0x1400ffe0 },
+ { 0x2100006f, 0x1400ffe0 },
+ { 0x21000070, 0x1400ffe0 },
+ { 0x21000071, 0x1400ffe0 },
+ { 0x21000072, 0x1400ffe0 },
+ { 0x21000073, 0x1400ffe0 },
+ { 0x21000074, 0x1400ffe0 },
+ { 0x21000075, 0x1400ffe0 },
+ { 0x21000076, 0x1400ffe0 },
+ { 0x21000077, 0x1400ffe0 },
+ { 0x21000078, 0x1400ffe0 },
+ { 0x21000079, 0x1400ffe0 },
+ { 0x2100007a, 0x1400ffe0 },
+ { 0x0900007b, 0x58000000 },
+ { 0x0900007c, 0x64000000 },
+ { 0x0900007d, 0x48000000 },
+ { 0x0900007e, 0x64000000 },
+ { 0x0980007f, 0x00000020 },
+ { 0x090000a0, 0x74000000 },
+ { 0x090000a1, 0x54000000 },
+ { 0x098000a2, 0x5c000003 },
+ { 0x098000a6, 0x68000001 },
+ { 0x090000a8, 0x60000000 },
+ { 0x090000a9, 0x68000000 },
+ { 0x210000aa, 0x14000000 },
+ { 0x090000ab, 0x50000000 },
+ { 0x090000ac, 0x64000000 },
+ { 0x090000ad, 0x04000000 },
+ { 0x090000ae, 0x68000000 },
+ { 0x090000af, 0x60000000 },
+ { 0x090000b0, 0x68000000 },
+ { 0x090000b1, 0x64000000 },
+ { 0x098000b2, 0x3c000001 },
+ { 0x090000b4, 0x60000000 },
+ { 0x090000b5, 0x140002e7 },
+ { 0x090000b6, 0x68000000 },
+ { 0x090000b7, 0x54000000 },
+ { 0x090000b8, 0x60000000 },
+ { 0x090000b9, 0x3c000000 },
+ { 0x210000ba, 0x14000000 },
+ { 0x090000bb, 0x4c000000 },
+ { 0x098000bc, 0x3c000002 },
+ { 0x090000bf, 0x54000000 },
+ { 0x210000c0, 0x24000020 },
+ { 0x210000c1, 0x24000020 },
+ { 0x210000c2, 0x24000020 },
+ { 0x210000c3, 0x24000020 },
+ { 0x210000c4, 0x24000020 },
+ { 0x210000c5, 0x24000020 },
+ { 0x210000c6, 0x24000020 },
+ { 0x210000c7, 0x24000020 },
+ { 0x210000c8, 0x24000020 },
+ { 0x210000c9, 0x24000020 },
+ { 0x210000ca, 0x24000020 },
+ { 0x210000cb, 0x24000020 },
+ { 0x210000cc, 0x24000020 },
+ { 0x210000cd, 0x24000020 },
+ { 0x210000ce, 0x24000020 },
+ { 0x210000cf, 0x24000020 },
+ { 0x210000d0, 0x24000020 },
+ { 0x210000d1, 0x24000020 },
+ { 0x210000d2, 0x24000020 },
+ { 0x210000d3, 0x24000020 },
+ { 0x210000d4, 0x24000020 },
+ { 0x210000d5, 0x24000020 },
+ { 0x210000d6, 0x24000020 },
+ { 0x090000d7, 0x64000000 },
+ { 0x210000d8, 0x24000020 },
+ { 0x210000d9, 0x24000020 },
+ { 0x210000da, 0x24000020 },
+ { 0x210000db, 0x24000020 },
+ { 0x210000dc, 0x24000020 },
+ { 0x210000dd, 0x24000020 },
+ { 0x210000de, 0x24000020 },
+ { 0x210000df, 0x14000000 },
+ { 0x210000e0, 0x1400ffe0 },
+ { 0x210000e1, 0x1400ffe0 },
+ { 0x210000e2, 0x1400ffe0 },
+ { 0x210000e3, 0x1400ffe0 },
+ { 0x210000e4, 0x1400ffe0 },
+ { 0x210000e5, 0x1400ffe0 },
+ { 0x210000e6, 0x1400ffe0 },
+ { 0x210000e7, 0x1400ffe0 },
+ { 0x210000e8, 0x1400ffe0 },
+ { 0x210000e9, 0x1400ffe0 },
+ { 0x210000ea, 0x1400ffe0 },
+ { 0x210000eb, 0x1400ffe0 },
+ { 0x210000ec, 0x1400ffe0 },
+ { 0x210000ed, 0x1400ffe0 },
+ { 0x210000ee, 0x1400ffe0 },
+ { 0x210000ef, 0x1400ffe0 },
+ { 0x210000f0, 0x1400ffe0 },
+ { 0x210000f1, 0x1400ffe0 },
+ { 0x210000f2, 0x1400ffe0 },
+ { 0x210000f3, 0x1400ffe0 },
+ { 0x210000f4, 0x1400ffe0 },
+ { 0x210000f5, 0x1400ffe0 },
+ { 0x210000f6, 0x1400ffe0 },
+ { 0x090000f7, 0x64000000 },
+ { 0x210000f8, 0x1400ffe0 },
+ { 0x210000f9, 0x1400ffe0 },
+ { 0x210000fa, 0x1400ffe0 },
+ { 0x210000fb, 0x1400ffe0 },
+ { 0x210000fc, 0x1400ffe0 },
+ { 0x210000fd, 0x1400ffe0 },
+ { 0x210000fe, 0x1400ffe0 },
+ { 0x210000ff, 0x14000079 },
+ { 0x21000100, 0x24000001 },
+ { 0x21000101, 0x1400ffff },
+ { 0x21000102, 0x24000001 },
+ { 0x21000103, 0x1400ffff },
+ { 0x21000104, 0x24000001 },
+ { 0x21000105, 0x1400ffff },
+ { 0x21000106, 0x24000001 },
+ { 0x21000107, 0x1400ffff },
+ { 0x21000108, 0x24000001 },
+ { 0x21000109, 0x1400ffff },
+ { 0x2100010a, 0x24000001 },
+ { 0x2100010b, 0x1400ffff },
+ { 0x2100010c, 0x24000001 },
+ { 0x2100010d, 0x1400ffff },
+ { 0x2100010e, 0x24000001 },
+ { 0x2100010f, 0x1400ffff },
+ { 0x21000110, 0x24000001 },
+ { 0x21000111, 0x1400ffff },
+ { 0x21000112, 0x24000001 },
+ { 0x21000113, 0x1400ffff },
+ { 0x21000114, 0x24000001 },
+ { 0x21000115, 0x1400ffff },
+ { 0x21000116, 0x24000001 },
+ { 0x21000117, 0x1400ffff },
+ { 0x21000118, 0x24000001 },
+ { 0x21000119, 0x1400ffff },
+ { 0x2100011a, 0x24000001 },
+ { 0x2100011b, 0x1400ffff },
+ { 0x2100011c, 0x24000001 },
+ { 0x2100011d, 0x1400ffff },
+ { 0x2100011e, 0x24000001 },
+ { 0x2100011f, 0x1400ffff },
+ { 0x21000120, 0x24000001 },
+ { 0x21000121, 0x1400ffff },
+ { 0x21000122, 0x24000001 },
+ { 0x21000123, 0x1400ffff },
+ { 0x21000124, 0x24000001 },
+ { 0x21000125, 0x1400ffff },
+ { 0x21000126, 0x24000001 },
+ { 0x21000127, 0x1400ffff },
+ { 0x21000128, 0x24000001 },
+ { 0x21000129, 0x1400ffff },
+ { 0x2100012a, 0x24000001 },
+ { 0x2100012b, 0x1400ffff },
+ { 0x2100012c, 0x24000001 },
+ { 0x2100012d, 0x1400ffff },
+ { 0x2100012e, 0x24000001 },
+ { 0x2100012f, 0x1400ffff },
+ { 0x21000130, 0x2400ff39 },
+ { 0x21000131, 0x1400ff18 },
+ { 0x21000132, 0x24000001 },
+ { 0x21000133, 0x1400ffff },
+ { 0x21000134, 0x24000001 },
+ { 0x21000135, 0x1400ffff },
+ { 0x21000136, 0x24000001 },
+ { 0x21000137, 0x1400ffff },
+ { 0x21000138, 0x14000000 },
+ { 0x21000139, 0x24000001 },
+ { 0x2100013a, 0x1400ffff },
+ { 0x2100013b, 0x24000001 },
+ { 0x2100013c, 0x1400ffff },
+ { 0x2100013d, 0x24000001 },
+ { 0x2100013e, 0x1400ffff },
+ { 0x2100013f, 0x24000001 },
+ { 0x21000140, 0x1400ffff },
+ { 0x21000141, 0x24000001 },
+ { 0x21000142, 0x1400ffff },
+ { 0x21000143, 0x24000001 },
+ { 0x21000144, 0x1400ffff },
+ { 0x21000145, 0x24000001 },
+ { 0x21000146, 0x1400ffff },
+ { 0x21000147, 0x24000001 },
+ { 0x21000148, 0x1400ffff },
+ { 0x21000149, 0x14000000 },
+ { 0x2100014a, 0x24000001 },
+ { 0x2100014b, 0x1400ffff },
+ { 0x2100014c, 0x24000001 },
+ { 0x2100014d, 0x1400ffff },
+ { 0x2100014e, 0x24000001 },
+ { 0x2100014f, 0x1400ffff },
+ { 0x21000150, 0x24000001 },
+ { 0x21000151, 0x1400ffff },
+ { 0x21000152, 0x24000001 },
+ { 0x21000153, 0x1400ffff },
+ { 0x21000154, 0x24000001 },
+ { 0x21000155, 0x1400ffff },
+ { 0x21000156, 0x24000001 },
+ { 0x21000157, 0x1400ffff },
+ { 0x21000158, 0x24000001 },
+ { 0x21000159, 0x1400ffff },
+ { 0x2100015a, 0x24000001 },
+ { 0x2100015b, 0x1400ffff },
+ { 0x2100015c, 0x24000001 },
+ { 0x2100015d, 0x1400ffff },
+ { 0x2100015e, 0x24000001 },
+ { 0x2100015f, 0x1400ffff },
+ { 0x21000160, 0x24000001 },
+ { 0x21000161, 0x1400ffff },
+ { 0x21000162, 0x24000001 },
+ { 0x21000163, 0x1400ffff },
+ { 0x21000164, 0x24000001 },
+ { 0x21000165, 0x1400ffff },
+ { 0x21000166, 0x24000001 },
+ { 0x21000167, 0x1400ffff },
+ { 0x21000168, 0x24000001 },
+ { 0x21000169, 0x1400ffff },
+ { 0x2100016a, 0x24000001 },
+ { 0x2100016b, 0x1400ffff },
+ { 0x2100016c, 0x24000001 },
+ { 0x2100016d, 0x1400ffff },
+ { 0x2100016e, 0x24000001 },
+ { 0x2100016f, 0x1400ffff },
+ { 0x21000170, 0x24000001 },
+ { 0x21000171, 0x1400ffff },
+ { 0x21000172, 0x24000001 },
+ { 0x21000173, 0x1400ffff },
+ { 0x21000174, 0x24000001 },
+ { 0x21000175, 0x1400ffff },
+ { 0x21000176, 0x24000001 },
+ { 0x21000177, 0x1400ffff },
+ { 0x21000178, 0x2400ff87 },
+ { 0x21000179, 0x24000001 },
+ { 0x2100017a, 0x1400ffff },
+ { 0x2100017b, 0x24000001 },
+ { 0x2100017c, 0x1400ffff },
+ { 0x2100017d, 0x24000001 },
+ { 0x2100017e, 0x1400ffff },
+ { 0x2100017f, 0x1400fed4 },
+ { 0x21000180, 0x14000000 },
+ { 0x21000181, 0x240000d2 },
+ { 0x21000182, 0x24000001 },
+ { 0x21000183, 0x1400ffff },
+ { 0x21000184, 0x24000001 },
+ { 0x21000185, 0x1400ffff },
+ { 0x21000186, 0x240000ce },
+ { 0x21000187, 0x24000001 },
+ { 0x21000188, 0x1400ffff },
+ { 0x21000189, 0x240000cd },
+ { 0x2100018a, 0x240000cd },
+ { 0x2100018b, 0x24000001 },
+ { 0x2100018c, 0x1400ffff },
+ { 0x2100018d, 0x14000000 },
+ { 0x2100018e, 0x2400004f },
+ { 0x2100018f, 0x240000ca },
+ { 0x21000190, 0x240000cb },
+ { 0x21000191, 0x24000001 },
+ { 0x21000192, 0x1400ffff },
+ { 0x21000193, 0x240000cd },
+ { 0x21000194, 0x240000cf },
+ { 0x21000195, 0x14000061 },
+ { 0x21000196, 0x240000d3 },
+ { 0x21000197, 0x240000d1 },
+ { 0x21000198, 0x24000001 },
+ { 0x21000199, 0x1400ffff },
+ { 0x2100019a, 0x140000a3 },
+ { 0x2100019b, 0x14000000 },
+ { 0x2100019c, 0x240000d3 },
+ { 0x2100019d, 0x240000d5 },
+ { 0x2100019e, 0x14000082 },
+ { 0x2100019f, 0x240000d6 },
+ { 0x210001a0, 0x24000001 },
+ { 0x210001a1, 0x1400ffff },
+ { 0x210001a2, 0x24000001 },
+ { 0x210001a3, 0x1400ffff },
+ { 0x210001a4, 0x24000001 },
+ { 0x210001a5, 0x1400ffff },
+ { 0x210001a6, 0x240000da },
+ { 0x210001a7, 0x24000001 },
+ { 0x210001a8, 0x1400ffff },
+ { 0x210001a9, 0x240000da },
+ { 0x218001aa, 0x14000001 },
+ { 0x210001ac, 0x24000001 },
+ { 0x210001ad, 0x1400ffff },
+ { 0x210001ae, 0x240000da },
+ { 0x210001af, 0x24000001 },
+ { 0x210001b0, 0x1400ffff },
+ { 0x210001b1, 0x240000d9 },
+ { 0x210001b2, 0x240000d9 },
+ { 0x210001b3, 0x24000001 },
+ { 0x210001b4, 0x1400ffff },
+ { 0x210001b5, 0x24000001 },
+ { 0x210001b6, 0x1400ffff },
+ { 0x210001b7, 0x240000db },
+ { 0x210001b8, 0x24000001 },
+ { 0x210001b9, 0x1400ffff },
+ { 0x210001ba, 0x14000000 },
+ { 0x210001bb, 0x1c000000 },
+ { 0x210001bc, 0x24000001 },
+ { 0x210001bd, 0x1400ffff },
+ { 0x210001be, 0x14000000 },
+ { 0x210001bf, 0x14000038 },
+ { 0x218001c0, 0x1c000003 },
+ { 0x210001c4, 0x24000002 },
+ { 0x210001c5, 0x2000ffff },
+ { 0x210001c6, 0x1400fffe },
+ { 0x210001c7, 0x24000002 },
+ { 0x210001c8, 0x2000ffff },
+ { 0x210001c9, 0x1400fffe },
+ { 0x210001ca, 0x24000002 },
+ { 0x210001cb, 0x2000ffff },
+ { 0x210001cc, 0x1400fffe },
+ { 0x210001cd, 0x24000001 },
+ { 0x210001ce, 0x1400ffff },
+ { 0x210001cf, 0x24000001 },
+ { 0x210001d0, 0x1400ffff },
+ { 0x210001d1, 0x24000001 },
+ { 0x210001d2, 0x1400ffff },
+ { 0x210001d3, 0x24000001 },
+ { 0x210001d4, 0x1400ffff },
+ { 0x210001d5, 0x24000001 },
+ { 0x210001d6, 0x1400ffff },
+ { 0x210001d7, 0x24000001 },
+ { 0x210001d8, 0x1400ffff },
+ { 0x210001d9, 0x24000001 },
+ { 0x210001da, 0x1400ffff },
+ { 0x210001db, 0x24000001 },
+ { 0x210001dc, 0x1400ffff },
+ { 0x210001dd, 0x1400ffb1 },
+ { 0x210001de, 0x24000001 },
+ { 0x210001df, 0x1400ffff },
+ { 0x210001e0, 0x24000001 },
+ { 0x210001e1, 0x1400ffff },
+ { 0x210001e2, 0x24000001 },
+ { 0x210001e3, 0x1400ffff },
+ { 0x210001e4, 0x24000001 },
+ { 0x210001e5, 0x1400ffff },
+ { 0x210001e6, 0x24000001 },
+ { 0x210001e7, 0x1400ffff },
+ { 0x210001e8, 0x24000001 },
+ { 0x210001e9, 0x1400ffff },
+ { 0x210001ea, 0x24000001 },
+ { 0x210001eb, 0x1400ffff },
+ { 0x210001ec, 0x24000001 },
+ { 0x210001ed, 0x1400ffff },
+ { 0x210001ee, 0x24000001 },
+ { 0x210001ef, 0x1400ffff },
+ { 0x210001f0, 0x14000000 },
+ { 0x210001f1, 0x24000002 },
+ { 0x210001f2, 0x2000ffff },
+ { 0x210001f3, 0x1400fffe },
+ { 0x210001f4, 0x24000001 },
+ { 0x210001f5, 0x1400ffff },
+ { 0x210001f6, 0x2400ff9f },
+ { 0x210001f7, 0x2400ffc8 },
+ { 0x210001f8, 0x24000001 },
+ { 0x210001f9, 0x1400ffff },
+ { 0x210001fa, 0x24000001 },
+ { 0x210001fb, 0x1400ffff },
+ { 0x210001fc, 0x24000001 },
+ { 0x210001fd, 0x1400ffff },
+ { 0x210001fe, 0x24000001 },
+ { 0x210001ff, 0x1400ffff },
+ { 0x21000200, 0x24000001 },
+ { 0x21000201, 0x1400ffff },
+ { 0x21000202, 0x24000001 },
+ { 0x21000203, 0x1400ffff },
+ { 0x21000204, 0x24000001 },
+ { 0x21000205, 0x1400ffff },
+ { 0x21000206, 0x24000001 },
+ { 0x21000207, 0x1400ffff },
+ { 0x21000208, 0x24000001 },
+ { 0x21000209, 0x1400ffff },
+ { 0x2100020a, 0x24000001 },
+ { 0x2100020b, 0x1400ffff },
+ { 0x2100020c, 0x24000001 },
+ { 0x2100020d, 0x1400ffff },
+ { 0x2100020e, 0x24000001 },
+ { 0x2100020f, 0x1400ffff },
+ { 0x21000210, 0x24000001 },
+ { 0x21000211, 0x1400ffff },
+ { 0x21000212, 0x24000001 },
+ { 0x21000213, 0x1400ffff },
+ { 0x21000214, 0x24000001 },
+ { 0x21000215, 0x1400ffff },
+ { 0x21000216, 0x24000001 },
+ { 0x21000217, 0x1400ffff },
+ { 0x21000218, 0x24000001 },
+ { 0x21000219, 0x1400ffff },
+ { 0x2100021a, 0x24000001 },
+ { 0x2100021b, 0x1400ffff },
+ { 0x2100021c, 0x24000001 },
+ { 0x2100021d, 0x1400ffff },
+ { 0x2100021e, 0x24000001 },
+ { 0x2100021f, 0x1400ffff },
+ { 0x21000220, 0x2400ff7e },
+ { 0x21000221, 0x14000000 },
+ { 0x21000222, 0x24000001 },
+ { 0x21000223, 0x1400ffff },
+ { 0x21000224, 0x24000001 },
+ { 0x21000225, 0x1400ffff },
+ { 0x21000226, 0x24000001 },
+ { 0x21000227, 0x1400ffff },
+ { 0x21000228, 0x24000001 },
+ { 0x21000229, 0x1400ffff },
+ { 0x2100022a, 0x24000001 },
+ { 0x2100022b, 0x1400ffff },
+ { 0x2100022c, 0x24000001 },
+ { 0x2100022d, 0x1400ffff },
+ { 0x2100022e, 0x24000001 },
+ { 0x2100022f, 0x1400ffff },
+ { 0x21000230, 0x24000001 },
+ { 0x21000231, 0x1400ffff },
+ { 0x21000232, 0x24000001 },
+ { 0x21000233, 0x1400ffff },
+ { 0x21800234, 0x14000005 },
+ { 0x2100023a, 0x24000000 },
+ { 0x2100023b, 0x24000001 },
+ { 0x2100023c, 0x1400ffff },
+ { 0x2100023d, 0x2400ff5d },
+ { 0x2100023e, 0x24000000 },
+ { 0x2180023f, 0x14000001 },
+ { 0x21000241, 0x24000053 },
+ { 0x21800250, 0x14000002 },
+ { 0x21000253, 0x1400ff2e },
+ { 0x21000254, 0x1400ff32 },
+ { 0x21000255, 0x14000000 },
+ { 0x21000256, 0x1400ff33 },
+ { 0x21000257, 0x1400ff33 },
+ { 0x21000258, 0x14000000 },
+ { 0x21000259, 0x1400ff36 },
+ { 0x2100025a, 0x14000000 },
+ { 0x2100025b, 0x1400ff35 },
+ { 0x2180025c, 0x14000003 },
+ { 0x21000260, 0x1400ff33 },
+ { 0x21800261, 0x14000001 },
+ { 0x21000263, 0x1400ff31 },
+ { 0x21800264, 0x14000003 },
+ { 0x21000268, 0x1400ff2f },
+ { 0x21000269, 0x1400ff2d },
+ { 0x2180026a, 0x14000004 },
+ { 0x2100026f, 0x1400ff2d },
+ { 0x21800270, 0x14000001 },
+ { 0x21000272, 0x1400ff2b },
+ { 0x21800273, 0x14000001 },
+ { 0x21000275, 0x1400ff2a },
+ { 0x21800276, 0x14000009 },
+ { 0x21000280, 0x1400ff26 },
+ { 0x21800281, 0x14000001 },
+ { 0x21000283, 0x1400ff26 },
+ { 0x21800284, 0x14000003 },
+ { 0x21000288, 0x1400ff26 },
+ { 0x21000289, 0x14000000 },
+ { 0x2100028a, 0x1400ff27 },
+ { 0x2100028b, 0x1400ff27 },
+ { 0x2180028c, 0x14000005 },
+ { 0x21000292, 0x1400ff25 },
+ { 0x21000293, 0x14000000 },
+ { 0x21000294, 0x1400ffad },
+ { 0x21800295, 0x1400001a },
+ { 0x218002b0, 0x18000011 },
+ { 0x098002c2, 0x60000003 },
+ { 0x098002c6, 0x1800000b },
+ { 0x098002d2, 0x6000000d },
+ { 0x218002e0, 0x18000004 },
+ { 0x098002e5, 0x60000008 },
+ { 0x090002ee, 0x18000000 },
+ { 0x098002ef, 0x60000010 },
+ { 0x1b800300, 0x30000044 },
+ { 0x1b000345, 0x30000054 },
+ { 0x1b800346, 0x30000029 },
+ { 0x13800374, 0x60000001 },
+ { 0x1300037a, 0x18000000 },
+ { 0x0900037e, 0x54000000 },
+ { 0x13800384, 0x60000001 },
+ { 0x13000386, 0x24000026 },
+ { 0x09000387, 0x54000000 },
+ { 0x13000388, 0x24000025 },
+ { 0x13000389, 0x24000025 },
+ { 0x1300038a, 0x24000025 },
+ { 0x1300038c, 0x24000040 },
+ { 0x1300038e, 0x2400003f },
+ { 0x1300038f, 0x2400003f },
+ { 0x13000390, 0x14000000 },
+ { 0x13000391, 0x24000020 },
+ { 0x13000392, 0x24000020 },
+ { 0x13000393, 0x24000020 },
+ { 0x13000394, 0x24000020 },
+ { 0x13000395, 0x24000020 },
+ { 0x13000396, 0x24000020 },
+ { 0x13000397, 0x24000020 },
+ { 0x13000398, 0x24000020 },
+ { 0x13000399, 0x24000020 },
+ { 0x1300039a, 0x24000020 },
+ { 0x1300039b, 0x24000020 },
+ { 0x1300039c, 0x24000020 },
+ { 0x1300039d, 0x24000020 },
+ { 0x1300039e, 0x24000020 },
+ { 0x1300039f, 0x24000020 },
+ { 0x130003a0, 0x24000020 },
+ { 0x130003a1, 0x24000020 },
+ { 0x130003a3, 0x24000020 },
+ { 0x130003a4, 0x24000020 },
+ { 0x130003a5, 0x24000020 },
+ { 0x130003a6, 0x24000020 },
+ { 0x130003a7, 0x24000020 },
+ { 0x130003a8, 0x24000020 },
+ { 0x130003a9, 0x24000020 },
+ { 0x130003aa, 0x24000020 },
+ { 0x130003ab, 0x24000020 },
+ { 0x130003ac, 0x1400ffda },
+ { 0x130003ad, 0x1400ffdb },
+ { 0x130003ae, 0x1400ffdb },
+ { 0x130003af, 0x1400ffdb },
+ { 0x130003b0, 0x14000000 },
+ { 0x130003b1, 0x1400ffe0 },
+ { 0x130003b2, 0x1400ffe0 },
+ { 0x130003b3, 0x1400ffe0 },
+ { 0x130003b4, 0x1400ffe0 },
+ { 0x130003b5, 0x1400ffe0 },
+ { 0x130003b6, 0x1400ffe0 },
+ { 0x130003b7, 0x1400ffe0 },
+ { 0x130003b8, 0x1400ffe0 },
+ { 0x130003b9, 0x1400ffe0 },
+ { 0x130003ba, 0x1400ffe0 },
+ { 0x130003bb, 0x1400ffe0 },
+ { 0x130003bc, 0x1400ffe0 },
+ { 0x130003bd, 0x1400ffe0 },
+ { 0x130003be, 0x1400ffe0 },
+ { 0x130003bf, 0x1400ffe0 },
+ { 0x130003c0, 0x1400ffe0 },
+ { 0x130003c1, 0x1400ffe0 },
+ { 0x130003c2, 0x1400ffe1 },
+ { 0x130003c3, 0x1400ffe0 },
+ { 0x130003c4, 0x1400ffe0 },
+ { 0x130003c5, 0x1400ffe0 },
+ { 0x130003c6, 0x1400ffe0 },
+ { 0x130003c7, 0x1400ffe0 },
+ { 0x130003c8, 0x1400ffe0 },
+ { 0x130003c9, 0x1400ffe0 },
+ { 0x130003ca, 0x1400ffe0 },
+ { 0x130003cb, 0x1400ffe0 },
+ { 0x130003cc, 0x1400ffc0 },
+ { 0x130003cd, 0x1400ffc1 },
+ { 0x130003ce, 0x1400ffc1 },
+ { 0x130003d0, 0x1400ffc2 },
+ { 0x130003d1, 0x1400ffc7 },
+ { 0x138003d2, 0x24000002 },
+ { 0x130003d5, 0x1400ffd1 },
+ { 0x130003d6, 0x1400ffca },
+ { 0x130003d7, 0x14000000 },
+ { 0x130003d8, 0x24000001 },
+ { 0x130003d9, 0x1400ffff },
+ { 0x130003da, 0x24000001 },
+ { 0x130003db, 0x1400ffff },
+ { 0x130003dc, 0x24000001 },
+ { 0x130003dd, 0x1400ffff },
+ { 0x130003de, 0x24000001 },
+ { 0x130003df, 0x1400ffff },
+ { 0x130003e0, 0x24000001 },
+ { 0x130003e1, 0x1400ffff },
+ { 0x0a0003e2, 0x24000001 },
+ { 0x0a0003e3, 0x1400ffff },
+ { 0x0a0003e4, 0x24000001 },
+ { 0x0a0003e5, 0x1400ffff },
+ { 0x0a0003e6, 0x24000001 },
+ { 0x0a0003e7, 0x1400ffff },
+ { 0x0a0003e8, 0x24000001 },
+ { 0x0a0003e9, 0x1400ffff },
+ { 0x0a0003ea, 0x24000001 },
+ { 0x0a0003eb, 0x1400ffff },
+ { 0x0a0003ec, 0x24000001 },
+ { 0x0a0003ed, 0x1400ffff },
+ { 0x0a0003ee, 0x24000001 },
+ { 0x0a0003ef, 0x1400ffff },
+ { 0x130003f0, 0x1400ffaa },
+ { 0x130003f1, 0x1400ffb0 },
+ { 0x130003f2, 0x14000007 },
+ { 0x130003f3, 0x14000000 },
+ { 0x130003f4, 0x2400ffc4 },
+ { 0x130003f5, 0x1400ffa0 },
+ { 0x130003f6, 0x64000000 },
+ { 0x130003f7, 0x24000001 },
+ { 0x130003f8, 0x1400ffff },
+ { 0x130003f9, 0x2400fff9 },
+ { 0x130003fa, 0x24000001 },
+ { 0x130003fb, 0x1400ffff },
+ { 0x130003fc, 0x14000000 },
+ { 0x138003fd, 0x24000002 },
+ { 0x0c000400, 0x24000050 },
+ { 0x0c000401, 0x24000050 },
+ { 0x0c000402, 0x24000050 },
+ { 0x0c000403, 0x24000050 },
+ { 0x0c000404, 0x24000050 },
+ { 0x0c000405, 0x24000050 },
+ { 0x0c000406, 0x24000050 },
+ { 0x0c000407, 0x24000050 },
+ { 0x0c000408, 0x24000050 },
+ { 0x0c000409, 0x24000050 },
+ { 0x0c00040a, 0x24000050 },
+ { 0x0c00040b, 0x24000050 },
+ { 0x0c00040c, 0x24000050 },
+ { 0x0c00040d, 0x24000050 },
+ { 0x0c00040e, 0x24000050 },
+ { 0x0c00040f, 0x24000050 },
+ { 0x0c000410, 0x24000020 },
+ { 0x0c000411, 0x24000020 },
+ { 0x0c000412, 0x24000020 },
+ { 0x0c000413, 0x24000020 },
+ { 0x0c000414, 0x24000020 },
+ { 0x0c000415, 0x24000020 },
+ { 0x0c000416, 0x24000020 },
+ { 0x0c000417, 0x24000020 },
+ { 0x0c000418, 0x24000020 },
+ { 0x0c000419, 0x24000020 },
+ { 0x0c00041a, 0x24000020 },
+ { 0x0c00041b, 0x24000020 },
+ { 0x0c00041c, 0x24000020 },
+ { 0x0c00041d, 0x24000020 },
+ { 0x0c00041e, 0x24000020 },
+ { 0x0c00041f, 0x24000020 },
+ { 0x0c000420, 0x24000020 },
+ { 0x0c000421, 0x24000020 },
+ { 0x0c000422, 0x24000020 },
+ { 0x0c000423, 0x24000020 },
+ { 0x0c000424, 0x24000020 },
+ { 0x0c000425, 0x24000020 },
+ { 0x0c000426, 0x24000020 },
+ { 0x0c000427, 0x24000020 },
+ { 0x0c000428, 0x24000020 },
+ { 0x0c000429, 0x24000020 },
+ { 0x0c00042a, 0x24000020 },
+ { 0x0c00042b, 0x24000020 },
+ { 0x0c00042c, 0x24000020 },
+ { 0x0c00042d, 0x24000020 },
+ { 0x0c00042e, 0x24000020 },
+ { 0x0c00042f, 0x24000020 },
+ { 0x0c000430, 0x1400ffe0 },
+ { 0x0c000431, 0x1400ffe0 },
+ { 0x0c000432, 0x1400ffe0 },
+ { 0x0c000433, 0x1400ffe0 },
+ { 0x0c000434, 0x1400ffe0 },
+ { 0x0c000435, 0x1400ffe0 },
+ { 0x0c000436, 0x1400ffe0 },
+ { 0x0c000437, 0x1400ffe0 },
+ { 0x0c000438, 0x1400ffe0 },
+ { 0x0c000439, 0x1400ffe0 },
+ { 0x0c00043a, 0x1400ffe0 },
+ { 0x0c00043b, 0x1400ffe0 },
+ { 0x0c00043c, 0x1400ffe0 },
+ { 0x0c00043d, 0x1400ffe0 },
+ { 0x0c00043e, 0x1400ffe0 },
+ { 0x0c00043f, 0x1400ffe0 },
+ { 0x0c000440, 0x1400ffe0 },
+ { 0x0c000441, 0x1400ffe0 },
+ { 0x0c000442, 0x1400ffe0 },
+ { 0x0c000443, 0x1400ffe0 },
+ { 0x0c000444, 0x1400ffe0 },
+ { 0x0c000445, 0x1400ffe0 },
+ { 0x0c000446, 0x1400ffe0 },
+ { 0x0c000447, 0x1400ffe0 },
+ { 0x0c000448, 0x1400ffe0 },
+ { 0x0c000449, 0x1400ffe0 },
+ { 0x0c00044a, 0x1400ffe0 },
+ { 0x0c00044b, 0x1400ffe0 },
+ { 0x0c00044c, 0x1400ffe0 },
+ { 0x0c00044d, 0x1400ffe0 },
+ { 0x0c00044e, 0x1400ffe0 },
+ { 0x0c00044f, 0x1400ffe0 },
+ { 0x0c000450, 0x1400ffb0 },
+ { 0x0c000451, 0x1400ffb0 },
+ { 0x0c000452, 0x1400ffb0 },
+ { 0x0c000453, 0x1400ffb0 },
+ { 0x0c000454, 0x1400ffb0 },
+ { 0x0c000455, 0x1400ffb0 },
+ { 0x0c000456, 0x1400ffb0 },
+ { 0x0c000457, 0x1400ffb0 },
+ { 0x0c000458, 0x1400ffb0 },
+ { 0x0c000459, 0x1400ffb0 },
+ { 0x0c00045a, 0x1400ffb0 },
+ { 0x0c00045b, 0x1400ffb0 },
+ { 0x0c00045c, 0x1400ffb0 },
+ { 0x0c00045d, 0x1400ffb0 },
+ { 0x0c00045e, 0x1400ffb0 },
+ { 0x0c00045f, 0x1400ffb0 },
+ { 0x0c000460, 0x24000001 },
+ { 0x0c000461, 0x1400ffff },
+ { 0x0c000462, 0x24000001 },
+ { 0x0c000463, 0x1400ffff },
+ { 0x0c000464, 0x24000001 },
+ { 0x0c000465, 0x1400ffff },
+ { 0x0c000466, 0x24000001 },
+ { 0x0c000467, 0x1400ffff },
+ { 0x0c000468, 0x24000001 },
+ { 0x0c000469, 0x1400ffff },
+ { 0x0c00046a, 0x24000001 },
+ { 0x0c00046b, 0x1400ffff },
+ { 0x0c00046c, 0x24000001 },
+ { 0x0c00046d, 0x1400ffff },
+ { 0x0c00046e, 0x24000001 },
+ { 0x0c00046f, 0x1400ffff },
+ { 0x0c000470, 0x24000001 },
+ { 0x0c000471, 0x1400ffff },
+ { 0x0c000472, 0x24000001 },
+ { 0x0c000473, 0x1400ffff },
+ { 0x0c000474, 0x24000001 },
+ { 0x0c000475, 0x1400ffff },
+ { 0x0c000476, 0x24000001 },
+ { 0x0c000477, 0x1400ffff },
+ { 0x0c000478, 0x24000001 },
+ { 0x0c000479, 0x1400ffff },
+ { 0x0c00047a, 0x24000001 },
+ { 0x0c00047b, 0x1400ffff },
+ { 0x0c00047c, 0x24000001 },
+ { 0x0c00047d, 0x1400ffff },
+ { 0x0c00047e, 0x24000001 },
+ { 0x0c00047f, 0x1400ffff },
+ { 0x0c000480, 0x24000001 },
+ { 0x0c000481, 0x1400ffff },
+ { 0x0c000482, 0x68000000 },
+ { 0x0c800483, 0x30000003 },
+ { 0x0c800488, 0x2c000001 },
+ { 0x0c00048a, 0x24000001 },
+ { 0x0c00048b, 0x1400ffff },
+ { 0x0c00048c, 0x24000001 },
+ { 0x0c00048d, 0x1400ffff },
+ { 0x0c00048e, 0x24000001 },
+ { 0x0c00048f, 0x1400ffff },
+ { 0x0c000490, 0x24000001 },
+ { 0x0c000491, 0x1400ffff },
+ { 0x0c000492, 0x24000001 },
+ { 0x0c000493, 0x1400ffff },
+ { 0x0c000494, 0x24000001 },
+ { 0x0c000495, 0x1400ffff },
+ { 0x0c000496, 0x24000001 },
+ { 0x0c000497, 0x1400ffff },
+ { 0x0c000498, 0x24000001 },
+ { 0x0c000499, 0x1400ffff },
+ { 0x0c00049a, 0x24000001 },
+ { 0x0c00049b, 0x1400ffff },
+ { 0x0c00049c, 0x24000001 },
+ { 0x0c00049d, 0x1400ffff },
+ { 0x0c00049e, 0x24000001 },
+ { 0x0c00049f, 0x1400ffff },
+ { 0x0c0004a0, 0x24000001 },
+ { 0x0c0004a1, 0x1400ffff },
+ { 0x0c0004a2, 0x24000001 },
+ { 0x0c0004a3, 0x1400ffff },
+ { 0x0c0004a4, 0x24000001 },
+ { 0x0c0004a5, 0x1400ffff },
+ { 0x0c0004a6, 0x24000001 },
+ { 0x0c0004a7, 0x1400ffff },
+ { 0x0c0004a8, 0x24000001 },
+ { 0x0c0004a9, 0x1400ffff },
+ { 0x0c0004aa, 0x24000001 },
+ { 0x0c0004ab, 0x1400ffff },
+ { 0x0c0004ac, 0x24000001 },
+ { 0x0c0004ad, 0x1400ffff },
+ { 0x0c0004ae, 0x24000001 },
+ { 0x0c0004af, 0x1400ffff },
+ { 0x0c0004b0, 0x24000001 },
+ { 0x0c0004b1, 0x1400ffff },
+ { 0x0c0004b2, 0x24000001 },
+ { 0x0c0004b3, 0x1400ffff },
+ { 0x0c0004b4, 0x24000001 },
+ { 0x0c0004b5, 0x1400ffff },
+ { 0x0c0004b6, 0x24000001 },
+ { 0x0c0004b7, 0x1400ffff },
+ { 0x0c0004b8, 0x24000001 },
+ { 0x0c0004b9, 0x1400ffff },
+ { 0x0c0004ba, 0x24000001 },
+ { 0x0c0004bb, 0x1400ffff },
+ { 0x0c0004bc, 0x24000001 },
+ { 0x0c0004bd, 0x1400ffff },
+ { 0x0c0004be, 0x24000001 },
+ { 0x0c0004bf, 0x1400ffff },
+ { 0x0c0004c0, 0x24000000 },
+ { 0x0c0004c1, 0x24000001 },
+ { 0x0c0004c2, 0x1400ffff },
+ { 0x0c0004c3, 0x24000001 },
+ { 0x0c0004c4, 0x1400ffff },
+ { 0x0c0004c5, 0x24000001 },
+ { 0x0c0004c6, 0x1400ffff },
+ { 0x0c0004c7, 0x24000001 },
+ { 0x0c0004c8, 0x1400ffff },
+ { 0x0c0004c9, 0x24000001 },
+ { 0x0c0004ca, 0x1400ffff },
+ { 0x0c0004cb, 0x24000001 },
+ { 0x0c0004cc, 0x1400ffff },
+ { 0x0c0004cd, 0x24000001 },
+ { 0x0c0004ce, 0x1400ffff },
+ { 0x0c0004d0, 0x24000001 },
+ { 0x0c0004d1, 0x1400ffff },
+ { 0x0c0004d2, 0x24000001 },
+ { 0x0c0004d3, 0x1400ffff },
+ { 0x0c0004d4, 0x24000001 },
+ { 0x0c0004d5, 0x1400ffff },
+ { 0x0c0004d6, 0x24000001 },
+ { 0x0c0004d7, 0x1400ffff },
+ { 0x0c0004d8, 0x24000001 },
+ { 0x0c0004d9, 0x1400ffff },
+ { 0x0c0004da, 0x24000001 },
+ { 0x0c0004db, 0x1400ffff },
+ { 0x0c0004dc, 0x24000001 },
+ { 0x0c0004dd, 0x1400ffff },
+ { 0x0c0004de, 0x24000001 },
+ { 0x0c0004df, 0x1400ffff },
+ { 0x0c0004e0, 0x24000001 },
+ { 0x0c0004e1, 0x1400ffff },
+ { 0x0c0004e2, 0x24000001 },
+ { 0x0c0004e3, 0x1400ffff },
+ { 0x0c0004e4, 0x24000001 },
+ { 0x0c0004e5, 0x1400ffff },
+ { 0x0c0004e6, 0x24000001 },
+ { 0x0c0004e7, 0x1400ffff },
+ { 0x0c0004e8, 0x24000001 },
+ { 0x0c0004e9, 0x1400ffff },
+ { 0x0c0004ea, 0x24000001 },
+ { 0x0c0004eb, 0x1400ffff },
+ { 0x0c0004ec, 0x24000001 },
+ { 0x0c0004ed, 0x1400ffff },
+ { 0x0c0004ee, 0x24000001 },
+ { 0x0c0004ef, 0x1400ffff },
+ { 0x0c0004f0, 0x24000001 },
+ { 0x0c0004f1, 0x1400ffff },
+ { 0x0c0004f2, 0x24000001 },
+ { 0x0c0004f3, 0x1400ffff },
+ { 0x0c0004f4, 0x24000001 },
+ { 0x0c0004f5, 0x1400ffff },
+ { 0x0c0004f6, 0x24000001 },
+ { 0x0c0004f7, 0x1400ffff },
+ { 0x0c0004f8, 0x24000001 },
+ { 0x0c0004f9, 0x1400ffff },
+ { 0x0c000500, 0x24000001 },
+ { 0x0c000501, 0x1400ffff },
+ { 0x0c000502, 0x24000001 },
+ { 0x0c000503, 0x1400ffff },
+ { 0x0c000504, 0x24000001 },
+ { 0x0c000505, 0x1400ffff },
+ { 0x0c000506, 0x24000001 },
+ { 0x0c000507, 0x1400ffff },
+ { 0x0c000508, 0x24000001 },
+ { 0x0c000509, 0x1400ffff },
+ { 0x0c00050a, 0x24000001 },
+ { 0x0c00050b, 0x1400ffff },
+ { 0x0c00050c, 0x24000001 },
+ { 0x0c00050d, 0x1400ffff },
+ { 0x0c00050e, 0x24000001 },
+ { 0x0c00050f, 0x1400ffff },
+ { 0x01000531, 0x24000030 },
+ { 0x01000532, 0x24000030 },
+ { 0x01000533, 0x24000030 },
+ { 0x01000534, 0x24000030 },
+ { 0x01000535, 0x24000030 },
+ { 0x01000536, 0x24000030 },
+ { 0x01000537, 0x24000030 },
+ { 0x01000538, 0x24000030 },
+ { 0x01000539, 0x24000030 },
+ { 0x0100053a, 0x24000030 },
+ { 0x0100053b, 0x24000030 },
+ { 0x0100053c, 0x24000030 },
+ { 0x0100053d, 0x24000030 },
+ { 0x0100053e, 0x24000030 },
+ { 0x0100053f, 0x24000030 },
+ { 0x01000540, 0x24000030 },
+ { 0x01000541, 0x24000030 },
+ { 0x01000542, 0x24000030 },
+ { 0x01000543, 0x24000030 },
+ { 0x01000544, 0x24000030 },
+ { 0x01000545, 0x24000030 },
+ { 0x01000546, 0x24000030 },
+ { 0x01000547, 0x24000030 },
+ { 0x01000548, 0x24000030 },
+ { 0x01000549, 0x24000030 },
+ { 0x0100054a, 0x24000030 },
+ { 0x0100054b, 0x24000030 },
+ { 0x0100054c, 0x24000030 },
+ { 0x0100054d, 0x24000030 },
+ { 0x0100054e, 0x24000030 },
+ { 0x0100054f, 0x24000030 },
+ { 0x01000550, 0x24000030 },
+ { 0x01000551, 0x24000030 },
+ { 0x01000552, 0x24000030 },
+ { 0x01000553, 0x24000030 },
+ { 0x01000554, 0x24000030 },
+ { 0x01000555, 0x24000030 },
+ { 0x01000556, 0x24000030 },
+ { 0x01000559, 0x18000000 },
+ { 0x0180055a, 0x54000005 },
+ { 0x01000561, 0x1400ffd0 },
+ { 0x01000562, 0x1400ffd0 },
+ { 0x01000563, 0x1400ffd0 },
+ { 0x01000564, 0x1400ffd0 },
+ { 0x01000565, 0x1400ffd0 },
+ { 0x01000566, 0x1400ffd0 },
+ { 0x01000567, 0x1400ffd0 },
+ { 0x01000568, 0x1400ffd0 },
+ { 0x01000569, 0x1400ffd0 },
+ { 0x0100056a, 0x1400ffd0 },
+ { 0x0100056b, 0x1400ffd0 },
+ { 0x0100056c, 0x1400ffd0 },
+ { 0x0100056d, 0x1400ffd0 },
+ { 0x0100056e, 0x1400ffd0 },
+ { 0x0100056f, 0x1400ffd0 },
+ { 0x01000570, 0x1400ffd0 },
+ { 0x01000571, 0x1400ffd0 },
+ { 0x01000572, 0x1400ffd0 },
+ { 0x01000573, 0x1400ffd0 },
+ { 0x01000574, 0x1400ffd0 },
+ { 0x01000575, 0x1400ffd0 },
+ { 0x01000576, 0x1400ffd0 },
+ { 0x01000577, 0x1400ffd0 },
+ { 0x01000578, 0x1400ffd0 },
+ { 0x01000579, 0x1400ffd0 },
+ { 0x0100057a, 0x1400ffd0 },
+ { 0x0100057b, 0x1400ffd0 },
+ { 0x0100057c, 0x1400ffd0 },
+ { 0x0100057d, 0x1400ffd0 },
+ { 0x0100057e, 0x1400ffd0 },
+ { 0x0100057f, 0x1400ffd0 },
+ { 0x01000580, 0x1400ffd0 },
+ { 0x01000581, 0x1400ffd0 },
+ { 0x01000582, 0x1400ffd0 },
+ { 0x01000583, 0x1400ffd0 },
+ { 0x01000584, 0x1400ffd0 },
+ { 0x01000585, 0x1400ffd0 },
+ { 0x01000586, 0x1400ffd0 },
+ { 0x01000587, 0x14000000 },
+ { 0x09000589, 0x54000000 },
+ { 0x0100058a, 0x44000000 },
+ { 0x19800591, 0x30000028 },
+ { 0x198005bb, 0x30000002 },
+ { 0x190005be, 0x54000000 },
+ { 0x190005bf, 0x30000000 },
+ { 0x190005c0, 0x54000000 },
+ { 0x198005c1, 0x30000001 },
+ { 0x190005c3, 0x54000000 },
+ { 0x198005c4, 0x30000001 },
+ { 0x190005c6, 0x54000000 },
+ { 0x190005c7, 0x30000000 },
+ { 0x198005d0, 0x1c00001a },
+ { 0x198005f0, 0x1c000002 },
+ { 0x198005f3, 0x54000001 },
+ { 0x09800600, 0x04000003 },
+ { 0x0000060b, 0x5c000000 },
+ { 0x0980060c, 0x54000001 },
+ { 0x0080060e, 0x68000001 },
+ { 0x00800610, 0x30000005 },
+ { 0x0900061b, 0x54000000 },
+ { 0x0080061e, 0x54000001 },
+ { 0x00800621, 0x1c000019 },
+ { 0x09000640, 0x18000000 },
+ { 0x00800641, 0x1c000009 },
+ { 0x1b80064b, 0x30000013 },
+ { 0x09800660, 0x34000009 },
+ { 0x0080066a, 0x54000003 },
+ { 0x0080066e, 0x1c000001 },
+ { 0x1b000670, 0x30000000 },
+ { 0x00800671, 0x1c000062 },
+ { 0x000006d4, 0x54000000 },
+ { 0x000006d5, 0x1c000000 },
+ { 0x008006d6, 0x30000006 },
+ { 0x090006dd, 0x04000000 },
+ { 0x000006de, 0x2c000000 },
+ { 0x008006df, 0x30000005 },
+ { 0x008006e5, 0x18000001 },
+ { 0x008006e7, 0x30000001 },
+ { 0x000006e9, 0x68000000 },
+ { 0x008006ea, 0x30000003 },
+ { 0x008006ee, 0x1c000001 },
+ { 0x008006f0, 0x34000009 },
+ { 0x008006fa, 0x1c000002 },
+ { 0x008006fd, 0x68000001 },
+ { 0x000006ff, 0x1c000000 },
+ { 0x31800700, 0x5400000d },
+ { 0x3100070f, 0x04000000 },
+ { 0x31000710, 0x1c000000 },
+ { 0x31000711, 0x30000000 },
+ { 0x31800712, 0x1c00001d },
+ { 0x31800730, 0x3000001a },
+ { 0x3180074d, 0x1c000020 },
+ { 0x37800780, 0x1c000025 },
+ { 0x378007a6, 0x3000000a },
+ { 0x370007b1, 0x1c000000 },
+ { 0x0e800901, 0x30000001 },
+ { 0x0e000903, 0x28000000 },
+ { 0x0e800904, 0x1c000035 },
+ { 0x0e00093c, 0x30000000 },
+ { 0x0e00093d, 0x1c000000 },
+ { 0x0e80093e, 0x28000002 },
+ { 0x0e800941, 0x30000007 },
+ { 0x0e800949, 0x28000003 },
+ { 0x0e00094d, 0x30000000 },
+ { 0x0e000950, 0x1c000000 },
+ { 0x0e800951, 0x30000003 },
+ { 0x0e800958, 0x1c000009 },
+ { 0x0e800962, 0x30000001 },
+ { 0x09800964, 0x54000001 },
+ { 0x0e800966, 0x34000009 },
+ { 0x09000970, 0x54000000 },
+ { 0x0e00097d, 0x1c000000 },
+ { 0x02000981, 0x30000000 },
+ { 0x02800982, 0x28000001 },
+ { 0x02800985, 0x1c000007 },
+ { 0x0280098f, 0x1c000001 },
+ { 0x02800993, 0x1c000015 },
+ { 0x028009aa, 0x1c000006 },
+ { 0x020009b2, 0x1c000000 },
+ { 0x028009b6, 0x1c000003 },
+ { 0x020009bc, 0x30000000 },
+ { 0x020009bd, 0x1c000000 },
+ { 0x028009be, 0x28000002 },
+ { 0x028009c1, 0x30000003 },
+ { 0x028009c7, 0x28000001 },
+ { 0x028009cb, 0x28000001 },
+ { 0x020009cd, 0x30000000 },
+ { 0x020009ce, 0x1c000000 },
+ { 0x020009d7, 0x28000000 },
+ { 0x028009dc, 0x1c000001 },
+ { 0x028009df, 0x1c000002 },
+ { 0x028009e2, 0x30000001 },
+ { 0x028009e6, 0x34000009 },
+ { 0x028009f0, 0x1c000001 },
+ { 0x028009f2, 0x5c000001 },
+ { 0x028009f4, 0x3c000005 },
+ { 0x020009fa, 0x68000000 },
+ { 0x15800a01, 0x30000001 },
+ { 0x15000a03, 0x28000000 },
+ { 0x15800a05, 0x1c000005 },
+ { 0x15800a0f, 0x1c000001 },
+ { 0x15800a13, 0x1c000015 },
+ { 0x15800a2a, 0x1c000006 },
+ { 0x15800a32, 0x1c000001 },
+ { 0x15800a35, 0x1c000001 },
+ { 0x15800a38, 0x1c000001 },
+ { 0x15000a3c, 0x30000000 },
+ { 0x15800a3e, 0x28000002 },
+ { 0x15800a41, 0x30000001 },
+ { 0x15800a47, 0x30000001 },
+ { 0x15800a4b, 0x30000002 },
+ { 0x15800a59, 0x1c000003 },
+ { 0x15000a5e, 0x1c000000 },
+ { 0x15800a66, 0x34000009 },
+ { 0x15800a70, 0x30000001 },
+ { 0x15800a72, 0x1c000002 },
+ { 0x14800a81, 0x30000001 },
+ { 0x14000a83, 0x28000000 },
+ { 0x14800a85, 0x1c000008 },
+ { 0x14800a8f, 0x1c000002 },
+ { 0x14800a93, 0x1c000015 },
+ { 0x14800aaa, 0x1c000006 },
+ { 0x14800ab2, 0x1c000001 },
+ { 0x14800ab5, 0x1c000004 },
+ { 0x14000abc, 0x30000000 },
+ { 0x14000abd, 0x1c000000 },
+ { 0x14800abe, 0x28000002 },
+ { 0x14800ac1, 0x30000004 },
+ { 0x14800ac7, 0x30000001 },
+ { 0x14000ac9, 0x28000000 },
+ { 0x14800acb, 0x28000001 },
+ { 0x14000acd, 0x30000000 },
+ { 0x14000ad0, 0x1c000000 },
+ { 0x14800ae0, 0x1c000001 },
+ { 0x14800ae2, 0x30000001 },
+ { 0x14800ae6, 0x34000009 },
+ { 0x14000af1, 0x5c000000 },
+ { 0x2b000b01, 0x30000000 },
+ { 0x2b800b02, 0x28000001 },
+ { 0x2b800b05, 0x1c000007 },
+ { 0x2b800b0f, 0x1c000001 },
+ { 0x2b800b13, 0x1c000015 },
+ { 0x2b800b2a, 0x1c000006 },
+ { 0x2b800b32, 0x1c000001 },
+ { 0x2b800b35, 0x1c000004 },
+ { 0x2b000b3c, 0x30000000 },
+ { 0x2b000b3d, 0x1c000000 },
+ { 0x2b000b3e, 0x28000000 },
+ { 0x2b000b3f, 0x30000000 },
+ { 0x2b000b40, 0x28000000 },
+ { 0x2b800b41, 0x30000002 },
+ { 0x2b800b47, 0x28000001 },
+ { 0x2b800b4b, 0x28000001 },
+ { 0x2b000b4d, 0x30000000 },
+ { 0x2b000b56, 0x30000000 },
+ { 0x2b000b57, 0x28000000 },
+ { 0x2b800b5c, 0x1c000001 },
+ { 0x2b800b5f, 0x1c000002 },
+ { 0x2b800b66, 0x34000009 },
+ { 0x2b000b70, 0x68000000 },
+ { 0x2b000b71, 0x1c000000 },
+ { 0x35000b82, 0x30000000 },
+ { 0x35000b83, 0x1c000000 },
+ { 0x35800b85, 0x1c000005 },
+ { 0x35800b8e, 0x1c000002 },
+ { 0x35800b92, 0x1c000003 },
+ { 0x35800b99, 0x1c000001 },
+ { 0x35000b9c, 0x1c000000 },
+ { 0x35800b9e, 0x1c000001 },
+ { 0x35800ba3, 0x1c000001 },
+ { 0x35800ba8, 0x1c000002 },
+ { 0x35800bae, 0x1c00000b },
+ { 0x35800bbe, 0x28000001 },
+ { 0x35000bc0, 0x30000000 },
+ { 0x35800bc1, 0x28000001 },
+ { 0x35800bc6, 0x28000002 },
+ { 0x35800bca, 0x28000002 },
+ { 0x35000bcd, 0x30000000 },
+ { 0x35000bd7, 0x28000000 },
+ { 0x35800be6, 0x34000009 },
+ { 0x35800bf0, 0x3c000002 },
+ { 0x35800bf3, 0x68000005 },
+ { 0x35000bf9, 0x5c000000 },
+ { 0x35000bfa, 0x68000000 },
+ { 0x36800c01, 0x28000002 },
+ { 0x36800c05, 0x1c000007 },
+ { 0x36800c0e, 0x1c000002 },
+ { 0x36800c12, 0x1c000016 },
+ { 0x36800c2a, 0x1c000009 },
+ { 0x36800c35, 0x1c000004 },
+ { 0x36800c3e, 0x30000002 },
+ { 0x36800c41, 0x28000003 },
+ { 0x36800c46, 0x30000002 },
+ { 0x36800c4a, 0x30000003 },
+ { 0x36800c55, 0x30000001 },
+ { 0x36800c60, 0x1c000001 },
+ { 0x36800c66, 0x34000009 },
+ { 0x1c800c82, 0x28000001 },
+ { 0x1c800c85, 0x1c000007 },
+ { 0x1c800c8e, 0x1c000002 },
+ { 0x1c800c92, 0x1c000016 },
+ { 0x1c800caa, 0x1c000009 },
+ { 0x1c800cb5, 0x1c000004 },
+ { 0x1c000cbc, 0x30000000 },
+ { 0x1c000cbd, 0x1c000000 },
+ { 0x1c000cbe, 0x28000000 },
+ { 0x1c000cbf, 0x30000000 },
+ { 0x1c800cc0, 0x28000004 },
+ { 0x1c000cc6, 0x30000000 },
+ { 0x1c800cc7, 0x28000001 },
+ { 0x1c800cca, 0x28000001 },
+ { 0x1c800ccc, 0x30000001 },
+ { 0x1c800cd5, 0x28000001 },
+ { 0x1c000cde, 0x1c000000 },
+ { 0x1c800ce0, 0x1c000001 },
+ { 0x1c800ce6, 0x34000009 },
+ { 0x24800d02, 0x28000001 },
+ { 0x24800d05, 0x1c000007 },
+ { 0x24800d0e, 0x1c000002 },
+ { 0x24800d12, 0x1c000016 },
+ { 0x24800d2a, 0x1c00000f },
+ { 0x24800d3e, 0x28000002 },
+ { 0x24800d41, 0x30000002 },
+ { 0x24800d46, 0x28000002 },
+ { 0x24800d4a, 0x28000002 },
+ { 0x24000d4d, 0x30000000 },
+ { 0x24000d57, 0x28000000 },
+ { 0x24800d60, 0x1c000001 },
+ { 0x24800d66, 0x34000009 },
+ { 0x2f800d82, 0x28000001 },
+ { 0x2f800d85, 0x1c000011 },
+ { 0x2f800d9a, 0x1c000017 },
+ { 0x2f800db3, 0x1c000008 },
+ { 0x2f000dbd, 0x1c000000 },
+ { 0x2f800dc0, 0x1c000006 },
+ { 0x2f000dca, 0x30000000 },
+ { 0x2f800dcf, 0x28000002 },
+ { 0x2f800dd2, 0x30000002 },
+ { 0x2f000dd6, 0x30000000 },
+ { 0x2f800dd8, 0x28000007 },
+ { 0x2f800df2, 0x28000001 },
+ { 0x2f000df4, 0x54000000 },
+ { 0x38800e01, 0x1c00002f },
+ { 0x38000e31, 0x30000000 },
+ { 0x38800e32, 0x1c000001 },
+ { 0x38800e34, 0x30000006 },
+ { 0x09000e3f, 0x5c000000 },
+ { 0x38800e40, 0x1c000005 },
+ { 0x38000e46, 0x18000000 },
+ { 0x38800e47, 0x30000007 },
+ { 0x38000e4f, 0x54000000 },
+ { 0x38800e50, 0x34000009 },
+ { 0x38800e5a, 0x54000001 },
+ { 0x20800e81, 0x1c000001 },
+ { 0x20000e84, 0x1c000000 },
+ { 0x20800e87, 0x1c000001 },
+ { 0x20000e8a, 0x1c000000 },
+ { 0x20000e8d, 0x1c000000 },
+ { 0x20800e94, 0x1c000003 },
+ { 0x20800e99, 0x1c000006 },
+ { 0x20800ea1, 0x1c000002 },
+ { 0x20000ea5, 0x1c000000 },
+ { 0x20000ea7, 0x1c000000 },
+ { 0x20800eaa, 0x1c000001 },
+ { 0x20800ead, 0x1c000003 },
+ { 0x20000eb1, 0x30000000 },
+ { 0x20800eb2, 0x1c000001 },
+ { 0x20800eb4, 0x30000005 },
+ { 0x20800ebb, 0x30000001 },
+ { 0x20000ebd, 0x1c000000 },
+ { 0x20800ec0, 0x1c000004 },
+ { 0x20000ec6, 0x18000000 },
+ { 0x20800ec8, 0x30000005 },
+ { 0x20800ed0, 0x34000009 },
+ { 0x20800edc, 0x1c000001 },
+ { 0x39000f00, 0x1c000000 },
+ { 0x39800f01, 0x68000002 },
+ { 0x39800f04, 0x5400000e },
+ { 0x39800f13, 0x68000004 },
+ { 0x39800f18, 0x30000001 },
+ { 0x39800f1a, 0x68000005 },
+ { 0x39800f20, 0x34000009 },
+ { 0x39800f2a, 0x3c000009 },
+ { 0x39000f34, 0x68000000 },
+ { 0x39000f35, 0x30000000 },
+ { 0x39000f36, 0x68000000 },
+ { 0x39000f37, 0x30000000 },
+ { 0x39000f38, 0x68000000 },
+ { 0x39000f39, 0x30000000 },
+ { 0x39000f3a, 0x58000000 },
+ { 0x39000f3b, 0x48000000 },
+ { 0x39000f3c, 0x58000000 },
+ { 0x39000f3d, 0x48000000 },
+ { 0x39800f3e, 0x28000001 },
+ { 0x39800f40, 0x1c000007 },
+ { 0x39800f49, 0x1c000021 },
+ { 0x39800f71, 0x3000000d },
+ { 0x39000f7f, 0x28000000 },
+ { 0x39800f80, 0x30000004 },
+ { 0x39000f85, 0x54000000 },
+ { 0x39800f86, 0x30000001 },
+ { 0x39800f88, 0x1c000003 },
+ { 0x39800f90, 0x30000007 },
+ { 0x39800f99, 0x30000023 },
+ { 0x39800fbe, 0x68000007 },
+ { 0x39000fc6, 0x30000000 },
+ { 0x39800fc7, 0x68000005 },
+ { 0x39000fcf, 0x68000000 },
+ { 0x39800fd0, 0x54000001 },
+ { 0x26801000, 0x1c000021 },
+ { 0x26801023, 0x1c000004 },
+ { 0x26801029, 0x1c000001 },
+ { 0x2600102c, 0x28000000 },
+ { 0x2680102d, 0x30000003 },
+ { 0x26001031, 0x28000000 },
+ { 0x26001032, 0x30000000 },
+ { 0x26801036, 0x30000001 },
+ { 0x26001038, 0x28000000 },
+ { 0x26001039, 0x30000000 },
+ { 0x26801040, 0x34000009 },
+ { 0x2680104a, 0x54000005 },
+ { 0x26801050, 0x1c000005 },
+ { 0x26801056, 0x28000001 },
+ { 0x26801058, 0x30000001 },
+ { 0x100010a0, 0x24001c60 },
+ { 0x100010a1, 0x24001c60 },
+ { 0x100010a2, 0x24001c60 },
+ { 0x100010a3, 0x24001c60 },
+ { 0x100010a4, 0x24001c60 },
+ { 0x100010a5, 0x24001c60 },
+ { 0x100010a6, 0x24001c60 },
+ { 0x100010a7, 0x24001c60 },
+ { 0x100010a8, 0x24001c60 },
+ { 0x100010a9, 0x24001c60 },
+ { 0x100010aa, 0x24001c60 },
+ { 0x100010ab, 0x24001c60 },
+ { 0x100010ac, 0x24001c60 },
+ { 0x100010ad, 0x24001c60 },
+ { 0x100010ae, 0x24001c60 },
+ { 0x100010af, 0x24001c60 },
+ { 0x100010b0, 0x24001c60 },
+ { 0x100010b1, 0x24001c60 },
+ { 0x100010b2, 0x24001c60 },
+ { 0x100010b3, 0x24001c60 },
+ { 0x100010b4, 0x24001c60 },
+ { 0x100010b5, 0x24001c60 },
+ { 0x100010b6, 0x24001c60 },
+ { 0x100010b7, 0x24001c60 },
+ { 0x100010b8, 0x24001c60 },
+ { 0x100010b9, 0x24001c60 },
+ { 0x100010ba, 0x24001c60 },
+ { 0x100010bb, 0x24001c60 },
+ { 0x100010bc, 0x24001c60 },
+ { 0x100010bd, 0x24001c60 },
+ { 0x100010be, 0x24001c60 },
+ { 0x100010bf, 0x24001c60 },
+ { 0x100010c0, 0x24001c60 },
+ { 0x100010c1, 0x24001c60 },
+ { 0x100010c2, 0x24001c60 },
+ { 0x100010c3, 0x24001c60 },
+ { 0x100010c4, 0x24001c60 },
+ { 0x100010c5, 0x24001c60 },
+ { 0x108010d0, 0x1c00002a },
+ { 0x090010fb, 0x54000000 },
+ { 0x100010fc, 0x18000000 },
+ { 0x17801100, 0x1c000059 },
+ { 0x1780115f, 0x1c000043 },
+ { 0x178011a8, 0x1c000051 },
+ { 0x0f801200, 0x1c000048 },
+ { 0x0f80124a, 0x1c000003 },
+ { 0x0f801250, 0x1c000006 },
+ { 0x0f001258, 0x1c000000 },
+ { 0x0f80125a, 0x1c000003 },
+ { 0x0f801260, 0x1c000028 },
+ { 0x0f80128a, 0x1c000003 },
+ { 0x0f801290, 0x1c000020 },
+ { 0x0f8012b2, 0x1c000003 },
+ { 0x0f8012b8, 0x1c000006 },
+ { 0x0f0012c0, 0x1c000000 },
+ { 0x0f8012c2, 0x1c000003 },
+ { 0x0f8012c8, 0x1c00000e },
+ { 0x0f8012d8, 0x1c000038 },
+ { 0x0f801312, 0x1c000003 },
+ { 0x0f801318, 0x1c000042 },
+ { 0x0f00135f, 0x30000000 },
+ { 0x0f001360, 0x68000000 },
+ { 0x0f801361, 0x54000007 },
+ { 0x0f801369, 0x3c000013 },
+ { 0x0f801380, 0x1c00000f },
+ { 0x0f801390, 0x68000009 },
+ { 0x088013a0, 0x1c000054 },
+ { 0x07801401, 0x1c00026b },
+ { 0x0780166d, 0x54000001 },
+ { 0x0780166f, 0x1c000007 },
+ { 0x28001680, 0x74000000 },
+ { 0x28801681, 0x1c000019 },
+ { 0x2800169b, 0x58000000 },
+ { 0x2800169c, 0x48000000 },
+ { 0x2d8016a0, 0x1c00004a },
+ { 0x098016eb, 0x54000002 },
+ { 0x2d8016ee, 0x38000002 },
+ { 0x32801700, 0x1c00000c },
+ { 0x3280170e, 0x1c000003 },
+ { 0x32801712, 0x30000002 },
+ { 0x18801720, 0x1c000011 },
+ { 0x18801732, 0x30000002 },
+ { 0x09801735, 0x54000001 },
+ { 0x06801740, 0x1c000011 },
+ { 0x06801752, 0x30000001 },
+ { 0x33801760, 0x1c00000c },
+ { 0x3380176e, 0x1c000002 },
+ { 0x33801772, 0x30000001 },
+ { 0x1f801780, 0x1c000033 },
+ { 0x1f8017b4, 0x04000001 },
+ { 0x1f0017b6, 0x28000000 },
+ { 0x1f8017b7, 0x30000006 },
+ { 0x1f8017be, 0x28000007 },
+ { 0x1f0017c6, 0x30000000 },
+ { 0x1f8017c7, 0x28000001 },
+ { 0x1f8017c9, 0x3000000a },
+ { 0x1f8017d4, 0x54000002 },
+ { 0x1f0017d7, 0x18000000 },
+ { 0x1f8017d8, 0x54000002 },
+ { 0x1f0017db, 0x5c000000 },
+ { 0x1f0017dc, 0x1c000000 },
+ { 0x1f0017dd, 0x30000000 },
+ { 0x1f8017e0, 0x34000009 },
+ { 0x1f8017f0, 0x3c000009 },
+ { 0x25801800, 0x54000005 },
+ { 0x25001806, 0x44000000 },
+ { 0x25801807, 0x54000003 },
+ { 0x2580180b, 0x30000002 },
+ { 0x2500180e, 0x74000000 },
+ { 0x25801810, 0x34000009 },
+ { 0x25801820, 0x1c000022 },
+ { 0x25001843, 0x18000000 },
+ { 0x25801844, 0x1c000033 },
+ { 0x25801880, 0x1c000028 },
+ { 0x250018a9, 0x30000000 },
+ { 0x22801900, 0x1c00001c },
+ { 0x22801920, 0x30000002 },
+ { 0x22801923, 0x28000003 },
+ { 0x22801927, 0x30000001 },
+ { 0x22801929, 0x28000002 },
+ { 0x22801930, 0x28000001 },
+ { 0x22001932, 0x30000000 },
+ { 0x22801933, 0x28000005 },
+ { 0x22801939, 0x30000002 },
+ { 0x22001940, 0x68000000 },
+ { 0x22801944, 0x54000001 },
+ { 0x22801946, 0x34000009 },
+ { 0x34801950, 0x1c00001d },
+ { 0x34801970, 0x1c000004 },
+ { 0x27801980, 0x1c000029 },
+ { 0x278019b0, 0x28000010 },
+ { 0x278019c1, 0x1c000006 },
+ { 0x278019c8, 0x28000001 },
+ { 0x278019d0, 0x34000009 },
+ { 0x278019de, 0x54000001 },
+ { 0x1f8019e0, 0x6800001f },
+ { 0x05801a00, 0x1c000016 },
+ { 0x05801a17, 0x30000001 },
+ { 0x05801a19, 0x28000002 },
+ { 0x05801a1e, 0x54000001 },
+ { 0x21801d00, 0x1400002b },
+ { 0x21801d2c, 0x18000035 },
+ { 0x21801d62, 0x14000015 },
+ { 0x0c001d78, 0x18000000 },
+ { 0x21801d79, 0x14000021 },
+ { 0x21801d9b, 0x18000024 },
+ { 0x1b801dc0, 0x30000003 },
+ { 0x21001e00, 0x24000001 },
+ { 0x21001e01, 0x1400ffff },
+ { 0x21001e02, 0x24000001 },
+ { 0x21001e03, 0x1400ffff },
+ { 0x21001e04, 0x24000001 },
+ { 0x21001e05, 0x1400ffff },
+ { 0x21001e06, 0x24000001 },
+ { 0x21001e07, 0x1400ffff },
+ { 0x21001e08, 0x24000001 },
+ { 0x21001e09, 0x1400ffff },
+ { 0x21001e0a, 0x24000001 },
+ { 0x21001e0b, 0x1400ffff },
+ { 0x21001e0c, 0x24000001 },
+ { 0x21001e0d, 0x1400ffff },
+ { 0x21001e0e, 0x24000001 },
+ { 0x21001e0f, 0x1400ffff },
+ { 0x21001e10, 0x24000001 },
+ { 0x21001e11, 0x1400ffff },
+ { 0x21001e12, 0x24000001 },
+ { 0x21001e13, 0x1400ffff },
+ { 0x21001e14, 0x24000001 },
+ { 0x21001e15, 0x1400ffff },
+ { 0x21001e16, 0x24000001 },
+ { 0x21001e17, 0x1400ffff },
+ { 0x21001e18, 0x24000001 },
+ { 0x21001e19, 0x1400ffff },
+ { 0x21001e1a, 0x24000001 },
+ { 0x21001e1b, 0x1400ffff },
+ { 0x21001e1c, 0x24000001 },
+ { 0x21001e1d, 0x1400ffff },
+ { 0x21001e1e, 0x24000001 },
+ { 0x21001e1f, 0x1400ffff },
+ { 0x21001e20, 0x24000001 },
+ { 0x21001e21, 0x1400ffff },
+ { 0x21001e22, 0x24000001 },
+ { 0x21001e23, 0x1400ffff },
+ { 0x21001e24, 0x24000001 },
+ { 0x21001e25, 0x1400ffff },
+ { 0x21001e26, 0x24000001 },
+ { 0x21001e27, 0x1400ffff },
+ { 0x21001e28, 0x24000001 },
+ { 0x21001e29, 0x1400ffff },
+ { 0x21001e2a, 0x24000001 },
+ { 0x21001e2b, 0x1400ffff },
+ { 0x21001e2c, 0x24000001 },
+ { 0x21001e2d, 0x1400ffff },
+ { 0x21001e2e, 0x24000001 },
+ { 0x21001e2f, 0x1400ffff },
+ { 0x21001e30, 0x24000001 },
+ { 0x21001e31, 0x1400ffff },
+ { 0x21001e32, 0x24000001 },
+ { 0x21001e33, 0x1400ffff },
+ { 0x21001e34, 0x24000001 },
+ { 0x21001e35, 0x1400ffff },
+ { 0x21001e36, 0x24000001 },
+ { 0x21001e37, 0x1400ffff },
+ { 0x21001e38, 0x24000001 },
+ { 0x21001e39, 0x1400ffff },
+ { 0x21001e3a, 0x24000001 },
+ { 0x21001e3b, 0x1400ffff },
+ { 0x21001e3c, 0x24000001 },
+ { 0x21001e3d, 0x1400ffff },
+ { 0x21001e3e, 0x24000001 },
+ { 0x21001e3f, 0x1400ffff },
+ { 0x21001e40, 0x24000001 },
+ { 0x21001e41, 0x1400ffff },
+ { 0x21001e42, 0x24000001 },
+ { 0x21001e43, 0x1400ffff },
+ { 0x21001e44, 0x24000001 },
+ { 0x21001e45, 0x1400ffff },
+ { 0x21001e46, 0x24000001 },
+ { 0x21001e47, 0x1400ffff },
+ { 0x21001e48, 0x24000001 },
+ { 0x21001e49, 0x1400ffff },
+ { 0x21001e4a, 0x24000001 },
+ { 0x21001e4b, 0x1400ffff },
+ { 0x21001e4c, 0x24000001 },
+ { 0x21001e4d, 0x1400ffff },
+ { 0x21001e4e, 0x24000001 },
+ { 0x21001e4f, 0x1400ffff },
+ { 0x21001e50, 0x24000001 },
+ { 0x21001e51, 0x1400ffff },
+ { 0x21001e52, 0x24000001 },
+ { 0x21001e53, 0x1400ffff },
+ { 0x21001e54, 0x24000001 },
+ { 0x21001e55, 0x1400ffff },
+ { 0x21001e56, 0x24000001 },
+ { 0x21001e57, 0x1400ffff },
+ { 0x21001e58, 0x24000001 },
+ { 0x21001e59, 0x1400ffff },
+ { 0x21001e5a, 0x24000001 },
+ { 0x21001e5b, 0x1400ffff },
+ { 0x21001e5c, 0x24000001 },
+ { 0x21001e5d, 0x1400ffff },
+ { 0x21001e5e, 0x24000001 },
+ { 0x21001e5f, 0x1400ffff },
+ { 0x21001e60, 0x24000001 },
+ { 0x21001e61, 0x1400ffff },
+ { 0x21001e62, 0x24000001 },
+ { 0x21001e63, 0x1400ffff },
+ { 0x21001e64, 0x24000001 },
+ { 0x21001e65, 0x1400ffff },
+ { 0x21001e66, 0x24000001 },
+ { 0x21001e67, 0x1400ffff },
+ { 0x21001e68, 0x24000001 },
+ { 0x21001e69, 0x1400ffff },
+ { 0x21001e6a, 0x24000001 },
+ { 0x21001e6b, 0x1400ffff },
+ { 0x21001e6c, 0x24000001 },
+ { 0x21001e6d, 0x1400ffff },
+ { 0x21001e6e, 0x24000001 },
+ { 0x21001e6f, 0x1400ffff },
+ { 0x21001e70, 0x24000001 },
+ { 0x21001e71, 0x1400ffff },
+ { 0x21001e72, 0x24000001 },
+ { 0x21001e73, 0x1400ffff },
+ { 0x21001e74, 0x24000001 },
+ { 0x21001e75, 0x1400ffff },
+ { 0x21001e76, 0x24000001 },
+ { 0x21001e77, 0x1400ffff },
+ { 0x21001e78, 0x24000001 },
+ { 0x21001e79, 0x1400ffff },
+ { 0x21001e7a, 0x24000001 },
+ { 0x21001e7b, 0x1400ffff },
+ { 0x21001e7c, 0x24000001 },
+ { 0x21001e7d, 0x1400ffff },
+ { 0x21001e7e, 0x24000001 },
+ { 0x21001e7f, 0x1400ffff },
+ { 0x21001e80, 0x24000001 },
+ { 0x21001e81, 0x1400ffff },
+ { 0x21001e82, 0x24000001 },
+ { 0x21001e83, 0x1400ffff },
+ { 0x21001e84, 0x24000001 },
+ { 0x21001e85, 0x1400ffff },
+ { 0x21001e86, 0x24000001 },
+ { 0x21001e87, 0x1400ffff },
+ { 0x21001e88, 0x24000001 },
+ { 0x21001e89, 0x1400ffff },
+ { 0x21001e8a, 0x24000001 },
+ { 0x21001e8b, 0x1400ffff },
+ { 0x21001e8c, 0x24000001 },
+ { 0x21001e8d, 0x1400ffff },
+ { 0x21001e8e, 0x24000001 },
+ { 0x21001e8f, 0x1400ffff },
+ { 0x21001e90, 0x24000001 },
+ { 0x21001e91, 0x1400ffff },
+ { 0x21001e92, 0x24000001 },
+ { 0x21001e93, 0x1400ffff },
+ { 0x21001e94, 0x24000001 },
+ { 0x21001e95, 0x1400ffff },
+ { 0x21801e96, 0x14000004 },
+ { 0x21001e9b, 0x1400ffc5 },
+ { 0x21001ea0, 0x24000001 },
+ { 0x21001ea1, 0x1400ffff },
+ { 0x21001ea2, 0x24000001 },
+ { 0x21001ea3, 0x1400ffff },
+ { 0x21001ea4, 0x24000001 },
+ { 0x21001ea5, 0x1400ffff },
+ { 0x21001ea6, 0x24000001 },
+ { 0x21001ea7, 0x1400ffff },
+ { 0x21001ea8, 0x24000001 },
+ { 0x21001ea9, 0x1400ffff },
+ { 0x21001eaa, 0x24000001 },
+ { 0x21001eab, 0x1400ffff },
+ { 0x21001eac, 0x24000001 },
+ { 0x21001ead, 0x1400ffff },
+ { 0x21001eae, 0x24000001 },
+ { 0x21001eaf, 0x1400ffff },
+ { 0x21001eb0, 0x24000001 },
+ { 0x21001eb1, 0x1400ffff },
+ { 0x21001eb2, 0x24000001 },
+ { 0x21001eb3, 0x1400ffff },
+ { 0x21001eb4, 0x24000001 },
+ { 0x21001eb5, 0x1400ffff },
+ { 0x21001eb6, 0x24000001 },
+ { 0x21001eb7, 0x1400ffff },
+ { 0x21001eb8, 0x24000001 },
+ { 0x21001eb9, 0x1400ffff },
+ { 0x21001eba, 0x24000001 },
+ { 0x21001ebb, 0x1400ffff },
+ { 0x21001ebc, 0x24000001 },
+ { 0x21001ebd, 0x1400ffff },
+ { 0x21001ebe, 0x24000001 },
+ { 0x21001ebf, 0x1400ffff },
+ { 0x21001ec0, 0x24000001 },
+ { 0x21001ec1, 0x1400ffff },
+ { 0x21001ec2, 0x24000001 },
+ { 0x21001ec3, 0x1400ffff },
+ { 0x21001ec4, 0x24000001 },
+ { 0x21001ec5, 0x1400ffff },
+ { 0x21001ec6, 0x24000001 },
+ { 0x21001ec7, 0x1400ffff },
+ { 0x21001ec8, 0x24000001 },
+ { 0x21001ec9, 0x1400ffff },
+ { 0x21001eca, 0x24000001 },
+ { 0x21001ecb, 0x1400ffff },
+ { 0x21001ecc, 0x24000001 },
+ { 0x21001ecd, 0x1400ffff },
+ { 0x21001ece, 0x24000001 },
+ { 0x21001ecf, 0x1400ffff },
+ { 0x21001ed0, 0x24000001 },
+ { 0x21001ed1, 0x1400ffff },
+ { 0x21001ed2, 0x24000001 },
+ { 0x21001ed3, 0x1400ffff },
+ { 0x21001ed4, 0x24000001 },
+ { 0x21001ed5, 0x1400ffff },
+ { 0x21001ed6, 0x24000001 },
+ { 0x21001ed7, 0x1400ffff },
+ { 0x21001ed8, 0x24000001 },
+ { 0x21001ed9, 0x1400ffff },
+ { 0x21001eda, 0x24000001 },
+ { 0x21001edb, 0x1400ffff },
+ { 0x21001edc, 0x24000001 },
+ { 0x21001edd, 0x1400ffff },
+ { 0x21001ede, 0x24000001 },
+ { 0x21001edf, 0x1400ffff },
+ { 0x21001ee0, 0x24000001 },
+ { 0x21001ee1, 0x1400ffff },
+ { 0x21001ee2, 0x24000001 },
+ { 0x21001ee3, 0x1400ffff },
+ { 0x21001ee4, 0x24000001 },
+ { 0x21001ee5, 0x1400ffff },
+ { 0x21001ee6, 0x24000001 },
+ { 0x21001ee7, 0x1400ffff },
+ { 0x21001ee8, 0x24000001 },
+ { 0x21001ee9, 0x1400ffff },
+ { 0x21001eea, 0x24000001 },
+ { 0x21001eeb, 0x1400ffff },
+ { 0x21001eec, 0x24000001 },
+ { 0x21001eed, 0x1400ffff },
+ { 0x21001eee, 0x24000001 },
+ { 0x21001eef, 0x1400ffff },
+ { 0x21001ef0, 0x24000001 },
+ { 0x21001ef1, 0x1400ffff },
+ { 0x21001ef2, 0x24000001 },
+ { 0x21001ef3, 0x1400ffff },
+ { 0x21001ef4, 0x24000001 },
+ { 0x21001ef5, 0x1400ffff },
+ { 0x21001ef6, 0x24000001 },
+ { 0x21001ef7, 0x1400ffff },
+ { 0x21001ef8, 0x24000001 },
+ { 0x21001ef9, 0x1400ffff },
+ { 0x13001f00, 0x14000008 },
+ { 0x13001f01, 0x14000008 },
+ { 0x13001f02, 0x14000008 },
+ { 0x13001f03, 0x14000008 },
+ { 0x13001f04, 0x14000008 },
+ { 0x13001f05, 0x14000008 },
+ { 0x13001f06, 0x14000008 },
+ { 0x13001f07, 0x14000008 },
+ { 0x13001f08, 0x2400fff8 },
+ { 0x13001f09, 0x2400fff8 },
+ { 0x13001f0a, 0x2400fff8 },
+ { 0x13001f0b, 0x2400fff8 },
+ { 0x13001f0c, 0x2400fff8 },
+ { 0x13001f0d, 0x2400fff8 },
+ { 0x13001f0e, 0x2400fff8 },
+ { 0x13001f0f, 0x2400fff8 },
+ { 0x13001f10, 0x14000008 },
+ { 0x13001f11, 0x14000008 },
+ { 0x13001f12, 0x14000008 },
+ { 0x13001f13, 0x14000008 },
+ { 0x13001f14, 0x14000008 },
+ { 0x13001f15, 0x14000008 },
+ { 0x13001f18, 0x2400fff8 },
+ { 0x13001f19, 0x2400fff8 },
+ { 0x13001f1a, 0x2400fff8 },
+ { 0x13001f1b, 0x2400fff8 },
+ { 0x13001f1c, 0x2400fff8 },
+ { 0x13001f1d, 0x2400fff8 },
+ { 0x13001f20, 0x14000008 },
+ { 0x13001f21, 0x14000008 },
+ { 0x13001f22, 0x14000008 },
+ { 0x13001f23, 0x14000008 },
+ { 0x13001f24, 0x14000008 },
+ { 0x13001f25, 0x14000008 },
+ { 0x13001f26, 0x14000008 },
+ { 0x13001f27, 0x14000008 },
+ { 0x13001f28, 0x2400fff8 },
+ { 0x13001f29, 0x2400fff8 },
+ { 0x13001f2a, 0x2400fff8 },
+ { 0x13001f2b, 0x2400fff8 },
+ { 0x13001f2c, 0x2400fff8 },
+ { 0x13001f2d, 0x2400fff8 },
+ { 0x13001f2e, 0x2400fff8 },
+ { 0x13001f2f, 0x2400fff8 },
+ { 0x13001f30, 0x14000008 },
+ { 0x13001f31, 0x14000008 },
+ { 0x13001f32, 0x14000008 },
+ { 0x13001f33, 0x14000008 },
+ { 0x13001f34, 0x14000008 },
+ { 0x13001f35, 0x14000008 },
+ { 0x13001f36, 0x14000008 },
+ { 0x13001f37, 0x14000008 },
+ { 0x13001f38, 0x2400fff8 },
+ { 0x13001f39, 0x2400fff8 },
+ { 0x13001f3a, 0x2400fff8 },
+ { 0x13001f3b, 0x2400fff8 },
+ { 0x13001f3c, 0x2400fff8 },
+ { 0x13001f3d, 0x2400fff8 },
+ { 0x13001f3e, 0x2400fff8 },
+ { 0x13001f3f, 0x2400fff8 },
+ { 0x13001f40, 0x14000008 },
+ { 0x13001f41, 0x14000008 },
+ { 0x13001f42, 0x14000008 },
+ { 0x13001f43, 0x14000008 },
+ { 0x13001f44, 0x14000008 },
+ { 0x13001f45, 0x14000008 },
+ { 0x13001f48, 0x2400fff8 },
+ { 0x13001f49, 0x2400fff8 },
+ { 0x13001f4a, 0x2400fff8 },
+ { 0x13001f4b, 0x2400fff8 },
+ { 0x13001f4c, 0x2400fff8 },
+ { 0x13001f4d, 0x2400fff8 },
+ { 0x13001f50, 0x14000000 },
+ { 0x13001f51, 0x14000008 },
+ { 0x13001f52, 0x14000000 },
+ { 0x13001f53, 0x14000008 },
+ { 0x13001f54, 0x14000000 },
+ { 0x13001f55, 0x14000008 },
+ { 0x13001f56, 0x14000000 },
+ { 0x13001f57, 0x14000008 },
+ { 0x13001f59, 0x2400fff8 },
+ { 0x13001f5b, 0x2400fff8 },
+ { 0x13001f5d, 0x2400fff8 },
+ { 0x13001f5f, 0x2400fff8 },
+ { 0x13001f60, 0x14000008 },
+ { 0x13001f61, 0x14000008 },
+ { 0x13001f62, 0x14000008 },
+ { 0x13001f63, 0x14000008 },
+ { 0x13001f64, 0x14000008 },
+ { 0x13001f65, 0x14000008 },
+ { 0x13001f66, 0x14000008 },
+ { 0x13001f67, 0x14000008 },
+ { 0x13001f68, 0x2400fff8 },
+ { 0x13001f69, 0x2400fff8 },
+ { 0x13001f6a, 0x2400fff8 },
+ { 0x13001f6b, 0x2400fff8 },
+ { 0x13001f6c, 0x2400fff8 },
+ { 0x13001f6d, 0x2400fff8 },
+ { 0x13001f6e, 0x2400fff8 },
+ { 0x13001f6f, 0x2400fff8 },
+ { 0x13001f70, 0x1400004a },
+ { 0x13001f71, 0x1400004a },
+ { 0x13001f72, 0x14000056 },
+ { 0x13001f73, 0x14000056 },
+ { 0x13001f74, 0x14000056 },
+ { 0x13001f75, 0x14000056 },
+ { 0x13001f76, 0x14000064 },
+ { 0x13001f77, 0x14000064 },
+ { 0x13001f78, 0x14000080 },
+ { 0x13001f79, 0x14000080 },
+ { 0x13001f7a, 0x14000070 },
+ { 0x13001f7b, 0x14000070 },
+ { 0x13001f7c, 0x1400007e },
+ { 0x13001f7d, 0x1400007e },
+ { 0x13001f80, 0x14000008 },
+ { 0x13001f81, 0x14000008 },
+ { 0x13001f82, 0x14000008 },
+ { 0x13001f83, 0x14000008 },
+ { 0x13001f84, 0x14000008 },
+ { 0x13001f85, 0x14000008 },
+ { 0x13001f86, 0x14000008 },
+ { 0x13001f87, 0x14000008 },
+ { 0x13001f88, 0x2000fff8 },
+ { 0x13001f89, 0x2000fff8 },
+ { 0x13001f8a, 0x2000fff8 },
+ { 0x13001f8b, 0x2000fff8 },
+ { 0x13001f8c, 0x2000fff8 },
+ { 0x13001f8d, 0x2000fff8 },
+ { 0x13001f8e, 0x2000fff8 },
+ { 0x13001f8f, 0x2000fff8 },
+ { 0x13001f90, 0x14000008 },
+ { 0x13001f91, 0x14000008 },
+ { 0x13001f92, 0x14000008 },
+ { 0x13001f93, 0x14000008 },
+ { 0x13001f94, 0x14000008 },
+ { 0x13001f95, 0x14000008 },
+ { 0x13001f96, 0x14000008 },
+ { 0x13001f97, 0x14000008 },
+ { 0x13001f98, 0x2000fff8 },
+ { 0x13001f99, 0x2000fff8 },
+ { 0x13001f9a, 0x2000fff8 },
+ { 0x13001f9b, 0x2000fff8 },
+ { 0x13001f9c, 0x2000fff8 },
+ { 0x13001f9d, 0x2000fff8 },
+ { 0x13001f9e, 0x2000fff8 },
+ { 0x13001f9f, 0x2000fff8 },
+ { 0x13001fa0, 0x14000008 },
+ { 0x13001fa1, 0x14000008 },
+ { 0x13001fa2, 0x14000008 },
+ { 0x13001fa3, 0x14000008 },
+ { 0x13001fa4, 0x14000008 },
+ { 0x13001fa5, 0x14000008 },
+ { 0x13001fa6, 0x14000008 },
+ { 0x13001fa7, 0x14000008 },
+ { 0x13001fa8, 0x2000fff8 },
+ { 0x13001fa9, 0x2000fff8 },
+ { 0x13001faa, 0x2000fff8 },
+ { 0x13001fab, 0x2000fff8 },
+ { 0x13001fac, 0x2000fff8 },
+ { 0x13001fad, 0x2000fff8 },
+ { 0x13001fae, 0x2000fff8 },
+ { 0x13001faf, 0x2000fff8 },
+ { 0x13001fb0, 0x14000008 },
+ { 0x13001fb1, 0x14000008 },
+ { 0x13001fb2, 0x14000000 },
+ { 0x13001fb3, 0x14000009 },
+ { 0x13001fb4, 0x14000000 },
+ { 0x13801fb6, 0x14000001 },
+ { 0x13001fb8, 0x2400fff8 },
+ { 0x13001fb9, 0x2400fff8 },
+ { 0x13001fba, 0x2400ffb6 },
+ { 0x13001fbb, 0x2400ffb6 },
+ { 0x13001fbc, 0x2000fff7 },
+ { 0x13001fbd, 0x60000000 },
+ { 0x13001fbe, 0x1400e3db },
+ { 0x13801fbf, 0x60000002 },
+ { 0x13001fc2, 0x14000000 },
+ { 0x13001fc3, 0x14000009 },
+ { 0x13001fc4, 0x14000000 },
+ { 0x13801fc6, 0x14000001 },
+ { 0x13001fc8, 0x2400ffaa },
+ { 0x13001fc9, 0x2400ffaa },
+ { 0x13001fca, 0x2400ffaa },
+ { 0x13001fcb, 0x2400ffaa },
+ { 0x13001fcc, 0x2000fff7 },
+ { 0x13801fcd, 0x60000002 },
+ { 0x13001fd0, 0x14000008 },
+ { 0x13001fd1, 0x14000008 },
+ { 0x13801fd2, 0x14000001 },
+ { 0x13801fd6, 0x14000001 },
+ { 0x13001fd8, 0x2400fff8 },
+ { 0x13001fd9, 0x2400fff8 },
+ { 0x13001fda, 0x2400ff9c },
+ { 0x13001fdb, 0x2400ff9c },
+ { 0x13801fdd, 0x60000002 },
+ { 0x13001fe0, 0x14000008 },
+ { 0x13001fe1, 0x14000008 },
+ { 0x13801fe2, 0x14000002 },
+ { 0x13001fe5, 0x14000007 },
+ { 0x13801fe6, 0x14000001 },
+ { 0x13001fe8, 0x2400fff8 },
+ { 0x13001fe9, 0x2400fff8 },
+ { 0x13001fea, 0x2400ff90 },
+ { 0x13001feb, 0x2400ff90 },
+ { 0x13001fec, 0x2400fff9 },
+ { 0x13801fed, 0x60000002 },
+ { 0x13001ff2, 0x14000000 },
+ { 0x13001ff3, 0x14000009 },
+ { 0x13001ff4, 0x14000000 },
+ { 0x13801ff6, 0x14000001 },
+ { 0x13001ff8, 0x2400ff80 },
+ { 0x13001ff9, 0x2400ff80 },
+ { 0x13001ffa, 0x2400ff82 },
+ { 0x13001ffb, 0x2400ff82 },
+ { 0x13001ffc, 0x2000fff7 },
+ { 0x13801ffd, 0x60000001 },
+ { 0x09802000, 0x7400000a },
+ { 0x0980200b, 0x04000004 },
+ { 0x09802010, 0x44000005 },
+ { 0x09802016, 0x54000001 },
+ { 0x09002018, 0x50000000 },
+ { 0x09002019, 0x4c000000 },
+ { 0x0900201a, 0x58000000 },
+ { 0x0980201b, 0x50000001 },
+ { 0x0900201d, 0x4c000000 },
+ { 0x0900201e, 0x58000000 },
+ { 0x0900201f, 0x50000000 },
+ { 0x09802020, 0x54000007 },
+ { 0x09002028, 0x6c000000 },
+ { 0x09002029, 0x70000000 },
+ { 0x0980202a, 0x04000004 },
+ { 0x0900202f, 0x74000000 },
+ { 0x09802030, 0x54000008 },
+ { 0x09002039, 0x50000000 },
+ { 0x0900203a, 0x4c000000 },
+ { 0x0980203b, 0x54000003 },
+ { 0x0980203f, 0x40000001 },
+ { 0x09802041, 0x54000002 },
+ { 0x09002044, 0x64000000 },
+ { 0x09002045, 0x58000000 },
+ { 0x09002046, 0x48000000 },
+ { 0x09802047, 0x5400000a },
+ { 0x09002052, 0x64000000 },
+ { 0x09002053, 0x54000000 },
+ { 0x09002054, 0x40000000 },
+ { 0x09802055, 0x54000009 },
+ { 0x0900205f, 0x74000000 },
+ { 0x09802060, 0x04000003 },
+ { 0x0980206a, 0x04000005 },
+ { 0x09002070, 0x3c000000 },
+ { 0x21002071, 0x14000000 },
+ { 0x09802074, 0x3c000005 },
+ { 0x0980207a, 0x64000002 },
+ { 0x0900207d, 0x58000000 },
+ { 0x0900207e, 0x48000000 },
+ { 0x2100207f, 0x14000000 },
+ { 0x09802080, 0x3c000009 },
+ { 0x0980208a, 0x64000002 },
+ { 0x0900208d, 0x58000000 },
+ { 0x0900208e, 0x48000000 },
+ { 0x21802090, 0x18000004 },
+ { 0x098020a0, 0x5c000015 },
+ { 0x1b8020d0, 0x3000000c },
+ { 0x1b8020dd, 0x2c000003 },
+ { 0x1b0020e1, 0x30000000 },
+ { 0x1b8020e2, 0x2c000002 },
+ { 0x1b8020e5, 0x30000006 },
+ { 0x09802100, 0x68000001 },
+ { 0x09002102, 0x24000000 },
+ { 0x09802103, 0x68000003 },
+ { 0x09002107, 0x24000000 },
+ { 0x09802108, 0x68000001 },
+ { 0x0900210a, 0x14000000 },
+ { 0x0980210b, 0x24000002 },
+ { 0x0980210e, 0x14000001 },
+ { 0x09802110, 0x24000002 },
+ { 0x09002113, 0x14000000 },
+ { 0x09002114, 0x68000000 },
+ { 0x09002115, 0x24000000 },
+ { 0x09802116, 0x68000002 },
+ { 0x09802119, 0x24000004 },
+ { 0x0980211e, 0x68000005 },
+ { 0x09002124, 0x24000000 },
+ { 0x09002125, 0x68000000 },
+ { 0x13002126, 0x2400e2a3 },
+ { 0x09002127, 0x68000000 },
+ { 0x09002128, 0x24000000 },
+ { 0x09002129, 0x68000000 },
+ { 0x2100212a, 0x2400df41 },
+ { 0x2100212b, 0x2400dfba },
+ { 0x0980212c, 0x24000001 },
+ { 0x0900212e, 0x68000000 },
+ { 0x0900212f, 0x14000000 },
+ { 0x09802130, 0x24000001 },
+ { 0x09002132, 0x68000000 },
+ { 0x09002133, 0x24000000 },
+ { 0x09002134, 0x14000000 },
+ { 0x09802135, 0x1c000003 },
+ { 0x09002139, 0x14000000 },
+ { 0x0980213a, 0x68000001 },
+ { 0x0980213c, 0x14000001 },
+ { 0x0980213e, 0x24000001 },
+ { 0x09802140, 0x64000004 },
+ { 0x09002145, 0x24000000 },
+ { 0x09802146, 0x14000003 },
+ { 0x0900214a, 0x68000000 },
+ { 0x0900214b, 0x64000000 },
+ { 0x0900214c, 0x68000000 },
+ { 0x09802153, 0x3c00000c },
+ { 0x09002160, 0x38000010 },
+ { 0x09002161, 0x38000010 },
+ { 0x09002162, 0x38000010 },
+ { 0x09002163, 0x38000010 },
+ { 0x09002164, 0x38000010 },
+ { 0x09002165, 0x38000010 },
+ { 0x09002166, 0x38000010 },
+ { 0x09002167, 0x38000010 },
+ { 0x09002168, 0x38000010 },
+ { 0x09002169, 0x38000010 },
+ { 0x0900216a, 0x38000010 },
+ { 0x0900216b, 0x38000010 },
+ { 0x0900216c, 0x38000010 },
+ { 0x0900216d, 0x38000010 },
+ { 0x0900216e, 0x38000010 },
+ { 0x0900216f, 0x38000010 },
+ { 0x09002170, 0x3800fff0 },
+ { 0x09002171, 0x3800fff0 },
+ { 0x09002172, 0x3800fff0 },
+ { 0x09002173, 0x3800fff0 },
+ { 0x09002174, 0x3800fff0 },
+ { 0x09002175, 0x3800fff0 },
+ { 0x09002176, 0x3800fff0 },
+ { 0x09002177, 0x3800fff0 },
+ { 0x09002178, 0x3800fff0 },
+ { 0x09002179, 0x3800fff0 },
+ { 0x0900217a, 0x3800fff0 },
+ { 0x0900217b, 0x3800fff0 },
+ { 0x0900217c, 0x3800fff0 },
+ { 0x0900217d, 0x3800fff0 },
+ { 0x0900217e, 0x3800fff0 },
+ { 0x0900217f, 0x3800fff0 },
+ { 0x09802180, 0x38000003 },
+ { 0x09802190, 0x64000004 },
+ { 0x09802195, 0x68000004 },
+ { 0x0980219a, 0x64000001 },
+ { 0x0980219c, 0x68000003 },
+ { 0x090021a0, 0x64000000 },
+ { 0x098021a1, 0x68000001 },
+ { 0x090021a3, 0x64000000 },
+ { 0x098021a4, 0x68000001 },
+ { 0x090021a6, 0x64000000 },
+ { 0x098021a7, 0x68000006 },
+ { 0x090021ae, 0x64000000 },
+ { 0x098021af, 0x6800001e },
+ { 0x098021ce, 0x64000001 },
+ { 0x098021d0, 0x68000001 },
+ { 0x090021d2, 0x64000000 },
+ { 0x090021d3, 0x68000000 },
+ { 0x090021d4, 0x64000000 },
+ { 0x098021d5, 0x6800001e },
+ { 0x098021f4, 0x6400010b },
+ { 0x09802300, 0x68000007 },
+ { 0x09802308, 0x64000003 },
+ { 0x0980230c, 0x68000013 },
+ { 0x09802320, 0x64000001 },
+ { 0x09802322, 0x68000006 },
+ { 0x09002329, 0x58000000 },
+ { 0x0900232a, 0x48000000 },
+ { 0x0980232b, 0x68000050 },
+ { 0x0900237c, 0x64000000 },
+ { 0x0980237d, 0x6800001d },
+ { 0x0980239b, 0x64000018 },
+ { 0x090023b4, 0x58000000 },
+ { 0x090023b5, 0x48000000 },
+ { 0x090023b6, 0x54000000 },
+ { 0x098023b7, 0x68000024 },
+ { 0x09802400, 0x68000026 },
+ { 0x09802440, 0x6800000a },
+ { 0x09802460, 0x3c00003b },
+ { 0x0980249c, 0x68000019 },
+ { 0x090024b6, 0x6800001a },
+ { 0x090024b7, 0x6800001a },
+ { 0x090024b8, 0x6800001a },
+ { 0x090024b9, 0x6800001a },
+ { 0x090024ba, 0x6800001a },
+ { 0x090024bb, 0x6800001a },
+ { 0x090024bc, 0x6800001a },
+ { 0x090024bd, 0x6800001a },
+ { 0x090024be, 0x6800001a },
+ { 0x090024bf, 0x6800001a },
+ { 0x090024c0, 0x6800001a },
+ { 0x090024c1, 0x6800001a },
+ { 0x090024c2, 0x6800001a },
+ { 0x090024c3, 0x6800001a },
+ { 0x090024c4, 0x6800001a },
+ { 0x090024c5, 0x6800001a },
+ { 0x090024c6, 0x6800001a },
+ { 0x090024c7, 0x6800001a },
+ { 0x090024c8, 0x6800001a },
+ { 0x090024c9, 0x6800001a },
+ { 0x090024ca, 0x6800001a },
+ { 0x090024cb, 0x6800001a },
+ { 0x090024cc, 0x6800001a },
+ { 0x090024cd, 0x6800001a },
+ { 0x090024ce, 0x6800001a },
+ { 0x090024cf, 0x6800001a },
+ { 0x090024d0, 0x6800ffe6 },
+ { 0x090024d1, 0x6800ffe6 },
+ { 0x090024d2, 0x6800ffe6 },
+ { 0x090024d3, 0x6800ffe6 },
+ { 0x090024d4, 0x6800ffe6 },
+ { 0x090024d5, 0x6800ffe6 },
+ { 0x090024d6, 0x6800ffe6 },
+ { 0x090024d7, 0x6800ffe6 },
+ { 0x090024d8, 0x6800ffe6 },
+ { 0x090024d9, 0x6800ffe6 },
+ { 0x090024da, 0x6800ffe6 },
+ { 0x090024db, 0x6800ffe6 },
+ { 0x090024dc, 0x6800ffe6 },
+ { 0x090024dd, 0x6800ffe6 },
+ { 0x090024de, 0x6800ffe6 },
+ { 0x090024df, 0x6800ffe6 },
+ { 0x090024e0, 0x6800ffe6 },
+ { 0x090024e1, 0x6800ffe6 },
+ { 0x090024e2, 0x6800ffe6 },
+ { 0x090024e3, 0x6800ffe6 },
+ { 0x090024e4, 0x6800ffe6 },
+ { 0x090024e5, 0x6800ffe6 },
+ { 0x090024e6, 0x6800ffe6 },
+ { 0x090024e7, 0x6800ffe6 },
+ { 0x090024e8, 0x6800ffe6 },
+ { 0x090024e9, 0x6800ffe6 },
+ { 0x098024ea, 0x3c000015 },
+ { 0x09802500, 0x680000b6 },
+ { 0x090025b7, 0x64000000 },
+ { 0x098025b8, 0x68000008 },
+ { 0x090025c1, 0x64000000 },
+ { 0x098025c2, 0x68000035 },
+ { 0x098025f8, 0x64000007 },
+ { 0x09802600, 0x6800006e },
+ { 0x0900266f, 0x64000000 },
+ { 0x09802670, 0x6800002c },
+ { 0x098026a0, 0x68000011 },
+ { 0x09802701, 0x68000003 },
+ { 0x09802706, 0x68000003 },
+ { 0x0980270c, 0x6800001b },
+ { 0x09802729, 0x68000022 },
+ { 0x0900274d, 0x68000000 },
+ { 0x0980274f, 0x68000003 },
+ { 0x09002756, 0x68000000 },
+ { 0x09802758, 0x68000006 },
+ { 0x09802761, 0x68000006 },
+ { 0x09002768, 0x58000000 },
+ { 0x09002769, 0x48000000 },
+ { 0x0900276a, 0x58000000 },
+ { 0x0900276b, 0x48000000 },
+ { 0x0900276c, 0x58000000 },
+ { 0x0900276d, 0x48000000 },
+ { 0x0900276e, 0x58000000 },
+ { 0x0900276f, 0x48000000 },
+ { 0x09002770, 0x58000000 },
+ { 0x09002771, 0x48000000 },
+ { 0x09002772, 0x58000000 },
+ { 0x09002773, 0x48000000 },
+ { 0x09002774, 0x58000000 },
+ { 0x09002775, 0x48000000 },
+ { 0x09802776, 0x3c00001d },
+ { 0x09002794, 0x68000000 },
+ { 0x09802798, 0x68000017 },
+ { 0x098027b1, 0x6800000d },
+ { 0x098027c0, 0x64000004 },
+ { 0x090027c5, 0x58000000 },
+ { 0x090027c6, 0x48000000 },
+ { 0x098027d0, 0x64000015 },
+ { 0x090027e6, 0x58000000 },
+ { 0x090027e7, 0x48000000 },
+ { 0x090027e8, 0x58000000 },
+ { 0x090027e9, 0x48000000 },
+ { 0x090027ea, 0x58000000 },
+ { 0x090027eb, 0x48000000 },
+ { 0x098027f0, 0x6400000f },
+ { 0x04802800, 0x680000ff },
+ { 0x09802900, 0x64000082 },
+ { 0x09002983, 0x58000000 },
+ { 0x09002984, 0x48000000 },
+ { 0x09002985, 0x58000000 },
+ { 0x09002986, 0x48000000 },
+ { 0x09002987, 0x58000000 },
+ { 0x09002988, 0x48000000 },
+ { 0x09002989, 0x58000000 },
+ { 0x0900298a, 0x48000000 },
+ { 0x0900298b, 0x58000000 },
+ { 0x0900298c, 0x48000000 },
+ { 0x0900298d, 0x58000000 },
+ { 0x0900298e, 0x48000000 },
+ { 0x0900298f, 0x58000000 },
+ { 0x09002990, 0x48000000 },
+ { 0x09002991, 0x58000000 },
+ { 0x09002992, 0x48000000 },
+ { 0x09002993, 0x58000000 },
+ { 0x09002994, 0x48000000 },
+ { 0x09002995, 0x58000000 },
+ { 0x09002996, 0x48000000 },
+ { 0x09002997, 0x58000000 },
+ { 0x09002998, 0x48000000 },
+ { 0x09802999, 0x6400003e },
+ { 0x090029d8, 0x58000000 },
+ { 0x090029d9, 0x48000000 },
+ { 0x090029da, 0x58000000 },
+ { 0x090029db, 0x48000000 },
+ { 0x098029dc, 0x6400001f },
+ { 0x090029fc, 0x58000000 },
+ { 0x090029fd, 0x48000000 },
+ { 0x098029fe, 0x64000101 },
+ { 0x09802b00, 0x68000013 },
+ { 0x11002c00, 0x24000030 },
+ { 0x11002c01, 0x24000030 },
+ { 0x11002c02, 0x24000030 },
+ { 0x11002c03, 0x24000030 },
+ { 0x11002c04, 0x24000030 },
+ { 0x11002c05, 0x24000030 },
+ { 0x11002c06, 0x24000030 },
+ { 0x11002c07, 0x24000030 },
+ { 0x11002c08, 0x24000030 },
+ { 0x11002c09, 0x24000030 },
+ { 0x11002c0a, 0x24000030 },
+ { 0x11002c0b, 0x24000030 },
+ { 0x11002c0c, 0x24000030 },
+ { 0x11002c0d, 0x24000030 },
+ { 0x11002c0e, 0x24000030 },
+ { 0x11002c0f, 0x24000030 },
+ { 0x11002c10, 0x24000030 },
+ { 0x11002c11, 0x24000030 },
+ { 0x11002c12, 0x24000030 },
+ { 0x11002c13, 0x24000030 },
+ { 0x11002c14, 0x24000030 },
+ { 0x11002c15, 0x24000030 },
+ { 0x11002c16, 0x24000030 },
+ { 0x11002c17, 0x24000030 },
+ { 0x11002c18, 0x24000030 },
+ { 0x11002c19, 0x24000030 },
+ { 0x11002c1a, 0x24000030 },
+ { 0x11002c1b, 0x24000030 },
+ { 0x11002c1c, 0x24000030 },
+ { 0x11002c1d, 0x24000030 },
+ { 0x11002c1e, 0x24000030 },
+ { 0x11002c1f, 0x24000030 },
+ { 0x11002c20, 0x24000030 },
+ { 0x11002c21, 0x24000030 },
+ { 0x11002c22, 0x24000030 },
+ { 0x11002c23, 0x24000030 },
+ { 0x11002c24, 0x24000030 },
+ { 0x11002c25, 0x24000030 },
+ { 0x11002c26, 0x24000030 },
+ { 0x11002c27, 0x24000030 },
+ { 0x11002c28, 0x24000030 },
+ { 0x11002c29, 0x24000030 },
+ { 0x11002c2a, 0x24000030 },
+ { 0x11002c2b, 0x24000030 },
+ { 0x11002c2c, 0x24000030 },
+ { 0x11002c2d, 0x24000030 },
+ { 0x11002c2e, 0x24000030 },
+ { 0x11002c30, 0x1400ffd0 },
+ { 0x11002c31, 0x1400ffd0 },
+ { 0x11002c32, 0x1400ffd0 },
+ { 0x11002c33, 0x1400ffd0 },
+ { 0x11002c34, 0x1400ffd0 },
+ { 0x11002c35, 0x1400ffd0 },
+ { 0x11002c36, 0x1400ffd0 },
+ { 0x11002c37, 0x1400ffd0 },
+ { 0x11002c38, 0x1400ffd0 },
+ { 0x11002c39, 0x1400ffd0 },
+ { 0x11002c3a, 0x1400ffd0 },
+ { 0x11002c3b, 0x1400ffd0 },
+ { 0x11002c3c, 0x1400ffd0 },
+ { 0x11002c3d, 0x1400ffd0 },
+ { 0x11002c3e, 0x1400ffd0 },
+ { 0x11002c3f, 0x1400ffd0 },
+ { 0x11002c40, 0x1400ffd0 },
+ { 0x11002c41, 0x1400ffd0 },
+ { 0x11002c42, 0x1400ffd0 },
+ { 0x11002c43, 0x1400ffd0 },
+ { 0x11002c44, 0x1400ffd0 },
+ { 0x11002c45, 0x1400ffd0 },
+ { 0x11002c46, 0x1400ffd0 },
+ { 0x11002c47, 0x1400ffd0 },
+ { 0x11002c48, 0x1400ffd0 },
+ { 0x11002c49, 0x1400ffd0 },
+ { 0x11002c4a, 0x1400ffd0 },
+ { 0x11002c4b, 0x1400ffd0 },
+ { 0x11002c4c, 0x1400ffd0 },
+ { 0x11002c4d, 0x1400ffd0 },
+ { 0x11002c4e, 0x1400ffd0 },
+ { 0x11002c4f, 0x1400ffd0 },
+ { 0x11002c50, 0x1400ffd0 },
+ { 0x11002c51, 0x1400ffd0 },
+ { 0x11002c52, 0x1400ffd0 },
+ { 0x11002c53, 0x1400ffd0 },
+ { 0x11002c54, 0x1400ffd0 },
+ { 0x11002c55, 0x1400ffd0 },
+ { 0x11002c56, 0x1400ffd0 },
+ { 0x11002c57, 0x1400ffd0 },
+ { 0x11002c58, 0x1400ffd0 },
+ { 0x11002c59, 0x1400ffd0 },
+ { 0x11002c5a, 0x1400ffd0 },
+ { 0x11002c5b, 0x1400ffd0 },
+ { 0x11002c5c, 0x1400ffd0 },
+ { 0x11002c5d, 0x1400ffd0 },
+ { 0x11002c5e, 0x1400ffd0 },
+ { 0x0a002c80, 0x24000001 },
+ { 0x0a002c81, 0x1400ffff },
+ { 0x0a002c82, 0x24000001 },
+ { 0x0a002c83, 0x1400ffff },
+ { 0x0a002c84, 0x24000001 },
+ { 0x0a002c85, 0x1400ffff },
+ { 0x0a002c86, 0x24000001 },
+ { 0x0a002c87, 0x1400ffff },
+ { 0x0a002c88, 0x24000001 },
+ { 0x0a002c89, 0x1400ffff },
+ { 0x0a002c8a, 0x24000001 },
+ { 0x0a002c8b, 0x1400ffff },
+ { 0x0a002c8c, 0x24000001 },
+ { 0x0a002c8d, 0x1400ffff },
+ { 0x0a002c8e, 0x24000001 },
+ { 0x0a002c8f, 0x1400ffff },
+ { 0x0a002c90, 0x24000001 },
+ { 0x0a002c91, 0x1400ffff },
+ { 0x0a002c92, 0x24000001 },
+ { 0x0a002c93, 0x1400ffff },
+ { 0x0a002c94, 0x24000001 },
+ { 0x0a002c95, 0x1400ffff },
+ { 0x0a002c96, 0x24000001 },
+ { 0x0a002c97, 0x1400ffff },
+ { 0x0a002c98, 0x24000001 },
+ { 0x0a002c99, 0x1400ffff },
+ { 0x0a002c9a, 0x24000001 },
+ { 0x0a002c9b, 0x1400ffff },
+ { 0x0a002c9c, 0x24000001 },
+ { 0x0a002c9d, 0x1400ffff },
+ { 0x0a002c9e, 0x24000001 },
+ { 0x0a002c9f, 0x1400ffff },
+ { 0x0a002ca0, 0x24000001 },
+ { 0x0a002ca1, 0x1400ffff },
+ { 0x0a002ca2, 0x24000001 },
+ { 0x0a002ca3, 0x1400ffff },
+ { 0x0a002ca4, 0x24000001 },
+ { 0x0a002ca5, 0x1400ffff },
+ { 0x0a002ca6, 0x24000001 },
+ { 0x0a002ca7, 0x1400ffff },
+ { 0x0a002ca8, 0x24000001 },
+ { 0x0a002ca9, 0x1400ffff },
+ { 0x0a002caa, 0x24000001 },
+ { 0x0a002cab, 0x1400ffff },
+ { 0x0a002cac, 0x24000001 },
+ { 0x0a002cad, 0x1400ffff },
+ { 0x0a002cae, 0x24000001 },
+ { 0x0a002caf, 0x1400ffff },
+ { 0x0a002cb0, 0x24000001 },
+ { 0x0a002cb1, 0x1400ffff },
+ { 0x0a002cb2, 0x24000001 },
+ { 0x0a002cb3, 0x1400ffff },
+ { 0x0a002cb4, 0x24000001 },
+ { 0x0a002cb5, 0x1400ffff },
+ { 0x0a002cb6, 0x24000001 },
+ { 0x0a002cb7, 0x1400ffff },
+ { 0x0a002cb8, 0x24000001 },
+ { 0x0a002cb9, 0x1400ffff },
+ { 0x0a002cba, 0x24000001 },
+ { 0x0a002cbb, 0x1400ffff },
+ { 0x0a002cbc, 0x24000001 },
+ { 0x0a002cbd, 0x1400ffff },
+ { 0x0a002cbe, 0x24000001 },
+ { 0x0a002cbf, 0x1400ffff },
+ { 0x0a002cc0, 0x24000001 },
+ { 0x0a002cc1, 0x1400ffff },
+ { 0x0a002cc2, 0x24000001 },
+ { 0x0a002cc3, 0x1400ffff },
+ { 0x0a002cc4, 0x24000001 },
+ { 0x0a002cc5, 0x1400ffff },
+ { 0x0a002cc6, 0x24000001 },
+ { 0x0a002cc7, 0x1400ffff },
+ { 0x0a002cc8, 0x24000001 },
+ { 0x0a002cc9, 0x1400ffff },
+ { 0x0a002cca, 0x24000001 },
+ { 0x0a002ccb, 0x1400ffff },
+ { 0x0a002ccc, 0x24000001 },
+ { 0x0a002ccd, 0x1400ffff },
+ { 0x0a002cce, 0x24000001 },
+ { 0x0a002ccf, 0x1400ffff },
+ { 0x0a002cd0, 0x24000001 },
+ { 0x0a002cd1, 0x1400ffff },
+ { 0x0a002cd2, 0x24000001 },
+ { 0x0a002cd3, 0x1400ffff },
+ { 0x0a002cd4, 0x24000001 },
+ { 0x0a002cd5, 0x1400ffff },
+ { 0x0a002cd6, 0x24000001 },
+ { 0x0a002cd7, 0x1400ffff },
+ { 0x0a002cd8, 0x24000001 },
+ { 0x0a002cd9, 0x1400ffff },
+ { 0x0a002cda, 0x24000001 },
+ { 0x0a002cdb, 0x1400ffff },
+ { 0x0a002cdc, 0x24000001 },
+ { 0x0a002cdd, 0x1400ffff },
+ { 0x0a002cde, 0x24000001 },
+ { 0x0a002cdf, 0x1400ffff },
+ { 0x0a002ce0, 0x24000001 },
+ { 0x0a002ce1, 0x1400ffff },
+ { 0x0a002ce2, 0x24000001 },
+ { 0x0a002ce3, 0x1400ffff },
+ { 0x0a002ce4, 0x14000000 },
+ { 0x0a802ce5, 0x68000005 },
+ { 0x0a802cf9, 0x54000003 },
+ { 0x0a002cfd, 0x3c000000 },
+ { 0x0a802cfe, 0x54000001 },
+ { 0x10002d00, 0x1400e3a0 },
+ { 0x10002d01, 0x1400e3a0 },
+ { 0x10002d02, 0x1400e3a0 },
+ { 0x10002d03, 0x1400e3a0 },
+ { 0x10002d04, 0x1400e3a0 },
+ { 0x10002d05, 0x1400e3a0 },
+ { 0x10002d06, 0x1400e3a0 },
+ { 0x10002d07, 0x1400e3a0 },
+ { 0x10002d08, 0x1400e3a0 },
+ { 0x10002d09, 0x1400e3a0 },
+ { 0x10002d0a, 0x1400e3a0 },
+ { 0x10002d0b, 0x1400e3a0 },
+ { 0x10002d0c, 0x1400e3a0 },
+ { 0x10002d0d, 0x1400e3a0 },
+ { 0x10002d0e, 0x1400e3a0 },
+ { 0x10002d0f, 0x1400e3a0 },
+ { 0x10002d10, 0x1400e3a0 },
+ { 0x10002d11, 0x1400e3a0 },
+ { 0x10002d12, 0x1400e3a0 },
+ { 0x10002d13, 0x1400e3a0 },
+ { 0x10002d14, 0x1400e3a0 },
+ { 0x10002d15, 0x1400e3a0 },
+ { 0x10002d16, 0x1400e3a0 },
+ { 0x10002d17, 0x1400e3a0 },
+ { 0x10002d18, 0x1400e3a0 },
+ { 0x10002d19, 0x1400e3a0 },
+ { 0x10002d1a, 0x1400e3a0 },
+ { 0x10002d1b, 0x1400e3a0 },
+ { 0x10002d1c, 0x1400e3a0 },
+ { 0x10002d1d, 0x1400e3a0 },
+ { 0x10002d1e, 0x1400e3a0 },
+ { 0x10002d1f, 0x1400e3a0 },
+ { 0x10002d20, 0x1400e3a0 },
+ { 0x10002d21, 0x1400e3a0 },
+ { 0x10002d22, 0x1400e3a0 },
+ { 0x10002d23, 0x1400e3a0 },
+ { 0x10002d24, 0x1400e3a0 },
+ { 0x10002d25, 0x1400e3a0 },
+ { 0x3a802d30, 0x1c000035 },
+ { 0x3a002d6f, 0x18000000 },
+ { 0x0f802d80, 0x1c000016 },
+ { 0x0f802da0, 0x1c000006 },
+ { 0x0f802da8, 0x1c000006 },
+ { 0x0f802db0, 0x1c000006 },
+ { 0x0f802db8, 0x1c000006 },
+ { 0x0f802dc0, 0x1c000006 },
+ { 0x0f802dc8, 0x1c000006 },
+ { 0x0f802dd0, 0x1c000006 },
+ { 0x0f802dd8, 0x1c000006 },
+ { 0x09802e00, 0x54000001 },
+ { 0x09002e02, 0x50000000 },
+ { 0x09002e03, 0x4c000000 },
+ { 0x09002e04, 0x50000000 },
+ { 0x09002e05, 0x4c000000 },
+ { 0x09802e06, 0x54000002 },
+ { 0x09002e09, 0x50000000 },
+ { 0x09002e0a, 0x4c000000 },
+ { 0x09002e0b, 0x54000000 },
+ { 0x09002e0c, 0x50000000 },
+ { 0x09002e0d, 0x4c000000 },
+ { 0x09802e0e, 0x54000008 },
+ { 0x09002e17, 0x44000000 },
+ { 0x09002e1c, 0x50000000 },
+ { 0x09002e1d, 0x4c000000 },
+ { 0x16802e80, 0x68000019 },
+ { 0x16802e9b, 0x68000058 },
+ { 0x16802f00, 0x680000d5 },
+ { 0x09802ff0, 0x6800000b },
+ { 0x09003000, 0x74000000 },
+ { 0x09803001, 0x54000002 },
+ { 0x09003004, 0x68000000 },
+ { 0x16003005, 0x18000000 },
+ { 0x09003006, 0x1c000000 },
+ { 0x16003007, 0x38000000 },
+ { 0x09003008, 0x58000000 },
+ { 0x09003009, 0x48000000 },
+ { 0x0900300a, 0x58000000 },
+ { 0x0900300b, 0x48000000 },
+ { 0x0900300c, 0x58000000 },
+ { 0x0900300d, 0x48000000 },
+ { 0x0900300e, 0x58000000 },
+ { 0x0900300f, 0x48000000 },
+ { 0x09003010, 0x58000000 },
+ { 0x09003011, 0x48000000 },
+ { 0x09803012, 0x68000001 },
+ { 0x09003014, 0x58000000 },
+ { 0x09003015, 0x48000000 },
+ { 0x09003016, 0x58000000 },
+ { 0x09003017, 0x48000000 },
+ { 0x09003018, 0x58000000 },
+ { 0x09003019, 0x48000000 },
+ { 0x0900301a, 0x58000000 },
+ { 0x0900301b, 0x48000000 },
+ { 0x0900301c, 0x44000000 },
+ { 0x0900301d, 0x58000000 },
+ { 0x0980301e, 0x48000001 },
+ { 0x09003020, 0x68000000 },
+ { 0x16803021, 0x38000008 },
+ { 0x1b80302a, 0x30000005 },
+ { 0x09003030, 0x44000000 },
+ { 0x09803031, 0x18000004 },
+ { 0x09803036, 0x68000001 },
+ { 0x16803038, 0x38000002 },
+ { 0x1600303b, 0x18000000 },
+ { 0x0900303c, 0x1c000000 },
+ { 0x0900303d, 0x54000000 },
+ { 0x0980303e, 0x68000001 },
+ { 0x1a803041, 0x1c000055 },
+ { 0x1b803099, 0x30000001 },
+ { 0x0980309b, 0x60000001 },
+ { 0x1a80309d, 0x18000001 },
+ { 0x1a00309f, 0x1c000000 },
+ { 0x090030a0, 0x44000000 },
+ { 0x1d8030a1, 0x1c000059 },
+ { 0x090030fb, 0x54000000 },
+ { 0x098030fc, 0x18000002 },
+ { 0x1d0030ff, 0x1c000000 },
+ { 0x03803105, 0x1c000027 },
+ { 0x17803131, 0x1c00005d },
+ { 0x09803190, 0x68000001 },
+ { 0x09803192, 0x3c000003 },
+ { 0x09803196, 0x68000009 },
+ { 0x038031a0, 0x1c000017 },
+ { 0x098031c0, 0x6800000f },
+ { 0x1d8031f0, 0x1c00000f },
+ { 0x17803200, 0x6800001e },
+ { 0x09803220, 0x3c000009 },
+ { 0x0980322a, 0x68000019 },
+ { 0x09003250, 0x68000000 },
+ { 0x09803251, 0x3c00000e },
+ { 0x17803260, 0x6800001f },
+ { 0x09803280, 0x3c000009 },
+ { 0x0980328a, 0x68000026 },
+ { 0x098032b1, 0x3c00000e },
+ { 0x098032c0, 0x6800003e },
+ { 0x09803300, 0x680000ff },
+ { 0x16803400, 0x1c0019b5 },
+ { 0x09804dc0, 0x6800003f },
+ { 0x16804e00, 0x1c0051bb },
+ { 0x3c80a000, 0x1c000014 },
+ { 0x3c00a015, 0x18000000 },
+ { 0x3c80a016, 0x1c000476 },
+ { 0x3c80a490, 0x68000036 },
+ { 0x0980a700, 0x60000016 },
+ { 0x3080a800, 0x1c000001 },
+ { 0x3000a802, 0x28000000 },
+ { 0x3080a803, 0x1c000002 },
+ { 0x3000a806, 0x30000000 },
+ { 0x3080a807, 0x1c000003 },
+ { 0x3000a80b, 0x30000000 },
+ { 0x3080a80c, 0x1c000016 },
+ { 0x3080a823, 0x28000001 },
+ { 0x3080a825, 0x30000001 },
+ { 0x3000a827, 0x28000000 },
+ { 0x3080a828, 0x68000003 },
+ { 0x1780ac00, 0x1c002ba3 },
+ { 0x0980d800, 0x1000037f },
+ { 0x0980db80, 0x1000007f },
+ { 0x0980dc00, 0x100003ff },
+ { 0x0980e000, 0x0c0018ff },
+ { 0x1680f900, 0x1c00012d },
+ { 0x1680fa30, 0x1c00003a },
+ { 0x1680fa70, 0x1c000069 },
+ { 0x2180fb00, 0x14000006 },
+ { 0x0180fb13, 0x14000004 },
+ { 0x1900fb1d, 0x1c000000 },
+ { 0x1900fb1e, 0x30000000 },
+ { 0x1980fb1f, 0x1c000009 },
+ { 0x1900fb29, 0x64000000 },
+ { 0x1980fb2a, 0x1c00000c },
+ { 0x1980fb38, 0x1c000004 },
+ { 0x1900fb3e, 0x1c000000 },
+ { 0x1980fb40, 0x1c000001 },
+ { 0x1980fb43, 0x1c000001 },
+ { 0x1980fb46, 0x1c00006b },
+ { 0x0080fbd3, 0x1c00016a },
+ { 0x0900fd3e, 0x58000000 },
+ { 0x0900fd3f, 0x48000000 },
+ { 0x0080fd50, 0x1c00003f },
+ { 0x0080fd92, 0x1c000035 },
+ { 0x0080fdf0, 0x1c00000b },
+ { 0x0000fdfc, 0x5c000000 },
+ { 0x0900fdfd, 0x68000000 },
+ { 0x1b80fe00, 0x3000000f },
+ { 0x0980fe10, 0x54000006 },
+ { 0x0900fe17, 0x58000000 },
+ { 0x0900fe18, 0x48000000 },
+ { 0x0900fe19, 0x54000000 },
+ { 0x1b80fe20, 0x30000003 },
+ { 0x0900fe30, 0x54000000 },
+ { 0x0980fe31, 0x44000001 },
+ { 0x0980fe33, 0x40000001 },
+ { 0x0900fe35, 0x58000000 },
+ { 0x0900fe36, 0x48000000 },
+ { 0x0900fe37, 0x58000000 },
+ { 0x0900fe38, 0x48000000 },
+ { 0x0900fe39, 0x58000000 },
+ { 0x0900fe3a, 0x48000000 },
+ { 0x0900fe3b, 0x58000000 },
+ { 0x0900fe3c, 0x48000000 },
+ { 0x0900fe3d, 0x58000000 },
+ { 0x0900fe3e, 0x48000000 },
+ { 0x0900fe3f, 0x58000000 },
+ { 0x0900fe40, 0x48000000 },
+ { 0x0900fe41, 0x58000000 },
+ { 0x0900fe42, 0x48000000 },
+ { 0x0900fe43, 0x58000000 },
+ { 0x0900fe44, 0x48000000 },
+ { 0x0980fe45, 0x54000001 },
+ { 0x0900fe47, 0x58000000 },
+ { 0x0900fe48, 0x48000000 },
+ { 0x0980fe49, 0x54000003 },
+ { 0x0980fe4d, 0x40000002 },
+ { 0x0980fe50, 0x54000002 },
+ { 0x0980fe54, 0x54000003 },
+ { 0x0900fe58, 0x44000000 },
+ { 0x0900fe59, 0x58000000 },
+ { 0x0900fe5a, 0x48000000 },
+ { 0x0900fe5b, 0x58000000 },
+ { 0x0900fe5c, 0x48000000 },
+ { 0x0900fe5d, 0x58000000 },
+ { 0x0900fe5e, 0x48000000 },
+ { 0x0980fe5f, 0x54000002 },
+ { 0x0900fe62, 0x64000000 },
+ { 0x0900fe63, 0x44000000 },
+ { 0x0980fe64, 0x64000002 },
+ { 0x0900fe68, 0x54000000 },
+ { 0x0900fe69, 0x5c000000 },
+ { 0x0980fe6a, 0x54000001 },
+ { 0x0080fe70, 0x1c000004 },
+ { 0x0080fe76, 0x1c000086 },
+ { 0x0900feff, 0x04000000 },
+ { 0x0980ff01, 0x54000002 },
+ { 0x0900ff04, 0x5c000000 },
+ { 0x0980ff05, 0x54000002 },
+ { 0x0900ff08, 0x58000000 },
+ { 0x0900ff09, 0x48000000 },
+ { 0x0900ff0a, 0x54000000 },
+ { 0x0900ff0b, 0x64000000 },
+ { 0x0900ff0c, 0x54000000 },
+ { 0x0900ff0d, 0x44000000 },
+ { 0x0980ff0e, 0x54000001 },
+ { 0x0980ff10, 0x34000009 },
+ { 0x0980ff1a, 0x54000001 },
+ { 0x0980ff1c, 0x64000002 },
+ { 0x0980ff1f, 0x54000001 },
+ { 0x2100ff21, 0x24000020 },
+ { 0x2100ff22, 0x24000020 },
+ { 0x2100ff23, 0x24000020 },
+ { 0x2100ff24, 0x24000020 },
+ { 0x2100ff25, 0x24000020 },
+ { 0x2100ff26, 0x24000020 },
+ { 0x2100ff27, 0x24000020 },
+ { 0x2100ff28, 0x24000020 },
+ { 0x2100ff29, 0x24000020 },
+ { 0x2100ff2a, 0x24000020 },
+ { 0x2100ff2b, 0x24000020 },
+ { 0x2100ff2c, 0x24000020 },
+ { 0x2100ff2d, 0x24000020 },
+ { 0x2100ff2e, 0x24000020 },
+ { 0x2100ff2f, 0x24000020 },
+ { 0x2100ff30, 0x24000020 },
+ { 0x2100ff31, 0x24000020 },
+ { 0x2100ff32, 0x24000020 },
+ { 0x2100ff33, 0x24000020 },
+ { 0x2100ff34, 0x24000020 },
+ { 0x2100ff35, 0x24000020 },
+ { 0x2100ff36, 0x24000020 },
+ { 0x2100ff37, 0x24000020 },
+ { 0x2100ff38, 0x24000020 },
+ { 0x2100ff39, 0x24000020 },
+ { 0x2100ff3a, 0x24000020 },
+ { 0x0900ff3b, 0x58000000 },
+ { 0x0900ff3c, 0x54000000 },
+ { 0x0900ff3d, 0x48000000 },
+ { 0x0900ff3e, 0x60000000 },
+ { 0x0900ff3f, 0x40000000 },
+ { 0x0900ff40, 0x60000000 },
+ { 0x2100ff41, 0x1400ffe0 },
+ { 0x2100ff42, 0x1400ffe0 },
+ { 0x2100ff43, 0x1400ffe0 },
+ { 0x2100ff44, 0x1400ffe0 },
+ { 0x2100ff45, 0x1400ffe0 },
+ { 0x2100ff46, 0x1400ffe0 },
+ { 0x2100ff47, 0x1400ffe0 },
+ { 0x2100ff48, 0x1400ffe0 },
+ { 0x2100ff49, 0x1400ffe0 },
+ { 0x2100ff4a, 0x1400ffe0 },
+ { 0x2100ff4b, 0x1400ffe0 },
+ { 0x2100ff4c, 0x1400ffe0 },
+ { 0x2100ff4d, 0x1400ffe0 },
+ { 0x2100ff4e, 0x1400ffe0 },
+ { 0x2100ff4f, 0x1400ffe0 },
+ { 0x2100ff50, 0x1400ffe0 },
+ { 0x2100ff51, 0x1400ffe0 },
+ { 0x2100ff52, 0x1400ffe0 },
+ { 0x2100ff53, 0x1400ffe0 },
+ { 0x2100ff54, 0x1400ffe0 },
+ { 0x2100ff55, 0x1400ffe0 },
+ { 0x2100ff56, 0x1400ffe0 },
+ { 0x2100ff57, 0x1400ffe0 },
+ { 0x2100ff58, 0x1400ffe0 },
+ { 0x2100ff59, 0x1400ffe0 },
+ { 0x2100ff5a, 0x1400ffe0 },
+ { 0x0900ff5b, 0x58000000 },
+ { 0x0900ff5c, 0x64000000 },
+ { 0x0900ff5d, 0x48000000 },
+ { 0x0900ff5e, 0x64000000 },
+ { 0x0900ff5f, 0x58000000 },
+ { 0x0900ff60, 0x48000000 },
+ { 0x0900ff61, 0x54000000 },
+ { 0x0900ff62, 0x58000000 },
+ { 0x0900ff63, 0x48000000 },
+ { 0x0980ff64, 0x54000001 },
+ { 0x1d80ff66, 0x1c000009 },
+ { 0x0900ff70, 0x18000000 },
+ { 0x1d80ff71, 0x1c00002c },
+ { 0x0980ff9e, 0x18000001 },
+ { 0x1780ffa0, 0x1c00001e },
+ { 0x1780ffc2, 0x1c000005 },
+ { 0x1780ffca, 0x1c000005 },
+ { 0x1780ffd2, 0x1c000005 },
+ { 0x1780ffda, 0x1c000002 },
+ { 0x0980ffe0, 0x5c000001 },
+ { 0x0900ffe2, 0x64000000 },
+ { 0x0900ffe3, 0x60000000 },
+ { 0x0900ffe4, 0x68000000 },
+ { 0x0980ffe5, 0x5c000001 },
+ { 0x0900ffe8, 0x68000000 },
+ { 0x0980ffe9, 0x64000003 },
+ { 0x0980ffed, 0x68000001 },
+ { 0x0980fff9, 0x04000002 },
+ { 0x0980fffc, 0x68000001 },
+ { 0x23810000, 0x1c00000b },
+ { 0x2381000d, 0x1c000019 },
+ { 0x23810028, 0x1c000012 },
+ { 0x2381003c, 0x1c000001 },
+ { 0x2381003f, 0x1c00000e },
+ { 0x23810050, 0x1c00000d },
+ { 0x23810080, 0x1c00007a },
+ { 0x09810100, 0x54000001 },
+ { 0x09010102, 0x68000000 },
+ { 0x09810107, 0x3c00002c },
+ { 0x09810137, 0x68000008 },
+ { 0x13810140, 0x38000034 },
+ { 0x13810175, 0x3c000003 },
+ { 0x13810179, 0x68000010 },
+ { 0x1301018a, 0x3c000000 },
+ { 0x29810300, 0x1c00001e },
+ { 0x29810320, 0x3c000003 },
+ { 0x12810330, 0x1c000019 },
+ { 0x1201034a, 0x38000000 },
+ { 0x3b810380, 0x1c00001d },
+ { 0x3b01039f, 0x54000000 },
+ { 0x2a8103a0, 0x1c000023 },
+ { 0x2a8103c8, 0x1c000007 },
+ { 0x2a0103d0, 0x68000000 },
+ { 0x2a8103d1, 0x38000004 },
+ { 0x0d010400, 0x24000028 },
+ { 0x0d010401, 0x24000028 },
+ { 0x0d010402, 0x24000028 },
+ { 0x0d010403, 0x24000028 },
+ { 0x0d010404, 0x24000028 },
+ { 0x0d010405, 0x24000028 },
+ { 0x0d010406, 0x24000028 },
+ { 0x0d010407, 0x24000028 },
+ { 0x0d010408, 0x24000028 },
+ { 0x0d010409, 0x24000028 },
+ { 0x0d01040a, 0x24000028 },
+ { 0x0d01040b, 0x24000028 },
+ { 0x0d01040c, 0x24000028 },
+ { 0x0d01040d, 0x24000028 },
+ { 0x0d01040e, 0x24000028 },
+ { 0x0d01040f, 0x24000028 },
+ { 0x0d010410, 0x24000028 },
+ { 0x0d010411, 0x24000028 },
+ { 0x0d010412, 0x24000028 },
+ { 0x0d010413, 0x24000028 },
+ { 0x0d010414, 0x24000028 },
+ { 0x0d010415, 0x24000028 },
+ { 0x0d010416, 0x24000028 },
+ { 0x0d010417, 0x24000028 },
+ { 0x0d010418, 0x24000028 },
+ { 0x0d010419, 0x24000028 },
+ { 0x0d01041a, 0x24000028 },
+ { 0x0d01041b, 0x24000028 },
+ { 0x0d01041c, 0x24000028 },
+ { 0x0d01041d, 0x24000028 },
+ { 0x0d01041e, 0x24000028 },
+ { 0x0d01041f, 0x24000028 },
+ { 0x0d010420, 0x24000028 },
+ { 0x0d010421, 0x24000028 },
+ { 0x0d010422, 0x24000028 },
+ { 0x0d010423, 0x24000028 },
+ { 0x0d010424, 0x24000028 },
+ { 0x0d010425, 0x24000028 },
+ { 0x0d010426, 0x24000028 },
+ { 0x0d010427, 0x24000028 },
+ { 0x0d010428, 0x1400ffd8 },
+ { 0x0d010429, 0x1400ffd8 },
+ { 0x0d01042a, 0x1400ffd8 },
+ { 0x0d01042b, 0x1400ffd8 },
+ { 0x0d01042c, 0x1400ffd8 },
+ { 0x0d01042d, 0x1400ffd8 },
+ { 0x0d01042e, 0x1400ffd8 },
+ { 0x0d01042f, 0x1400ffd8 },
+ { 0x0d010430, 0x1400ffd8 },
+ { 0x0d010431, 0x1400ffd8 },
+ { 0x0d010432, 0x1400ffd8 },
+ { 0x0d010433, 0x1400ffd8 },
+ { 0x0d010434, 0x1400ffd8 },
+ { 0x0d010435, 0x1400ffd8 },
+ { 0x0d010436, 0x1400ffd8 },
+ { 0x0d010437, 0x1400ffd8 },
+ { 0x0d010438, 0x1400ffd8 },
+ { 0x0d010439, 0x1400ffd8 },
+ { 0x0d01043a, 0x1400ffd8 },
+ { 0x0d01043b, 0x1400ffd8 },
+ { 0x0d01043c, 0x1400ffd8 },
+ { 0x0d01043d, 0x1400ffd8 },
+ { 0x0d01043e, 0x1400ffd8 },
+ { 0x0d01043f, 0x1400ffd8 },
+ { 0x0d010440, 0x1400ffd8 },
+ { 0x0d010441, 0x1400ffd8 },
+ { 0x0d010442, 0x1400ffd8 },
+ { 0x0d010443, 0x1400ffd8 },
+ { 0x0d010444, 0x1400ffd8 },
+ { 0x0d010445, 0x1400ffd8 },
+ { 0x0d010446, 0x1400ffd8 },
+ { 0x0d010447, 0x1400ffd8 },
+ { 0x0d010448, 0x1400ffd8 },
+ { 0x0d010449, 0x1400ffd8 },
+ { 0x0d01044a, 0x1400ffd8 },
+ { 0x0d01044b, 0x1400ffd8 },
+ { 0x0d01044c, 0x1400ffd8 },
+ { 0x0d01044d, 0x1400ffd8 },
+ { 0x0d01044e, 0x1400ffd8 },
+ { 0x0d01044f, 0x1400ffd8 },
+ { 0x2e810450, 0x1c00004d },
+ { 0x2c8104a0, 0x34000009 },
+ { 0x0b810800, 0x1c000005 },
+ { 0x0b010808, 0x1c000000 },
+ { 0x0b81080a, 0x1c00002b },
+ { 0x0b810837, 0x1c000001 },
+ { 0x0b01083c, 0x1c000000 },
+ { 0x0b01083f, 0x1c000000 },
+ { 0x1e010a00, 0x1c000000 },
+ { 0x1e810a01, 0x30000002 },
+ { 0x1e810a05, 0x30000001 },
+ { 0x1e810a0c, 0x30000003 },
+ { 0x1e810a10, 0x1c000003 },
+ { 0x1e810a15, 0x1c000002 },
+ { 0x1e810a19, 0x1c00001a },
+ { 0x1e810a38, 0x30000002 },
+ { 0x1e010a3f, 0x30000000 },
+ { 0x1e810a40, 0x3c000007 },
+ { 0x1e810a50, 0x54000008 },
+ { 0x0981d000, 0x680000f5 },
+ { 0x0981d100, 0x68000026 },
+ { 0x0981d12a, 0x6800003a },
+ { 0x0981d165, 0x28000001 },
+ { 0x1b81d167, 0x30000002 },
+ { 0x0981d16a, 0x68000002 },
+ { 0x0981d16d, 0x28000005 },
+ { 0x0981d173, 0x04000007 },
+ { 0x1b81d17b, 0x30000007 },
+ { 0x0981d183, 0x68000001 },
+ { 0x1b81d185, 0x30000006 },
+ { 0x0981d18c, 0x6800001d },
+ { 0x1b81d1aa, 0x30000003 },
+ { 0x0981d1ae, 0x6800002f },
+ { 0x1381d200, 0x68000041 },
+ { 0x1381d242, 0x30000002 },
+ { 0x1301d245, 0x68000000 },
+ { 0x0981d300, 0x68000056 },
+ { 0x0981d400, 0x24000019 },
+ { 0x0981d41a, 0x14000019 },
+ { 0x0981d434, 0x24000019 },
+ { 0x0981d44e, 0x14000006 },
+ { 0x0981d456, 0x14000011 },
+ { 0x0981d468, 0x24000019 },
+ { 0x0981d482, 0x14000019 },
+ { 0x0901d49c, 0x24000000 },
+ { 0x0981d49e, 0x24000001 },
+ { 0x0901d4a2, 0x24000000 },
+ { 0x0981d4a5, 0x24000001 },
+ { 0x0981d4a9, 0x24000003 },
+ { 0x0981d4ae, 0x24000007 },
+ { 0x0981d4b6, 0x14000003 },
+ { 0x0901d4bb, 0x14000000 },
+ { 0x0981d4bd, 0x14000006 },
+ { 0x0981d4c5, 0x1400000a },
+ { 0x0981d4d0, 0x24000019 },
+ { 0x0981d4ea, 0x14000019 },
+ { 0x0981d504, 0x24000001 },
+ { 0x0981d507, 0x24000003 },
+ { 0x0981d50d, 0x24000007 },
+ { 0x0981d516, 0x24000006 },
+ { 0x0981d51e, 0x14000019 },
+ { 0x0981d538, 0x24000001 },
+ { 0x0981d53b, 0x24000003 },
+ { 0x0981d540, 0x24000004 },
+ { 0x0901d546, 0x24000000 },
+ { 0x0981d54a, 0x24000006 },
+ { 0x0981d552, 0x14000019 },
+ { 0x0981d56c, 0x24000019 },
+ { 0x0981d586, 0x14000019 },
+ { 0x0981d5a0, 0x24000019 },
+ { 0x0981d5ba, 0x14000019 },
+ { 0x0981d5d4, 0x24000019 },
+ { 0x0981d5ee, 0x14000019 },
+ { 0x0981d608, 0x24000019 },
+ { 0x0981d622, 0x14000019 },
+ { 0x0981d63c, 0x24000019 },
+ { 0x0981d656, 0x14000019 },
+ { 0x0981d670, 0x24000019 },
+ { 0x0981d68a, 0x1400001b },
+ { 0x0981d6a8, 0x24000018 },
+ { 0x0901d6c1, 0x64000000 },
+ { 0x0981d6c2, 0x14000018 },
+ { 0x0901d6db, 0x64000000 },
+ { 0x0981d6dc, 0x14000005 },
+ { 0x0981d6e2, 0x24000018 },
+ { 0x0901d6fb, 0x64000000 },
+ { 0x0981d6fc, 0x14000018 },
+ { 0x0901d715, 0x64000000 },
+ { 0x0981d716, 0x14000005 },
+ { 0x0981d71c, 0x24000018 },
+ { 0x0901d735, 0x64000000 },
+ { 0x0981d736, 0x14000018 },
+ { 0x0901d74f, 0x64000000 },
+ { 0x0981d750, 0x14000005 },
+ { 0x0981d756, 0x24000018 },
+ { 0x0901d76f, 0x64000000 },
+ { 0x0981d770, 0x14000018 },
+ { 0x0901d789, 0x64000000 },
+ { 0x0981d78a, 0x14000005 },
+ { 0x0981d790, 0x24000018 },
+ { 0x0901d7a9, 0x64000000 },
+ { 0x0981d7aa, 0x14000018 },
+ { 0x0901d7c3, 0x64000000 },
+ { 0x0981d7c4, 0x14000005 },
+ { 0x0981d7ce, 0x34000031 },
+ { 0x16820000, 0x1c00a6d6 },
+ { 0x1682f800, 0x1c00021d },
+ { 0x090e0001, 0x04000000 },
+ { 0x098e0020, 0x0400005f },
+ { 0x1b8e0100, 0x300000ef },
+ { 0x098f0000, 0x0c00fffd },
+ { 0x09900000, 0x0c00fffd },
+};
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "token.h"
+
+namespace v8 { namespace internal {
+
+#ifdef DEBUG
+// Debug-only table mapping each token value to its C++ symbolic name
+// (e.g. "LT"); built by stringifying the name column of TOKEN_LIST.
+#define T(name, string, precedence) #name,
+const char* Token::name_[NUM_TOKENS] = {
+  TOKEN_LIST(T, T, IGNORE_TOKEN)
+};
+#undef T
+#endif
+
+
+// Table mapping each token value to its syntactic string (or NULL for
+// tokens such as IDENTIFIER and NUMBER that have no unique spelling);
+// built from the string column of TOKEN_LIST.
+#define T(name, string, precedence) string,
+const char* Token::string_[NUM_TOKENS] = {
+  TOKEN_LIST(T, T, IGNORE_TOKEN)
+};
+#undef T
+
+
+// Table mapping each token value to its operator precedence (0 for
+// non-operators); built from the precedence column of TOKEN_LIST.
+#define T(name, string, precedence) precedence,
+int8_t Token::precedence_[NUM_TOKENS] = {
+  TOKEN_LIST(T, T, IGNORE_TOKEN)
+};
+#undef T
+
+
+// A perfect (0 collision) hash table of keyword token values.
+
+// Number of hash buckets. A larger N reduces the number of collisions;
+// keep it a power of 2 so that the % N in Hash() can be a fast bit mask.
+const unsigned int N = 128;
+// uint8_t entries keep the table small; valid because there are <= 256
+// tokens (asserted in Token::Initialize). Non-keyword slots hold
+// IDENTIFIER once initialized.
+static uint8_t Hashtable[N];
+// Set once by Token::Initialize(); Lookup() asserts on it.
+static bool IsInitialized = false;
+
+
+static unsigned int Hash(const char* s) {
+  // The constants below were found using trial-and-error. If the keyword
+  // set changes, they may have to be recomputed; increasing N is the
+  // simplest way to get rid of newly introduced collisions.
+
+  // Number of leading characters hashed. Must be at least 4 because
+  // 'const' and 'continue' share the prefix 'con'.
+  const unsigned int kMaxChars = 5;
+  // Shift amount; smaller values tend to reduce the number of collisions.
+  const unsigned int kShift = 4;
+  // Final multiplier; keep it odd (a prime, ideally).
+  const unsigned int kMultiplier = 3;
+
+  unsigned int hash = 0;
+  unsigned int i = 0;
+  while (s[i] != '\0' && i < kMaxChars) {
+    hash += (hash << kShift) + s[i];
+    i++;
+  }
+  // hash is unsigned and N is a power of 2, so % N is just a bit mask.
+  return hash * kMultiplier % N;
+}
+
+
+// Hashes str into the keyword table and confirms the candidate with a
+// full string comparison; anything that does not match exactly is
+// classified as an IDENTIFIER.
+Token::Value Token::Lookup(const char* str) {
+  ASSERT(IsInitialized);
+  Value candidate = static_cast<Value>(Hashtable[Hash(str)]);
+  const char* keyword_string = string_[candidate];
+  // Only the IDENTIFIER slot may carry a NULL string.
+  ASSERT(keyword_string != NULL || candidate == IDENTIFIER);
+  if (keyword_string != NULL && strcmp(keyword_string, str) != 0) {
+    return IDENTIFIER;
+  }
+  return candidate;
+}
+
+
+#ifdef DEBUG
+// Predicate wrapper used by the macro-generated verification code in
+// Token::Initialize: the raw macro expansion would otherwise produce the
+// literal expression NULL == NULL, which is not valid C++.
+static bool IsNull(const char* s) {
+  return !s;
+}
+#endif
+
+
+// Builds and verifies the keyword hash table. Safe to call more than
+// once; subsequent calls are no-ops.
+void Token::Initialize() {
+  if (IsInitialized) return;
+
+  // A list of all keywords, terminated by ILLEGAL.
+#define T(name, string, precedence) name,
+  static Value keyword[] = {
+    TOKEN_LIST(IGNORE_TOKEN, T, IGNORE_TOKEN)
+    ILLEGAL
+  };
+#undef T
+
+  // Assert that the keyword array contains the 25 keywords, 3 future
+  // reserved words (const, debugger, and native), and the 3 named literals
+  // defined by ECMA-262 standard.
+  ASSERT(ARRAY_SIZE(keyword) == 25 + 3 + 3 + 1); // +1 for ILLEGAL sentinel
+
+  // Initialize Hashtable: every slot starts out as IDENTIFIER so that
+  // non-keyword strings fall through in Lookup().
+  ASSERT(NUM_TOKENS <= 256); // Hashtable contains uint8_t elements
+  for (unsigned int i = 0; i < N; i++) {
+    Hashtable[i] = IDENTIFIER;
+  }
+
+  // Insert all keywords into Hashtable.
+  int collisions = 0;
+  for (int i = 0; keyword[i] != ILLEGAL; i++) {
+    Value k = keyword[i];
+    unsigned int h = Hash(string_[k]);
+    if (Hashtable[h] != IDENTIFIER) collisions++;
+    Hashtable[h] = k;
+  }
+
+  // The table must be collision-free (perfect); otherwise Lookup() would
+  // misclassify one of the colliding keywords. Fix by tuning Hash()/N.
+  if (collisions > 0) {
+    PrintF("%d collisions in keyword hashtable\n", collisions);
+    FATAL("Fix keyword lookup!");
+  }
+
+  IsInitialized = true;
+
+  // Verify hash table: T checks that non-keyword tokens are not found
+  // (they look up as IDENTIFIER), K checks that every keyword maps back
+  // to itself.
+#define T(name, string, precedence) \
+    ASSERT(IsNull(string) || Lookup(string) == IDENTIFIER);
+
+#define K(name, string, precedence) \
+    ASSERT(Lookup(string) == name);
+
+  TOKEN_LIST(T, K, IGNORE_TOKEN)
+
+#undef K
+#undef T
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TOKEN_H_
+#define V8_TOKEN_H_
+
+namespace v8 { namespace internal {
+
+// TOKEN_LIST takes a list of 3 macros M, all of which satisfy the
+// same signature M(name, string, precedence), where name is the
+// symbolic token name, string is the corresponding syntactic symbol
+// (or NULL, for literals), and precedence is the precedence (or 0).
+// The parameters are invoked for token categories as follows:
+//
+// T: Non-keyword tokens
+// K: Keyword tokens
+// F: Future (reserved) keyword tokens
+
+// IGNORE_TOKEN is a convenience macro that can be supplied as
+// an argument (at any position) for a TOKEN_LIST call. It does
+// nothing with tokens belonging to the respective category.
+
+#define IGNORE_TOKEN(name, string, precedence)
+
+// NOTE: several predicates in class Token (IsAssignmentOp, IsBinaryOp,
+// IsCompareOp, IsUnaryOp) and the enum-range assertions rely on the
+// declaration order of the contiguous blocks below; do not reorder
+// entries without updating those predicates.
+#define TOKEN_LIST(T, K, F) \
+  /* End of source indicator. */ \
+  T(EOS, "EOS", 0) \
+  \
+  /* Punctuators (ECMA-262, section 7.7, page 15). */ \
+  T(LPAREN, "(", 0) \
+  T(RPAREN, ")", 0) \
+  T(LBRACK, "[", 0) \
+  T(RBRACK, "]", 0) \
+  T(LBRACE, "{", 0) \
+  T(RBRACE, "}", 0) \
+  T(COLON, ":", 0) \
+  T(SEMICOLON, ";", 0) \
+  T(PERIOD, ".", 0) \
+  T(CONDITIONAL, "?", 3) \
+  T(INC, "++", 0) \
+  T(DEC, "--", 0) \
+  \
+  /* Assignment operators. */ \
+  /* IsAssignmentOp() relies on this block of enum values */ \
+  /* being contiguous and sorted in the same order! */ \
+  T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
+  T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
+  T(ASSIGN, "=", 2) \
+  T(ASSIGN_BIT_OR, "|=", 2) \
+  T(ASSIGN_BIT_XOR, "^=", 2) \
+  T(ASSIGN_BIT_AND, "&=", 2) \
+  T(ASSIGN_SHL, "<<=", 2) \
+  T(ASSIGN_SAR, ">>=", 2) \
+  T(ASSIGN_SHR, ">>>=", 2) \
+  T(ASSIGN_ADD, "+=", 2) \
+  T(ASSIGN_SUB, "-=", 2) \
+  T(ASSIGN_MUL, "*=", 2) \
+  T(ASSIGN_DIV, "/=", 2) \
+  T(ASSIGN_MOD, "%=", 2) \
+  \
+  /* Binary operators sorted by precedence. */ \
+  /* IsBinaryOp() relies on this block of enum values */ \
+  /* being contiguous and sorted in the same order! */ \
+  T(COMMA, ",", 1) \
+  T(OR, "||", 4) \
+  T(AND, "&&", 5) \
+  T(BIT_OR, "|", 6) \
+  T(BIT_XOR, "^", 7) \
+  T(BIT_AND, "&", 8) \
+  T(SHL, "<<", 11) \
+  T(SAR, ">>", 11) \
+  T(SHR, ">>>", 11) \
+  T(ADD, "+", 12) \
+  T(SUB, "-", 12) \
+  T(MUL, "*", 13) \
+  T(DIV, "/", 13) \
+  T(MOD, "%", 13) \
+  \
+  /* Compare operators sorted by precedence. */ \
+  /* IsCompareOp() relies on this block of enum values */ \
+  /* being contiguous and sorted in the same order! */ \
+  T(EQ, "==", 9) \
+  T(NE, "!=", 9) \
+  T(EQ_STRICT, "===", 9) \
+  T(NE_STRICT, "!==", 9) \
+  T(LT, "<", 10) \
+  T(GT, ">", 10) \
+  T(LTE, "<=", 10) \
+  T(GTE, ">=", 10) \
+  K(INSTANCEOF, "instanceof", 10) \
+  K(IN, "in", 10) \
+  \
+  /* Unary operators. */ \
+  /* IsUnaryOp() relies on this block of enum values */ \
+  /* being contiguous and sorted in the same order! */ \
+  T(NOT, "!", 0) \
+  T(BIT_NOT, "~", 0) \
+  K(DELETE, "delete", 0) \
+  K(TYPEOF, "typeof", 0) \
+  K(VOID, "void", 0) \
+  \
+  /* Keywords (ECMA-262, section 7.5.2, page 13). */ \
+  K(BREAK, "break", 0) \
+  K(CASE, "case", 0) \
+  K(CATCH, "catch", 0) \
+  K(CONTINUE, "continue", 0) \
+  K(DEBUGGER, "debugger", 0) \
+  K(DEFAULT, "default", 0) \
+  /* DELETE */ \
+  K(DO, "do", 0) \
+  K(ELSE, "else", 0) \
+  K(FINALLY, "finally", 0) \
+  K(FOR, "for", 0) \
+  K(FUNCTION, "function", 0) \
+  K(IF, "if", 0) \
+  /* IN */ \
+  /* INSTANCEOF */ \
+  K(NEW, "new", 0) \
+  K(RETURN, "return", 0) \
+  K(SWITCH, "switch", 0) \
+  K(THIS, "this", 0) \
+  K(THROW, "throw", 0) \
+  K(TRY, "try", 0) \
+  /* TYPEOF */ \
+  K(VAR, "var", 0) \
+  /* VOID */ \
+  K(WHILE, "while", 0) \
+  K(WITH, "with", 0) \
+  \
+  /* Future reserved words (ECMA-262, section 7.5.3, page 14). */ \
+  F(ABSTRACT, "abstract", 0) \
+  F(BOOLEAN, "boolean", 0) \
+  F(BYTE, "byte", 0) \
+  F(CHAR, "char", 0) \
+  F(CLASS, "class", 0) \
+  K(CONST, "const", 0) \
+  F(DOUBLE, "double", 0) \
+  F(ENUM, "enum", 0) \
+  F(EXPORT, "export", 0) \
+  F(EXTENDS, "extends", 0) \
+  F(FINAL, "final", 0) \
+  F(FLOAT, "float", 0) \
+  F(GOTO, "goto", 0) \
+  F(IMPLEMENTS, "implements", 0) \
+  F(IMPORT, "import", 0) \
+  F(INT, "int", 0) \
+  F(INTERFACE, "interface", 0) \
+  F(LONG, "long", 0) \
+  K(NATIVE, "native", 0) \
+  F(PACKAGE, "package", 0) \
+  F(PRIVATE, "private", 0) \
+  F(PROTECTED, "protected", 0) \
+  F(PUBLIC, "public", 0) \
+  F(SHORT, "short", 0) \
+  F(STATIC, "static", 0) \
+  F(SUPER, "super", 0) \
+  F(SYNCHRONIZED, "synchronized", 0) \
+  F(THROWS, "throws", 0) \
+  F(TRANSIENT, "transient", 0) \
+  F(VOLATILE, "volatile", 0) \
+  \
+  /* Literals (ECMA-262, section 7.8, page 16). */ \
+  K(NULL_LITERAL, "null", 0) \
+  K(TRUE_LITERAL, "true", 0) \
+  K(FALSE_LITERAL, "false", 0) \
+  T(NUMBER, NULL, 0) \
+  T(STRING, NULL, 0) \
+  \
+  /* Identifiers (not keywords or future reserved words). */ \
+  T(IDENTIFIER, NULL, 0) \
+  \
+  /* Illegal token - not able to scan. */ \
+  T(ILLEGAL, "ILLEGAL", 0) \
+  \
+  /* Scanner-internal use only. */ \
+  T(COMMENT, NULL, 0)
+
+
+// Static-only class exposing the token enumeration plus per-token name,
+// string, and precedence tables and the keyword lookup. Initialize()
+// must be called before Lookup().
+class Token {
+ public:
+  // All token values.
+#define T(name, string, precedence) name,
+  enum Value {
+    TOKEN_LIST(T, T, IGNORE_TOKEN)
+    NUM_TOKENS
+  };
+#undef T
+
+#ifdef DEBUG
+  // Returns a string corresponding to the C++ token name
+  // (e.g. "LT" for the token LT).
+  static const char* Name(Value tok) {
+    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    return name_[tok];
+  }
+#endif
+
+  // Predicates
+  // These range checks rely on TOKEN_LIST declaring the corresponding
+  // blocks of tokens contiguously and in order (see token list above).
+  static bool IsAssignmentOp(Value tok) {
+    return INIT_VAR <= tok && tok <= ASSIGN_MOD;
+  }
+
+  static bool IsBinaryOp(Value op) {
+    return COMMA <= op && op <= MOD;
+  }
+
+  // Includes the keyword operators INSTANCEOF and IN.
+  static bool IsCompareOp(Value op) {
+    return EQ <= op && op <= IN;
+  }
+
+  static bool IsBitOp(Value op) {
+    return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
+  }
+
+  // ADD and SUB also appear in the binary-operator range; they count as
+  // unary here because of prefix +/-.
+  static bool IsUnaryOp(Value op) {
+    return (NOT <= op && op <= VOID) || op == ADD || op == SUB;
+  }
+
+  static bool IsCountOp(Value op) {
+    return op == INC || op == DEC;
+  }
+
+  // Returns a string corresponding to the JS token string
+  // (i.e., "<" for the token LT) or NULL if the token doesn't
+  // have a (unique) string (e.g. an IDENTIFIER).
+  static const char* String(Value tok) {
+    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    return string_[tok];
+  }
+
+  // Returns the precedence > 0 for binary and compare
+  // operators; returns 0 otherwise.
+  static int Precedence(Value tok) {
+    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    return precedence_[tok];
+  }
+
+  // Returns the keyword value if str is a keyword;
+  // returns IDENTIFIER otherwise. The class must
+  // have been initialized.
+  static Value Lookup(const char* str);
+
+  // Must be called once to initialize the class.
+  // Multiple calls are ignored.
+  static void Initialize();
+
+ private:
+#ifdef DEBUG
+  static const char* name_[NUM_TOKENS];
+#endif
+  static const char* string_[NUM_TOKENS];
+  static int8_t precedence_[NUM_TOKENS];
+};
+
+} } // namespace v8::internal
+
+#endif // V8_TOKEN_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "bootstrapper.h"
+#include "debug.h"
+#include "execution.h"
+#include "string-stream.h"
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
+// Command-line flags controlling exception tracing and optional
+// preallocation of memory used for building stack traces.
+DEFINE_bool(trace_exception, false,
+            "print stack trace when throwing exceptions");
+DEFINE_int(preallocated_stack_trace_memory, 0,
+           "preallocate some space to build stack traces. "
+           "Default is not to preallocate.");
+
+// Static state of the Top class shared by the process.
+ThreadLocalTop Top::thread_local_;
+Mutex* Top::break_access_ = OS::CreateMutex();
+StackFrame::Id Top::break_frame_id_;
+int Top::break_count_;
+int Top::break_id_;
+
+// String allocator backing preallocated stack-trace messages; set up in
+// Top::Initialize() when FLAG_preallocated_stack_trace_memory is non-zero.
+NoAllocationStringAllocator* preallocated_message_space;
+
+// Table of addresses exposed through Top::get_address_from_id, expanded
+// from TOP_ADDRESS_LIST and terminated by NULL.
+Address top_addresses[] = {
+#define C(name) reinterpret_cast<Address>(Top::name()),
+  TOP_ADDRESS_LIST(C)
+#undef C
+  NULL
+};
+
+// Returns the address registered in top_addresses for the given id.
+Address Top::get_address_from_id(Top::AddressId id) {
+  return top_addresses[id];
+}
+
+// Visits the GC roots of an archived thread state stored at
+// thread_storage and returns the address just past the consumed
+// ThreadLocalTop block.
+char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
+  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
+  Iterate(v, thread);
+  return thread_storage + sizeof(ThreadLocalTop);
+}
+
+
+// Helper that presents a field as an Object** slot to the visitor.
+#define VISIT(field) v->VisitPointer(reinterpret_cast<Object**>(&(field)));
+
+// Visits all GC roots held by the given thread state: the exception
+// slots, both contexts, the exceptions captured by registered
+// v8::TryCatch handlers, and pointers on the native execution stack.
+void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+  VISIT(thread->pending_exception_);
+  VISIT(thread->security_context_);
+  VISIT(thread->context_);
+  VISIT(thread->scheduled_exception_);
+
+  // Walk the chain of registered TryCatch handlers and visit the
+  // exception object stored in each.
+  for (v8::TryCatch* block = thread->try_catch_handler_;
+       block != NULL;
+       block = block->next_) {
+    VISIT(reinterpret_cast<Object*&>(block->exception_));
+  }
+
+  // Iterate over pointers on native execution stack.
+  for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
+    it.frame()->Iterate(v);
+  }
+}
+#undef VISIT
+
+
+// Visits the GC roots of the current thread's state.
+void Top::Iterate(ObjectVisitor* v) {
+  Iterate(v, &thread_local_);
+}
+
+
+// Resets every field of the static thread-local state to its default:
+// no stack frames, no handlers, no contexts, and no pending or
+// scheduled exceptions.
+void Top::InitializeThreadLocal() {
+  thread_local_.c_entry_fp_ = 0;
+  thread_local_.handler_ = 0;
+  thread_local_.stack_is_cooked_ = false;
+  thread_local_.try_catch_handler_ = NULL;
+  thread_local_.security_context_ = NULL;
+  thread_local_.context_ = NULL;
+  thread_local_.external_caught_exception_ = false;
+  thread_local_.failed_access_check_callback_ = NULL;
+  // Both exception slots are set to the hole value (see the
+  // clear_*_exception accessors in top.h).
+  clear_pending_exception();
+  clear_scheduled_exception();
+  thread_local_.save_context_ = NULL;
+}
+
+
+// Initializes the thread-local state, the break state, and — when
+// FLAG_preallocated_stack_trace_memory is set — the memory reserved for
+// building stack traces without further allocation.
+void Top::Initialize() {
+  InitializeThreadLocal();
+
+  break_frame_id_ = StackFrame::NO_ID;
+  break_count_ = 0;
+  break_id_ = 0;
+
+  if (FLAG_preallocated_stack_trace_memory != 0) {
+    // Clamp the flag up to the minimum the StringStream needs.
+    if (FLAG_preallocated_stack_trace_memory < StringStream::kInitialCapacity)
+      FLAG_preallocated_stack_trace_memory = StringStream::kInitialCapacity;
+    // 3/4 is allocated to the message and 1/4 is allocated to the work area.
+    preallocated_message_space =
+        new NoAllocationStringAllocator(
+            FLAG_preallocated_stack_trace_memory * 3 / 4);
+    PreallocatedStorage::Init(FLAG_preallocated_stack_trace_memory / 4);
+  }
+}
+
+
+// Makes 'that' the innermost registered v8::TryCatch handler.
+void Top::RegisterTryCatchHandler(v8::TryCatch* that) {
+  thread_local_.try_catch_handler_ = that;
+}
+
+
+// Removes 'that' from the handler chain; it must be the innermost
+// registered handler.
+void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
+  ASSERT(thread_local_.try_catch_handler_ == that);
+  thread_local_.try_catch_handler_ = that->next_;
+}
+
+
+// Starts a new break: records the frame where execution broke and
+// assigns a fresh, monotonically increasing break id.
+void Top::new_break(StackFrame::Id break_frame_id) {
+  ExecutionAccess access;  // Serializes access to the break state.
+  break_frame_id_ = break_frame_id;
+  break_id_ = ++break_count_;
+}
+
+
+// Restores a specific break frame/id pair (does not bump break_count_).
+void Top::set_break(StackFrame::Id break_frame_id, int break_id) {
+  ExecutionAccess access;
+  break_frame_id_ = break_frame_id;
+  break_id_ = break_id;
+}
+
+
+// Returns true if the given id is the currently active break id.
+bool Top::check_break(int break_id) {
+  ExecutionAccess access;
+  return break_id == break_id_;
+}
+
+
+// Returns true if execution is currently stopped at a break.
+bool Top::is_break() {
+  ExecutionAccess access;
+  return is_break_no_lock();
+}
+
+
+// Lock-free variant; a break id of 0 means "no break".
+bool Top::is_break_no_lock() {
+  return break_id_ != 0;
+}
+
+// Returns the id of the frame where execution broke.
+StackFrame::Id Top::break_frame_id() {
+  ExecutionAccess access;
+  return break_frame_id_;
+}
+
+
+// Returns the currently active break id.
+int Top::break_id() {
+  ExecutionAccess access;
+  return break_id_;
+}
+
+
+// Mark-compact GC hooks: frames are "cooked" before collection and
+// "uncooked" afterwards (see StackFrame::CookFramesForThread). The
+// char* overloads operate on archived thread state blobs.
+void Top::MarkCompactPrologue() {
+  MarkCompactPrologue(&thread_local_);
+}
+
+
+void Top::MarkCompactPrologue(char* data) {
+  MarkCompactPrologue(reinterpret_cast<ThreadLocalTop*>(data));
+}
+
+
+void Top::MarkCompactPrologue(ThreadLocalTop* thread) {
+  StackFrame::CookFramesForThread(thread);
+}
+
+
+void Top::MarkCompactEpilogue(char* data) {
+  MarkCompactEpilogue(reinterpret_cast<ThreadLocalTop*>(data));
+}
+
+
+void Top::MarkCompactEpilogue() {
+  MarkCompactEpilogue(&thread_local_);
+}
+
+
+void Top::MarkCompactEpilogue(ThreadLocalTop* thread) {
+  StackFrame::UncookFramesForThread(thread);
+}
+
+
+// Guards against re-entrant stack trace printing (a fault occurring
+// while a stack trace is already being built).
+static int stack_trace_nesting_level = 0;
+// Partially built message, kept reachable so a double fault can still
+// dump whatever was accumulated so far.
+static StringStream* incomplete_message = NULL;
+
+
+// Returns the current stack trace as a string. On a re-entrant call
+// (double fault) prints a diagnostic plus the partial trace and returns
+// the empty symbol; a third nested call aborts the process.
+Handle<String> Top::StackTrace() {
+  if (stack_trace_nesting_level == 0) {
+    stack_trace_nesting_level++;
+    HeapStringAllocator allocator;
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(&allocator);
+    incomplete_message = &accumulator;
+    PrintStack(&accumulator);
+    Handle<String> stack_trace = accumulator.ToString();
+    incomplete_message = NULL;
+    stack_trace_nesting_level = 0;
+    return stack_trace;
+  } else if (stack_trace_nesting_level == 1) {
+    stack_trace_nesting_level++;
+    OS::PrintError(
+        "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+        "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message->OutputToStdOut();
+    return Factory::empty_symbol();
+  } else {
+    OS::Abort();
+    // Unreachable
+    return Factory::empty_symbol();
+  }
+}
+
+
+// Prints the current stack trace to stdout and the log, using the
+// preallocated message space when available so printing works even
+// when no further heap allocation is possible. Re-entrant calls
+// (double fault) dump the partial trace instead.
+void Top::PrintStack() {
+  if (stack_trace_nesting_level == 0) {
+    stack_trace_nesting_level++;
+
+    // Use the preallocated space if configured, otherwise a fresh
+    // heap-backed allocator that is deleted at the end.
+    StringAllocator* allocator;
+    if (FLAG_preallocated_stack_trace_memory == 0) {
+      allocator = new HeapStringAllocator();
+    } else {
+      allocator = preallocated_message_space;
+    }
+
+    // When running from preallocated space, forbid native allocation
+    // while the trace is being built.
+    NativeAllocationChecker allocation_checker(
+        FLAG_preallocated_stack_trace_memory == 0 ?
+        NativeAllocationChecker::ALLOW :
+        NativeAllocationChecker::DISALLOW);
+
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(allocator);
+    incomplete_message = &accumulator;
+    PrintStack(&accumulator);
+    accumulator.OutputToStdOut();
+    accumulator.Log();
+    incomplete_message = NULL;
+    stack_trace_nesting_level = 0;
+    if (FLAG_preallocated_stack_trace_memory == 0) {
+      delete allocator;
+    }
+  } else if (stack_trace_nesting_level == 1) {
+    stack_trace_nesting_level++;
+    OS::PrintError(
+        "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+        "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message->OutputToStdOut();
+  }
+}
+
+
+// Prints every stack frame into the accumulator in the given mode,
+// numbering the frames from zero.
+static void PrintFrames(StringStream* accumulator,
+                        StackFrame::PrintMode mode) {
+  int index = 0;
+  for (StackFrameIterator it; !it.done(); it.Advance()) {
+    it.frame()->Print(accumulator, mode, index);
+    index++;
+  }
+}
+
+
+// Writes a two-part stack trace (overview then details) followed by
+// the mentioned-object cache into the accumulator. Prints nothing if
+// there are no frames on the stack.
+void Top::PrintStack(StringStream* accumulator) {
+  // The MentionedObjectCache is not GC-proof at the moment.
+  AssertNoAllocation nogc;
+  ASSERT(StringStream::IsMentionedObjectCacheClear());
+
+  // Avoid printing anything if there are no frames.
+  if (c_entry_fp(GetCurrentThread()) == 0) return;
+
+  accumulator->Add(
+      "\n==== Stack trace ============================================\n\n");
+  PrintFrames(accumulator, StackFrame::OVERVIEW);
+
+  accumulator->Add(
+      "\n==== Details ================================================\n\n");
+  PrintFrames(accumulator, StackFrame::DETAILS);
+
+  accumulator->PrintMentionedObjectCache();
+  accumulator->Add("=====================\n\n");
+}
+
+
+// Installs the embedder callback invoked on failed access checks; may
+// only be set once (asserted).
+void Top::SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback) {
+  ASSERT(thread_local_.failed_access_check_callback_ == NULL);
+  thread_local_.failed_access_check_callback_ = callback;
+}
+
+
+// Notifies the embedder that an access check on 'receiver' failed,
+// passing along the data object from the receiver's AccessCheckInfo.
+// Does nothing if no callback is installed or no access-check info is
+// attached to the receiver's constructor.
+void Top::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+  if (!thread_local_.failed_access_check_callback_) return;
+
+  ASSERT(receiver->IsAccessCheckNeeded());
+  ASSERT(Top::security_context());
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // Get the data object from access check info.
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  Object* info = constructor->shared()->function_data();
+  if (info == Heap::undefined_value()) return;
+
+  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  if (data_obj == Heap::undefined_value()) return;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  thread_local_.failed_access_check_callback_(
+      v8::Utils::ToLocal(receiver_handle),
+      type,
+      v8::Utils::ToLocal(data));
+}
+
+// Returns whether the current security context may access the named
+// property 'key' on 'receiver'. Falls back to an embedder-supplied
+// named security callback; a false result means access is denied.
+bool Top::MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+  // Check for compatibility between the security tokens in the
+  // current security context and the accessed object.
+  ASSERT(Top::security_context());
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // During bootstrapping, callback functions are not enabled yet.
+  if (Bootstrapper::IsActive()) return true;
+
+  // Fast case: global objects whose security token matches the current
+  // security context's global are always accessible.
+  if (receiver->IsJSGlobalObject()) {
+    JSGlobalObject* global = JSGlobalObject::cast(receiver);
+    JSGlobalObject* current =
+        JSGlobalObject::cast(Top::security_context()->global());
+    if (current->security_token() == global->security_token()) return true;
+  }
+
+  // Get named access check callback
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  Object* info = constructor->shared()->function_data();
+  if (info == Heap::undefined_value()) return false;
+
+  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  if (data_obj == Heap::undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
+  v8::NamedSecurityCallback callback =
+      v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+
+  // No registered callback means access is denied.
+  if (!callback) return false;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> key_handle(key);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  LOG(ApiNamedSecurityCheck(key));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(OTHER);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      v8::Utils::ToLocal(key_handle),
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
+
+
+// Indexed-property counterpart of MayNamedAccess: returns whether the
+// current security context may access element 'index' on 'receiver',
+// consulting the embedder's indexed security callback if present.
+bool Top::MayIndexedAccess(JSObject* receiver,
+                           uint32_t index,
+                           v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+  // Check for compatibility between the security tokens in the
+  // current security context and the accessed object.
+  ASSERT(Top::security_context());
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // During bootstrapping, callback functions are not enabled yet.
+  if (Bootstrapper::IsActive()) return true;
+
+  // Fast case: global objects whose security token matches the current
+  // security context's global are always accessible.
+  if (receiver->IsJSGlobalObject()) {
+    JSGlobalObject* global = JSGlobalObject::cast(receiver);
+    JSGlobalObject* current =
+        JSGlobalObject::cast(Top::security_context()->global());
+    if (current->security_token() == global->security_token()) return true;
+  }
+
+  // Get indexed access check callback
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  Object* info = constructor->shared()->function_data();
+  if (info == Heap::undefined_value()) return false;
+
+  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  if (data_obj == Heap::undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
+  v8::IndexedSecurityCallback callback =
+      v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
+
+  // No registered callback means access is denied.
+  if (!callback) return false;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  LOG(ApiIndexedSecurityCheck(index));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(OTHER);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      index,
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
+
+
+// Throws a copy of the precomputed stack-overflow boilerplate object
+// (looked up in the security context's builtins) with a precomputed
+// message, and returns the exception sentinel for the caller to return.
+Failure* Top::StackOverflow() {
+  HandleScope scope;
+  Handle<String> key = Factory::stack_overflow_symbol();
+  Handle<JSObject> boilerplate =
+      Handle<JSObject>::cast(
+          GetProperty(Top::security_context_builtins(), key));
+  Handle<Object> exception = Copy(boilerplate);
+  // TODO(1240995): To avoid having to call JavaScript code to compute
+  // the message for stack overflow exceptions which is very likely to
+  // double fault with another stack overflow exception, we use a
+  // precomputed message. This is somewhat problematic in that it
+  // doesn't use ReportUncaughtException to determine the location
+  // from where the exception occurred. It should probably be
+  // reworked.
+  static const char* kMessage =
+      "Uncaught RangeError: Maximum call stack size exceeded";
+  DoThrow(*exception, NULL, kMessage, false);
+  return Failure::Exception();
+}
+
+
+// Throws an exception with full error reporting; callers should return
+// the resulting Failure* sentinel.
+Failure* Top::Throw(Object* exception, MessageLocation* location) {
+  DoThrow(exception, location, NULL, false);
+  return Failure::Exception();
+}
+
+
+// Re-throws an exception; reporting is suppressed (is_rethrow == true)
+// because it already happened when the exception was first thrown.
+Failure* Top::ReThrow(Object* exception, MessageLocation* location) {
+  DoThrow(exception, location, NULL, true);
+  return Failure::Exception();
+}
+
+
+// Schedules an exception to be rethrown later: throws it now (so any
+// uncaught-error reporting happens), then moves it from the pending
+// slot to the scheduled slot.
+void Top::ScheduleThrow(Object* exception) {
+  // When scheduling a throw we first throw the exception to get the
+  // error reporting if it is uncaught before rescheduling it.
+  Throw(exception);
+  thread_local_.scheduled_exception_ = pending_exception();
+  thread_local_.external_caught_exception_ = false;
+  clear_pending_exception();
+}
+
+
+// Moves the scheduled exception back to pending by rethrowing it, and
+// returns the resulting exception sentinel.
+Object* Top::PromoteScheduledException() {
+  Object* thrown = scheduled_exception();
+  clear_scheduled_exception();
+  // Re-throw the exception to avoid getting repeated error reporting.
+  return ReThrow(thrown);
+}
+
+
+// TODO(1233523): Get rid of this hackish abstraction once all
+// JavaScript frames have a function associated with them.
+
+// NOTE: The stack trace frame iterator is an iterator that only
+// traverse proper JavaScript frames; that is JavaScript frames that
+// have proper JavaScript functions. This excludes the problematic
+// functions in runtime.js.
+class StackTraceFrameIterator: public JavaScriptFrameIterator {
+ public:
+  // Skip a leading frame without a proper JSFunction, if any.
+  StackTraceFrameIterator() {
+    if (!done() && !frame()->function()->IsJSFunction()) Advance();
+  }
+
+  // Advances to the next frame whose function is a proper JSFunction,
+  // or to the end of the stack.
+  void Advance() {
+    while (true) {
+      JavaScriptFrameIterator::Advance();
+      if (done()) return;
+      if (frame()->function()->IsJSFunction()) return;
+    }
+  }
+};
+
+
+// Prints one line per proper JavaScript frame to 'out', formatting each
+// line through the JavaScript helper Execution::GetStackTraceLine.
+void Top::PrintCurrentStackTrace(FILE* out) {
+  StackTraceFrameIterator it;
+  while (!it.done()) {
+    HandleScope scope;
+    // Find code position if recorded in relocation info.
+    JavaScriptFrame* frame = it.frame();
+    int pos = frame->FindCode()->SourcePosition(frame->pc());
+    Handle<Object> pos_obj(Smi::FromInt(pos));
+    // Fetch function and receiver.
+    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+    Handle<Object> recv(frame->receiver());
+    // Advance to the next JavaScript frame and determine if the
+    // current frame is the top-level frame.
+    it.Advance();
+    Handle<Object> is_top_level = it.done()
+        ? Factory::true_value()
+        : Factory::false_value();
+    // Generate and print stack trace line.
+    Handle<String> line =
+        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+    // Empty lines are suppressed.
+    if (line->length() > 0) {
+      line->PrintOn(out);
+      fprintf(out, "\n");
+    }
+  }
+}
+
+
+// Reports an uncaught exception through the message handler. When no
+// location is supplied, one is computed from the topmost proper
+// JavaScript frame (falling back to an empty-script location). An
+// optional stack trace is reported as a separate message.
+void Top::ReportUncaughtException(Handle<Object> exception,
+                                  MessageLocation* location,
+                                  Handle<String> stack_trace) {
+  MessageLocation computed_location(empty_script(), -1, -1);
+  if (location == NULL) {
+    location = &computed_location;
+
+    StackTraceFrameIterator it;
+    if (!it.done()) {
+      JavaScriptFrame* frame = it.frame();
+      JSFunction* fun = JSFunction::cast(frame->function());
+      Object* script = fun->shared()->script();
+      // Only use the script if it has source attached.
+      if (script->IsScript() &&
+          !(Script::cast(script)->source()->IsUndefined())) {
+        int pos = frame->FindCode()->SourcePosition(frame->pc());
+        // Compute the location from the function and the reloc info.
+        Handle<Script> casted_script(Script::cast(script));
+        computed_location = MessageLocation(casted_script, pos, pos + 1);
+      }
+    }
+  }
+
+  // Report the uncaught exception.
+  MessageHandler::ReportMessage("uncaught_exception",
+                                location,
+                                HandleVector<Object>(&exception, 1));
+
+  // Optionally, report the stack trace separately.
+  if (!stack_trace.is_null()) {
+    MessageHandler::ReportMessage("stack_trace",
+                                  location,
+                                  HandleVector<String>(&stack_trace, 1));
+  }
+}
+
+
+// Decides whether a thrown exception should be reported, and stores in
+// *is_caught_externally whether the closest catching handler is an
+// external v8::TryCatch rather than a JavaScript try-catch/finally.
+// Handler positions are compared by stack address.
+bool Top::ShouldReportException(bool* is_caught_externally) {
+  StackHandler* handler =
+      StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
+
+  // Determine if we have an external exception handler and get the
+  // address of the external handler so we can compare the address to
+  // determine which one is closer to the top of the stack.
+  bool has_external_handler = (thread_local_.try_catch_handler_ != NULL);
+  Address external_handler_address =
+      reinterpret_cast<Address>(thread_local_.try_catch_handler_);
+
+  // NOTE: The stack is assumed to grow towards lower addresses. If
+  // the handler is at a higher address than the external address it
+  // means that it is below it on the stack.
+
+  // Find the top-most try-catch or try-finally handler.
+  while (handler != NULL && handler->is_entry()) {
+    handler = handler->next();
+  }
+
+  // The exception has been externally caught if and only if there is
+  // an external handler which is above any JavaScript try-catch or
+  // try-finally handlers.
+  *is_caught_externally = has_external_handler &&
+      (handler == NULL || handler->address() > external_handler_address);
+
+  // Find the top-most try-catch handler.
+  while (handler != NULL && !handler->is_try_catch()) {
+    handler = handler->next();
+  }
+
+  // If we have a try-catch handler then the exception is caught in
+  // JavaScript code.
+  bool is_uncaught_by_js = (handler == NULL);
+
+  // If there is no external try-catch handler, we report the
+  // exception if it isn't caught by JavaScript code.
+  if (!has_external_handler) return is_uncaught_by_js;
+
+  if (is_uncaught_by_js || handler->address() > external_handler_address) {
+    // Only report the exception if the external handler is verbose.
+    return thread_local_.try_catch_handler_->is_verbose_;
+  } else {
+    // Report the exception if it isn't caught by JavaScript code.
+    return is_uncaught_by_js;
+  }
+}
+
+
+// Common throwing machinery behind Throw, ReThrow and StackOverflow:
+// decides whether to report, stores the exception in an external
+// TryCatch handler when one catches it, notifies the debugger,
+// optionally reports, and finally records the pending exception.
+void Top::DoThrow(Object* exception,
+                  MessageLocation* location,
+                  const char* message,
+                  bool is_rethrow) {
+  ASSERT(!has_pending_exception());
+  ASSERT(!external_caught_exception());
+
+  HandleScope scope;
+  Handle<Object> exception_handle(exception);
+
+  bool is_caught_externally = false;
+  // Out-of-memory exceptions are never reported; rethrows suppress
+  // reporting because it already happened on the original throw.
+  bool report_exception = (exception != Failure::OutOfMemoryException()) &&
+      ShouldReportException(&is_caught_externally);
+  if (is_rethrow) report_exception = false;
+
+  // If the exception is caught externally, we store it in the
+  // try/catch handler. The C code can find it later and process it if
+  // necessary.
+  if (is_caught_externally) {
+    thread_local_.try_catch_handler_->exception_ =
+        reinterpret_cast<void*>(*exception_handle);
+  }
+
+  // Notify debugger of exception.
+  Debugger::OnException(exception_handle, report_exception);
+
+  if (report_exception) {
+    if (message != NULL) {
+      // A caller-supplied precomputed message avoids running JS code
+      // (see StackOverflow).
+      MessageHandler::ReportMessage(message);
+    } else {
+      Handle<String> stack_trace;
+      if (FLAG_trace_exception) stack_trace = StackTrace();
+      ReportUncaughtException(exception_handle, location, stack_trace);
+    }
+  }
+  thread_local_.external_caught_exception_ = is_caught_externally;
+  // NOTE: Notifying the debugger or reporting the exception may have caused
+  // new exceptions. For now, we just ignore that and set the pending exception
+  // to the original one.
+  set_pending_exception(*exception_handle);
+}
+
+
+// Overrides the trace_exception command-line flag at runtime.
+void Top::TraceException(bool flag) {
+  FLAG_trace_exception = flag;
+}
+
+
+// Called by the API after operations that may throw. Either clears the
+// pending exception (externally caught, or at the bottom call when not
+// out of memory) and returns false, or moves it to the scheduled slot
+// to be rethrown later and returns true.
+bool Top::optional_reschedule_exception(bool is_bottom_call) {
+  bool clear_now = !is_out_of_memory() &&
+      (thread_local_.external_caught_exception_ || is_bottom_call);
+  if (clear_now) {
+    thread_local_.external_caught_exception_ = false;
+    clear_pending_exception();
+    return false;
+  }
+  thread_local_.scheduled_exception_ = pending_exception();
+  clear_pending_exception();
+  return true;
+}
+
+
+// Returns true if either the pending or the scheduled exception is an
+// out-of-memory failure.
+bool Top::is_out_of_memory() {
+  if (has_pending_exception()) {
+    Object* exception = pending_exception();
+    if (exception->IsFailure() &&
+        Failure::cast(exception)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  if (has_scheduled_exception()) {
+    Object* exception = scheduled_exception();
+    if (exception->IsFailure() &&
+        Failure::cast(exception)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// Returns the global context of the current context's global object.
+Handle<Context> Top::global_context() {
+  GlobalObject* global = thread_local_.context_->global();
+  return Handle<Context>(global->global_context());
+}
+
+
+// Looks up a replacement for 'function' in the global context's special
+// function table, which stores (prototype, function, replacement)
+// triples. Only JSArray receivers are considered; returns undefined
+// when there is no match.
+Object* Top::LookupSpecialFunction(JSObject* receiver,
+                                   JSObject* prototype,
+                                   JSFunction* function) {
+  if (receiver->IsJSArray()) {
+    FixedArray* table = context()->global_context()->special_function_table();
+    int length = table->length();
+    for (int i = 0; i < length; i += 3) {
+      if (table->get(i) == prototype && table->get(i + 1) == function) {
+        return table->get(i + 2);
+      }
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
+// Copies the current thread-local state into 'to', reinitializes the
+// live state for the next thread, and returns the address just past
+// the archived block.
+char* Top::ArchiveThread(char* to) {
+  memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(thread_local_));
+  Initialize();
+  return to + sizeof(thread_local_);
+}
+
+
+// Restores previously archived thread-local state from 'from' and
+// returns the address just past the consumed block.
+char* Top::RestoreThread(char* from) {
+  memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(thread_local_));
+  return from + sizeof(thread_local_);
+}
+
+
+// Scoped lock on Top::break_access_: constructing an ExecutionAccess
+// serializes access to the break state for the object's lifetime.
+ExecutionAccess::ExecutionAccess() {
+  Top::break_access_->Lock();
+}
+
+
+ExecutionAccess::~ExecutionAccess() {
+  Top::break_access_->Unlock();
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TOP_H_
+#define V8_TOP_H_
+
+#include "frames-inl.h"
+
+namespace v8 { namespace internal {
+
+
+// Returns from the enclosing function with the promoted scheduled
+// exception if one is pending.
+#define RETURN_IF_SCHEDULED_EXCEPTION() \
+  if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()
+
+// Top has static variables used for JavaScript execution.
+
+class SaveContext; // Forward declaration.
+
+// Per-thread JavaScript execution state; archived and restored by
+// Top::ArchiveThread / Top::RestoreThread.
+class ThreadLocalTop BASE_EMBEDDED {
+ public:
+  // The security context from which JS execution started.
+  Context* security_context_;
+  // The context where the current execution method is created and for variable
+  // lookups.
+  Context* context_;
+  // The hole value when no exception is pending (see Top's accessors).
+  Object* pending_exception_;
+  // Use a separate value for scheduled exceptions to preserve the
+  // invariants that hold about pending_exception. We may want to
+  // unify them later.
+  Object* scheduled_exception_;
+  // True when the pending exception was stored in an external TryCatch.
+  bool external_caught_exception_;
+  // Innermost registered v8::TryCatch handler, or NULL.
+  v8::TryCatch* try_catch_handler_;
+  SaveContext* save_context_;
+
+  // Stack.
+  Address c_entry_fp_;  // the frame pointer of the top c entry frame
+  Address handler_;  // try-blocks are chained through the stack
+  bool stack_is_cooked_;
+  inline bool stack_is_cooked() { return stack_is_cooked_; }
+  inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
+
+  // Generated code scratch locations.
+  int32_t formal_count_;
+
+  // Call back function to report unsafe JS accesses.
+  v8::FailedAccessCheckCallback failed_access_check_callback_;
+};
+
+// Addresses of thread-local fields exposed to generated code; expanded
+// into Top::AddressId and the top_addresses table in top.cc.
+#define TOP_ADDRESS_LIST(C) \
+  C(handler_address) \
+  C(c_entry_fp_address) \
+  C(context_address) \
+  C(pending_exception_address) \
+  C(external_caught_exception_address) \
+  C(security_context_address)
+
+class Top {
+ public:
+ enum AddressId {
+#define C(name) k_##name,
+ TOP_ADDRESS_LIST(C)
+#undef C
+ k_top_address_count
+ };
+
+ static Address get_address_from_id(AddressId id);
+
+ // Access to the security context from which JS execution started.
+ // In a browser world, it is the JS context of the frame which initiated
+ // JavaScript execution.
+ static Context* security_context() { return thread_local_.security_context_; }
+ static void set_security_context(Context* context) {
+ ASSERT(context == NULL || context->IsGlobalContext());
+ thread_local_.security_context_ = context;
+ }
+ static Context** security_context_address() {
+ return &thread_local_.security_context_;
+ }
+
+ // Access to top context (where the current function object was created).
+ static Context* context() { return thread_local_.context_; }
+ static void set_context(Context* context) {
+ thread_local_.context_ = context;
+ }
+ static Context** context_address() { return &thread_local_.context_; }
+
+ static SaveContext* save_context() {return thread_local_.save_context_; }
+ static void set_save_context(SaveContext* save) {
+ thread_local_.save_context_ = save;
+ }
+
+ // Interface to pending exception.
+ static Object* pending_exception() {
+ ASSERT(has_pending_exception());
+ return thread_local_.pending_exception_;
+ }
+ static bool external_caught_exception() {
+ return thread_local_.external_caught_exception_;
+ }
+ static void set_pending_exception(Object* exception) {
+ thread_local_.pending_exception_ = exception;
+ }
+ static void clear_pending_exception() {
+ thread_local_.pending_exception_ = Heap::the_hole_value();
+ }
+
+ static Object** pending_exception_address() {
+ return &thread_local_.pending_exception_;
+ }
+ static bool has_pending_exception() {
+ return !thread_local_.pending_exception_->IsTheHole();
+ }
+ static v8::TryCatch* try_catch_handler() {
+ return thread_local_.try_catch_handler_;
+ }
+ // This method is called by the api after operations that may throw
+ // exceptions. If an exception was thrown and not handled by an external
+ // handler the exception is scheduled to be rethrown when we return to running
+ // JavaScript code. If an exception is scheduled true is returned.
+ static bool optional_reschedule_exception(bool is_bottom_call);
+
+ static bool* external_caught_exception_address() {
+ return &thread_local_.external_caught_exception_;
+ }
+
+ static Object* scheduled_exception() {
+ ASSERT(has_scheduled_exception());
+ return thread_local_.scheduled_exception_;
+ }
+ static bool has_scheduled_exception() {
+ return !thread_local_.scheduled_exception_->IsTheHole();
+ }
+ static void clear_scheduled_exception() {
+ thread_local_.scheduled_exception_ = Heap::the_hole_value();
+ }
+
+ // Tells whether the current context has experienced an out of memory
+ // exception.
+ static bool is_out_of_memory();
+
+ // JS execution stack (see frames.h).
+ static Address c_entry_fp(ThreadLocalTop* thread) {
+ return thread->c_entry_fp_;
+ }
+ static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
+
+ static inline Address* c_entry_fp_address() {
+ return &thread_local_.c_entry_fp_;
+ }
+ static inline Address* handler_address() { return &thread_local_.handler_; }
+
+ // Generated code scratch locations.
+ static void* formal_count_address() { return &thread_local_.formal_count_; }
+
+ static void new_break(StackFrame::Id break_frame_id);
+ static void set_break(StackFrame::Id break_frame_id, int break_id);
+ static bool check_break(int break_id);
+ static bool is_break();
+ static bool is_break_no_lock();
+ static StackFrame::Id break_frame_id();
+ static int break_id();
+
+ static void MarkCompactPrologue();
+ static void MarkCompactEpilogue();
+ static void MarkCompactPrologue(char* archived_thread_data);
+ static void MarkCompactEpilogue(char* archived_thread_data);
+ static void PrintCurrentStackTrace(FILE* out);
+ static void PrintStackTrace(FILE* out, char* thread_data);
+ static void PrintStack(StringStream* accumulator);
+ static void PrintStack();
+ static Handle<String> StackTrace();
+
+ // Returns if the top context may access the given global object. If
+ // the result is false, the pending exception is guaranteed to be
+ // set.
+ static bool MayNamedAccess(JSObject* receiver,
+ Object* key,
+ v8::AccessType type);
+ static bool MayIndexedAccess(JSObject* receiver,
+ uint32_t index,
+ v8::AccessType type);
+
+ static void SetFailedAccessCheckCallback(
+ v8::FailedAccessCheckCallback callback);
+ static void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
+
+ // Exception throwing support. The caller should use the result
+ // of Throw() as its return value.
+ static Failure* Throw(Object* exception, MessageLocation* location = NULL);
+ // Re-throw an exception. This involves no error reporting since
+ // error reporting was handled when the exception was thrown
+ // originally.
+ static Failure* ReThrow(Object* exception, MessageLocation* location = NULL);
+ static void ScheduleThrow(Object* exception);
+
+ // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
+ static Object* PromoteScheduledException();
+ static void DoThrow(Object* exception,
+ MessageLocation* location,
+ const char* message,
+ bool is_rethrow);
+ static bool ShouldReportException(bool* is_caught_externally);
+ static void ReportUncaughtException(Handle<Object> exception,
+ MessageLocation* location,
+ Handle<String> stack_trace);
+
+ // Override command line flag.
+ static void TraceException(bool flag);
+
+ // Out of resource exception helpers.
+ static Failure* StackOverflow();
+
+ // Administration
+ static void Initialize();
+ static void Iterate(ObjectVisitor* v);
+ static void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
+ static char* Iterate(ObjectVisitor* v, char* t);
+
+ static Handle<JSObject> global() {
+ return Handle<JSObject>(context()->global());
+ }
+ static Handle<Context> global_context();
+
+ static Handle<JSBuiltinsObject> builtins() {
+ return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
+ }
+ static Handle<JSBuiltinsObject> security_context_builtins() {
+ return Handle<JSBuiltinsObject>(
+ thread_local_.security_context_->builtins());
+ }
+
+ static Object* LookupSpecialFunction(JSObject* receiver,
+ JSObject* prototype,
+ JSFunction* value);
+
+ static void RegisterTryCatchHandler(v8::TryCatch* that);
+ static void UnregisterTryCatchHandler(v8::TryCatch* that);
+
+#define TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ static Handle<type> name() { \
+ return Handle<type>(context()->global_context()->name()); \
+ }
+ GLOBAL_CONTEXT_FIELDS(TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR)
+#undef TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR
+
+ static inline ThreadLocalTop* GetCurrentThread() { return &thread_local_; }
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
+ static char* ArchiveThread(char* to);
+ static char* RestoreThread(char* from);
+
+ private:
+ // The context that initiated this JS execution.
+ static ThreadLocalTop thread_local_;
+ static void InitializeThreadLocal();
+ static void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
+ static void MarkCompactPrologue(ThreadLocalTop* archived_thread_data);
+ static void MarkCompactEpilogue(ThreadLocalTop* archived_thread_data);
+
+ // Debug.
+ // Mutex for serializing access to break control structures.
+ static Mutex* break_access_;
+
+ // ID of the frame where execution is stopped by debugger.
+ static StackFrame::Id break_frame_id_;
+
+ // Counter to create unique id for each debug break.
+ static int break_count_;
+
+ // Current debug break, 0 if running.
+ static int break_id_;
+
+ friend class SaveContext;
+ friend class AssertNoContextChange;
+ friend class ExecutionAccess;
+
+ static void FillCache();
+};
+
+
+// RAII helper that snapshots Top's current context and security
+// context. On destruction the snapshot is written back and the
+// previously active SaveContext is reinstated, so live instances form
+// a stack linked through Top::save_context().
+class SaveContext BASE_EMBEDDED {
+ public:
+  SaveContext() :
+      saved_context_(Top::context()),
+      saved_security_context_(Top::security_context()),
+      previous_(Top::save_context()) {
+    Top::set_save_context(this);
+  }
+
+  ~SaveContext() {
+    // Restore the snapshot and pop this frame off the save stack.
+    Top::set_context(*saved_context_);
+    Top::set_security_context(*saved_security_context_);
+    Top::set_save_context(previous_);
+  }
+
+  Handle<Context> context() { return saved_context_; }
+  Handle<Context> security_context() { return saved_security_context_; }
+  SaveContext* prev() { return previous_; }
+
+ private:
+  Handle<Context> saved_context_;
+  Handle<Context> saved_security_context_;
+  SaveContext* previous_;
+};
+
+
+// Debug-only scope guard asserting that neither Top's context nor its
+// security context changes while the guard is alive. In release builds
+// this compiles to an empty object.
+class AssertNoContextChange BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+  // Capture handles to the contexts current at construction time.
+  AssertNoContextChange() :
+      context_(Top::context()),
+      security_context_(Top::security_context()) {
+  }
+
+  // Verify the contexts are unchanged at scope exit.
+  ~AssertNoContextChange() {
+    ASSERT(Top::context() == *context_);
+    ASSERT(Top::security_context() == *security_context_);
+  }
+
+ private:
+  // Scope keeping the handles below valid for the guard's lifetime.
+  HandleScope scope_;
+  Handle<Context> context_;
+  Handle<Context> security_context_;
+#else
+ public:
+  AssertNoContextChange() { }
+#endif
+};
+
+
+// Scope guard for exclusive access to execution control state.
+// NOTE(review): constructor and destructor are defined out of line;
+// presumably they acquire/release the break_access_ mutex declared as
+// a friend-accessible member of Top above — confirm in the .cc file.
+class ExecutionAccess BASE_EMBEDDED {
+ public:
+  ExecutionAccess();
+  ~ExecutionAccess();
+};
+
+} } // namespace v8::internal
+
+#endif // V8_TOP_H_
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef __UNIBROW_INL_H__
+#define __UNIBROW_INL_H__
+
+#include "unicode.h"
+
+namespace unibrow {
+
+// Cached predicate lookup: answer directly from the direct-mapped
+// cache on a hit, otherwise compute (and cache) the value.
+template <class T, int s> bool Predicate<T, s>::get(uchar code_point) {
+  const CacheEntry cached = entries_[code_point & kMask];
+  if (cached.code_point_ != code_point) return CalculateValue(code_point);
+  return cached.value_;
+}
+
+// Evaluate the predicate via the trait class T and remember the answer
+// in this code point's cache slot.
+template <class T, int s> bool Predicate<T, s>::CalculateValue(
+    uchar code_point) {
+  const bool is_member = T::Is(code_point);
+  entries_[code_point & kMask] = CacheEntry(code_point, is_member);
+  return is_member;
+}
+
+// Cached single-character mapping lookup. A cache hit with offset 0
+// means c has no mapping (0 characters); a nonzero offset is applied
+// to produce the single mapped character. Misses fall through to
+// CalculateValue.
+template <class T, int s> int Mapping<T, s>::get(uchar c, uchar n,
+    uchar* result) {
+  const CacheEntry cached = entries_[c & kMask];
+  if (cached.code_point_ != c) return CalculateValue(c, n, result);
+  if (cached.offset_ == 0) return 0;
+  result[0] = c + cached.offset_;
+  return 1;
+}
+
+// Compute a mapping through the trait class T. Cacheable results are
+// stored as an offset from c (0 for "no mapping"); multi-character or
+// context-dependent conversions are returned without caching.
+template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
+    uchar* result) {
+  bool allow_caching = true;
+  const int length = T::Convert(c, n, result, &allow_caching);
+  if (!allow_caching) return length;
+  if (length == 1) {
+    entries_[c & kMask] = CacheEntry(c, result[0] - c);
+    return 1;
+  }
+  entries_[c & kMask] = CacheEntry(c, 0);
+  return 0;
+}
+
+
+// Encode the character c as UTF-8 into str and return the number of
+// bytes written (1-4). The caller must ensure str has enough room; no
+// bounds checking is performed here.
+unsigned Utf8::Encode(char* str, uchar c) {
+  // Mask selecting the six payload bits of a continuation byte.
+  // The previous definition, ~(1 << 6), produced correct bytes only
+  // because assignment to char truncates the value to eight bits;
+  // 0x3F is the standard continuation mask, yields identical output
+  // for every c, and does not depend on that truncation.
+  static const int kMask = 0x3F;
+  if (c <= kMaxOneByteChar) {
+    str[0] = c;
+    return 1;
+  } else if (c <= kMaxTwoByteChar) {
+    str[0] = 0xC0 | (c >> 6);
+    str[1] = 0x80 | (c & kMask);
+    return 2;
+  } else if (c <= kMaxThreeByteChar) {
+    str[0] = 0xE0 | (c >> 12);
+    str[1] = 0x80 | ((c >> 6) & kMask);
+    str[2] = 0x80 | (c & kMask);
+    return 3;
+  } else {
+    str[0] = 0xF0 | (c >> 18);
+    str[1] = 0x80 | ((c >> 12) & kMask);
+    str[2] = 0x80 | ((c >> 6) & kMask);
+    str[3] = 0x80 | (c & kMask);
+    return 4;
+  }
+}
+
+
+// Decode one character from the UTF-8 sequence at bytes, advancing
+// *cursor past the bytes consumed. The ASCII case is handled inline;
+// multi-byte sequences are delegated to CalculateValue. Returns
+// kBadChar for an empty buffer (without touching *cursor).
+uchar Utf8::ValueOf(const byte* bytes, unsigned length, unsigned* cursor) {
+  // length is unsigned, so the previous 'length <= 0' test was an
+  // equality test in disguise; state it explicitly.
+  if (length == 0) return kBadChar;
+  byte first = bytes[0];
+  // Characters between 0000 and 007F are encoded as a single byte.
+  if (first <= kMaxOneByteChar) {
+    *cursor += 1;
+    return first;
+  }
+  return CalculateValue(bytes, length, cursor);
+}
+
+// Number of bytes (1-4) needed to UTF-8 encode the character c;
+// mirrors the byte counts produced by Utf8::Encode above.
+unsigned Utf8::Length(uchar c) {
+  if (c <= kMaxOneByteChar) return 1;
+  if (c <= kMaxTwoByteChar) return 2;
+  if (c <= kMaxThreeByteChar) return 3;
+  return 4;
+}
+
+// Return the next character from the stream. When this consumes the
+// buffer's last remaining character the buffer is refilled from the
+// underlying input; otherwise the remaining count is decremented.
+uchar CharacterStream::GetNext() {
+  uchar result = DecodeCharacter(buffer_, &cursor_);
+  if (remaining_ == 1) {
+    // Last buffered character consumed: restart the buffer.
+    cursor_ = 0;
+    FillBuffer();
+  } else {
+    remaining_--;
+  }
+  return result;
+}
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define IF_LITTLE(expr) expr
+#define IF_BIG(expr) ((void) 0)
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define IF_LITTLE(expr) ((void) 0)
+#define IF_BIG(expr) expr
+#else
+#warning Unknown byte ordering
+#endif
+
+// Append a one-byte (ASCII) character to buffer at offset, advancing
+// offset on success. Returns false when the buffer is full.
+bool CharacterStream::EncodeAsciiCharacter(uchar c, byte* buffer,
+    unsigned capacity, unsigned& offset) {
+  if (capacity <= offset) return false;
+  buffer[offset++] = c;
+  return true;
+}
+
+// Append a non-ASCII character to buffer in the internal (non-UTF-8)
+// format read back by DecodeCharacter: the full uchar is stored at the
+// next 4-byte-aligned position, with a 0x80 tag byte at `offset`
+// marking the slot as non-ASCII (on little-endian the tag occupies the
+// aligned word's low byte, so the payload is shifted up by 8 bits; on
+// big-endian the tag is the high bit of the word). Returns false if
+// the aligned word would not fit in `capacity`.
+bool CharacterStream::EncodeNonAsciiCharacter(uchar c, byte* buffer,
+    unsigned capacity, unsigned& offset) {
+  // Round up to the next 4-byte boundary for the uchar store.
+  unsigned aligned = (offset + 0x3) & ~0x3;
+  if ((aligned + sizeof(uchar)) > capacity)
+    return false;
+  if (offset == aligned) {
+    // Already aligned: fold the 0x80 tag into the stored word itself.
+    IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = (c << 8) | 0x80);
+    IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c | (1 << 31));
+  } else {
+    // Tag byte in the gap, payload in the aligned word.
+    buffer[offset] = 0x80;
+    IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = c << 8);
+    IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c);
+  }
+  offset = aligned + sizeof(uchar);
+  return true;
+}
+
+// Dispatch on character width: ASCII characters are stored as single
+// bytes, everything else through the aligned non-ASCII encoding.
+// Returns false when the character does not fit in the buffer.
+bool CharacterStream::EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
+    unsigned& offset) {
+  return (c <= Utf8::kMaxOneByteChar)
+      ? EncodeAsciiCharacter(c, buffer, capacity, offset)
+      : EncodeNonAsciiCharacter(c, buffer, capacity, offset);
+}
+
+// Read one character from a buffer written by EncodeCharacter,
+// advancing *offset. A byte <= 0x7F is an ASCII character stored
+// in-place; anything larger is the 0x80 tag of a non-ASCII character
+// whose payload sits in the next 4-byte-aligned uchar (shifted by 8
+// bits on little-endian, tagged in the top bit on big-endian — the
+// exact inverse of EncodeNonAsciiCharacter).
+uchar CharacterStream::DecodeCharacter(const byte* buffer, unsigned* offset) {
+  byte b = buffer[*offset];
+  if (b <= Utf8::kMaxOneByteChar) {
+    (*offset)++;
+    return b;
+  } else {
+    unsigned aligned = (*offset + 0x3) & ~0x3;
+    *offset = aligned + sizeof(uchar);
+    IF_LITTLE(return *reinterpret_cast<const uchar*>(buffer + aligned) >> 8);
+    IF_BIG(return *reinterpret_cast<const uchar*>(buffer + aligned) &
+           ~(1 << 31));
+  }
+}
+
+#undef IF_LITTLE
+#undef IF_BIG
+
+// Refill the character buffer from the underlying reader R; ReadBlock
+// updates remaining_ and offset_ through the out parameters.
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::FillBuffer() {
+  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
+}
+
+// Reposition the buffer at the start of the input.
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Rewind() {
+  Reset(input_);
+}
+
+// (Re)attach this buffer to `input`, positioned at `position`, and
+// fill the first block of characters.
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Reset(unsigned position, I input) {
+  input_ = input;
+  offset_ = position;
+  cursor_ = 0;
+  remaining_ = 0;
+  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
+}
+
+// Attach this buffer to `input`, starting at the beginning.
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Reset(I input) {
+  Reset(0, input);
+}
+
+// Jump to an absolute position in the input and refill the buffer.
+// NOTE(review): unlike Reset, cursor_ is not cleared here before the
+// refill (remaining_ is overwritten by ReadBlock) — confirm callers
+// either reset it or deliberately rely on the stale value.
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Seek(unsigned position) {
+  offset_ = position;
+  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
+}
+
+// Convenience input buffer that decodes UTF-8 from a raw char array of
+// the given length.
+template <unsigned s>
+Utf8InputBuffer<s>::Utf8InputBuffer(const char* data, unsigned length)
+    : InputBuffer<Utf8, Buffer<const char*>, s>(Buffer<const char*>(data,
+                                                                    length)) {
+}
+
+} // namespace unibrow
+
+#endif // __UNIBROW_INL_H__
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This file was generated at 2008-06-12 16:11:05.556081
+
+#include "unicode-inl.h"
+#include <cstdlib>
+#include <cstdio>
+
+namespace unibrow {
+
+/**
+ * \file
+ * Implementations of functions for working with unicode.
+ */
+
+typedef signed short int16_t;
+typedef unsigned short uint16_t;
+
+// All access to the character table should go through this function.
+// D is the stride between logical entries: 1 for predicate tables and
+// 2 for mapping tables, which interleave a mapping word after each
+// entry (see kEntryDist in LookupPredicate / LookupMapping below).
+template <int D>
+static inline uchar TableGet(const uint16_t* table, int index) {
+  return table[D * index];
+}
+
+// The low 15 bits of a table entry hold the code point (the caller
+// reduces characters modulo 2^15 before searching).
+static inline uchar GetEntry(uint16_t entry) {
+  return entry & 0x7fff;
+}
+
+// The top bit of a table entry marks it as the start of a run of
+// consecutive code points that all share the property; code points
+// between such an entry and the next entry match it.
+static inline bool IsStart(uint16_t entry) {
+  return (entry & (1 << 15)) != 0;
+}
+
+/**
+ * Look up a character in the unicode table using a mix of binary and
+ * interpolation search. For a uniformly distributed array
+ * interpolation search beats binary search by a wide margin. However,
+ * in this case interpolation search degenerates because of some very
+ * high values in the lower end of the table so this function uses a
+ * combination. The average number of steps to look up the information
+ * about a character is around 10, slightly higher if there is no
+ * information available about the character.
+ */
+static bool LookupPredicate(const uint16_t* table, uint16_t size, uchar chr) {
+  static const int kEntryDist = 1;
+  // Entries store code points modulo 2^15; the caller has already
+  // dispatched on the high bits to pick this table chunk.
+  uint16_t value = chr & 0x7fff;
+  unsigned int low = 0;
+  unsigned int high = size - 1;
+  // Binary search for the greatest entry <= value.
+  while (high != low) {
+    unsigned int mid = low + ((high - low) >> 1);
+    uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
+    // If we've found an entry less than or equal to this one, and the
+    // next one is not also less than this one, we've arrived.
+    if ((current_value <= value) &&
+        (mid + 1 == size ||
+         GetEntry(TableGet<kEntryDist>(table, mid + 1)) > value)) {
+      low = mid;
+      break;
+    } else if (current_value < value) {
+      low = mid + 1;
+    } else if (current_value > value) {
+      // If we've just checked the bottom-most value and it's not
+      // the one we're looking for, we're done.
+      if (mid == 0) break;
+      high = mid - 1;
+    }
+  }
+  uint16_t field = TableGet<kEntryDist>(table, low);
+  // Match either an exact entry, or any value inside a run opened by
+  // an entry with the start bit set.
+  return (GetEntry(field) == value) ||
+         (GetEntry(field) < value && IsStart(field));
+}
+
+// A case-conversion result consisting of more than one character:
+// `length` valid code points stored in `chars`.
+// kMaxCaseConvertedSize is presumably declared in unicode.h (included
+// via unicode-inl.h) — not visible in this file.
+struct MultiCharacterSpecialCase {
+  uint16_t length;
+  uchar chars[kMaxCaseConvertedSize];
+};
+
+// Look up the mapping for the given character in the specified table,
+// which is of the specified length and uses the specified special case
+// mapping for multi-char mappings. The next parameter is the character
+// following the one to map. The result will be written in to the result
+// buffer and the number of characters written will be returned. Finally,
+// if the allow_caching_ptr is non-null then false will be stored in
+// it if the result contains multiple characters or depends on the
+// context.
+// Search an interleaved (entry, mapping) table using the same
+// binary-search strategy as LookupPredicate above; see the comment
+// block preceding this function for the calling contract. Fixes two
+// defects in the original: the mapping word was bound to a variable
+// that shadowed the search key `value`, and an unreachable
+// `return -1;` followed a switch whose every path returns.
+static int LookupMapping(const uint16_t* table, uint16_t size,
+    const MultiCharacterSpecialCase* multi_chars, uchar chr, uchar next,
+    uchar* result, bool* allow_caching_ptr) {
+  static const int kEntryDist = 2;
+  uint16_t value = chr & 0x7fff;
+  unsigned int low = 0;
+  unsigned int high = size - 1;
+  while (high != low) {
+    unsigned int mid = low + ((high - low) >> 1);
+    uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
+    // If we've found an entry less than or equal to this one, and the next one
+    // is not also less than this one, we've arrived.
+    if ((current_value <= value) &&
+        (mid + 1 == size ||
+         GetEntry(TableGet<kEntryDist>(table, mid + 1)) > value)) {
+      low = mid;
+      break;
+    } else if (current_value < value) {
+      low = mid + 1;
+    } else if (current_value > value) {
+      // If we've just checked the bottom-most value and it's not
+      // the one we're looking for, we're done.
+      if (mid == 0) break;
+      high = mid - 1;
+    }
+  }
+  uint16_t field = TableGet<kEntryDist>(table, low);
+  bool found = (GetEntry(field) == value) ||
+               (GetEntry(field) < value && IsStart(field));
+  if (!found) return 0;
+  // The mapping word follows its entry; signed, so constant offsets
+  // can be negative.
+  int16_t mapping = table[2 * low + 1];
+  if (mapping == 0) {
+    // 0 means not present.
+    return 0;
+  } else if ((mapping & 3) == 0) {
+    // Low bits 0 means a constant offset from the given character.
+    result[0] = chr + (mapping >> 2);
+    return 1;
+  } else if ((mapping & 3) == 1) {
+    // Low bits 1 means a (multi-character) special case mapping,
+    // which is never cached.
+    if (allow_caching_ptr) *allow_caching_ptr = false;
+    const MultiCharacterSpecialCase& special = multi_chars[mapping >> 2];
+    for (int i = 0; i < special.length; i++)
+      result[i] = special.chars[i];
+    return special.length;
+  } else {
+    // Low bits 2 means a context-dependent ("really really special")
+    // case, also never cached.
+    if (allow_caching_ptr) *allow_caching_ptr = false;
+    // The cases of this switch are defined in unicode.py in the
+    // really_special_cases mapping.
+    switch (mapping >> 2) {
+      case 1:
+        // Really special case 1: upper case sigma. This letter
+        // converts to two different lower case sigmas depending on
+        // whether or not it occurs at the end of a word.
+        if (next != 0 && Letter::Is(next)) {
+          result[0] = 0x03C3;
+        } else {
+          result[0] = 0x03C2;
+        }
+        return 1;
+      default:
+        return 0;
+    }
+  }
+}
+
+// Decode one non-ASCII UTF-8 sequence starting at str. On success the
+// decoded character is returned and *cursor advances past the whole
+// sequence; on any malformed, truncated or overlong input a single
+// byte is consumed and kBadChar returned so the caller can
+// resynchronize.
+uchar Utf8::CalculateValue(const byte* str,
+                           unsigned length,
+                           unsigned* cursor) {
+  static const uchar kMaxOneByteChar = 0x7F;
+  static const uchar kMaxTwoByteChar = 0x7FF;
+  static const uchar kMaxThreeByteChar = 0xFFFF;
+  static const uchar kMaxFourByteChar = 0x1FFFFF;
+
+  // We only get called for non-ascii characters.
+  if (length == 1) {
+    // A multi-byte sequence cannot fit in one remaining byte.
+    *cursor += 1;
+    return kBadChar;
+  }
+  int first = str[0];
+  // XOR flips the 0x80 tag of a well-formed continuation byte to zero.
+  int second = str[1] ^ 0x80;
+  if (second & 0xC0) {
+    // str[1] is not a continuation byte (10xxxxxx).
+    *cursor += 1;
+    return kBadChar;
+  }
+  if (first < 0xE0) {
+    if (first < 0xC0) {
+      // A stray continuation byte cannot start a sequence.
+      *cursor += 1;
+      return kBadChar;
+    }
+    uchar l = ((first << 6) | second) & kMaxTwoByteChar;
+    if (l <= kMaxOneByteChar) {
+      // Overlong two-byte encoding of a one-byte character.
+      *cursor += 1;
+      return kBadChar;
+    }
+    *cursor += 2;
+    return l;
+  }
+  if (length == 2) {
+    // Truncated three-byte (or longer) sequence.
+    *cursor += 1;
+    return kBadChar;
+  }
+  int third = str[2] ^ 0x80;
+  if (third & 0xC0) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  if (first < 0xF0) {
+    uchar l = ((((first << 6) | second) << 6) | third) & kMaxThreeByteChar;
+    if (l <= kMaxTwoByteChar) {
+      // Overlong three-byte encoding.
+      *cursor += 1;
+      return kBadChar;
+    }
+    *cursor += 3;
+    return l;
+  }
+  if (length == 3) {
+    // Truncated four-byte sequence.
+    *cursor += 1;
+    return kBadChar;
+  }
+  int fourth = str[3] ^ 0x80;
+  if (fourth & 0xC0) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  if (first < 0xF8) {
+    uchar l = (((((first << 6 | second) << 6) | third) << 6) | fourth) &
+              kMaxFourByteChar;
+    if (l <= kMaxThreeByteChar) {
+      // Overlong four-byte encoding.
+      *cursor += 1;
+      return kBadChar;
+    }
+    *cursor += 4;
+    return l;
+  }
+  // Lead bytes 0xF8 and above never start a valid sequence here.
+  *cursor += 1;
+  return kBadChar;
+}
+
+// Supply the next block of characters from str starting at
+// *offset_ptr. Returns either a pointer directly into the string (for
+// a leading run of ASCII bytes — zero copy) or `buffer` filled via the
+// CharacterStream encoding, or NULL at end of input. The number of
+// characters made available is stored in *chars_read_ptr and
+// *offset_ptr is advanced past the bytes consumed.
+const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
+    unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
+  unsigned offset = *offset_ptr;
+  // Bail out early if we've reached the end of the string.
+  if (offset == str.length()) {
+    *chars_read_ptr = 0;
+    return NULL;
+  }
+  const byte* data = reinterpret_cast<const byte*>(str.data());
+  if (data[offset] <= kMaxOneByteChar) {
+    // The next character is an ascii char so we scan forward over
+    // the following ascii characters and return the next pure ascii
+    // substring
+    const byte* result = data + offset;
+    offset++;
+    while ((offset < str.length()) && (data[offset] <= kMaxOneByteChar))
+      offset++;
+    // One byte per character in the ASCII run.
+    *chars_read_ptr = offset - *offset_ptr;
+    *offset_ptr = offset;
+    return result;
+  } else {
+    // The next character is non-ascii so we just fill the buffer
+    unsigned cursor = 0;
+    unsigned chars_read = 0;
+    while (offset < str.length()) {
+      uchar c = data[offset];
+      if (c <= kMaxOneByteChar) {
+        // Fast case for ascii characters
+        if (!CharacterStream::EncodeAsciiCharacter(c,
+                                                   buffer,
+                                                   capacity,
+                                                   cursor))
+          break;  // Buffer full; stop and report what we have.
+        offset += 1;
+      } else {
+        // Decode the UTF-8 sequence, then re-encode it in the
+        // internal buffer format.
+        unsigned chars = 0;
+        c = Utf8::ValueOf(data + offset, str.length() - offset, &chars);
+        if (!CharacterStream::EncodeNonAsciiCharacter(c,
+                                                      buffer,
+                                                      capacity,
+                                                      cursor))
+          break;
+        offset += chars;
+      }
+      chars_read++;
+    }
+    *offset_ptr = offset;
+    *chars_read_ptr = chars_read;
+    return buffer;
+  }
+}
+
+// Count the characters in the stream by scanning to the end, then
+// rewind back to the start before returning.
+unsigned CharacterStream::Length() {
+  unsigned count = 0;
+  while (has_more()) {
+    GetNext();
+    count++;
+  }
+  Rewind();
+  return count;
+}
+
+// Position the stream at the given character index by rewinding to
+// the start and skipping `position` characters.
+void CharacterStream::Seek(unsigned position) {
+  Rewind();
+  for (unsigned skipped = 0; skipped < position; skipped++) {
+    GetNext();
+  }
+}
+
+// Uppercase: point.category == 'Lu'
+
+static const uint16_t kUppercaseTable0Size = 509;
+static const uint16_t kUppercaseTable0[509] = { 32833, 90, 32960, 214, 32984, 222, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 313, 315, 317, 319, 321, 323, 325, 327, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 33144, 377, 379, 381, 33153, 386, 388, 33158, 391, 33161, 395, 33166, 401, 33171, 404, 33174, 408, 33180, 413, 33183, 416, 418, 420, 33190, 423, 425, 428, 33198, 431, 33201, 435, 437, 33207, 440, 444, 452, 455, 458, 461, 463, 465, 467, 469, 471, 473, 475, 478, 480, 482, 484, 486, 488, 490, 492, 494, 497, 500, 33270, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 33338, 571, 33341, 574, 577, 33347, 582, 584, 586, 588, 590, 902, 33672, 906, 908, 33678, 911, 33681, 929, 33699, 939, 33746, 980, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1012, 1015, 33785, 1018, 33789, 1071, 1120, 1122, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1162, 1164, 1166, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210, 1212, 1214, 33984, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1232, 1234, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 34097, 1366, 37024, 4293, 7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702, 7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718, 7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734, 7736, 7738, 7740, 7742, 7744, 7746, 7748, 7750, 7752, 7754, 7756, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, 7800, 7802, 
7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902, 7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920, 7922, 7924, 7926, 7928, 40712, 7951, 40728, 7965, 40744, 7983, 40760, 7999, 40776, 8013, 8025, 8027, 8029, 8031, 40808, 8047, 40888, 8123, 40904, 8139, 40920, 8155, 40936, 8172, 40952, 8187, 8450, 8455, 41227, 8461, 41232, 8466, 8469, 41241, 8477, 8484, 8486, 8488, 41258, 8493, 41264, 8499, 41278, 8511, 8517, 8579, 44032, 11310, 11360, 44130, 11364, 11367, 11369, 11371, 11381, 11392, 11394, 11396, 11398, 11400, 11402, 11404, 11406, 11408, 11410, 11412, 11414, 11416, 11418, 11420, 11422, 11424, 11426, 11428, 11430, 11432, 11434, 11436, 11438, 11440, 11442, 11444, 11446, 11448, 11450, 11452, 11454, 11456, 11458, 11460, 11462, 11464, 11466, 11468, 11470, 11472, 11474, 11476, 11478, 11480, 11482, 11484, 11486, 11488, 11490 };
+static const uint16_t kUppercaseTable1Size = 2;
+static const uint16_t kUppercaseTable1[2] = { 65313, 32570 };
+static const uint16_t kUppercaseTable2Size = 2;
+static const uint16_t kUppercaseTable2[2] = { 33792, 1063 };
+static const uint16_t kUppercaseTable3Size = 58;
+static const uint16_t kUppercaseTable3[58] = { 54272, 21529, 54324, 21581, 54376, 21633, 21660, 54430, 21663, 21666, 54437, 21670, 54441, 21676, 54446, 21685, 54480, 21737, 54532, 21765, 54535, 21770, 54541, 21780, 54550, 21788, 54584, 21817, 54587, 21822, 54592, 21828, 21830, 54602, 21840, 54636, 21893, 54688, 21945, 54740, 21997, 54792, 22049, 54844, 22101, 54896, 22153, 54952, 22208, 55010, 22266, 55068, 22324, 55126, 22382, 55184, 22440, 22474 };
+// Test whether c has Unicode general category Lu. The generated data
+// is split into 32K chunks; dispatch on the chunk index and search the
+// matching table, treating unknown chunks as "no".
+bool Uppercase::Is(uchar c) {
+  switch (c >> 15) {
+    case 0:
+      return LookupPredicate(kUppercaseTable0, kUppercaseTable0Size, c);
+    case 1:
+      return LookupPredicate(kUppercaseTable1, kUppercaseTable1Size, c);
+    case 2:
+      return LookupPredicate(kUppercaseTable2, kUppercaseTable2Size, c);
+    case 3:
+      return LookupPredicate(kUppercaseTable3, kUppercaseTable3Size, c);
+    default:
+      return false;
+  }
+}
+
+// Lowercase: point.category == 'Ll'
+
+static const uint16_t kLowercaseTable0Size = 528;
+static const uint16_t kLowercaseTable0[528] = { 32865, 122, 170, 181, 186, 32991, 246, 33016, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 33079, 312, 314, 316, 318, 320, 322, 324, 326, 33096, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 378, 380, 33150, 384, 387, 389, 392, 33164, 397, 402, 405, 33177, 411, 414, 417, 419, 421, 424, 33194, 427, 429, 432, 436, 438, 33209, 442, 33213, 447, 454, 457, 460, 462, 464, 466, 468, 470, 472, 474, 33244, 477, 479, 481, 483, 485, 487, 489, 491, 493, 33263, 496, 499, 501, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 33331, 569, 572, 33343, 576, 578, 583, 585, 587, 589, 33359, 659, 33429, 687, 33659, 893, 912, 33708, 974, 33744, 977, 33749, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 33775, 1011, 1013, 1016, 33787, 1020, 33840, 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149, 1151, 1153, 1163, 1165, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1218, 1220, 1222, 1224, 1226, 1228, 33998, 1231, 1233, 1235, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 34145, 1415, 40192, 7467, 40290, 7543, 40313, 7578, 7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707, 7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723, 7725, 7727, 7729, 7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, 7795, 7797, 7799, 
7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, 7827, 40597, 7835, 7841, 7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897, 7899, 7901, 7903, 7905, 7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921, 7923, 7925, 7927, 7929, 40704, 7943, 40720, 7957, 40736, 7975, 40752, 7991, 40768, 8005, 40784, 8023, 40800, 8039, 40816, 8061, 40832, 8071, 40848, 8087, 40864, 8103, 40880, 8116, 40886, 8119, 8126, 40898, 8132, 40902, 8135, 40912, 8147, 40918, 8151, 40928, 8167, 40946, 8180, 40950, 8183, 8305, 8319, 8458, 41230, 8463, 8467, 8495, 8500, 8505, 41276, 8509, 41286, 8521, 8526, 8580, 44080, 11358, 11361, 44133, 11366, 11368, 11370, 11372, 11380, 44150, 11383, 11393, 11395, 11397, 11399, 11401, 11403, 11405, 11407, 11409, 11411, 11413, 11415, 11417, 11419, 11421, 11423, 11425, 11427, 11429, 11431, 11433, 11435, 11437, 11439, 11441, 11443, 11445, 11447, 11449, 11451, 11453, 11455, 11457, 11459, 11461, 11463, 11465, 11467, 11469, 11471, 11473, 11475, 11477, 11479, 11481, 11483, 11485, 11487, 11489, 44259, 11492, 44288, 11557 };
+static const uint16_t kLowercaseTable1Size = 6;
+static const uint16_t kLowercaseTable1[6] = { 64256, 31494, 64275, 31511, 65345, 32602 };
+static const uint16_t kLowercaseTable2Size = 2;
+static const uint16_t kLowercaseTable2[2] = { 33832, 1103 };
+static const uint16_t kLowercaseTable3Size = 54;
+static const uint16_t kLowercaseTable3[54] = { 54298, 21555, 54350, 21588, 54358, 21607, 54402, 21659, 54454, 21689, 21691, 54461, 21699, 54469, 21711, 54506, 21763, 54558, 21815, 54610, 21867, 54662, 21919, 54714, 21971, 54766, 22023, 54818, 22075, 54870, 22127, 54922, 22181, 54978, 22234, 55004, 22241, 55036, 22292, 55062, 22299, 55094, 22350, 55120, 22357, 55152, 22408, 55178, 22415, 55210, 22466, 55236, 22473, 22475 };
+// Test whether c has Unicode general category Ll. Same chunked-table
+// dispatch as Uppercase::Is; unknown chunks yield "no".
+bool Lowercase::Is(uchar c) {
+  switch (c >> 15) {
+    case 0:
+      return LookupPredicate(kLowercaseTable0, kLowercaseTable0Size, c);
+    case 1:
+      return LookupPredicate(kLowercaseTable1, kLowercaseTable1Size, c);
+    case 2:
+      return LookupPredicate(kLowercaseTable2, kLowercaseTable2Size, c);
+    case 3:
+      return LookupPredicate(kLowercaseTable3, kLowercaseTable3Size, c);
+    default:
+      return false;
+  }
+}
+
+// Letter: point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo' ]
+
+static const uint16_t kLetterTable0Size = 475;
+static const uint16_t kLetterTable0[475] = { 32833, 90, 32865, 122, 170, 181, 186, 32960, 214, 32984, 246, 33016, 705, 33478, 721, 33504, 740, 750, 33658, 893, 902, 33672, 906, 908, 33678, 929, 33699, 974, 33744, 1013, 33783, 1153, 33930, 1299, 34097, 1366, 1369, 34145, 1415, 34256, 1514, 34288, 1522, 34337, 1594, 34368, 1610, 34414, 1647, 34417, 1747, 1749, 34533, 1766, 34542, 1775, 34554, 1788, 1791, 1808, 34578, 1839, 34637, 1901, 34688, 1957, 1969, 34762, 2026, 34804, 2037, 2042, 35076, 2361, 2365, 2384, 35160, 2401, 35195, 2431, 35205, 2444, 35215, 2448, 35219, 2472, 35242, 2480, 2482, 35254, 2489, 2493, 2510, 35292, 2525, 35295, 2529, 35312, 2545, 35333, 2570, 35343, 2576, 35347, 2600, 35370, 2608, 35378, 2611, 35381, 2614, 35384, 2617, 35417, 2652, 2654, 35442, 2676, 35461, 2701, 35471, 2705, 35475, 2728, 35498, 2736, 35506, 2739, 35509, 2745, 2749, 2768, 35552, 2785, 35589, 2828, 35599, 2832, 35603, 2856, 35626, 2864, 35634, 2867, 35637, 2873, 2877, 35676, 2909, 35679, 2913, 2929, 2947, 35717, 2954, 35726, 2960, 35730, 2965, 35737, 2970, 2972, 35742, 2975, 35747, 2980, 35752, 2986, 35758, 3001, 35845, 3084, 35854, 3088, 35858, 3112, 35882, 3123, 35893, 3129, 35936, 3169, 35973, 3212, 35982, 3216, 35986, 3240, 36010, 3251, 36021, 3257, 3261, 3294, 36064, 3297, 36101, 3340, 36110, 3344, 36114, 3368, 36138, 3385, 36192, 3425, 36229, 3478, 36250, 3505, 36275, 3515, 3517, 36288, 3526, 36353, 3632, 36402, 3635, 36416, 3654, 36481, 3714, 3716, 36487, 3720, 3722, 3725, 36500, 3735, 36505, 3743, 36513, 3747, 3749, 3751, 36522, 3755, 36525, 3760, 36530, 3763, 3773, 36544, 3780, 3782, 36572, 3805, 3840, 36672, 3911, 36681, 3946, 36744, 3979, 36864, 4129, 36899, 4135, 36905, 4138, 36944, 4181, 37024, 4293, 37072, 4346, 4348, 37120, 4441, 37215, 4514, 37288, 4601, 37376, 4680, 37450, 4685, 37456, 4694, 4696, 37466, 4701, 37472, 4744, 37514, 4749, 37520, 4784, 37554, 4789, 37560, 4798, 4800, 37570, 4805, 37576, 4822, 37592, 4880, 37650, 4885, 37656, 4954, 37760, 5007, 
37792, 5108, 37889, 5740, 38511, 5750, 38529, 5786, 38560, 5866, 38656, 5900, 38670, 5905, 38688, 5937, 38720, 5969, 38752, 5996, 38766, 6000, 38784, 6067, 6103, 6108, 38944, 6263, 39040, 6312, 39168, 6428, 39248, 6509, 39280, 6516, 39296, 6569, 39361, 6599, 39424, 6678, 39685, 6963, 39749, 6987, 40192, 7615, 40448, 7835, 40608, 7929, 40704, 7957, 40728, 7965, 40736, 8005, 40776, 8013, 40784, 8023, 8025, 8027, 8029, 40799, 8061, 40832, 8116, 40886, 8124, 8126, 40898, 8132, 40902, 8140, 40912, 8147, 40918, 8155, 40928, 8172, 40946, 8180, 40950, 8188, 8305, 8319, 41104, 8340, 8450, 8455, 41226, 8467, 8469, 41241, 8477, 8484, 8486, 8488, 41258, 8493, 41263, 8505, 41276, 8511, 41285, 8521, 8526, 41347, 8580, 44032, 11310, 44080, 11358, 44128, 11372, 44148, 11383, 44160, 11492, 44288, 11557, 44336, 11621, 11631, 44416, 11670, 44448, 11686, 44456, 11694, 44464, 11702, 44472, 11710, 44480, 11718, 44488, 11726, 44496, 11734, 44504, 11742, 45061, 12294, 45105, 12341, 45115, 12348, 45121, 12438, 45213, 12447, 45217, 12538, 45308, 12543, 45317, 12588, 45361, 12686, 45472, 12727, 45552, 12799, 13312, 19893, 19968 };
+static const uint16_t kLetterTable1Size = 67;
+static const uint16_t kLetterTable1[67] = { 8123, 40960, 9356, 42775, 10010, 43008, 10241, 43011, 10245, 43015, 10250, 43020, 10274, 43072, 10355, 11264, 22435, 63744, 31277, 64048, 31338, 64112, 31449, 64256, 31494, 64275, 31511, 31517, 64287, 31528, 64298, 31542, 64312, 31548, 31550, 64320, 31553, 64323, 31556, 64326, 31665, 64467, 32061, 64848, 32143, 64914, 32199, 65008, 32251, 65136, 32372, 65142, 32508, 65313, 32570, 65345, 32602, 65382, 32702, 65474, 32711, 65482, 32719, 65490, 32727, 65498, 32732 };
+static const uint16_t kLetterTable2Size = 48;
+static const uint16_t kLetterTable2[48] = { 32768, 11, 32781, 38, 32808, 58, 32828, 61, 32831, 77, 32848, 93, 32896, 250, 33536, 798, 33584, 832, 33602, 841, 33664, 925, 33696, 963, 33736, 975, 33792, 1181, 34816, 2053, 2056, 34826, 2101, 34871, 2104, 2108, 2111, 35072, 2325, 2560, 35344, 2579, 35349, 2583, 35353, 2611, 40960, 9070 };
+static const uint16_t kLetterTable3Size = 57;
+static const uint16_t kLetterTable3[57] = { 54272, 21588, 54358, 21660, 54430, 21663, 21666, 54437, 21670, 54441, 21676, 54446, 21689, 21691, 54461, 21699, 54469, 21765, 54535, 21770, 54541, 21780, 54550, 21788, 54558, 21817, 54587, 21822, 54592, 21828, 21830, 54602, 21840, 54610, 22181, 54952, 22208, 54978, 22234, 55004, 22266, 55036, 22292, 55062, 22324, 55094, 22350, 55120, 22382, 55152, 22408, 55178, 22440, 55210, 22466, 55236, 22475 };
+static const uint16_t kLetterTable4Size = 1;
+static const uint16_t kLetterTable4[1] = { 0 };
+static const uint16_t kLetterTable5Size = 3;
+static const uint16_t kLetterTable5[3] = { 9942, 63488, 31261 };
+// Generated predicate for the letter categories (see the generator
+// comment above this group).  The code point is partitioned into
+// 32K-codepoint chunks (c >> 15) and each chunk has its own table.
+// Entries with bit 15 (0x8000) set appear to open a range whose
+// inclusive end is the following entry; bare entries are singletons
+// (inferred from the data -- confirm against LookupPredicate, which
+// is defined elsewhere in this file).
+bool Letter::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kLetterTable0,
+ kLetterTable0Size,
+ c);
+ case 1: return LookupPredicate(kLetterTable1,
+ kLetterTable1Size,
+ c);
+ case 2: return LookupPredicate(kLetterTable2,
+ kLetterTable2Size,
+ c);
+ case 3: return LookupPredicate(kLetterTable3,
+ kLetterTable3Size,
+ c);
+ case 4: return LookupPredicate(kLetterTable4,
+ kLetterTable4Size,
+ c);
+ case 5: return LookupPredicate(kLetterTable5,
+ kLetterTable5Size,
+ c);
+ // Code points in chunks with no table are never letters.
+ default: return false;
+ }
+}
+
+// Space: point.category == 'Zs'
+
+static const uint16_t kSpaceTable0Size = 9;
+// Zs code points.  High-bit entries open a range ending at the next
+// entry -- e.g. 40960 (0x8000|8192) followed by 8202 spans
+// U+2000..U+200A (inferred from the data; confirm in LookupPredicate).
+static const uint16_t kSpaceTable0[9] = { 32, 160, 5760, 6158, 40960, 8202, 8239, 8287, 12288 };
+// Generated predicate: true when c has General_Category Zs.  All Zs
+// characters fall in the first 32K chunk, hence the single case.
+bool Space::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kSpaceTable0,
+ kSpaceTable0Size,
+ c);
+ default: return false;
+ }
+}
+
+// Titlecase: point.category == 'Lt'
+
+static const uint16_t kTitlecaseTable0Size = 13;
+// Lt code points; entries with 0x8000 set appear to start a range
+// ending at the next entry (confirm in LookupPredicate).
+static const uint16_t kTitlecaseTable0[13] = { 453, 456, 459, 498, 40840, 8079, 40856, 8095, 40872, 8111, 8124, 8140, 8188 };
+// Generated predicate: true when c has General_Category Lt
+// (titlecase letter).  Only chunk 0 contains any Lt characters.
+bool Titlecase::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kTitlecaseTable0,
+ kTitlecaseTable0Size,
+ c);
+ default: return false;
+ }
+}
+
+// Number: point.category in ['Nd', 'Nl', 'No' ]
+
+static const uint16_t kNumberTable0Size = 86;
+static const uint16_t kNumberTable0[86] = { 32816, 57, 32946, 179, 185, 32956, 190, 34400, 1641, 34544, 1785, 34752, 1993, 35174, 2415, 35302, 2543, 35316, 2553, 35430, 2671, 35558, 2799, 35686, 2927, 35814, 3058, 35942, 3183, 36070, 3311, 36198, 3439, 36432, 3673, 36560, 3801, 36640, 3891, 36928, 4169, 37737, 4988, 38638, 5872, 38880, 6121, 38896, 6137, 38928, 6169, 39238, 6479, 39376, 6617, 39760, 7001, 8304, 41076, 8313, 41088, 8329, 41299, 8578, 42080, 9371, 42218, 9471, 42870, 10131, 11517, 12295, 45089, 12329, 45112, 12346, 45458, 12693, 45600, 12841, 45649, 12895, 45696, 12937, 45745, 12991 };
+static const uint16_t kNumberTable1Size = 2;
+static const uint16_t kNumberTable1[2] = { 65296, 32537 };
+static const uint16_t kNumberTable2Size = 19;
+static const uint16_t kNumberTable2[19] = { 33031, 307, 33088, 376, 394, 33568, 803, 833, 842, 33745, 981, 33952, 1193, 35094, 2329, 35392, 2631, 41984, 9314 };
+static const uint16_t kNumberTable3Size = 4;
+static const uint16_t kNumberTable3[4] = { 54112, 21361, 55246, 22527 };
+// Generated predicate: true when c has General_Category Nd, Nl or No
+// (see the generator comment above).  Per-32K-chunk tables; table
+// values are chunk-relative with 0x8000 marking range starts
+// (inferred from the data -- confirm against LookupPredicate).
+bool Number::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kNumberTable0,
+ kNumberTable0Size,
+ c);
+ case 1: return LookupPredicate(kNumberTable1,
+ kNumberTable1Size,
+ c);
+ case 2: return LookupPredicate(kNumberTable2,
+ kNumberTable2Size,
+ c);
+ case 3: return LookupPredicate(kNumberTable3,
+ kNumberTable3Size,
+ c);
+ default: return false;
+ }
+}
+
+// DecimalDigit: point.category == 'Nd'
+
+static const uint16_t kDecimalDigitTable0Size = 44;
+// First pair 32816 (0x8000|48), 57 covers ASCII '0'..'9'; the rest
+// are the other scripts' digit runs (inferred; confirm in
+// LookupPredicate).
+static const uint16_t kDecimalDigitTable0[44] = { 32816, 57, 34400, 1641, 34544, 1785, 34752, 1993, 35174, 2415, 35302, 2543, 35430, 2671, 35558, 2799, 35686, 2927, 35814, 3055, 35942, 3183, 36070, 3311, 36198, 3439, 36432, 3673, 36560, 3801, 36640, 3881, 36928, 4169, 38880, 6121, 38928, 6169, 39238, 6479, 39376, 6617, 39760, 7001 };
+static const uint16_t kDecimalDigitTable1Size = 2;
+static const uint16_t kDecimalDigitTable1[2] = { 65296, 32537 };
+static const uint16_t kDecimalDigitTable2Size = 2;
+static const uint16_t kDecimalDigitTable2[2] = { 33952, 1193 };
+static const uint16_t kDecimalDigitTable3Size = 2;
+static const uint16_t kDecimalDigitTable3[2] = { 55246, 22527 };
+// Generated predicate: true when c has General_Category Nd
+// (decimal digit).  Same chunked dispatch as the other predicates.
+bool DecimalDigit::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kDecimalDigitTable0,
+ kDecimalDigitTable0Size,
+ c);
+ case 1: return LookupPredicate(kDecimalDigitTable1,
+ kDecimalDigitTable1Size,
+ c);
+ case 2: return LookupPredicate(kDecimalDigitTable2,
+ kDecimalDigitTable2Size,
+ c);
+ case 3: return LookupPredicate(kDecimalDigitTable3,
+ kDecimalDigitTable3Size,
+ c);
+ default: return false;
+ }
+}
+
+// Ideographic: 'Id' in point.properties
+
+static const uint16_t kIdeographicTable0Size = 9;
+static const uint16_t kIdeographicTable0[9] = { 45062, 12295, 45089, 12329, 45112, 12346, 13312, 19893, 19968 };
+static const uint16_t kIdeographicTable1Size = 5;
+static const uint16_t kIdeographicTable1[5] = { 8123, 63744, 31277, 64112, 31449 };
+static const uint16_t kIdeographicTable4Size = 1;
+static const uint16_t kIdeographicTable4[1] = { 0 };
+static const uint16_t kIdeographicTable5Size = 3;
+static const uint16_t kIdeographicTable5[3] = { 9942, 63488, 31261 };
+// Generated predicate for the 'Id' (ideographic) property.  Chunks
+// 2 and 3 contain no ideographs, hence the gap in the cases; chunks
+// 4 and 5 cover the supplementary CJK planes (table values are
+// chunk-relative -- confirm against LookupPredicate).
+bool Ideographic::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kIdeographicTable0,
+ kIdeographicTable0Size,
+ c);
+ case 1: return LookupPredicate(kIdeographicTable1,
+ kIdeographicTable1Size,
+ c);
+ case 4: return LookupPredicate(kIdeographicTable4,
+ kIdeographicTable4Size,
+ c);
+ case 5: return LookupPredicate(kIdeographicTable5,
+ kIdeographicTable5Size,
+ c);
+ default: return false;
+ }
+}
+
+// WhiteSpace: 'Ws' in point.properties
+
+static const uint16_t kWhiteSpaceTable0Size = 14;
+// 'Ws' property: the Zs set plus control whitespace -- the leading
+// pair 32777 (0x8000|9), 13 covers TAB..CR (inferred from the data;
+// confirm in LookupPredicate).
+static const uint16_t kWhiteSpaceTable0[14] = { 32777, 13, 32, 133, 160, 5760, 6158, 40960, 8202, 41000, 8233, 8239, 8287, 12288 };
+// Generated predicate for the white-space property; all members are
+// in the first 32K chunk.
+bool WhiteSpace::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kWhiteSpaceTable0,
+ kWhiteSpaceTable0Size,
+ c);
+ default: return false;
+ }
+}
+
+// HexDigit: 'Hd' in point.properties
+
+static const uint16_t kHexDigitTable0Size = 6;
+// Ranges '0'-'9', 'A'-'F', 'a'-'f' (32816 = 0x8000|48, etc.).
+static const uint16_t kHexDigitTable0[6] = { 32816, 57, 32833, 70, 32865, 102 };
+static const uint16_t kHexDigitTable1Size = 6;
+// Fullwidth forms in chunk 1 (values are chunk-relative -- confirm
+// against LookupPredicate).
+static const uint16_t kHexDigitTable1[6] = { 65296, 32537, 65313, 32550, 65345, 32582 };
+// Generated predicate for the 'Hd' (hex digit) property: ASCII and
+// fullwidth hex digits.
+bool HexDigit::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kHexDigitTable0,
+ kHexDigitTable0Size,
+ c);
+ case 1: return LookupPredicate(kHexDigitTable1,
+ kHexDigitTable1Size,
+ c);
+ default: return false;
+ }
+}
+
+// AsciiHexDigit: 'Ah' in point.properties
+
+static const uint16_t kAsciiHexDigitTable0Size = 6;
+// Ranges '0'-'9', 'A'-'F', 'a'-'f' only (no fullwidth forms).
+static const uint16_t kAsciiHexDigitTable0[6] = { 32816, 57, 32833, 70, 32865, 102 };
+// Generated predicate for the 'Ah' (ASCII hex digit) property.
+bool AsciiHexDigit::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kAsciiHexDigitTable0,
+ kAsciiHexDigitTable0Size,
+ c);
+ default: return false;
+ }
+}
+
+// BidiControl: 'Bc' in point.properties
+
+static const uint16_t kBidiControlTable0Size = 4;
+// Two ranges: 40974 (0x8000|8206)..8207 and 41002 (0x8000|8234)..8238,
+// i.e. the LRM/RLM and LRE..RLO bidi controls (inferred; confirm in
+// LookupPredicate).
+static const uint16_t kBidiControlTable0[4] = { 40974, 8207, 41002, 8238 };
+// Generated predicate for the 'Bc' (bidi control) property.
+bool BidiControl::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kBidiControlTable0,
+ kBidiControlTable0Size,
+ c);
+ default: return false;
+ }
+}
+
+// JoinControl: 'Jc' in point.properties
+
+static const uint16_t kJoinControlTable0Size = 2;
+// Single range 40972 (0x8000|8204)..8205: ZWNJ and ZWJ.
+static const uint16_t kJoinControlTable0[2] = { 40972, 8205 };
+// Generated predicate for the 'Jc' (join control) property.
+bool JoinControl::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kJoinControlTable0,
+ kJoinControlTable0Size,
+ c);
+ default: return false;
+ }
+}
+
+// Dash: 'Dh' in point.properties
+
+static const uint16_t kDashTable0Size = 14;
+static const uint16_t kDashTable0[14] = { 45, 1418, 1470, 6150, 40976, 8213, 8275, 8315, 8331, 8722, 11799, 12316, 12336, 12448 };
+static const uint16_t kDashTable1Size = 5;
+// Chunk-1 entries are chunk-relative (c & 0x7fff) -- confirm against
+// LookupPredicate.
+static const uint16_t kDashTable1[5] = { 65073, 32306, 32344, 32355, 32525 };
+// Generated predicate for the 'Dh' (dash) property.
+bool Dash::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kDashTable0,
+ kDashTable0Size,
+ c);
+ case 1: return LookupPredicate(kDashTable1,
+ kDashTable1Size,
+ c);
+ default: return false;
+ }
+}
+
+// Hyphen: 'Hp' in point.properties
+
+static const uint16_t kHyphenTable0Size = 8;
+// Includes U+002D HYPHEN-MINUS (45) and U+00AD SOFT HYPHEN (173).
+static const uint16_t kHyphenTable0[8] = { 45, 173, 1418, 6150, 40976, 8209, 11799, 12539 };
+static const uint16_t kHyphenTable1Size = 3;
+static const uint16_t kHyphenTable1[3] = { 32355, 32525, 32613 };
+// Generated predicate for the 'Hp' (hyphen) property.
+bool Hyphen::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kHyphenTable0,
+ kHyphenTable0Size,
+ c);
+ case 1: return LookupPredicate(kHyphenTable1,
+ kHyphenTable1Size,
+ c);
+ default: return false;
+ }
+}
+
+// LineTerminator: 'Lt' in point.properties
+
+static const uint16_t kLineTerminatorTable0Size = 4;
+// LF, CR, and the range 41000 (0x8000|8232)..8233 = LS/PS; matches
+// the ECMAScript LineTerminator set (inferred from the data).
+static const uint16_t kLineTerminatorTable0[4] = { 10, 13, 41000, 8233 };
+// Generated predicate for the 'Lt' (line terminator) property --
+// note this is the generator's property flag, distinct from the
+// General_Category abbreviation Lt used by Titlecase above.
+bool LineTerminator::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kLineTerminatorTable0,
+ kLineTerminatorTable0Size,
+ c);
+ default: return false;
+ }
+}
+
+// CombiningMark: point.category in ['Mn', 'Mc']
+
+static const uint16_t kCombiningMarkTable0Size = 214;
+static const uint16_t kCombiningMarkTable0[214] = { 33536, 879, 33923, 1158, 34193, 1469, 1471, 34241, 1474, 34244, 1477, 1479, 34320, 1557, 34379, 1630, 1648, 34518, 1756, 34527, 1764, 34535, 1768, 34538, 1773, 1809, 34608, 1866, 34726, 1968, 34795, 2035, 35073, 2307, 2364, 35134, 2381, 35153, 2388, 35170, 2403, 35201, 2435, 2492, 35262, 2500, 35271, 2504, 35275, 2509, 2519, 35298, 2531, 35329, 2563, 2620, 35390, 2626, 35399, 2632, 35403, 2637, 35440, 2673, 35457, 2691, 2748, 35518, 2757, 35527, 2761, 35531, 2765, 35554, 2787, 35585, 2819, 2876, 35646, 2883, 35655, 2888, 35659, 2893, 35670, 2903, 2946, 35774, 3010, 35782, 3016, 35786, 3021, 3031, 35841, 3075, 35902, 3140, 35910, 3144, 35914, 3149, 35925, 3158, 35970, 3203, 3260, 36030, 3268, 36038, 3272, 36042, 3277, 36053, 3286, 36066, 3299, 36098, 3331, 36158, 3395, 36166, 3400, 36170, 3405, 3415, 36226, 3459, 3530, 36303, 3540, 3542, 36312, 3551, 36338, 3571, 3633, 36404, 3642, 36423, 3662, 3761, 36532, 3769, 36539, 3772, 36552, 3789, 36632, 3865, 3893, 3895, 3897, 36670, 3903, 36721, 3972, 36742, 3975, 36752, 3991, 36761, 4028, 4038, 36908, 4146, 36918, 4153, 36950, 4185, 4959, 38674, 5908, 38706, 5940, 38738, 5971, 38770, 6003, 38838, 6099, 6109, 38923, 6157, 6313, 39200, 6443, 39216, 6459, 39344, 6592, 39368, 6601, 39447, 6683, 39680, 6916, 39732, 6980, 39787, 7027, 40384, 7626, 40446, 7679, 41168, 8412, 8417, 41189, 8431, 45098, 12335, 45209, 12442 };
+static const uint16_t kCombiningMarkTable1Size = 10;
+static const uint16_t kCombiningMarkTable1[10] = { 10242, 10246, 10251, 43043, 10279, 31518, 65024, 32271, 65056, 32291 };
+static const uint16_t kCombiningMarkTable2Size = 9;
+static const uint16_t kCombiningMarkTable2[9] = { 35329, 2563, 35333, 2566, 35340, 2575, 35384, 2618, 2623 };
+static const uint16_t kCombiningMarkTable3Size = 12;
+static const uint16_t kCombiningMarkTable3[12] = { 53605, 20841, 53613, 20850, 53627, 20866, 53637, 20875, 53674, 20909, 53826, 21060 };
+static const uint16_t kCombiningMarkTable28Size = 2;
+static const uint16_t kCombiningMarkTable28[2] = { 33024, 495 };
+// Generated predicate: true when c has General_Category Mn or Mc
+// (combining mark).  Chunk 28 covers plane-14 variation selectors'
+// neighborhood (presumably; table values are chunk-relative --
+// confirm against LookupPredicate).
+bool CombiningMark::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kCombiningMarkTable0,
+ kCombiningMarkTable0Size,
+ c);
+ case 1: return LookupPredicate(kCombiningMarkTable1,
+ kCombiningMarkTable1Size,
+ c);
+ case 2: return LookupPredicate(kCombiningMarkTable2,
+ kCombiningMarkTable2Size,
+ c);
+ case 3: return LookupPredicate(kCombiningMarkTable3,
+ kCombiningMarkTable3Size,
+ c);
+ case 28: return LookupPredicate(kCombiningMarkTable28,
+ kCombiningMarkTable28Size,
+ c);
+ default: return false;
+ }
+}
+
+// ConnectorPunctuation: point.category == 'Pc'
+
+static const uint16_t kConnectorPunctuationTable0Size = 4;
+// Includes U+005F LOW LINE (95), the underscore.
+static const uint16_t kConnectorPunctuationTable0[4] = { 95, 41023, 8256, 8276 };
+static const uint16_t kConnectorPunctuationTable1Size = 5;
+static const uint16_t kConnectorPunctuationTable1[5] = { 65075, 32308, 65101, 32335, 32575 };
+// Generated predicate: true when c has General_Category Pc
+// (connector punctuation) -- the identifier-joining characters.
+bool ConnectorPunctuation::Is(uchar c) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kConnectorPunctuationTable0,
+ kConnectorPunctuationTable0Size,
+ c);
+ case 1: return LookupPredicate(kConnectorPunctuationTable1,
+ kConnectorPunctuationTable1Size,
+ c);
+ default: return false;
+ }
+}
+
+static const MultiCharacterSpecialCase kToLowercaseMultiStrings0[] = { {2, {105, 775}}, {0, {0}} };
+static const uint16_t kToLowercaseTable0Size = 531;
+static const uint16_t kToLowercaseTable0[1062] = { 32833, 128, 90, 128, 32960, 128, 214, 128, 32984, 128, 222, 128, 256, 4, 258, 4, 260, 4, 262, 4, 264, 4, 266, 4, 268, 4, 270, 4, 272, 4, 274, 4, 276, 4, 278, 4, 280, 4, 282, 4, 284, 4, 286, 4, 288, 4, 290, 4, 292, 4, 294, 4, 296, 4, 298, 4, 300, 4, 302, 4, 304, 1, 306, 4, 308, 4, 310, 4, 313, 4, 315, 4, 317, 4, 319, 4, 321, 4, 323, 4, 325, 4, 327, 4, 330, 4, 332, 4, 334, 4, 336, 4, 338, 4, 340, 4, 342, 4, 344, 4, 346, 4, 348, 4, 350, 4, 352, 4, 354, 4, 356, 4, 358, 4, 360, 4, 362, 4, 364, 4, 366, 4, 368, 4, 370, 4, 372, 4, 374, 4, 376, static_cast<uint16_t>(-484), 377, 4, 379, 4, 381, 4, 385, 840, 386, 4, 388, 4, 390, 824, 391, 4, 33161, 820, 394, 820, 395, 4, 398, 316, 399, 808, 400, 812, 401, 4, 403, 820, 404, 828, 406, 844, 407, 836, 408, 4, 412, 844, 413, 852, 415, 856, 416, 4, 418, 4, 420, 4, 422, 872, 423, 4, 425, 872, 428, 4, 430, 872, 431, 4, 33201, 868, 434, 868, 435, 4, 437, 4, 439, 876, 440, 4, 444, 4, 452, 8, 453, 4, 455, 8, 456, 4, 458, 8, 459, 4, 461, 4, 463, 4, 465, 4, 467, 4, 469, 4, 471, 4, 473, 4, 475, 4, 478, 4, 480, 4, 482, 4, 484, 4, 486, 4, 488, 4, 490, 4, 492, 4, 494, 4, 497, 8, 498, 4, 500, 4, 502, static_cast<uint16_t>(-388), 503, static_cast<uint16_t>(-224), 504, 4, 506, 4, 508, 4, 510, 4, 512, 4, 514, 4, 516, 4, 518, 4, 520, 4, 522, 4, 524, 4, 526, 4, 528, 4, 530, 4, 532, 4, 534, 4, 536, 4, 538, 4, 540, 4, 542, 4, 544, static_cast<uint16_t>(-520), 546, 4, 548, 4, 550, 4, 552, 4, 554, 4, 556, 4, 558, 4, 560, 4, 562, 4, 570, 43180, 571, 4, 573, static_cast<uint16_t>(-652), 574, 43168, 577, 4, 579, static_cast<uint16_t>(-780), 580, 276, 581, 284, 582, 4, 584, 4, 586, 4, 588, 4, 590, 4, 902, 152, 33672, 148, 906, 148, 908, 256, 33678, 252, 911, 252, 33681, 128, 929, 128, 33699, 6, 939, 128, 984, 4, 986, 4, 988, 4, 990, 4, 992, 4, 994, 4, 996, 4, 998, 4, 1000, 4, 1002, 4, 1004, 4, 1006, 4, 1012, static_cast<uint16_t>(-240), 1015, 4, 1017, static_cast<uint16_t>(-28), 1018, 4, 33789, 
static_cast<uint16_t>(-520), 1023, static_cast<uint16_t>(-520), 33792, 320, 1039, 320, 33808, 128, 1071, 128, 1120, 4, 1122, 4, 1124, 4, 1126, 4, 1128, 4, 1130, 4, 1132, 4, 1134, 4, 1136, 4, 1138, 4, 1140, 4, 1142, 4, 1144, 4, 1146, 4, 1148, 4, 1150, 4, 1152, 4, 1162, 4, 1164, 4, 1166, 4, 1168, 4, 1170, 4, 1172, 4, 1174, 4, 1176, 4, 1178, 4, 1180, 4, 1182, 4, 1184, 4, 1186, 4, 1188, 4, 1190, 4, 1192, 4, 1194, 4, 1196, 4, 1198, 4, 1200, 4, 1202, 4, 1204, 4, 1206, 4, 1208, 4, 1210, 4, 1212, 4, 1214, 4, 1216, 60, 1217, 4, 1219, 4, 1221, 4, 1223, 4, 1225, 4, 1227, 4, 1229, 4, 1232, 4, 1234, 4, 1236, 4, 1238, 4, 1240, 4, 1242, 4, 1244, 4, 1246, 4, 1248, 4, 1250, 4, 1252, 4, 1254, 4, 1256, 4, 1258, 4, 1260, 4, 1262, 4, 1264, 4, 1266, 4, 1268, 4, 1270, 4, 1272, 4, 1274, 4, 1276, 4, 1278, 4, 1280, 4, 1282, 4, 1284, 4, 1286, 4, 1288, 4, 1290, 4, 1292, 4, 1294, 4, 1296, 4, 1298, 4, 34097, 192, 1366, 192, 37024, 29056, 4293, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4, 7690, 4, 7692, 4, 7694, 4, 7696, 4, 7698, 4, 7700, 4, 7702, 4, 7704, 4, 7706, 4, 7708, 4, 7710, 4, 7712, 4, 7714, 4, 7716, 4, 7718, 4, 7720, 4, 7722, 4, 7724, 4, 7726, 4, 7728, 4, 7730, 4, 7732, 4, 7734, 4, 7736, 4, 7738, 4, 7740, 4, 7742, 4, 7744, 4, 7746, 4, 7748, 4, 7750, 4, 7752, 4, 7754, 4, 7756, 4, 7758, 4, 7760, 4, 7762, 4, 7764, 4, 7766, 4, 7768, 4, 7770, 4, 7772, 4, 7774, 4, 7776, 4, 7778, 4, 7780, 4, 7782, 4, 7784, 4, 7786, 4, 7788, 4, 7790, 4, 7792, 4, 7794, 4, 7796, 4, 7798, 4, 7800, 4, 7802, 4, 7804, 4, 7806, 4, 7808, 4, 7810, 4, 7812, 4, 7814, 4, 7816, 4, 7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7840, 4, 7842, 4, 7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4, 7858, 4, 7860, 4, 7862, 4, 7864, 4, 7866, 4, 7868, 4, 7870, 4, 7872, 4, 7874, 4, 7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4, 7890, 4, 7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4, 7906, 4, 7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4, 7922, 4, 7924, 4, 
7926, 4, 7928, 4, 40712, static_cast<uint16_t>(-32), 7951, static_cast<uint16_t>(-32), 40728, static_cast<uint16_t>(-32), 7965, static_cast<uint16_t>(-32), 40744, static_cast<uint16_t>(-32), 7983, static_cast<uint16_t>(-32), 40760, static_cast<uint16_t>(-32), 7999, static_cast<uint16_t>(-32), 40776, static_cast<uint16_t>(-32), 8013, static_cast<uint16_t>(-32), 8025, static_cast<uint16_t>(-32), 8027, static_cast<uint16_t>(-32), 8029, static_cast<uint16_t>(-32), 8031, static_cast<uint16_t>(-32), 40808, static_cast<uint16_t>(-32), 8047, static_cast<uint16_t>(-32), 40840, static_cast<uint16_t>(-32), 8079, static_cast<uint16_t>(-32), 40856, static_cast<uint16_t>(-32), 8095, static_cast<uint16_t>(-32), 40872, static_cast<uint16_t>(-32), 8111, static_cast<uint16_t>(-32), 40888, static_cast<uint16_t>(-32), 8121, static_cast<uint16_t>(-32), 40890, static_cast<uint16_t>(-296), 8123, static_cast<uint16_t>(-296), 8124, static_cast<uint16_t>(-36), 40904, static_cast<uint16_t>(-344), 8139, static_cast<uint16_t>(-344), 8140, static_cast<uint16_t>(-36), 40920, static_cast<uint16_t>(-32), 8153, static_cast<uint16_t>(-32), 40922, static_cast<uint16_t>(-400), 8155, static_cast<uint16_t>(-400), 40936, static_cast<uint16_t>(-32), 8169, static_cast<uint16_t>(-32), 40938, static_cast<uint16_t>(-448), 8171, static_cast<uint16_t>(-448), 8172, static_cast<uint16_t>(-28), 40952, static_cast<uint16_t>(-512), 8185, static_cast<uint16_t>(-512), 40954, static_cast<uint16_t>(-504), 8187, static_cast<uint16_t>(-504), 8188, static_cast<uint16_t>(-36), 8486, static_cast<uint16_t>(-30068), 8490, static_cast<uint16_t>(-33532), 8491, static_cast<uint16_t>(-33048), 8498, 112, 41312, 64, 8559, 64, 8579, 4, 42166, 104, 9423, 104, 44032, 192, 11310, 192, 11360, 4, 11362, static_cast<uint16_t>(-42972), 11363, static_cast<uint16_t>(-15256), 11364, static_cast<uint16_t>(-42908), 11367, 4, 11369, 4, 11371, 4, 11381, 4, 11392, 4, 11394, 4, 11396, 4, 11398, 4, 11400, 4, 11402, 4, 11404, 4, 11406, 4, 11408, 4, 
11410, 4, 11412, 4, 11414, 4, 11416, 4, 11418, 4, 11420, 4, 11422, 4, 11424, 4, 11426, 4, 11428, 4, 11430, 4, 11432, 4, 11434, 4, 11436, 4, 11438, 4, 11440, 4, 11442, 4, 11444, 4, 11446, 4, 11448, 4, 11450, 4, 11452, 4, 11454, 4, 11456, 4, 11458, 4, 11460, 4, 11462, 4, 11464, 4, 11466, 4, 11468, 4, 11470, 4, 11472, 4, 11474, 4, 11476, 4, 11478, 4, 11480, 4, 11482, 4, 11484, 4, 11486, 4, 11488, 4, 11490, 4 };
+// Chunks 1 and 2 need no multi-character expansions; a single
+// zero-length terminator entry.
+static const MultiCharacterSpecialCase kToLowercaseMultiStrings1[] = { {0, {0}} };
+static const uint16_t kToLowercaseTable1Size = 2;
+// Mapping tables hold (code, value) pairs: Size counts pairs, so the
+// array length is 2 * Size.  The value looks like the codepoint
+// delta pre-scaled by 4 (A..Z -> a..z stores 128 for the +32 shift),
+// with the low bits presumably selecting MultiStrings entries for
+// multi-character results -- confirm against LookupMapping.
+static const uint16_t kToLowercaseTable1[4] = { 65313, 128, 32570, 128 };
+static const MultiCharacterSpecialCase kToLowercaseMultiStrings2[] = { {0, {0}} };
+static const uint16_t kToLowercaseTable2Size = 2;
+static const uint16_t kToLowercaseTable2[4] = { 33792, 160, 1063, 160 };
+// Generated case mapping: converts |c| to lowercase.  |n| is the
+// following character (available for context-sensitive mappings) and
+// the result characters are written to |result|; the return value
+// and the use of |allow_caching_ptr| are defined by LookupMapping
+// (elsewhere in this file) -- apparently the length of the mapping,
+// with *allow_caching_ptr cleared for context-dependent cases; TODO
+// confirm against LookupMapping.
+int ToLowercase::Convert(uchar c,
+ uchar n,
+ uchar* result,
+ bool* allow_caching_ptr) {
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupMapping(kToLowercaseTable0,
+ kToLowercaseTable0Size,
+ kToLowercaseMultiStrings0,
+ c,
+ n,
+ result,
+ allow_caching_ptr);
+ case 1: return LookupMapping(kToLowercaseTable1,
+ kToLowercaseTable1Size,
+ kToLowercaseMultiStrings1,
+ c,
+ n,
+ result,
+ allow_caching_ptr);
+ case 2: return LookupMapping(kToLowercaseTable2,
+ kToLowercaseTable2Size,
+ kToLowercaseMultiStrings2,
+ c,
+ n,
+ result,
+ allow_caching_ptr);
+ // Characters in other chunks have no lowercase mapping.
+ default: return 0;
+ }
+}
+
+static const MultiCharacterSpecialCase kToUppercaseMultiStrings0[] = { {2, {83, 83}}, {2, {700, 78}}, {2, {74, 780}}, {3, {921, 776, 769}}, {3, {933, 776, 769}}, {2, {1333, 1362}}, {2, {72, 817}}, {2, {84, 776}}, {2, {87, 778}}, {2, {89, 778}}, {2, {65, 702}}, {2, {933, 787}}, {3, {933, 787, 768}}, {3, {933, 787, 769}}, {3, {933, 787, 834}}, {2, {7944, 921}}, {2, {7945, 921}}, {2, {7946, 921}}, {2, {7947, 921}}, {2, {7948, 921}}, {2, {7949, 921}}, {2, {7950, 921}}, {2, {7951, 921}}, {2, {7944, 921}}, {2, {7945, 921}}, {2, {7946, 921}}, {2, {7947, 921}}, {2, {7948, 921}}, {2, {7949, 921}}, {2, {7950, 921}}, {2, {7951, 921}}, {2, {7976, 921}}, {2, {7977, 921}}, {2, {7978, 921}}, {2, {7979, 921}}, {2, {7980, 921}}, {2, {7981, 921}}, {2, {7982, 921}}, {2, {7983, 921}}, {2, {7976, 921}}, {2, {7977, 921}}, {2, {7978, 921}}, {2, {7979, 921}}, {2, {7980, 921}}, {2, {7981, 921}}, {2, {7982, 921}}, {2, {7983, 921}}, {2, {8040, 921}}, {2, {8041, 921}}, {2, {8042, 921}}, {2, {8043, 921}}, {2, {8044, 921}}, {2, {8045, 921}}, {2, {8046, 921}}, {2, {8047, 921}}, {2, {8040, 921}}, {2, {8041, 921}}, {2, {8042, 921}}, {2, {8043, 921}}, {2, {8044, 921}}, {2, {8045, 921}}, {2, {8046, 921}}, {2, {8047, 921}}, {2, {8122, 921}}, {2, {913, 921}}, {2, {902, 921}}, {2, {913, 834}}, {3, {913, 834, 921}}, {2, {913, 921}}, {2, {8138, 921}}, {2, {919, 921}}, {2, {905, 921}}, {2, {919, 834}}, {3, {919, 834, 921}}, {2, {919, 921}}, {3, {921, 776, 768}}, {3, {921, 776, 769}}, {2, {921, 834}}, {3, {921, 776, 834}}, {3, {933, 776, 768}}, {3, {933, 776, 769}}, {2, {929, 787}}, {2, {933, 834}}, {3, {933, 776, 834}}, {2, {8186, 921}}, {2, {937, 921}}, {2, {911, 921}}, {2, {937, 834}}, {3, {937, 834, 921}}, {2, {937, 921}}, {0, {0}} };
+static const uint16_t kToUppercaseTable0Size = 621;
+static const uint16_t kToUppercaseTable0[1242] = { 32865, static_cast<uint16_t>(-128), 122, static_cast<uint16_t>(-128), 181, 2972, 223, 1, 32992, static_cast<uint16_t>(-128), 246, static_cast<uint16_t>(-128), 33016, static_cast<uint16_t>(-128), 254, static_cast<uint16_t>(-128), 255, 484, 257, static_cast<uint16_t>(-4), 259, static_cast<uint16_t>(-4), 261, static_cast<uint16_t>(-4), 263, static_cast<uint16_t>(-4), 265, static_cast<uint16_t>(-4), 267, static_cast<uint16_t>(-4), 269, static_cast<uint16_t>(-4), 271, static_cast<uint16_t>(-4), 273, static_cast<uint16_t>(-4), 275, static_cast<uint16_t>(-4), 277, static_cast<uint16_t>(-4), 279, static_cast<uint16_t>(-4), 281, static_cast<uint16_t>(-4), 283, static_cast<uint16_t>(-4), 285, static_cast<uint16_t>(-4), 287, static_cast<uint16_t>(-4), 289, static_cast<uint16_t>(-4), 291, static_cast<uint16_t>(-4), 293, static_cast<uint16_t>(-4), 295, static_cast<uint16_t>(-4), 297, static_cast<uint16_t>(-4), 299, static_cast<uint16_t>(-4), 301, static_cast<uint16_t>(-4), 303, static_cast<uint16_t>(-4), 305, static_cast<uint16_t>(-928), 307, static_cast<uint16_t>(-4), 309, static_cast<uint16_t>(-4), 311, static_cast<uint16_t>(-4), 314, static_cast<uint16_t>(-4), 316, static_cast<uint16_t>(-4), 318, static_cast<uint16_t>(-4), 320, static_cast<uint16_t>(-4), 322, static_cast<uint16_t>(-4), 324, static_cast<uint16_t>(-4), 326, static_cast<uint16_t>(-4), 328, static_cast<uint16_t>(-4), 329, 5, 331, static_cast<uint16_t>(-4), 333, static_cast<uint16_t>(-4), 335, static_cast<uint16_t>(-4), 337, static_cast<uint16_t>(-4), 339, static_cast<uint16_t>(-4), 341, static_cast<uint16_t>(-4), 343, static_cast<uint16_t>(-4), 345, static_cast<uint16_t>(-4), 347, static_cast<uint16_t>(-4), 349, static_cast<uint16_t>(-4), 351, static_cast<uint16_t>(-4), 353, static_cast<uint16_t>(-4), 355, static_cast<uint16_t>(-4), 357, static_cast<uint16_t>(-4), 359, static_cast<uint16_t>(-4), 361, static_cast<uint16_t>(-4), 363, static_cast<uint16_t>(-4), 
365, static_cast<uint16_t>(-4), 367, static_cast<uint16_t>(-4), 369, static_cast<uint16_t>(-4), 371, static_cast<uint16_t>(-4), 373, static_cast<uint16_t>(-4), 375, static_cast<uint16_t>(-4), 378, static_cast<uint16_t>(-4), 380, static_cast<uint16_t>(-4), 382, static_cast<uint16_t>(-4), 383, static_cast<uint16_t>(-1200), 384, 780, 387, static_cast<uint16_t>(-4), 389, static_cast<uint16_t>(-4), 392, static_cast<uint16_t>(-4), 396, static_cast<uint16_t>(-4), 402, static_cast<uint16_t>(-4), 405, 388, 409, static_cast<uint16_t>(-4), 410, 652, 414, 520, 417, static_cast<uint16_t>(-4), 419, static_cast<uint16_t>(-4), 421, static_cast<uint16_t>(-4), 424, static_cast<uint16_t>(-4), 429, static_cast<uint16_t>(-4), 432, static_cast<uint16_t>(-4), 436, static_cast<uint16_t>(-4), 438, static_cast<uint16_t>(-4), 441, static_cast<uint16_t>(-4), 445, static_cast<uint16_t>(-4), 447, 224, 453, static_cast<uint16_t>(-4), 454, static_cast<uint16_t>(-8), 456, static_cast<uint16_t>(-4), 457, static_cast<uint16_t>(-8), 459, static_cast<uint16_t>(-4), 460, static_cast<uint16_t>(-8), 462, static_cast<uint16_t>(-4), 464, static_cast<uint16_t>(-4), 466, static_cast<uint16_t>(-4), 468, static_cast<uint16_t>(-4), 470, static_cast<uint16_t>(-4), 472, static_cast<uint16_t>(-4), 474, static_cast<uint16_t>(-4), 476, static_cast<uint16_t>(-4), 477, static_cast<uint16_t>(-316), 479, static_cast<uint16_t>(-4), 481, static_cast<uint16_t>(-4), 483, static_cast<uint16_t>(-4), 485, static_cast<uint16_t>(-4), 487, static_cast<uint16_t>(-4), 489, static_cast<uint16_t>(-4), 491, static_cast<uint16_t>(-4), 493, static_cast<uint16_t>(-4), 495, static_cast<uint16_t>(-4), 496, 9, 498, static_cast<uint16_t>(-4), 499, static_cast<uint16_t>(-8), 501, static_cast<uint16_t>(-4), 505, static_cast<uint16_t>(-4), 507, static_cast<uint16_t>(-4), 509, static_cast<uint16_t>(-4), 511, static_cast<uint16_t>(-4), 513, static_cast<uint16_t>(-4), 515, static_cast<uint16_t>(-4), 517, static_cast<uint16_t>(-4), 519, 
static_cast<uint16_t>(-4), 521, static_cast<uint16_t>(-4), 523, static_cast<uint16_t>(-4), 525, static_cast<uint16_t>(-4), 527, static_cast<uint16_t>(-4), 529, static_cast<uint16_t>(-4), 531, static_cast<uint16_t>(-4), 533, static_cast<uint16_t>(-4), 535, static_cast<uint16_t>(-4), 537, static_cast<uint16_t>(-4), 539, static_cast<uint16_t>(-4), 541, static_cast<uint16_t>(-4), 543, static_cast<uint16_t>(-4), 547, static_cast<uint16_t>(-4), 549, static_cast<uint16_t>(-4), 551, static_cast<uint16_t>(-4), 553, static_cast<uint16_t>(-4), 555, static_cast<uint16_t>(-4), 557, static_cast<uint16_t>(-4), 559, static_cast<uint16_t>(-4), 561, static_cast<uint16_t>(-4), 563, static_cast<uint16_t>(-4), 572, static_cast<uint16_t>(-4), 578, static_cast<uint16_t>(-4), 583, static_cast<uint16_t>(-4), 585, static_cast<uint16_t>(-4), 587, static_cast<uint16_t>(-4), 589, static_cast<uint16_t>(-4), 591, static_cast<uint16_t>(-4), 595, static_cast<uint16_t>(-840), 596, static_cast<uint16_t>(-824), 33366, static_cast<uint16_t>(-820), 599, static_cast<uint16_t>(-820), 601, static_cast<uint16_t>(-808), 603, static_cast<uint16_t>(-812), 608, static_cast<uint16_t>(-820), 611, static_cast<uint16_t>(-828), 616, static_cast<uint16_t>(-836), 617, static_cast<uint16_t>(-844), 619, 42972, 623, static_cast<uint16_t>(-844), 626, static_cast<uint16_t>(-852), 629, static_cast<uint16_t>(-856), 637, 42908, 640, static_cast<uint16_t>(-872), 643, static_cast<uint16_t>(-872), 648, static_cast<uint16_t>(-872), 649, static_cast<uint16_t>(-276), 33418, static_cast<uint16_t>(-868), 651, static_cast<uint16_t>(-868), 652, static_cast<uint16_t>(-284), 658, static_cast<uint16_t>(-876), 837, 336, 33659, 520, 893, 520, 912, 13, 940, static_cast<uint16_t>(-152), 33709, static_cast<uint16_t>(-148), 943, static_cast<uint16_t>(-148), 944, 17, 33713, static_cast<uint16_t>(-128), 961, static_cast<uint16_t>(-128), 962, static_cast<uint16_t>(-124), 33731, static_cast<uint16_t>(-128), 971, static_cast<uint16_t>(-128), 972, 
static_cast<uint16_t>(-256), 33741, static_cast<uint16_t>(-252), 974, static_cast<uint16_t>(-252), 976, static_cast<uint16_t>(-248), 977, static_cast<uint16_t>(-228), 981, static_cast<uint16_t>(-188), 982, static_cast<uint16_t>(-216), 985, static_cast<uint16_t>(-4), 987, static_cast<uint16_t>(-4), 989, static_cast<uint16_t>(-4), 991, static_cast<uint16_t>(-4), 993, static_cast<uint16_t>(-4), 995, static_cast<uint16_t>(-4), 997, static_cast<uint16_t>(-4), 999, static_cast<uint16_t>(-4), 1001, static_cast<uint16_t>(-4), 1003, static_cast<uint16_t>(-4), 1005, static_cast<uint16_t>(-4), 1007, static_cast<uint16_t>(-4), 1008, static_cast<uint16_t>(-344), 1009, static_cast<uint16_t>(-320), 1010, 28, 1013, static_cast<uint16_t>(-384), 1016, static_cast<uint16_t>(-4), 1019, static_cast<uint16_t>(-4), 33840, static_cast<uint16_t>(-128), 1103, static_cast<uint16_t>(-128), 33872, static_cast<uint16_t>(-320), 1119, static_cast<uint16_t>(-320), 1121, static_cast<uint16_t>(-4), 1123, static_cast<uint16_t>(-4), 1125, static_cast<uint16_t>(-4), 1127, static_cast<uint16_t>(-4), 1129, static_cast<uint16_t>(-4), 1131, static_cast<uint16_t>(-4), 1133, static_cast<uint16_t>(-4), 1135, static_cast<uint16_t>(-4), 1137, static_cast<uint16_t>(-4), 1139, static_cast<uint16_t>(-4), 1141, static_cast<uint16_t>(-4), 1143, static_cast<uint16_t>(-4), 1145, static_cast<uint16_t>(-4), 1147, static_cast<uint16_t>(-4), 1149, static_cast<uint16_t>(-4), 1151, static_cast<uint16_t>(-4), 1153, static_cast<uint16_t>(-4), 1163, static_cast<uint16_t>(-4), 1165, static_cast<uint16_t>(-4), 1167, static_cast<uint16_t>(-4), 1169, static_cast<uint16_t>(-4), 1171, static_cast<uint16_t>(-4), 1173, static_cast<uint16_t>(-4), 1175, static_cast<uint16_t>(-4), 1177, static_cast<uint16_t>(-4), 1179, static_cast<uint16_t>(-4), 1181, static_cast<uint16_t>(-4), 1183, static_cast<uint16_t>(-4), 1185, static_cast<uint16_t>(-4), 1187, static_cast<uint16_t>(-4), 1189, static_cast<uint16_t>(-4), 1191, 
static_cast<uint16_t>(-4), 1193, static_cast<uint16_t>(-4), 1195, static_cast<uint16_t>(-4), 1197, static_cast<uint16_t>(-4), 1199, static_cast<uint16_t>(-4), 1201, static_cast<uint16_t>(-4), 1203, static_cast<uint16_t>(-4), 1205, static_cast<uint16_t>(-4), 1207, static_cast<uint16_t>(-4), 1209, static_cast<uint16_t>(-4), 1211, static_cast<uint16_t>(-4), 1213, static_cast<uint16_t>(-4), 1215, static_cast<uint16_t>(-4), 1218, static_cast<uint16_t>(-4), 1220, static_cast<uint16_t>(-4), 1222, static_cast<uint16_t>(-4), 1224, static_cast<uint16_t>(-4), 1226, static_cast<uint16_t>(-4), 1228, static_cast<uint16_t>(-4), 1230, static_cast<uint16_t>(-4), 1231, static_cast<uint16_t>(-60), 1233, static_cast<uint16_t>(-4), 1235, static_cast<uint16_t>(-4), 1237, static_cast<uint16_t>(-4), 1239, static_cast<uint16_t>(-4), 1241, static_cast<uint16_t>(-4), 1243, static_cast<uint16_t>(-4), 1245, static_cast<uint16_t>(-4), 1247, static_cast<uint16_t>(-4), 1249, static_cast<uint16_t>(-4), 1251, static_cast<uint16_t>(-4), 1253, static_cast<uint16_t>(-4), 1255, static_cast<uint16_t>(-4), 1257, static_cast<uint16_t>(-4), 1259, static_cast<uint16_t>(-4), 1261, static_cast<uint16_t>(-4), 1263, static_cast<uint16_t>(-4), 1265, static_cast<uint16_t>(-4), 1267, static_cast<uint16_t>(-4), 1269, static_cast<uint16_t>(-4), 1271, static_cast<uint16_t>(-4), 1273, static_cast<uint16_t>(-4), 1275, static_cast<uint16_t>(-4), 1277, static_cast<uint16_t>(-4), 1279, static_cast<uint16_t>(-4), 1281, static_cast<uint16_t>(-4), 1283, static_cast<uint16_t>(-4), 1285, static_cast<uint16_t>(-4), 1287, static_cast<uint16_t>(-4), 1289, static_cast<uint16_t>(-4), 1291, static_cast<uint16_t>(-4), 1293, static_cast<uint16_t>(-4), 1295, static_cast<uint16_t>(-4), 1297, static_cast<uint16_t>(-4), 1299, static_cast<uint16_t>(-4), 34145, static_cast<uint16_t>(-192), 1414, static_cast<uint16_t>(-192), 1415, 21, 7549, 15256, 7681, static_cast<uint16_t>(-4), 7683, static_cast<uint16_t>(-4), 7685, 
static_cast<uint16_t>(-4), 7687, static_cast<uint16_t>(-4), 7689, static_cast<uint16_t>(-4), 7691, static_cast<uint16_t>(-4), 7693, static_cast<uint16_t>(-4), 7695, static_cast<uint16_t>(-4), 7697, static_cast<uint16_t>(-4), 7699, static_cast<uint16_t>(-4), 7701, static_cast<uint16_t>(-4), 7703, static_cast<uint16_t>(-4), 7705, static_cast<uint16_t>(-4), 7707, static_cast<uint16_t>(-4), 7709, static_cast<uint16_t>(-4), 7711, static_cast<uint16_t>(-4), 7713, static_cast<uint16_t>(-4), 7715, static_cast<uint16_t>(-4), 7717, static_cast<uint16_t>(-4), 7719, static_cast<uint16_t>(-4), 7721, static_cast<uint16_t>(-4), 7723, static_cast<uint16_t>(-4), 7725, static_cast<uint16_t>(-4), 7727, static_cast<uint16_t>(-4), 7729, static_cast<uint16_t>(-4), 7731, static_cast<uint16_t>(-4), 7733, static_cast<uint16_t>(-4), 7735, static_cast<uint16_t>(-4), 7737, static_cast<uint16_t>(-4), 7739, static_cast<uint16_t>(-4), 7741, static_cast<uint16_t>(-4), 7743, static_cast<uint16_t>(-4), 7745, static_cast<uint16_t>(-4), 7747, static_cast<uint16_t>(-4), 7749, static_cast<uint16_t>(-4), 7751, static_cast<uint16_t>(-4), 7753, static_cast<uint16_t>(-4), 7755, static_cast<uint16_t>(-4), 7757, static_cast<uint16_t>(-4), 7759, static_cast<uint16_t>(-4), 7761, static_cast<uint16_t>(-4), 7763, static_cast<uint16_t>(-4), 7765, static_cast<uint16_t>(-4), 7767, static_cast<uint16_t>(-4), 7769, static_cast<uint16_t>(-4), 7771, static_cast<uint16_t>(-4), 7773, static_cast<uint16_t>(-4), 7775, static_cast<uint16_t>(-4), 7777, static_cast<uint16_t>(-4), 7779, static_cast<uint16_t>(-4), 7781, static_cast<uint16_t>(-4), 7783, static_cast<uint16_t>(-4), 7785, static_cast<uint16_t>(-4), 7787, static_cast<uint16_t>(-4), 7789, static_cast<uint16_t>(-4), 7791, static_cast<uint16_t>(-4), 7793, static_cast<uint16_t>(-4), 7795, static_cast<uint16_t>(-4), 7797, static_cast<uint16_t>(-4), 7799, static_cast<uint16_t>(-4), 7801, static_cast<uint16_t>(-4), 7803, static_cast<uint16_t>(-4), 7805, 
static_cast<uint16_t>(-4), 7807, static_cast<uint16_t>(-4), 7809, static_cast<uint16_t>(-4), 7811, static_cast<uint16_t>(-4), 7813, static_cast<uint16_t>(-4), 7815, static_cast<uint16_t>(-4), 7817, static_cast<uint16_t>(-4), 7819, static_cast<uint16_t>(-4), 7821, static_cast<uint16_t>(-4), 7823, static_cast<uint16_t>(-4), 7825, static_cast<uint16_t>(-4), 7827, static_cast<uint16_t>(-4), 7829, static_cast<uint16_t>(-4), 7830, 25, 7831, 29, 7832, 33, 7833, 37, 7834, 41, 7835, static_cast<uint16_t>(-236), 7841, static_cast<uint16_t>(-4), 7843, static_cast<uint16_t>(-4), 7845, static_cast<uint16_t>(-4), 7847, static_cast<uint16_t>(-4), 7849, static_cast<uint16_t>(-4), 7851, static_cast<uint16_t>(-4), 7853, static_cast<uint16_t>(-4), 7855, static_cast<uint16_t>(-4), 7857, static_cast<uint16_t>(-4), 7859, static_cast<uint16_t>(-4), 7861, static_cast<uint16_t>(-4), 7863, static_cast<uint16_t>(-4), 7865, static_cast<uint16_t>(-4), 7867, static_cast<uint16_t>(-4), 7869, static_cast<uint16_t>(-4), 7871, static_cast<uint16_t>(-4), 7873, static_cast<uint16_t>(-4), 7875, static_cast<uint16_t>(-4), 7877, static_cast<uint16_t>(-4), 7879, static_cast<uint16_t>(-4), 7881, static_cast<uint16_t>(-4), 7883, static_cast<uint16_t>(-4), 7885, static_cast<uint16_t>(-4), 7887, static_cast<uint16_t>(-4), 7889, static_cast<uint16_t>(-4), 7891, static_cast<uint16_t>(-4), 7893, static_cast<uint16_t>(-4), 7895, static_cast<uint16_t>(-4), 7897, static_cast<uint16_t>(-4), 7899, static_cast<uint16_t>(-4), 7901, static_cast<uint16_t>(-4), 7903, static_cast<uint16_t>(-4), 7905, static_cast<uint16_t>(-4), 7907, static_cast<uint16_t>(-4), 7909, static_cast<uint16_t>(-4), 7911, static_cast<uint16_t>(-4), 7913, static_cast<uint16_t>(-4), 7915, static_cast<uint16_t>(-4), 7917, static_cast<uint16_t>(-4), 7919, static_cast<uint16_t>(-4), 7921, static_cast<uint16_t>(-4), 7923, static_cast<uint16_t>(-4), 7925, static_cast<uint16_t>(-4), 7927, static_cast<uint16_t>(-4), 7929, static_cast<uint16_t>(-4), 40704, 
32, 7943, 32, 40720, 32, 7957, 32, 40736, 32, 7975, 32, 40752, 32, 7991, 32, 40768, 32, 8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53, 8021, 32, 8022, 57, 8023, 32, 40800, 32, 8039, 32, 40816, 296, 8049, 296, 40818, 344, 8053, 344, 40822, 400, 8055, 400, 40824, 512, 8057, 512, 40826, 448, 8059, 448, 40828, 504, 8061, 504, 8064, 61, 8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85, 8071, 89, 8072, 93, 8073, 97, 8074, 101, 8075, 105, 8076, 109, 8077, 113, 8078, 117, 8079, 121, 8080, 125, 8081, 129, 8082, 133, 8083, 137, 8084, 141, 8085, 145, 8086, 149, 8087, 153, 8088, 157, 8089, 161, 8090, 165, 8091, 169, 8092, 173, 8093, 177, 8094, 181, 8095, 185, 8096, 189, 8097, 193, 8098, 197, 8099, 201, 8100, 205, 8101, 209, 8102, 213, 8103, 217, 8104, 221, 8105, 225, 8106, 229, 8107, 233, 8108, 237, 8109, 241, 8110, 245, 8111, 249, 40880, 32, 8113, 32, 8114, 253, 8115, 257, 8116, 261, 8118, 265, 8119, 269, 8124, 273, 8126, static_cast<uint16_t>(-28820), 8130, 277, 8131, 281, 8132, 285, 8134, 289, 8135, 293, 8140, 297, 40912, 32, 8145, 32, 8146, 301, 8147, 305, 8150, 309, 8151, 313, 40928, 32, 8161, 32, 8162, 317, 8163, 321, 8164, 325, 8165, 28, 8166, 329, 8167, 333, 8178, 337, 8179, 341, 8180, 345, 8182, 349, 8183, 353, 8188, 357, 8526, static_cast<uint16_t>(-112), 41328, static_cast<uint16_t>(-64), 8575, static_cast<uint16_t>(-64), 8580, static_cast<uint16_t>(-4), 42192, static_cast<uint16_t>(-104), 9449, static_cast<uint16_t>(-104), 44080, static_cast<uint16_t>(-192), 11358, static_cast<uint16_t>(-192), 11361, static_cast<uint16_t>(-4), 11365, static_cast<uint16_t>(-43180), 11366, static_cast<uint16_t>(-43168), 11368, static_cast<uint16_t>(-4), 11370, static_cast<uint16_t>(-4), 11372, static_cast<uint16_t>(-4), 11382, static_cast<uint16_t>(-4), 11393, static_cast<uint16_t>(-4), 11395, static_cast<uint16_t>(-4), 11397, static_cast<uint16_t>(-4), 11399, static_cast<uint16_t>(-4), 11401, static_cast<uint16_t>(-4), 11403, static_cast<uint16_t>(-4), 11405, 
static_cast<uint16_t>(-4), 11407, static_cast<uint16_t>(-4), 11409, static_cast<uint16_t>(-4), 11411, static_cast<uint16_t>(-4), 11413, static_cast<uint16_t>(-4), 11415, static_cast<uint16_t>(-4), 11417, static_cast<uint16_t>(-4), 11419, static_cast<uint16_t>(-4), 11421, static_cast<uint16_t>(-4), 11423, static_cast<uint16_t>(-4), 11425, static_cast<uint16_t>(-4), 11427, static_cast<uint16_t>(-4), 11429, static_cast<uint16_t>(-4), 11431, static_cast<uint16_t>(-4), 11433, static_cast<uint16_t>(-4), 11435, static_cast<uint16_t>(-4), 11437, static_cast<uint16_t>(-4), 11439, static_cast<uint16_t>(-4), 11441, static_cast<uint16_t>(-4), 11443, static_cast<uint16_t>(-4), 11445, static_cast<uint16_t>(-4), 11447, static_cast<uint16_t>(-4), 11449, static_cast<uint16_t>(-4), 11451, static_cast<uint16_t>(-4), 11453, static_cast<uint16_t>(-4), 11455, static_cast<uint16_t>(-4), 11457, static_cast<uint16_t>(-4), 11459, static_cast<uint16_t>(-4), 11461, static_cast<uint16_t>(-4), 11463, static_cast<uint16_t>(-4), 11465, static_cast<uint16_t>(-4), 11467, static_cast<uint16_t>(-4), 11469, static_cast<uint16_t>(-4), 11471, static_cast<uint16_t>(-4), 11473, static_cast<uint16_t>(-4), 11475, static_cast<uint16_t>(-4), 11477, static_cast<uint16_t>(-4), 11479, static_cast<uint16_t>(-4), 11481, static_cast<uint16_t>(-4), 11483, static_cast<uint16_t>(-4), 11485, static_cast<uint16_t>(-4), 11487, static_cast<uint16_t>(-4), 11489, static_cast<uint16_t>(-4), 11491, static_cast<uint16_t>(-4), 44288, static_cast<uint16_t>(-29056), 11557, static_cast<uint16_t>(-29056) };
+// Multi-character expansions for the uppercase mapping in chunk 1
+// (code points with bit 15 set); each entry is {length, {code units}},
+// terminated by a zero-length sentinel.
+static const MultiCharacterSpecialCase kToUppercaseMultiStrings1[] = { {2, {70, 70}}, {2, {70, 73}}, {2, {70, 76}}, {3, {70, 70, 73}}, {3, {70, 70, 76}}, {2, {83, 84}}, {2, {83, 84}}, {2, {1348, 1350}}, {2, {1348, 1333}}, {2, {1348, 1339}}, {2, {1358, 1350}}, {2, {1348, 1341}}, {0, {0}} };
+// Number of (key, value) pairs in kToUppercaseTable1 (28 entries / 2).
+static const uint16_t kToUppercaseTable1Size = 14;
+// Generated lookup table for chunk 1, consumed by LookupMapping in
+// ToUppercase::Convert.  Values cast from negative ints presumably
+// encode code-point deltas and small positive values presumably index
+// the multi-string table above — confirm against LookupMapping.
+static const uint16_t kToUppercaseTable1[28] = { 31488, 1, 31489, 5, 31490, 9, 31491, 13, 31492, 17, 31493, 21, 31494, 25, 31507, 29, 31508, 33, 31509, 37, 31510, 41, 31511, 45, 65345, static_cast<uint16_t>(-128), 32602, static_cast<uint16_t>(-128) };
+// Chunk 2 has no multi-character expansions; sentinel entry only.
+static const MultiCharacterSpecialCase kToUppercaseMultiStrings2[] = { {0, {0}} };
+static const uint16_t kToUppercaseTable2Size = 2;
+static const uint16_t kToUppercaseTable2[4] = { 33832, static_cast<uint16_t>(-160), 1103, static_cast<uint16_t>(-160) };
+// Computes the uppercase mapping of the character c.
+//   c                 - character to convert
+//   n                 - the following character (context for
+//                       conditional mappings)
+//   result            - receives the converted character(s)
+//   allow_caching_ptr - passed through to LookupMapping; presumably
+//                       cleared for context-dependent mappings (see the
+//                       Mapping cache comment in the header)
+// Returns LookupMapping's result (presumably the number of characters
+// written to result); 0 for chunks with no table.
+int ToUppercase::Convert(uchar c,
+ uchar n,
+ uchar* result,
+ bool* allow_caching_ptr) {
+ // Characters are partitioned into 32K chunks; each chunk has its own
+ // generated table and multi-string list.
+ int chunk_index = c >> 15;
+ switch (chunk_index) {
+ case 0: return LookupMapping(kToUppercaseTable0,
+ kToUppercaseTable0Size,
+ kToUppercaseMultiStrings0,
+ c,
+ n,
+ result,
+ allow_caching_ptr);
+ case 1: return LookupMapping(kToUppercaseTable1,
+ kToUppercaseTable1Size,
+ kToUppercaseMultiStrings1,
+ c,
+ n,
+ result,
+ allow_caching_ptr);
+ case 2: return LookupMapping(kToUppercaseTable2,
+ kToUppercaseTable2Size,
+ kToUppercaseMultiStrings2,
+ c,
+ n,
+ result,
+ allow_caching_ptr);
+ default: return 0;
+ }
+}
+
+
+// Highest code point covered by the generated data (0x10FFFD; two
+// below the Unicode maximum U+10FFFF — presumably the last two code
+// points carry no data).
+uchar UnicodeData::kMaxCodePoint = 1114109;
+
+// Returns the total size in bytes of every generated lookup table in
+// this file; private with friend class Test, so presumably used by
+// tests to track data-size growth.
+int UnicodeData::GetByteCount() {
+ return 0 + (sizeof(uint16_t) * kUppercaseTable0Size) + (sizeof(uint16_t) * kUppercaseTable1Size) + (sizeof(uint16_t) * kUppercaseTable2Size) + (sizeof(uint16_t) * kUppercaseTable3Size) + (sizeof(uint16_t) * kLowercaseTable0Size) + (sizeof(uint16_t) * kLowercaseTable1Size) + (sizeof(uint16_t) * kLowercaseTable2Size) + (sizeof(uint16_t) * kLowercaseTable3Size) + (sizeof(uint16_t) * kLetterTable0Size) + (sizeof(uint16_t) * kLetterTable1Size) + (sizeof(uint16_t) * kLetterTable2Size) + (sizeof(uint16_t) * kLetterTable3Size) + (sizeof(uint16_t) * kLetterTable4Size) + (sizeof(uint16_t) * kLetterTable5Size) + (sizeof(uint16_t) * kSpaceTable0Size) + (sizeof(uint16_t) * kTitlecaseTable0Size) + (sizeof(uint16_t) * kNumberTable0Size) + (sizeof(uint16_t) * kNumberTable1Size) + (sizeof(uint16_t) * kNumberTable2Size) + (sizeof(uint16_t) * kNumberTable3Size) + (sizeof(uint16_t) * kDecimalDigitTable0Size) + (sizeof(uint16_t) * kDecimalDigitTable1Size) + (sizeof(uint16_t) * kDecimalDigitTable2Size) + (sizeof(uint16_t) * kDecimalDigitTable3Size) + (sizeof(uint16_t) * kIdeographicTable0Size) + (sizeof(uint16_t) * kIdeographicTable1Size) + (sizeof(uint16_t) * kIdeographicTable4Size) + (sizeof(uint16_t) * kIdeographicTable5Size) + (sizeof(uint16_t) * kWhiteSpaceTable0Size) + (sizeof(uint16_t) * kHexDigitTable0Size) + (sizeof(uint16_t) * kHexDigitTable1Size) + (sizeof(uint16_t) * kAsciiHexDigitTable0Size) + (sizeof(uint16_t) * kBidiControlTable0Size) + (sizeof(uint16_t) * kJoinControlTable0Size) + (sizeof(uint16_t) * kDashTable0Size) + (sizeof(uint16_t) * kDashTable1Size) + (sizeof(uint16_t) * kHyphenTable0Size) + (sizeof(uint16_t) * kHyphenTable1Size) + (sizeof(uint16_t) * kLineTerminatorTable0Size) + (sizeof(uint16_t) * kCombiningMarkTable0Size) + (sizeof(uint16_t) * kCombiningMarkTable1Size) + (sizeof(uint16_t) * kCombiningMarkTable2Size) + (sizeof(uint16_t) * kCombiningMarkTable3Size) + (sizeof(uint16_t) * kCombiningMarkTable28Size) + (sizeof(uint16_t) * 
kConnectorPunctuationTable0Size) + (sizeof(uint16_t) * kConnectorPunctuationTable1Size) + (sizeof(uint16_t) * kToLowercaseTable0Size) + (sizeof(uint16_t) * kToLowercaseTable1Size) + (sizeof(uint16_t) * kToLowercaseTable2Size) + (sizeof(uint16_t) * kToUppercaseTable0Size) + (sizeof(uint16_t) * kToUppercaseTable1Size) + (sizeof(uint16_t) * kToUppercaseTable2Size);
+}
+
+} // namespace unicode
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef __UNIBROW_H__
+#define __UNIBROW_H__
+
+#include <sys/types.h>
+
+/**
+ * \file
+ * Definitions and convenience functions for working with unicode.
+ */
+
+namespace unibrow {
+
+typedef unsigned int uchar;
+typedef unsigned char byte;
+
+/**
+ * The max length of the result of converting the case of a single
+ * character.
+ */
+static const int kMaxCaseConvertedSize = 3;
+
+// A small fixed-size cache for a boolean character predicate T (e.g.
+// Uppercase).  get(c) presumably answers from the cache (kMask
+// suggests direct-mapped slot selection) and falls back to
+// CalculateValue on a miss — the implementations are not in this
+// header.
+template <class T, int size = 256>
+class Predicate {
+ public:
+ inline Predicate() { }
+ // Returns whether the predicate holds for code point c.
+ inline bool get(uchar c);
+ private:
+ friend class Test;
+ // Uncached evaluation of the predicate.
+ bool CalculateValue(uchar c);
+ // One cache slot: the code point (21 bits covers all of Unicode)
+ // and its cached boolean value.
+ struct CacheEntry {
+ inline CacheEntry() : code_point_(0), value_(0) { }
+ inline CacheEntry(uchar code_point, bool value)
+ : code_point_(code_point),
+ value_(value) { }
+ uchar code_point_ : 21;
+ bool value_ : 1;
+ };
+ static const int kSize = size;
+ // Size is presumably required to be a power of two so that kMask
+ // can select a slot.
+ static const int kMask = kSize - 1;
+ CacheEntry entries_[kSize];
+};
+
+// A cache used in case conversion. It caches the value for characters
+// that either have no mapping or map to a single character independent
+// of context. Characters that map to more than one character or that
+// map differently depending on context are always looked up.
+template <class T, int size = 256>
+class Mapping {
+ public:
+ inline Mapping() { }
+ // Converts c into result, with the following character n as context
+ // for context-dependent mappings; presumably returns the number of
+ // characters produced (cf. ToUppercase::Convert).
+ inline int get(uchar c, uchar n, uchar* result);
+ private:
+ friend class Test;
+ // Uncached conversion; presumably also decides cacheability via the
+ // allow-caching flag of the underlying Convert.
+ int CalculateValue(uchar c, uchar n, uchar* result);
+ // One cache slot: the code point plus the signed delta to its
+ // single-character mapping (11 bits; large deltas presumably bypass
+ // the cache).
+ struct CacheEntry {
+ inline CacheEntry() : code_point_(0), offset_(0) { }
+ inline CacheEntry(uchar code_point, signed offset)
+ : code_point_(code_point),
+ offset_(offset) { }
+ uchar code_point_ : 21;
+ signed offset_ : 11;
+ };
+ static const int kSize = size;
+ static const int kMask = kSize - 1;
+ CacheEntry entries_[kSize];
+};
+
+// Test-only bookkeeping about the generated Unicode tables (all
+// members are private; only the Test friend can reach them).
+class UnicodeData {
+ private:
+ friend class Test;
+ // Total size in bytes of all generated lookup tables.
+ static int GetByteCount();
+ // Highest code point covered by the generated data.
+ static uchar kMaxCodePoint;
+};
+
+// --- U t f 8 ---
+
+// A simple (data, length) pair describing a block of input; Data is
+// typically a pointer type such as const char* (see Utf8InputBuffer).
+template <typename Data>
+class Buffer {
+ public:
+ inline Buffer(Data data, unsigned length) : data_(data), length_(length) { }
+ inline Buffer() : data_(0), length_(0) { }
+ Data data() { return data_; }
+ unsigned length() { return length_; }
+ private:
+ Data data_;
+ unsigned length_;
+};
+
+// Utilities for encoding and decoding UTF-8 (implementations are in
+// the corresponding .cc / -inl file, not this header).
+class Utf8 {
+ public:
+ // Presumably the number of bytes needed to encode chr — confirm.
+ static inline uchar Length(uchar chr);
+ // Presumably writes c's UTF-8 encoding to out, returning the byte
+ // count — confirm.
+ static inline unsigned Encode(char* out, uchar c);
+ static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
+ unsigned capacity, unsigned* chars_read, unsigned* offset);
+ // U+FFFD REPLACEMENT CHARACTER; presumably returned for ill-formed
+ // input — see CalculateValue.
+ static const uchar kBadChar = 0xFFFD;
+ static const unsigned kMaxEncodedSize = 4;
+ // Upper bounds of the code point ranges encoded in 1..4 bytes.
+ // Note kMaxFourByteChar is the pre-RFC-3629 21-bit limit 0x1FFFFF,
+ // above the Unicode maximum U+10FFFF.
+ static const unsigned kMaxOneByteChar = 0x7f;
+ static const unsigned kMaxTwoByteChar = 0x7ff;
+ static const unsigned kMaxThreeByteChar = 0xffff;
+ static const unsigned kMaxFourByteChar = 0x1fffff;
+
+ private:
+ template <unsigned s> friend class Utf8InputBuffer;
+ friend class Test;
+ // Decodes the character at str[*cursor], advancing *cursor
+ // (presumably the inline fast path over CalculateValue).
+ static inline uchar ValueOf(const byte* str,
+ unsigned length,
+ unsigned* cursor);
+ static uchar CalculateValue(const byte* str,
+ unsigned length,
+ unsigned* cursor);
+};
+
+// --- C h a r a c t e r S t r e a m ---
+
+// Abstract stream of unicode characters decoded from a backing store;
+// subclasses refill buffer_ via FillBuffer and restart via Rewind.
+class CharacterStream {
+ public:
+ inline uchar GetNext();
+ inline bool has_more() { return remaining_ != 0; }
+ // Note that default implementation is not efficient.
+ virtual void Seek(unsigned);
+ unsigned Length();
+ virtual ~CharacterStream() { }
+ // Static encode/decode helpers over raw byte buffers; presumably the
+ // bool return reports whether the character fit within capacity —
+ // implementations are not visible here.
+ static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
+ unsigned& offset);
+ static inline bool EncodeAsciiCharacter(uchar c, byte* buffer,
+ unsigned capacity, unsigned& offset);
+ static inline bool EncodeNonAsciiCharacter(uchar c, byte* buffer,
+ unsigned capacity, unsigned& offset);
+ static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
+ virtual void Rewind() = 0;
+ protected:
+ virtual void FillBuffer() = 0;
+ // The number of characters left in the current buffer
+ unsigned remaining_;
+ // The current offset within the buffer
+ unsigned cursor_;
+ // The buffer containing the decoded characters.
+ const byte* buffer_;
+};
+
+// --- I n p u t B u f f e r ---
+
+/**
+ * Provides efficient access to encoded characters in strings. It
+ * does so by reading characters one block at a time, rather than one
+ * character at a time, which gives string implementations an
+ * opportunity to optimize the decoding.
+ */
+template <class Reader, class Input = Reader*, unsigned kSize = 256>
+class InputBuffer : public CharacterStream {
+ public:
+ virtual void Rewind();
+ // Makes this buffer read from input (see Utf8InputBuffer::Reset for
+ // a caller).
+ inline void Reset(Input input);
+ void Seek(unsigned position);
+ // As Reset(input), presumably starting at the given position.
+ inline void Reset(unsigned position, Input input);
+ protected:
+ InputBuffer() { }
+ explicit InputBuffer(Input input) { Reset(input); }
+ virtual void FillBuffer();
+
+ // A custom offset that can be used by the string implementation to
+ // mark progress within the encoded string.
+ unsigned offset_;
+ // The input string
+ Input input_;
+ // To avoid heap allocation, we keep an internal buffer to which
+ // the encoded string can write its characters. The string
+ // implementation is free to decide whether it wants to use this
+ // buffer or not.
+ byte util_buffer_[kSize];
+};
+
+// --- U t f 8 I n p u t B u f f e r ---
+
+// An InputBuffer whose backing store is UTF-8 encoded text, decoded by
+// the Utf8 reader.
+template <unsigned s = 256>
+class Utf8InputBuffer : public InputBuffer<Utf8, Buffer<const char*>, s> {
+ public:
+ inline Utf8InputBuffer() { }
+ inline Utf8InputBuffer(const char* data, unsigned length);
+ // Convenience overload wrapping (data, length) in a Buffer.
+ inline void Reset(const char* data, unsigned length) {
+ InputBuffer<Utf8, Buffer<const char*>, s>::Reset(
+ Buffer<const char*>(data, length));
+ }
+};
+
+// Character-class predicates and case mappings.  Each Is(c) tests
+// whether c belongs to the corresponding Unicode category; the
+// implementations are table-driven and live outside this header.
+struct Uppercase {
+ static bool Is(uchar c);
+};
+struct Lowercase {
+ static bool Is(uchar c);
+};
+struct Letter {
+ static bool Is(uchar c);
+};
+struct Space {
+ static bool Is(uchar c);
+};
+struct Titlecase {
+ static bool Is(uchar c);
+};
+struct Number {
+ static bool Is(uchar c);
+};
+struct DecimalDigit {
+ static bool Is(uchar c);
+};
+struct Ideographic {
+ static bool Is(uchar c);
+};
+struct WhiteSpace {
+ static bool Is(uchar c);
+};
+struct HexDigit {
+ static bool Is(uchar c);
+};
+struct AsciiHexDigit {
+ static bool Is(uchar c);
+};
+struct BidiControl {
+ static bool Is(uchar c);
+};
+struct JoinControl {
+ static bool Is(uchar c);
+};
+struct Dash {
+ static bool Is(uchar c);
+};
+struct Hyphen {
+ static bool Is(uchar c);
+};
+struct LineTerminator {
+ static bool Is(uchar c);
+};
+struct CombiningMark {
+ static bool Is(uchar c);
+};
+struct ConnectorPunctuation {
+ static bool Is(uchar c);
+};
+struct ToLowercase {
+ // Converts c to lowercase, writing up to kMaxCaseConvertedSize
+ // characters to result and returning how many were written.  n is
+ // the following character, used for context-dependent mappings;
+ // *allow_caching_ptr is presumably cleared when the mapping may not
+ // be cached (see the Mapping class comment).
+ static int Convert(uchar c,
+ uchar n,
+ uchar* result,
+ bool* allow_caching_ptr);
+};
+struct ToUppercase {
+ // Same contract as ToLowercase::Convert, for the uppercase mapping.
+ static int Convert(uchar c,
+ uchar n,
+ uchar* result,
+ bool* allow_caching_ptr);
+};
+
+} // namespace unibrow
+
+#endif // __UNIBROW_H__
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains support for URI manipulations written in
+// JavaScript.
+
+// Expect $String = global.String;
+
+function URIAddEncodedOctetToBuffer(octet, result, index) {
+ result[index++] = 37; // Char code of '%'.
+ result[index++] = hexCharCodeArray[octet >> 4];
+ result[index++] = hexCharCodeArray[octet & 0x0F];
+ return index;
+};
+
+
+function URIEncodeOctets(octets, result, index) {
+ index = URIAddEncodedOctetToBuffer(octets[0], result, index);
+ if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
+ if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
+ if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
+ return index;
+};
+
+
+function URIEncodeSingle(cc, result, index) {
+ var x = (cc >> 12) & 0xF;
+ var y = (cc >> 6) & 63;
+ var z = cc & 63;
+ var octets = new $Array(3);
+ if (cc <= 0x007F) {
+ octets[0] = cc;
+ } else if (cc <= 0x07FF) {
+ octets[0] = y + 192;
+ octets[1] = z + 128;
+ } else {
+ octets[0] = x + 224;
+ octets[1] = y + 128;
+ octets[2] = z + 128;
+ }
+ return URIEncodeOctets(octets, result, index);
+};
+
+
+// UTF-8 encodes the surrogate pair (cc1 lead, cc2 trail) into four
+// octets and appends them, %XX-escaped, to result.  The code point is
+// 0x10000 + ((cc1 & 0x3FF) << 10) + (cc2 & 0x3FF); u/w/x/y/z below are
+// its bit fields laid out as 11110uuu 10uuwwww 10xxyyyy 10zzzzzz
+// (the +1 on u accounts for the 0x10000 offset).
+function URIEncodePair(cc1 , cc2, result, index) {
+ var u = ((cc1 >> 6) & 0xF) + 1;
+ var w = (cc1 >> 2) & 0xF;
+ var x = cc1 & 3;
+ var y = (cc2 >> 6) & 0xF;
+ var z = cc2 & 63;
+ var octets = new $Array(4);
+ octets[0] = (u >> 2) + 240;
+ octets[1] = (((u & 3) << 4) | w) + 128;
+ octets[2] = ((x << 4) | y) + 128;
+ octets[3] = z + 128;
+ return URIEncodeOctets(octets, result, index);
+};
+
+
+function URIHexCharsToCharCode(ch1, ch2) {
+ if (HexValueOf(ch1) == -1 || HexValueOf(ch2) == -1) {
+ throw new $URIError("URI malformed");
+ }
+ return HexStrToCharCode(ch1 + ch2);
+};
+
+
+// Converts the decoded UTF-8 octets into UTF-16 code units appended to
+// result; returns the new index.  Only multi-octet sequences reach
+// here (Decode stores single octets < 0x80 directly).
+function URIDecodeOctets(octets, result, index) {
+ if (octets[3]) {
+ // Four octets: a supplementary-plane code point; emit a surrogate
+ // pair.  v is the uuuuu field minus one (undoing the 0x10000
+ // offset); 55296 is 0xD800, 56320 is 0xDC00.
+ var x = (octets[2] >> 4) & 3;
+ var y = octets[2] & 0xF;
+ var z = octets[3] & 63;
+ var v = (((octets[0] & 7) << 2) | ((octets[1] >> 4) & 3)) - 1;
+ var w = octets[1] & 0xF;
+ result[index++] = 55296 | (v << 6) | (w << 2) | x;
+ result[index++] = 56320 | (y << 6) | z;
+ return index;
+ }
+ if (octets[2]) {
+ // Three octets: 1110xxxx 10yyyyyy 10zzzzzz.
+ var x = octets[0] & 0xF;
+ var y = octets[1] & 63;
+ var z = octets[2] & 63;
+ result[index++] = (x << 12) | (y << 6) | z;
+ return index;
+ }
+ // Two octets: 110yyyyy 10zzzzzz.
+ var z = octets[1] & 63;
+ var y = octets[0] & 31;
+ result[index++] = (y << 6) | z;
+ return index;
+};
+
+
+// ECMA-262, section 15.1.3
+// Shared implementation of encodeURI/encodeURIComponent: code units
+// accepted by the unescape predicate are copied through unchanged; all
+// others are UTF-8 encoded and %XX-escaped.  Throws URIError on
+// unpaired surrogates.
+function Encode(uri, unescape) {
+ var uriLength = uri.length;
+ var result = new $Array(uriLength);
+ var index = 0;
+ for (var k = 0; k < uriLength; k++) {
+ var cc1 = uri.charCodeAt(k);
+ if (unescape(cc1)) {
+ result[index++] = cc1;
+ } else {
+ // A lone trail surrogate is malformed.
+ if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
+ if (cc1 < 0xD800 || cc1 > 0xDBFF) {
+ index = URIEncodeSingle(cc1, result, index);
+ } else {
+ // Lead surrogate: must be immediately followed by a trail
+ // surrogate; the pair is encoded together.
+ k++;
+ if (k == uriLength) throw new $URIError("URI malformed");
+ var cc2 = uri.charCodeAt(k);
+ if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
+ index = URIEncodePair(cc1, cc2, result, index);
+ }
+ }
+ }
+ return %StringFromCharCodeArray(result);
+};
+
+
+// ECMA-262, section 15.1.3
+function Decode(uri, reserved) {
+ var uriLength = uri.length;
+ var result = new $Array(uriLength);
+ var index = 0;
+ for (var k = 0; k < uriLength; k++) {
+ var ch = uri.charAt(k);
+ if (ch == '%') {
+ if (k + 2 >= uriLength) throw new $URIError("URI malformed");
+ var cc = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
+ if (cc >> 7) {
+ var n = 0;
+ while (((cc << ++n) & 0x80) != 0) ;
+ if (n == 1 || n > 4) throw new $URIError("URI malformed");
+ var octets = new $Array(n);
+ octets[0] = cc;
+ if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
+ for (var i = 1; i < n; i++) {
+ k++;
+ octets[i] = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
+ }
+ index = URIDecodeOctets(octets, result, index);
+ } else {
+ if (reserved(cc)) {
+ result[index++] = 37; // Char code of '%'.
+ result[index++] = uri.charCodeAt(k - 1);
+ result[index++] = uri.charCodeAt(k);
+ } else {
+ result[index++] = cc;
+ }
+ }
+ } else {
+ result[index++] = ch.charCodeAt(0);
+ }
+ }
+ result.length = index;
+ return %StringFromCharCodeArray(result);
+};
+
+
+// ECMA-262 - 15.1.3.1.
+function URIDecode(uri) {
+ function reservedPredicate(cc) {
+ // #$
+ if (35 <= cc && cc <= 36) return true;
+ // &
+ if (cc == 38) return true;
+ // +,
+ if (43 <= cc && cc <= 44) return true;
+ // /
+ if (cc == 47) return true;
+ // :;
+ if (58 <= cc && cc <= 59) return true;
+ // =
+ if (cc == 61) return true;
+ // ?@
+ if (63 <= cc && cc <= 64) return true;
+
+ return false;
+ };
+ var string = ToString(uri);
+ return Decode(string, reservedPredicate);
+};
+
+
+// ECMA-262 - 15.1.3.2.
+function URIDecodeComponent(component) {
+ function reservedPredicate(cc) { return false; };
+ var string = ToString(component);
+ return Decode(string, reservedPredicate);
+};
+
+
+// Does the char code correspond to an alpha-numeric char.
+function isAlphaNumeric(cc) {
+ // a - z
+ if (97 <= cc && cc <= 122) return true;
+ // A - Z
+ if (65 <= cc && cc <= 90) return true;
+ // 0 - 9
+ if (48 <= cc && cc <= 57) return true;
+
+ return false;
+};
+
+
+// ECMA-262 - 15.1.3.3.
+// encodeURI: leaves alphanumerics, uriReserved (; / ? : @ & = + $ ,),
+// uriMark (- _ . ! ~ * ' ( )) and '#' unescaped.
+function URIEncode(uri) {
+ function unescapePredicate(cc) {
+ if (isAlphaNumeric(cc)) return true;
+ // !
+ if (cc == 33) return true;
+ // #$
+ if (35 <= cc && cc <= 36) return true;
+ // &'()*+,-./
+ if (38 <= cc && cc <= 47) return true;
+ // :;
+ if (58 <= cc && cc <= 59) return true;
+ // =
+ if (cc == 61) return true;
+ // ?@
+ if (63 <= cc && cc <= 64) return true;
+ // _
+ if (cc == 95) return true;
+ // ~
+ if (cc == 126) return true;
+
+ return false;
+ };
+
+ var string = ToString(uri);
+ return Encode(string, unescapePredicate);
+};
+
+
+// ECMA-262 - 15.1.3.4
+// encodeURIComponent: leaves only alphanumerics and uriMark
+// (- _ . ! ~ * ' ( )) unescaped.
+function URIEncodeComponent(component) {
+ function unescapePredicate(cc) {
+ if (isAlphaNumeric(cc)) return true;
+ // !
+ if (cc == 33) return true;
+ // '()*
+ if (39 <= cc && cc <= 42) return true;
+ // -.
+ if (45 <= cc && cc <= 46) return true;
+ // _
+ if (cc == 95) return true;
+ // ~
+ if (cc == 126) return true;
+
+ return false;
+ };
+
+ var string = ToString(component);
+ return Encode(string, unescapePredicate);
+};
+
+
+// Hex digit characters resp. their char codes ('0'-'9', 'A'-'F'),
+// indexed by digit value; used when building escape sequences.
+const hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "A", "B", "C", "D", "E", "F"];
+
+const hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 65, 66, 67, 68, 69, 70];
+
+
+function HexValueOf(c) {
+ var code = c.charCodeAt(0);
+
+ // 0-9
+ if (code >= 48 && code <= 57) return code - 48;
+ // A-F
+ if (code >= 65 && code <= 70) return code - 55;
+ // a-f
+ if (code >= 97 && code <= 102) return code - 87;
+
+ return -1;
+};
+
+
+// Convert a character code to 4-digit hex string representation
+// 64 -> 0040, 62234 -> F31A.
+function CharCodeToHex4Str(cc) {
+ var r = "";
+ for (var i = 0; i < 4; ++i) {
+ var c = hexCharArray[cc & 0x0F];
+ r = c + r;
+ cc = cc >>> 4;
+ }
+ return r;
+};
+
+
+// Converts hex string to char code. Not efficient.
+function HexStrToCharCode(s) {
+ var m = 0;
+ var r = 0;
+ for (var i = s.length - 1; i >= 0; --i) {
+ r = r + (HexValueOf(s.charAt(i)) << m);
+ m = m + 4;
+ }
+ return r;
+};
+
+
+// Returns true if all digits in string s are valid hex numbers
+function IsValidHex(s) {
+ for (var i = 0; i < s.length; ++i) {
+ var cc = s.charCodeAt(i);
+ if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
+ // '0'..'9', 'A'..'F' and 'a' .. 'f'.
+ } else {
+ return false;
+ }
+ }
+ return true;
+};
+
+
+// ECMA-262 - B.2.1.
+// escape(): delegates to the %URIEscape runtime function.
+function URIEscape(str) {
+ var s = ToString(str);
+ return %URIEscape(s);
+};
+
+
+// ECMA-262 - B.2.2.
+// unescape(): delegates to the %URIUnescape runtime function.
+function URIUnescape(str) {
+ var s = ToString(str);
+ return %URIUnescape(s);
+}
+
+
+// -------------------------------------------------------------------
+
+// Installs the URI functions (ECMA-262 15.1.3 and B.2) on the global
+// object; invoked once at the bottom of this file.
+function SetupURI() {
+ // Setup non-enumerable URI properties of the global object.
+ InstallProperties(global, DONT_ENUM, {
+ escape: URIEscape,
+ unescape: URIUnescape,
+ decodeURI: URIDecode,
+ decodeURIComponent: URIDecodeComponent,
+ encodeURI: URIEncode,
+ encodeURIComponent: URIEncodeComponent
+ });
+};
+
+SetupURI();
+
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+#include "usage-analyzer.h"
+
+namespace v8 { namespace internal {
+
+// When true (the default), compute weighted read/write usage counts
+// for variables; AnalyzeVariableUsage() below becomes a no-op when
+// the flag is off.
+DEFINE_bool(usage_computation, true, "compute variable usage counts");
+
+// Weight boundaries: scaled weights are clamped to the range
+// [MinWeight, MaxWeight]; the traversal starts at InitialWeight.
+static const int MinWeight = 1;
+static const int MaxWeight = 1000000;
+static const int InitialWeight = 100;
+
+
+// AST visitor that records, for every variable use it encounters, a
+// weighted read or write count. The weight models expected execution
+// frequency (loops multiply it, branches divide it) and the current
+// mode (read vs. write) is carried in is_write_.
+class UsageComputer: public Visitor {
+ public:
+  // Walks the AST rooted at node; returns false on stack overflow.
+  static bool Traverse(Node* node);
+
+  void VisitBlock(Block* node);
+  void VisitDeclaration(Declaration* node);
+  void VisitExpressionStatement(ExpressionStatement* node);
+  void VisitEmptyStatement(EmptyStatement* node);
+  void VisitIfStatement(IfStatement* node);
+  void VisitContinueStatement(ContinueStatement* node);
+  void VisitBreakStatement(BreakStatement* node);
+  void VisitReturnStatement(ReturnStatement* node);
+  void VisitWithEnterStatement(WithEnterStatement* node);
+  void VisitWithExitStatement(WithExitStatement* node);
+  void VisitSwitchStatement(SwitchStatement* node);
+  void VisitLoopStatement(LoopStatement* node);
+  void VisitForInStatement(ForInStatement* node);
+  void VisitTryCatch(TryCatch* node);
+  void VisitTryFinally(TryFinally* node);
+  void VisitDebuggerStatement(DebuggerStatement* node);
+  void VisitFunctionLiteral(FunctionLiteral* node);
+  void VisitFunctionBoilerplateLiteral(FunctionBoilerplateLiteral* node);
+  void VisitConditional(Conditional* node);
+  void VisitSlot(Slot* node);
+  void VisitVariable(Variable* node);
+  void VisitVariableProxy(VariableProxy* node);
+  void VisitLiteral(Literal* node);
+  void VisitRegExpLiteral(RegExpLiteral* node);
+  void VisitObjectLiteral(ObjectLiteral* node);
+  void VisitArrayLiteral(ArrayLiteral* node);
+  void VisitAssignment(Assignment* node);
+  void VisitThrow(Throw* node);
+  void VisitProperty(Property* node);
+  void VisitCall(Call* node);
+  void VisitCallNew(CallNew* node);
+  void VisitCallRuntime(CallRuntime* node);
+  void VisitUnaryOperation(UnaryOperation* node);
+  void VisitCountOperation(CountOperation* node);
+  void VisitBinaryOperation(BinaryOperation* node);
+  void VisitCompareOperation(CompareOperation* node);
+  void VisitThisFunction(ThisFunction* node);
+
+ private:
+  int weight_;      // current expected-execution-frequency weight
+  bool is_write_;   // true when visiting an expression as a write target
+
+  UsageComputer(int weight, bool is_write);
+  virtual ~UsageComputer();
+
+  // Helper functions
+  void RecordUses(UseCount* uses);
+  void Read(Expression* x);
+  void Write(Expression* x);
+  void ReadList(ZoneList<Expression*>* list);
+  void ReadList(ZoneList<ObjectLiteral::Property*>* list);
+
+  // WeightScaler manipulates weight_ directly.
+  friend class WeightScaler;
+};
+
+
+// RAII helper that multiplies a UsageComputer's weight by a scale
+// factor for the duration of a scope (clamped to the weight
+// boundaries) and restores the previous weight on destruction.
+class WeightScaler BASE_EMBEDDED {
+ public:
+  WeightScaler(UsageComputer* uc, float scale);
+  ~WeightScaler();
+
+ private:
+  UsageComputer* uc_;   // the computer whose weight is scaled
+  int old_weight_;      // weight to restore on scope exit
+};
+
+
+// ----------------------------------------------------------------------------
+// Implementation of UsageComputer
+
+// Entry point: walks the AST rooted at node in read mode with the
+// initial weight. Returns false if the traversal overflowed the stack.
+bool UsageComputer::Traverse(Node* node) {
+  UsageComputer uc(InitialWeight, false);
+  uc.Visit(node);
+  return !uc.HasStackOverflow();
+}
+
+
+void UsageComputer::VisitBlock(Block* node) {
+  VisitStatements(node->statements());
+}
+
+
+void UsageComputer::VisitDeclaration(Declaration* node) {
+  // A declaration writes the declared variable; a function
+  // declaration additionally visits the function body.
+  Write(node->proxy());
+  if (node->fun() != NULL)
+    VisitFunctionLiteral(node->fun());
+}
+
+
+void UsageComputer::VisitExpressionStatement(ExpressionStatement* node) {
+  Visit(node->expression());
+}
+
+
+void UsageComputer::VisitEmptyStatement(EmptyStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitIfStatement(IfStatement* node) {
+  Read(node->condition());
+  // Each branch is assumed to run half the time.
+  { WeightScaler ws(this, 0.5);  // executed 50% of the time
+    Visit(node->then_statement());
+    Visit(node->else_statement());
+  }
+}
+
+
+void UsageComputer::VisitContinueStatement(ContinueStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitBreakStatement(BreakStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitReturnStatement(ReturnStatement* node) {
+  Read(node->expression());
+}
+
+
+void UsageComputer::VisitWithEnterStatement(WithEnterStatement* node) {
+  Read(node->expression());
+}
+
+
+void UsageComputer::VisitWithExitStatement(WithExitStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitSwitchStatement(SwitchStatement* node) {
+  Read(node->tag());
+  ZoneList<CaseClause*>* cases = node->cases();
+  // Each clause is assumed equally likely, so its weight is scaled
+  // by 1/#cases. Clauses are visited in reverse order; the order
+  // does not matter since only counts are accumulated.
+  for (int i = cases->length(); i-- > 0;) {
+    WeightScaler ws(this, static_cast<float>(1.0 / cases->length()));
+    CaseClause* clause = cases->at(i);
+    if (!clause->is_default())
+      Read(clause->label());
+    VisitStatements(clause->statements());
+  }
+}
+
+
+void UsageComputer::VisitLoopStatement(LoopStatement* node) {
+  if (node->init() != NULL)
+    Visit(node->init());
+  // The loop body is assumed to run 10 times on average.
+  { WeightScaler ws(this, 10.0);  // executed in each iteration
+    if (node->cond() != NULL)
+      Read(node->cond());
+    if (node->next() != NULL)
+      Visit(node->next());
+    Visit(node->body());
+  }
+}
+
+
+void UsageComputer::VisitForInStatement(ForInStatement* node) {
+  // Same 10x iteration assumption as for regular loops; note that the
+  // scaler here also covers the enumerable expression, which is
+  // evaluated only once.
+  WeightScaler ws(this, 10.0);
+  Write(node->each());
+  Read(node->enumerable());
+  Visit(node->body());
+}
+
+
+void UsageComputer::VisitTryCatch(TryCatch* node) {
+  Visit(node->try_block());
+  // The catch block is assumed to run rarely (25% of the time).
+  { WeightScaler ws(this, 0.25);
+    Write(node->catch_var());
+    Visit(node->catch_block());
+  }
+}
+
+
+void UsageComputer::VisitTryFinally(TryFinally* node) {
+  Visit(node->try_block());
+  // The finally variable, if present, is both written (to save state)
+  // and read (to restore it).
+  Expression* var = node->finally_var();
+  if (var != NULL) {
+    Write(var);
+    Read(var);
+  }
+  Visit(node->finally_block());
+}
+
+
+void UsageComputer::VisitDebuggerStatement(DebuggerStatement* node) {
+}
+
+
+void UsageComputer::VisitFunctionLiteral(FunctionLiteral* node) {
+  // Visit the scope's declarations explicitly, then the body.
+  ZoneList<Declaration*>* decls = node->scope()->declarations();
+  for (int i = 0; i < decls->length(); i++) VisitDeclaration(decls->at(i));
+  VisitStatements(node->body());
+}
+
+
+void UsageComputer::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  // Do nothing.
+}
+
+
+void UsageComputer::VisitConditional(Conditional* node) {
+  Read(node->condition());
+  // As with if-statements, each arm is assumed to run half the time.
+  { WeightScaler ws(this, 0.5);
+    Read(node->then_expression());
+    Read(node->else_expression());
+  }
+}
+
+
+void UsageComputer::VisitSlot(Slot* node) {
+  // Slots are introduced after usage analysis; they never appear here.
+  UNREACHABLE();
+}
+
+
+void UsageComputer::VisitVariable(Variable* node) {
+  RecordUses(node->var_uses());
+}
+
+
+void UsageComputer::VisitVariableProxy(VariableProxy* node) {
+  // The proxy may refer to a variable in which case it was bound via
+  // VariableProxy::BindTo.
+  RecordUses(node->var_uses());
+}
+
+
+void UsageComputer::VisitLiteral(Literal* node) {
+  // nothing to do
+}
+
+void UsageComputer::VisitRegExpLiteral(RegExpLiteral* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitObjectLiteral(ObjectLiteral* node) {
+  Read(node->result());
+  ReadList(node->properties());
+}
+
+
+void UsageComputer::VisitArrayLiteral(ArrayLiteral* node) {
+  Read(node->result());
+  ReadList(node->values());
+}
+
+
+void UsageComputer::VisitAssignment(Assignment* node) {
+  // Compound assignments (e.g. +=) also read the target.
+  if (node->op() != Token::ASSIGN)
+    Read(node->target());
+  Write(node->target());
+  Read(node->value());
+}
+
+
+void UsageComputer::VisitThrow(Throw* node) {
+  Read(node->exception());
+}
+
+
+void UsageComputer::VisitProperty(Property* node) {
+  // In any case (read or write) we read both the
+  // node's object and the key.
+  Read(node->obj());
+  Read(node->key());
+  // If the node's object is a variable proxy,
+  // we have a 'simple' object property access. We count
+  // the access via the variable or proxy's object uses.
+  VariableProxy* proxy = node->obj()->AsVariableProxy();
+  if (proxy != NULL) {
+    RecordUses(proxy->obj_uses());
+  }
+}
+
+
+void UsageComputer::VisitCall(Call* node) {
+  Read(node->expression());
+  ReadList(node->arguments());
+}
+
+
+void UsageComputer::VisitCallNew(CallNew* node) {
+  // A 'new' expression uses its target and arguments the same way a
+  // plain call does.
+  VisitCall(node);
+}
+
+
+void UsageComputer::VisitCallRuntime(CallRuntime* node) {
+  ReadList(node->arguments());
+}
+
+
+void UsageComputer::VisitUnaryOperation(UnaryOperation* node) {
+  Read(node->expression());
+}
+
+
+void UsageComputer::VisitCountOperation(CountOperation* node) {
+  // ++/-- both reads and writes its operand.
+  Read(node->expression());
+  Write(node->expression());
+}
+
+
+void UsageComputer::VisitBinaryOperation(BinaryOperation* node) {
+  Read(node->left());
+  Read(node->right());
+}
+
+
+void UsageComputer::VisitCompareOperation(CompareOperation* node) {
+  Read(node->left());
+  Read(node->right());
+}
+
+
+void UsageComputer::VisitThisFunction(ThisFunction* node) {
+}
+
+
+UsageComputer::UsageComputer(int weight, bool is_write) {
+  weight_ = weight;
+  is_write_ = is_write;
+}
+
+
+UsageComputer::~UsageComputer() {
+  // nothing to do
+}
+
+
+// Records one use at the current weight, as a write if the computer is
+// in write mode and as a read otherwise.
+void UsageComputer::RecordUses(UseCount* uses) {
+  if (is_write_)
+    uses->RecordWrite(weight_);
+  else
+    uses->RecordRead(weight_);
+}
+
+
+// Visits x in read mode. If this computer is currently in write mode,
+// a temporary read-mode computer with the same weight is used so the
+// current mode is left untouched.
+void UsageComputer::Read(Expression* x) {
+  if (is_write_) {
+    UsageComputer uc(weight_, false);
+    uc.Visit(x);
+  } else {
+    Visit(x);
+  }
+}
+
+
+// Visits x in write mode; mirror image of Read() above.
+void UsageComputer::Write(Expression* x) {
+  if (!is_write_) {
+    UsageComputer uc(weight_, true);
+    uc.Visit(x);
+  } else {
+    Visit(x);
+  }
+}
+
+
+// Reads every expression in the list (in reverse order; order is
+// irrelevant since only counts accumulate).
+void UsageComputer::ReadList(ZoneList<Expression*>* list) {
+  for (int i = list->length(); i-- > 0; )
+    Read(list->at(i));
+}
+
+
+// Reads the value of every object-literal property in the list.
+void UsageComputer::ReadList(ZoneList<ObjectLiteral::Property*>* list) {
+  for (int i = list->length(); i-- > 0; )
+    Read(list->at(i)->value());
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of WeightScaler
+
+// Saves the computer's current weight and replaces it with
+// weight * scale, clamped to [MinWeight, MaxWeight].
+WeightScaler::WeightScaler(UsageComputer* uc, float scale) {
+  uc_ = uc;
+  old_weight_ = uc->weight_;
+  int new_weight = static_cast<int>(uc->weight_ * scale);
+  if (new_weight <= 0) new_weight = MinWeight;
+  else if (new_weight > MaxWeight) new_weight = MaxWeight;
+  uc->weight_ = new_weight;
+}
+
+
+// Restores the weight saved by the constructor.
+WeightScaler::~WeightScaler() {
+  uc_->weight_ = old_weight_;
+}
+
+
+// ----------------------------------------------------------------------------
+// Interface to variable usage analysis
+
+// Computes weighted usage counts for all variables reachable from lit.
+// Returns true on success (or trivially when disabled via the
+// --usage_computation flag); false if traversal overflowed the stack.
+bool AnalyzeVariableUsage(FunctionLiteral* lit) {
+  if (!FLAG_usage_computation) return true;
+  return UsageComputer::Traverse(lit);
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_USAGE_ANALYSER_H_
+#define V8_USAGE_ANALYSER_H_
+
+namespace v8 { namespace internal {
+
+// Compute usage counts for all variables.
+// Used for variable allocation.
+bool AnalyzeVariableUsage(FunctionLiteral* lit);
+
+} } // namespace v8::internal
+
+#endif // V8_USAGE_ANALYSER_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "platform.h"
+
+#include "sys/stat.h"
+
+namespace v8 { namespace internal {
+
+
+// Returns the smallest power of 2 greater than or equal to x
+// (bit-smearing trick from Hacker's Delight).
+int32_t NextPowerOf2(uint32_t x) {
+  // Propagate the highest set bit of (x - 1) into every lower
+  // position, then add one to land on the next power of two.
+  x -= 1;
+  x |= x >> 1;
+  x |= x >> 2;
+  x |= x >> 4;
+  x |= x >> 8;
+  x |= x >> 16;
+  return x + 1;
+}
+
+
+// Encodes signed integer x into the buffer at p using a 7-bits-per-
+// byte little-endian variable-length format; the final byte has its
+// value offset by 192 to mark termination. Returns the position past
+// the encoded integer. Must be kept in sync with DecodeInt below.
+byte* EncodeInt(byte* p, int x) {
+  while (x < -64 || x >= 64) {
+    *p++ = static_cast<byte>(x & 127);
+    x = ArithmeticShiftRight(x, 7);
+  }
+  // -64 <= x && x < 64
+  *p++ = static_cast<byte>(x + 192);
+  return p;
+}
+
+
+// Decodes an integer written by EncodeInt, storing it in *x and
+// returning the position past the encoded bytes. Bytes < 128 carry
+// 7 payload bits each; the terminating byte (>= 128) carries the
+// sign-bearing top bits (offset by 192).
+byte* DecodeInt(byte* p, int* x) {
+  int r = 0;
+  unsigned int s = 0;
+  byte b = *p++;
+  while (b < 128) {
+    r |= static_cast<int>(b) << s;
+    s += 7;
+    b = *p++;
+  }
+  // b >= 128
+  *x = r | ((static_cast<int>(b) - 192) << s);
+  return p;
+}
+
+
+// Encodes unsigned integer x moving backward from position p; the
+// last byte written (the return value) is offset by 128 so the
+// matching forward decoder knows where the number starts.
+byte* EncodeUnsignedIntBackward(byte* p, unsigned int x) {
+  while (x >= 128) {
+    *--p = static_cast<byte>(x & 127);
+    x = x >> 7;
+  }
+  // x < 128
+  *--p = static_cast<byte>(x + 128);
+  return p;
+}
+
+
+// printf replacement that forwards to the platform layer; used so the
+// rest of V8 avoids direct stdio formatting (see header comment).
+void PrintF(const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  OS::VPrint(format, arguments);
+  va_end(arguments);
+}
+
+
+// Flushes stdout; counterpart of PrintF above.
+void Flush() {
+  fflush(stdout);
+}
+
+
+// Prints prompt to stdout and reads one (possibly backslash-continued)
+// line from stdin. Returns a freshly allocated, 0-terminated string
+// that the caller must release with DeleteArray, or NULL on read error.
+char* ReadLine(const char* prompt) {
+  char* result = NULL;
+  char line_buf[256];
+  int offset = 0;
+  bool keep_going = true;
+  // BUG FIX: the prompt was previously passed to fprintf as the format
+  // string, so a prompt containing '%' conversions caused undefined
+  // behavior. Print it verbatim instead.
+  fputs(prompt, stdout);
+  fflush(stdout);
+  while (keep_going) {
+    if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
+      // fgets got an error. Just give up.
+      if (result != NULL) {
+        DeleteArray(result);
+      }
+      return NULL;
+    }
+    int len = static_cast<int>(strlen(line_buf));
+    if (len > 1 &&
+        line_buf[len - 2] == '\\' &&
+        line_buf[len - 1] == '\n') {
+      // When we read a line that ends with a "\" we remove the escape and
+      // append the remainder.
+      line_buf[len - 2] = '\n';
+      line_buf[len - 1] = 0;
+      len -= 1;
+    } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
+      // Since we read a new line we are done reading the line. This
+      // will exit the loop after copying this buffer into the result.
+      keep_going = false;
+    }
+    if (result == NULL) {
+      // Allocate the initial result and make room for the terminating '\0'
+      result = NewArray<char>(len + 1);
+    } else {
+      // Allocate a new result with enough room for the new addition.
+      int new_len = offset + len + 1;
+      char* new_result = NewArray<char>(new_len);
+      // Copy the existing input into the new array and set the new
+      // array as the result.
+      memcpy(new_result, result, offset * kCharSize);
+      DeleteArray(result);
+      result = new_result;
+    }
+    // Copy the newly read line into the result.
+    memcpy(result + offset, line_buf, len * kCharSize);
+    offset += len;
+  }
+  ASSERT(result != NULL);
+  result[offset] = '\0';
+  return result;
+}
+
+
+// Reads the entire contents of filename into a NewArray<char> buffer
+// of *size + extra_space chars (the extra space is uninitialized —
+// callers such as ReadFile use it for a terminating '\0'). Returns
+// NULL on any error; prints a diagnostic when verbose is set.
+// The returned buffer must be freed with DeleteArray.
+char* ReadCharsFromFile(const char* filename,
+                        int* size,
+                        int extra_space,
+                        bool verbose) {
+  FILE* file = fopen(filename, "rb");
+  if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
+    // BUG FIX: when fopen succeeded but fseek failed, the FILE* was
+    // previously leaked.
+    if (file != NULL) fclose(file);
+    if (verbose) {
+      OS::PrintError("Cannot read from file %s.\n", filename);
+    }
+    return NULL;
+  }
+
+  // Get the size of the file and rewind it.
+  *size = ftell(file);
+  if (*size < 0) {
+    // ftell failed (e.g. the stream is not seekable).
+    fclose(file);
+    if (verbose) {
+      OS::PrintError("Cannot read from file %s.\n", filename);
+    }
+    return NULL;
+  }
+  rewind(file);
+
+  char* result = NewArray<char>(*size + extra_space);
+  for (int i = 0; i < *size;) {
+    int read = fread(&result[i], 1, *size - i, file);
+    if (read <= 0) {
+      fclose(file);
+      DeleteArray(result);
+      return NULL;
+    }
+    i += read;
+  }
+  fclose(file);
+  return result;
+}
+
+
+// Convenience wrapper: reads a file's raw contents with no extra
+// space. *size receives the number of chars read; the buffer is NOT
+// 0-terminated and must be freed with DeleteArray.
+char* ReadChars(const char* filename, int* size, bool verbose) {
+  return ReadCharsFromFile(filename, size, 0, verbose);
+}
+
+
+// Reads filename into a 0-terminated buffer wrapped in a Vector (the
+// terminator is not counted in the vector's length). *exists reports
+// whether the file could be read; on failure an empty vector is
+// returned. The caller owns the vector's storage.
+Vector<const char> ReadFile(const char* filename,
+                            bool* exists,
+                            bool verbose) {
+  int size;
+  // Ask for one extra char so we can 0-terminate the contents.
+  char* chars = ReadCharsFromFile(filename, &size, 1, verbose);
+  *exists = (chars != NULL);
+  if (chars == NULL) return Vector<const char>::empty();
+  chars[size] = '\0';
+  return Vector<const char>(chars, size);
+}
+
+
+// Writes size chars from str to f, retrying until everything is
+// written or fwrite makes no progress. Returns the number of chars
+// actually written.
+int WriteCharsToFile(const char* str, int size, FILE* f) {
+  int written = 0;
+  while (written < size) {
+    int chunk = fwrite(str, 1, size - written, f);
+    if (chunk == 0) break;  // no progress; report the partial count
+    written += chunk;
+    str += chunk;
+  }
+  return written;
+}
+
+
+// Writes size chars from str to the file given by filename,
+// overwriting any existing contents. Returns the number of chars
+// written (0 if the file could not be opened).
+int WriteChars(const char* filename,
+               const char* str,
+               int size,
+               bool verbose) {
+  FILE* f = fopen(filename, "wb");
+  if (f == NULL) {
+    if (verbose) {
+      // BUG FIX: the message previously said "for reading" although
+      // the file is opened for writing.
+      OS::PrintError("Cannot open file %s for writing.\n", filename);
+    }
+    return 0;
+  }
+  int written = WriteCharsToFile(str, size, f);
+  fclose(f);
+  return written;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UTILS_H_
+#define V8_UTILS_H_
+
+namespace v8 { namespace internal {
+
+// ----------------------------------------------------------------------------
+// General helper functions
+
+// Returns true iff x is a power of 2. Does not work for zero
+// (it reports zero as a power of two).
+template <typename T>
+static inline bool IsPowerOf2(T x) {
+  return (x & (x - 1)) == 0;
+}
+
+
+// Returns smallest power of 2 greater or equal to x (from Hacker's Delight).
+int32_t NextPowerOf2(uint32_t x);
+
+
+// The C++ standard leaves the semantics of '>>'
+// undefined for negative signed operands. Most
+// implementations do the right thing, though.
+// This wrapper documents that V8 relies on sign-propagating shifts.
+static inline int ArithmeticShiftRight(int x, int s) {
+  return x >> s;
+}
+
+
+// Compute the 0-relative offset of some absolute value x of type T.
+// This allows conversion of Addresses and integral types into 0-relative
+// int offsets.
+template <typename T>
+static inline int OffsetFrom(T x) {
+  return x - static_cast<T>(0);
+}
+
+
+// Compute the absolute value of type T for some 0-relative offset x.
+// This allows conversion of 0-relative int offsets into Addresses
+// and integral types. Inverse of OffsetFrom above.
+template <typename T>
+static inline T AddressFrom(int x) {
+  return static_cast<T>(0) + x;
+}
+
+
+// Return the largest multiple of m which is <= x.
+// m must be a power of 2 (masking with -m clears the low bits).
+template <typename T>
+static inline T RoundDown(T x, int m) {
+  ASSERT(IsPowerOf2(m));
+  return AddressFrom<T>(OffsetFrom(x) & -m);
+}
+
+
+// Return the smallest multiple of m which is >= x.
+template <typename T>
+static inline T RoundUp(T x, int m) {
+  return RoundDown(x + m - 1, m);
+}
+
+
+// Returns true iff value is a multiple of alignment (which must be a
+// power of 2).
+template <typename T>
+static inline bool IsAligned(T value, T alignment) {
+  ASSERT(IsPowerOf2(alignment));
+  return (value & (alignment - 1)) == 0;
+}
+
+
+// Returns true if (addr + offset) is aligned.
+static inline bool IsAddressAligned(Address addr, int alignment, int offset) {
+  int offs = OffsetFrom(addr + offset);
+  return IsAligned(offs, alignment);
+}
+
+
+// Returns the maximum of the two parameters.
+template <typename T>
+static T Max(T a, T b) {
+  return a < b ? b : a;
+}
+
+
+// Returns the minimum of the two parameters.
+template <typename T>
+static T Min(T a, T b) {
+  return a < b ? a : b;
+}
+
+
+// ----------------------------------------------------------------------------
+// BitField is a help template for encoding and decode bitfield with unsigned
+// content. The field occupies 'size' bits starting at bit 'shift'
+// within a uint32_t.
+template<class T, int shift, int size>
+class BitField {
+ public:
+  // Tells whether the provided value fits into the bit field.
+  static bool is_valid(T value) {
+    return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
+  }
+
+  // Returns a uint32_t mask of bit field.
+  static uint32_t mask() {
+    return (1U << (size + shift)) - (1U << shift);
+  }
+
+  // Returns a uint32_t with the bit field value encoded.
+  static uint32_t encode(T value) {
+    ASSERT(is_valid(value));
+    return static_cast<uint32_t>(value) << shift;
+  }
+
+  // Extracts the bit field from the value.
+  static T decode(uint32_t value) {
+    return static_cast<T>((value >> shift) & ((1U << (size)) - 1));
+  }
+};
+
+
+// ----------------------------------------------------------------------------
+// Support for compressed, machine-independent encoding
+// and decoding of integer values of arbitrary size.
+
+// Encoding and decoding from/to a buffer at position p;
+// the result is the position after the encoded integer.
+// Small signed integers in the range -64 <= x && x < 64
+// are encoded in 1 byte; larger values are encoded in 2
+// or more bytes. At most sizeof(int) + 1 bytes are used
+// in the worst case.
+byte* EncodeInt(byte* p, int x);
+byte* DecodeInt(byte* p, int* x);
+
+
+// Encoding and decoding from/to a buffer at position p - 1
+// moving backward; the result is the position of the last
+// byte written. These routines are useful to read/write
+// into a buffer starting at the end of the buffer.
+byte* EncodeUnsignedIntBackward(byte* p, unsigned int x);
+
+// The decoding function is inlined since its performance is
+// important to mark-sweep garbage collection.
+// Walks backward from p - 1: the first byte seen (>= 128) carries the
+// low 7 bits offset by 128; earlier bytes (< 128) carry 7 higher bits
+// each until another terminator byte (>= 128) is reached.
+inline byte* DecodeUnsignedIntBackward(byte* p, unsigned int* x) {
+  byte b = *--p;
+  if (b >= 128) {
+    // Common fast case: single-byte encoding (value < 128).
+    *x = static_cast<unsigned int>(b) - 128;
+    return p;
+  }
+  unsigned int r = static_cast<unsigned int>(b);
+  unsigned int s = 7;
+  b = *--p;
+  while (b < 128) {
+    r |= static_cast<unsigned int>(b) << s;
+    s += 7;
+    b = *--p;
+  }
+  // b >= 128
+  *x = r | ((static_cast<unsigned int>(b) - 128) << s);
+  return p;
+}
+
+
+// ----------------------------------------------------------------------------
+// I/O support.
+
+// Our version of printf(). Avoids compilation errors that we get
+// with standard printf when attempting to print pointers, etc.
+// (the errors are due to the extra compilation flags, which we
+// want elsewhere).
+void PrintF(const char* format, ...);
+
+// Our version of fflush.
+void Flush();
+
+
+// Read a line of characters after printing the prompt to stdout. The resulting
+// char* needs to be disposed of with DeleteArray by the caller.
+char* ReadLine(const char* prompt);
+
+
+// Read and return the raw chars in a file. The size of the buffer is returned
+// in *size.
+// The returned buffer is not 0-terminated. It must be freed by the caller.
+char* ReadChars(const char* filename, int* size, bool verbose = true);
+
+
+// Write size chars from str to the file given by filename.
+// The file is overwritten. Returns the number of chars written.
+int WriteChars(const char* filename,
+               const char* str,
+               int size,
+               bool verbose = true);
+
+
+// Write the C code
+// const char* <varname> = "<str>";
+// const int <varname>_len = <len>;
+// to the file given by filename. Only the first len chars are written.
+int WriteAsCFile(const char* filename, const char* varname,
+                 const char* str, int size, bool verbose = true);
+
+
+// ----------------------------------------------------------------------------
+// Miscellaneous
+
+// A static resource holds a static instance that can be reserved in
+// a local scope using an instance of Access. Attempts to re-reserve
+// the instance will cause an error.
+template <typename T>
+class StaticResource {
+ public:
+  StaticResource() : is_reserved_(false) {}
+
+ private:
+  // Only Access may reserve/release the instance.
+  template <typename S> friend class Access;
+  T instance_;        // the shared instance handed out via Access
+  bool is_reserved_;  // true while an Access object holds the instance
+};
+
+
+// Locally scoped access to a static resource. Reserves the resource
+// for the lifetime of this object and releases it on destruction;
+// double reservation is caught by the ASSERT (debug builds only —
+// this is not a thread-safe lock).
+template <typename T>
+class Access {
+ public:
+  explicit Access(StaticResource<T>* resource)
+    : resource_(resource)
+    , instance_(&resource->instance_) {
+    ASSERT(!resource->is_reserved_);
+    resource->is_reserved_ = true;
+  }
+
+  ~Access() {
+    resource_->is_reserved_ = false;
+    resource_ = NULL;
+    instance_ = NULL;
+  }
+
+  T* value()  { return instance_; }
+  T* operator -> ()  { return instance_; }
+
+ private:
+  StaticResource<T>* resource_;  // resource being held
+  T* instance_;                  // cached pointer to its instance
+};
+
+
+// A non-owning view over a contiguous array of T plus its length.
+// Ownership conventions are per call site: Clone() allocates and
+// Dispose() frees, but the destructor never releases the storage.
+template <typename T>
+class Vector {
+ public:
+  Vector(T* data, int length) : start_(data), length_(length) {
+    ASSERT(length == 0 || (length > 0 && data != NULL));
+  }
+
+  // Returns the length of the vector.
+  int length() const { return length_; }
+
+  // Returns whether or not the vector is empty.
+  bool is_empty() const { return length_ == 0; }
+
+  // Returns the pointer to the start of the data in the vector.
+  T* start() const { return start_; }
+
+  // Access individual vector elements - checks bounds in debug mode.
+  T& operator[](int index) const {
+    ASSERT(0 <= index && index < length_);
+    return start_[index];
+  }
+
+  // Returns a clone of this vector with a new backing store.
+  Vector<T> Clone() const {
+    T* result = NewArray<T>(length_);
+    for (int i = 0; i < length_; i++) result[i] = start_[i];
+    return Vector<T>(result, length_);
+  }
+
+  // Releases the array underlying this vector. Once disposed the
+  // vector is empty.
+  void Dispose() {
+    DeleteArray(start_);
+    start_ = NULL;
+    length_ = 0;
+  }
+
+  // Factory method for creating empty vectors.
+  static Vector<T> empty() { return Vector<T>(NULL, 0); }
+
+ private:
+  T* start_;    // first element (not owned)
+  int length_;  // number of elements
+};
+
+
+// Wraps a NUL-terminated C string in a read-only Vector (the
+// terminator is not included in the length). The cast makes the
+// size_t -> int narrowing explicit instead of an implicit,
+// warning-prone conversion.
+inline Vector<const char> CStrVector(const char* data) {
+  return Vector<const char>(data, static_cast<int>(strlen(data)));
+}
+
+// Same as CStrVector but yields a mutable view of the characters.
+inline Vector<char> MutableCStrVector(char* data) {
+  return Vector<char>(data, static_cast<int>(strlen(data)));
+}
+
+// Reinterprets an array of Handle<T> as a Vector of Handle<Object>.
+// Safe only because Handle<T> is assumed to have the same layout for
+// all T (it wraps a single location pointer).
+template <typename T>
+inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
+                                             int length) {
+  return Vector< Handle<Object> >(
+      reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
+}
+
+
+// Simple support to read a file into a 0-terminated C-string.
+// The returned buffer must be freed by the caller.
+// On return, *exists tells whether the file existed.
+Vector<const char> ReadFile(const char* filename,
+                            bool* exists,
+                            bool verbose = true);
+
+
+// Simple wrapper that allows an ExternalString to refer to a
+// Vector<const char>. Doesn't assume ownership of the data, so the
+// vector's backing store must outlive this adapter.
+class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
+ public:
+  explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
+
+  virtual const char* data() const { return data_.start(); }
+
+  virtual size_t length() const { return data_.length(); }
+
+ private:
+  Vector<const char> data_;  // the wrapped characters (not owned)
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_UTILS_H_
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "v8-counters.h"
+
+namespace v8 { namespace internal {
+
+// Instantiate the static StatsRate members declared by STATS_RATE_LIST;
+// L###caption produces a wide-string literal of the caption token.
+#define SR(name, caption) StatsRate Counters::name(L###caption, k_##name);
+  STATS_RATE_LIST(SR)
+#undef SR
+
+// Instantiate the static StatsCounter members for both counter lists.
+#define SC(name, caption) StatsCounter Counters::name(L###caption, k_##name);
+  STATS_COUNTER_LIST_1(SC)
+  STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+// One counter per VM state tag, named "V8.State<tag>".
+StatsCounter Counters::state_counters[state_tag_count] = {
+#define COUNTER_NAME(name) StatsCounter(L"V8.State" L###name, k_##name),
+  STATE_TAG_LIST(COUNTER_NAME)
+#undef COUNTER_NAME
+};
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_COUNTERS_H_
+#define V8_V8_COUNTERS_H_
+
+#include "counters.h"
+
+namespace v8 { namespace internal {
+
+// Timed rates (time histograms): each entry expands SR(field_name, Caption).
+// Comments inside the macro must stay /* */ so the \ continuations survive.
+#define STATS_RATE_LIST(SR) \
+ SR(gc_compactor, V8.GCCompactor) /* GC Compactor time */ \
+ SR(gc_scavenger, V8.GCScavenger) /* GC Scavenger time */ \
+ SR(compile, V8.Compile) /* Compile time*/ \
+ SR(compile_eval, V8.CompileEval) /* Eval compile time */ \
+ SR(compile_lazy, V8.CompileLazy) /* Lazy compile time */ \
+ SR(parse, V8.Parse) /* Parse time */ \
+ SR(parse_lazy, V8.ParseLazy) /* Lazy parse time */ \
+ SR(pre_parse, V8.PreParse) /* Pre-parse time */
+
+// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
+// Intellisense to crash. It was broken into two macros (each of length 40
+// lines) rather than one macro (of length about 80 lines) to work around
+// this problem. Please avoid using recursive macros of this length when
+// possible.
+// Plain counters, part 1 of 2 (split to keep each macro short; see the
+// MSVC Intellisense warning above).  Each entry expands SC(field, Caption).
+#define STATS_COUNTER_LIST_1(SC) \
+ /* Global Handle Count*/ \
+ SC(global_handles, V8.GlobalHandles) \
+ /* Global Object Count */ \
+ SC(global_objects, V8.GlobalObjects) \
+ /* Mallocs from PCRE */ \
+ SC(pcre_mallocs, V8.PcreMallocCount) \
+ /* OS Memory allocated */ \
+ SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
+ SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
+ SC(alive_after_last_gc, V8.AliveAfterLastGC) \
+ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
+ SC(objs_since_last_full, V8.ObjsSinceLastFull) \
+ SC(symbol_table_capacity, V8.SymbolTableCapacity) \
+ SC(number_of_symbols, V8.NumberOfSymbols) \
+ /* Current amount of memory in external string buffers. */ \
+ SC(total_external_string_memory, V8.TotalExternalStringMemory) \
+ SC(script_wrappers, V8.ScriptWrappers) \
+ SC(call_initialize_stubs, V8.CallInitializeStubs) \
+ SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
+ SC(call_normal_stubs, V8.CallNormalStubs) \
+ SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
+ SC(arguments_adaptors, V8.ArgumentsAdaptors) \
+ /* Amount of evaled source code. */ \
+ SC(total_eval_size, V8.TotalEvalSize) \
+ /* Amount of loaded source code. */ \
+ SC(total_load_size, V8.TotalLoadSize) \
+ /* Amount of parsed source code. */ \
+ SC(total_parse_size, V8.TotalParseSize) \
+ /* Amount of source code skipped over using preparsing. */ \
+ SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
+ /* Amount of compiled source code. */ \
+ SC(total_compile_size, V8.TotalCompileSize)
+
+
+// Plain counters, part 2 of 2: code-size, GC-cause and IC-stub statistics.
+#define STATS_COUNTER_LIST_2(SC) \
+ /* Number of code stubs. */ \
+ SC(code_stubs, V8.CodeStubs) \
+ /* Amount of stub code. */ \
+ SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
+ /* Amount of (JS) compiled code. */ \
+ SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
+ SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
+ SC(gc_compactor_caused_by_promoted_data, \
+ V8.GCCompactorCausedByPromotedData) \
+ SC(gc_compactor_caused_by_oldspace_exhaustion, \
+ V8.GCCompactorCausedByOldspaceExhaustion) \
+ SC(gc_compactor_caused_by_weak_handles, \
+ V8.GCCompactorCausedByWeakHandles) \
+ /* How is the generic keyed-load stub used? */ \
+ SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
+ SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
+ SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
+ /* Count how much the monomorphic keyed-load stubs are hit. */ \
+ SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
+ SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
+ SC(keyed_load_array_length, V8.KeyedLoadArrayLength) \
+ SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction) \
+ SC(keyed_load_field, V8.KeyedLoadField) \
+ SC(keyed_load_callback, V8.KeyedLoadCallback) \
+ SC(keyed_load_interceptor, V8.KeyedLoadInterceptor) \
+ SC(keyed_store_field, V8.KeyedStoreField) \
+ SC(for_in, V8.ForIn) \
+ SC(enum_cache_hits, V8.EnumCacheHits) \
+ SC(enum_cache_misses, V8.EnumCacheMisses) \
+ SC(reloc_info_count, V8.RelocInfoCount) \
+ SC(reloc_info_size, V8.RelocInfoSize)
+
+
+// This file contains all the v8 counters that are in use.
+// All members are static; the class is never instantiated (AllStatic).
+class Counters : AllStatic {
+ public:
+// One static StatsRate per STATS_RATE_LIST entry.
+#define SR(name, caption) static StatsRate name;
+ STATS_RATE_LIST(SR)
+#undef SR
+
+// One static StatsCounter per entry of both counter lists.
+#define SC(name, caption) static StatsCounter name;
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+ // Unique id (k_<name>) for every rate, counter and state tag.  The
+ // trailing stats_counter_count is the total number of entries.
+ enum Id {
+#define RATE_ID(name, caption) k_##name,
+ STATS_RATE_LIST(RATE_ID)
+#undef RATE_ID
+#define COUNTER_ID(name, caption) k_##name,
+ STATS_COUNTER_LIST_1(COUNTER_ID)
+ STATS_COUNTER_LIST_2(COUNTER_ID)
+#undef COUNTER_ID
+// STATE_TAG_LIST entries take a single argument, hence the redefinition.
+#define COUNTER_ID(name) k_##name,
+ STATE_TAG_LIST(COUNTER_ID)
+#undef COUNTER_ID
+ stats_counter_count
+ };
+
+ // Sliding state window counters; defined in v8-counters.cc, one per
+ // state tag.
+ static StatsCounter state_counters[];
+};
+
+} } // namespace v8::internal
+
+#endif // V8_V8_COUNTERS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "debug.h"
+#include "serialize.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+// --preemption: periodically force switching between V8 threads (used by
+// Initialize/TearDown below to start/stop the preemption timer).
+DEFINE_bool(preemption, false,
+ "activate a 100ms timer that switches between V8 threads");
+
+// Lifecycle flags; once has_been_disposed_ is set, Initialize() refuses
+// to run again.
+bool V8::has_been_setup_ = false;
+bool V8::has_been_disposed_ = false;
+
+// Initialize the VM.  With des == NULL the initial heap state is created
+// from scratch; otherwise it is read from the deserializer into the empty
+// heap.  Returns false if the VM was already disposed or heap setup fails,
+// and true immediately if already set up.  NOTE: the setup order below
+// (logging -> CPU -> OS -> heap -> runtime facilities) is significant.
+bool V8::Initialize(Deserializer *des) {
+ bool create_heap_objects = des == NULL;
+ if (HasBeenDisposed()) return false;
+ if (HasBeenSetup()) return true;
+ // Mark as set up eagerly; rolled back below if heap setup fails.
+ has_been_setup_ = true;
+#ifdef DEBUG
+ // The initialization process does not handle memory exhaustion.
+ DisallowAllocationFailure disallow_allocation_failure;
+#endif
+
+ // Enable logging before setting up the heap
+ Logger::Setup();
+ // NOTE(review): GetLog() presumably attaches the snapshot's log data;
+ // confirm against serialize.h.
+ if (des) des->GetLog();
+
+ // Setup the CPU support.
+ CPU::Setup();
+
+ // Setup the platform OS support.
+ OS::Setup();
+
+ // Setup the object heap
+ ASSERT(!Heap::HasBeenSetup());
+ if (!Heap::Setup(create_heap_objects)) {
+ has_been_setup_ = false;
+ return false;
+ }
+
+ // Initialize other runtime facilities
+ Bootstrapper::Initialize(create_heap_objects);
+ Builtins::Setup(create_heap_objects);
+ Top::Initialize();
+
+ // Optionally start the 100ms preemption timer (see --preemption flag).
+ if (FLAG_preemption) {
+ v8::Locker locker;
+ v8::Locker::StartPreemption(100);
+ }
+
+ Debug::Setup(create_heap_objects);
+ StubCache::Initialize(create_heap_objects);
+
+ // If we are deserializing, read the state into the now-empty heap.
+ if (des != NULL) {
+ des->Deserialize();
+ // Drop any stubs cached before deserialization replaced the heap.
+ StubCache::Clear();
+ }
+
+ return true;
+}
+
+
+// Shut the VM down, tearing subsystems down in roughly the reverse order
+// of Initialize().  Afterwards has_been_disposed_ stays true, so the VM
+// cannot be initialized again.  No-op if already disposed or never set up.
+void V8::TearDown() {
+ if (HasBeenDisposed()) return;
+ if (!HasBeenSetup()) return;
+
+ // Stop the preemption timer if Initialize() started it.
+ if (FLAG_preemption) {
+ v8::Locker locker;
+ v8::Locker::StopPreemption();
+ }
+
+ Builtins::TearDown();
+ Bootstrapper::TearDown();
+
+ Heap::TearDown();
+ Logger::TearDown();
+
+ has_been_setup_ = false;
+ has_been_disposed_ = true;
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google, Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Top include for all V8 .cc files.
+//
+
+#ifndef V8_V8_H_
+#define V8_V8_H_
+
+// Google3 uses NDEBUG.
+#if defined(GOOGLE3) && !defined(NDEBUG)
+#define DEBUG
+#endif
+
+// V8 only uses DEBUG, but included external files
+// may use NDEBUG - make sure they are consistent.
+#if defined(DEBUG) && defined(NDEBUG)
+#error both DEBUG and NDEBUG are set
+#endif
+
+// Basic includes
+#include "../public/v8.h"
+#include "globals.h"
+#include "checks.h"
+#include "allocation.h"
+#include "utils.h"
+#include "flags-inl.h"
+
+// Objects & heap
+#include "objects.h"
+#include "spaces.h"
+#include "heap.h"
+#include "objects-inl.h"
+#include "spaces-inl.h"
+#include "heap-inl.h"
+#include "messages.h"
+
+namespace v8 { namespace internal {
+
+// Static lifecycle entry points for the whole VM (see v8.cc).
+class V8 : public AllStatic {
+ public:
+ // Global actions.
+
+ // If Initialize is called with des == NULL, the
+ // initial state is created from scratch. If a non-null Deserializer
+ // is given, the initial state is created by reading the
+ // deserialized data into an empty heap.
+ static bool Initialize(Deserializer* des);
+ // Releases all VM resources; after TearDown, Initialize() always fails.
+ static void TearDown();
+ static bool HasBeenSetup() { return has_been_setup_; }
+ // Disposal is permanent -- this never resets to false.
+ static bool HasBeenDisposed() { return has_been_disposed_; }
+
+ // Report process out of memory. Implementation found in api.cc.
+ static void FatalProcessOutOfMemory(const char* location);
+ private:
+ static bool has_been_setup_;
+ static bool has_been_disposed_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_V8_H_
--- /dev/null
+// Copyright 2006-2007 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// const $Object = global.Object;
+// const $Boolean = global.Boolean;
+// const $Number = global.Number;
+// const $Function = global.Function;
+// const $Array = global.Array;
+// const $NaN = %NumberNaN(1);
+
+
+// Install the ECMA 262 value properties and function properties of the
+// global object (NaN, Infinity, undefined, isNaN, isFinite, parseInt,
+// parseFloat).
+
+// ECMA 262 - 15.1.1.1.
+%AddProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
+
+
+// ECMA-262 - 15.1.1.2.
+%AddProperty(global, "Infinity", %NumberPositiveInfinity(1), DONT_ENUM | DONT_DELETE);
+
+
+// ECMA-262 - 15.1.1.3.
+%AddProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
+
+
+// ECMA 262 - 15.1.4
+function $isNaN(number) {
+ return %NumberIsNaN(ToNumber(number));
+};
+%AddProperty(global, "isNaN", $isNaN, DONT_ENUM);
+
+
+// ECMA 262 - 15.1.5
+function $isFinite(number) {
+ return %NumberIsFinite(ToNumber(number));
+};
+%AddProperty(global, "isFinite", $isFinite, DONT_ENUM);
+
+
+// ECMA-262 - 15.1.2.2
+%AddProperty(global, "parseInt", function(string, radix) {
+ if (radix === void 0) {
+ // NOTE(review): radix 0 appears to mean "auto-detect" to
+ // %StringParseInt; confirm in the runtime implementation.
+ radix = 0;
+ // Some people use parseInt instead of Math.floor. This
+ // optimization makes parseInt on a Smi 12 times faster (60ns
+ // vs 800ns). The following optimization makes parseInt on a
+ // non-Smi number 9 times faster (230ns vs 2070ns). Together
+ // they make parseInt on a string 1.4% slower (274ns vs 270ns).
+ if (%_IsSmi(string)) return string;
+ if (IS_NUMBER(string)) {
+ if (string >= 0.01 && string < 1e9)
+ return $Math_floor(string);
+ if (string <= -0.01 && string > -1e9)
+ return - $Math_floor(-string);
+ }
+ } else {
+ radix = TO_INT32(radix);
+ // Only radix 0 (auto) and 2..36 are legal; anything else yields NaN.
+ if (!(radix == 0 || (2 <= radix && radix <= 36)))
+ return $NaN;
+ }
+ return %StringParseInt(ToString(string), radix);
+}, DONT_ENUM);
+
+
+// ECMA-262 - 15.1.2.3
+%AddProperty(global, "parseFloat", function(string) {
+ return %StringParseFloat(ToString(string));
+}, DONT_ENUM);
+
+
+// ----------------------------------------------------------------------------
+// Boolean (first part of definition)
+
+
+%SetCode($Boolean, function(x) {
+ if (%IsConstructCall(this)) {
+ %_SetValueOf(this, ToBoolean(x));
+ } else {
+ return ToBoolean(x);
+ }
+});
+
+%FunctionSetPrototype($Boolean, new $Boolean(false));
+
+%AddProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
+
+// ----------------------------------------------------------------------------
+// Object
+
+$Object.prototype.constructor = $Object;
+
+%AddProperty($Object.prototype, "toString", function() {
+ var c = %ClassOf(this);
+ // Hide Arguments from the outside.
+ if (c === 'Arguments') c = 'Object';
+ return "[object " + c + "]";
+}, DONT_ENUM);
+
+
+// ECMA-262, section 15.2.4.3, page 84.
+%AddProperty($Object.prototype, "toLocaleString", function() {
+ return this.toString();
+}, DONT_ENUM);
+
+
+// ECMA-262, section 15.2.4.4, page 85.
+%AddProperty($Object.prototype, "valueOf", function() {
+ return this;
+}, DONT_ENUM);
+
+
+// ECMA-262, section 15.2.4.5, page 85.
+%AddProperty($Object.prototype, "hasOwnProperty", function(V) {
+ return %HasLocalProperty(ToObject(this), ToString(V));
+}, DONT_ENUM);
+
+
+// ECMA-262, section 15.2.4.6, page 85.
+%AddProperty($Object.prototype, "isPrototypeOf", function(V) {
+ if (!IS_OBJECT(V) && !IS_FUNCTION(V)) return false;
+ return %IsInPrototypeChain(this, V);
+}, DONT_ENUM);
+
+
+// ECMA-262, section 15.2.4.6, page 85.
+%AddProperty($Object.prototype, "propertyIsEnumerable", function(V) {
+ if (this == null) return false;
+ if (!IS_OBJECT(this) && !IS_FUNCTION(this)) return false;
+ return %IsPropertyEnumerable(this, ToString(V));
+}, DONT_ENUM);
+
+
+// Extensions for providing property getters and setters.
+%AddProperty($Object.prototype, "__defineGetter__", function(name, fun) {
+ if (this == null) throw new $TypeError('Object.prototype.__defineGetter__: this is Null');
+ if (!IS_FUNCTION(fun)) throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
+ return %DefineAccessor(ToObject(this), ToString(name), GETTER, fun);
+}, DONT_ENUM);
+
+
+
+%AddProperty($Object.prototype, "__lookupGetter__", function(name) {
+ if (this == null) throw new $TypeError('Object.prototype.__lookupGetter__: this is Null');
+ return %LookupAccessor(ToObject(this), ToString(name), GETTER);
+}, DONT_ENUM);
+
+
+%AddProperty($Object.prototype, "__defineSetter__", function(name, fun) {
+ if (this == null) throw new $TypeError('Object.prototype.__defineSetter__: this is Null');
+ if (!IS_FUNCTION(fun)) throw new $TypeError('Object.prototype.__defineSetter__: Expecting function');
+ return %DefineAccessor(ToObject(this), ToString(name), SETTER, fun);
+}, DONT_ENUM);
+
+
+%AddProperty($Object.prototype, "__lookupSetter__", function(name) {
+ if (this == null) throw new $TypeError('Object.prototype.__lookupSetter__: this is Null');
+ return %LookupAccessor(ToObject(this), ToString(name), SETTER);
+}, DONT_ENUM);
+
+
+%SetCode($Object, function(x) {
+ if (%IsConstructCall(this)) {
+ if (x == null) return this;
+ return ToObject(x);
+ } else {
+ if (x == null) return { };
+ return ToObject(x);
+ }
+});
+
+
+// ----------------------------------------------------------------------------
+// Global stuff...
+
+%AddProperty(global, "eval", function(x) {
+ if (!IS_STRING(x)) return x;
+
+ var f = %CompileString(x, true);
+ if (!IS_FUNCTION(f)) return f;
+
+ return f.call(%EvalReceiver(this));
+}, DONT_ENUM);
+
+
+// execScript for IE compatibility.
+%AddProperty(global, "execScript", function(expr, lang) {
+ // NOTE: We don't care about the character casing.
+ if (!lang || /javascript/i.test(lang)) {
+ %CompileString(ToString(expr), false)();
+ }
+ return null;
+}, DONT_ENUM);
+
+
+// ----------------------------------------------------------------------------
+// Boolean
+
+%AddProperty($Boolean.prototype, "toString", function() {
+ // NOTE: Both Boolean objects and values can enter here as
+ // 'this'. This is not as dictated by ECMA-262.
+ if (!IS_BOOLEAN(this) && %ClassOf(this) !== 'Boolean')
+ throw new $TypeError('Boolean.prototype.toString is not generic');
+ return ToString(%_ValueOf(this));
+}, DONT_ENUM);
+
+
+%AddProperty($Boolean.prototype, "valueOf", function() {
+ // NOTE: Both Boolean objects and values can enter here as
+ // 'this'. This is not as dictated by ECMA-262.
+ if (!IS_BOOLEAN(this) && %ClassOf(this) !== 'Boolean')
+ throw new $TypeError('Boolean.prototype.valueOf is not generic');
+ return %_ValueOf(this);
+}, DONT_ENUM);
+
+
+// ----------------------------------------------------------------------------
+// Number
+
+// Set the Number function and constructor.
+%SetCode($Number, function(x) {
+ var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
+ if (%IsConstructCall(this)) {
+ %_SetValueOf(this, value);
+ } else {
+ return value;
+ }
+});
+
+%FunctionSetPrototype($Number, new $Number(0));
+
+%AddProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
+
+// ECMA-262 section 15.7.3.1.
+%AddProperty($Number, "MAX_VALUE", %NumberMaxValue(1), DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+// ECMA-262 section 15.7.3.2.
+%AddProperty($Number, "MIN_VALUE", %NumberMinValue(1), DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+// ECMA-262 section 15.7.3.3.
+%AddProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+// ECMA-262 section 15.7.3.4.
+%AddProperty($Number, "NEGATIVE_INFINITY", %NumberNegativeInfinity(1), DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+// ECMA-262 section 15.7.3.5.
+%AddProperty($Number, "POSITIVE_INFINITY", %NumberPositiveInfinity(1), DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+// ECMA-262 section 15.7.4.2.
+%AddProperty($Number.prototype, "toString", function(radix) {
+ // NOTE: Both Number objects and values can enter here as
+ // 'this'. This is not as dictated by ECMA-262.
+ var number = this;
+ if (!IS_NUMBER(this)) {
+ if (%ClassOf(this) !== 'Number')
+ throw new $TypeError('Number.prototype.toString is not generic');
+ // Get the value of this number in case it's an object.
+ number = %_ValueOf(this);
+ }
+ // Fast case: Convert number in radix 10.
+ if (IS_UNDEFINED(radix) || radix === 10) {
+ return ToString(number);
+ }
+
+ // Convert the radix to an integer and check the range.
+ radix = TO_INTEGER(radix);
+ if (radix < 2 || radix > 36) {
+ throw new $RangeError('toString() radix argument must be between 2 and 36');
+ }
+ // Convert the number to a string in the given radix.
+ return %NumberToRadixString(number, radix);
+}, DONT_ENUM);
+
+
+
+// ECMA-262 section 15.7.4.3
+%AddProperty($Number.prototype, "toLocaleString", function() {
+ return this.toString();
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.7.4.4
+%AddProperty($Number.prototype, "valueOf", function() {
+ // NOTE: Both Number objects and values can enter here as
+ // 'this'. This is not as dictated by ECMA-262.
+ if (!IS_NUMBER(this) && %ClassOf(this) !== 'Number')
+ throw new $TypeError('Number.prototype.valueOf is not generic');
+ return %_ValueOf(this);
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.7.4.5
+%AddProperty($Number.prototype, "toFixed", function(fractionDigits) {
+ // TO_INTEGER maps undefined/NaN to 0, so a missing argument means 0.
+ var f = TO_INTEGER(fractionDigits);
+ if (f < 0 || f > 20) {
+ throw new $RangeError("toFixed() digits argument must be between 0 and 20");
+ }
+ var x = ToNumber(this);
+ return %NumberToFixed(x, f);
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.7.4.6
+%AddProperty($Number.prototype, "toExponential", function(fractionDigits) {
+ // f == -1 signals "use as many digits as needed" to the runtime.
+ var f = -1;
+ if (!IS_UNDEFINED(fractionDigits)) {
+ f = TO_INTEGER(fractionDigits);
+ if (f < 0 || f > 20) {
+ throw new $RangeError("toExponential() argument must be between 0 and 20");
+ }
+ }
+ var x = ToNumber(this);
+ return %NumberToExponential(x, f);
+}, DONT_ENUM);
+
+
+// ECMA-262 section 15.7.4.7
+%AddProperty($Number.prototype, "toPrecision", function(precision) {
+ if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
+ var p = TO_INTEGER(precision);
+ if (p < 1 || p > 21) {
+ throw new $RangeError("toPrecision() argument must be between 1 and 21");
+ }
+ var x = ToNumber(this);
+ return %NumberToPrecision(x, p);
+}, DONT_ENUM);
+
+
+// ----------------------------------------------------------------------------
+// Function
+
+$Function.prototype.constructor = $Function;
+
+
+function FunctionSourceString(func) {
+ // NOTE: Both Function objects and values can enter here as
+ // 'func'. This is not as dictated by ECMA-262.
+ if (!IS_FUNCTION(func) && %ClassOf(func) != 'Function')
+ throw new $TypeError('Function.prototype.toString is not generic');
+
+ var source = %FunctionGetSourceCode(func);
+ if (!IS_STRING(source)) {
+ var name = %FunctionGetName(func);
+ if (name) {
+ // Mimic what KJS does.
+ return 'function ' + name + '() { [native code] }';
+ } else {
+ return 'function () { [native code] }';
+ }
+ }
+
+ // Censor occurrences of internal calls. We do that for all
+ // functions and don't cache under the assumption that people rarly
+ // convert functions to strings. Note that we (apparently) can't
+ // use regular expression literals in natives files.
+ var regexp = ORIGINAL_REGEXP("%(\\w+\\()", "gm");
+ if (source.match(regexp)) source = source.replace(regexp, "$1");
+ var name = %FunctionGetName(func);
+ return 'function ' + name + source;
+};
+
+
+%AddProperty($Function.prototype, "toString", function() {
+ return FunctionSourceString(this);
+}, DONT_ENUM);
+
+
+// Implements the Function constructor (ECMA 15.3.2.1): the last argument
+// is the body, all preceding arguments are formal parameter names.
+function NewFunction(arg1) { // length == 1
+ var n = %_ArgumentsLength();
+ var p = '';
+ if (n > 1) {
+ p = new $Array(n - 1);
+ // Explicitly convert all parameters to strings.
+ // Array.prototype.join replaces null with empty strings which is
+ // not appropriate.
+ for (var i = 0; i < n - 1; i++) p[i] = ToString(%_Arguments(i));
+ p = p.join(',');
+ // If the formal parameters string include ) - an illegal
+ // character - it may make the combined function expression
+ // compile. We avoid this problem by checking for this early on.
+ if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse',[]);
+ }
+ var body = (n > 0) ? ToString(%_Arguments(n - 1)) : '';
+ var source = '(function anonymous(' + p + ') { ' + body + ' })';
+
+ // The call to SetNewFunctionAttributes will ensure the prototype
+ // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
+ return %SetNewFunctionAttributes(%CompileString(source, false)());
+};
+
+%SetCode($Function, NewFunction);
+
+
+// NOTE: The following functions (call and apply) are only used in this
+// form on the ARM platform. On IA-32 they are handled through specialized
+// builtins; see builtins-ia32.cc.
+
+// ECMA-262 section 15.3.4.4.
+%AddProperty($Function.prototype, "call", function(receiver) {
+ // Make sure the receiver of this call is a function. If it isn't
+ // we "fake" a call of it (without the right arguments) to force
+ // an exception to be thrown.
+ if (!IS_FUNCTION(this)) this();
+
+ // If receiver is null or undefined set the receiver to the global
+ // object. If the receiver isn't an object, we convert the
+ // receiver to an object.
+ if (receiver == null) receiver = global;
+ else if (!IS_OBJECT(receiver)) receiver = ToObject(receiver);
+
+ %_SetThisFunction(this);
+ %_SetThis(receiver);
+
+ // Reuse the current frame: drop the leading 'receiver' argument and
+ // tail-call the target function with the remaining arguments.
+ var len = %_GetArgumentsLength(1);
+ return %_ShiftDownAndTailCall(len ? len - 1 : 0);
+}, DONT_ENUM);
+
+
+// This implementation of Function.prototype.apply replaces the stack frame
+// of the apply call with the new stack frame containing the arguments from
+// the args array.  (ECMA-262 section 15.3.4.3.)
+%AddProperty($Function.prototype, "apply", function(receiver, args) {
+ var length = (args == null) ? 0 : ToUint32(args.length);
+
+ // We can handle any number of apply arguments if the stack is
+ // big enough, but sanity check the value to avoid overflow when
+ // multiplying with pointer size.
+ if (length > 0x800000) {
+ throw new $RangeError(
+ "Function.prototype.apply cannot support " + length + " arguments.");
+ }
+
+ if (!IS_FUNCTION(this)) {
+ throw new $TypeError('Function.prototype.apply was called on ' + this.toString() + ', which is a ' + (typeof this) + ' and not a function');
+ }
+
+ // Make sure args has the right type.
+ if (args != null && %ClassOf(args) !== 'Array' && %ClassOf(args) !== 'Arguments') {
+ throw new $TypeError('Function.prototype.apply: args has wrong type');
+ }
+
+ // If receiver is null or undefined set the receiver to the global
+ // object. If the receiver isn't an object, we convert the
+ // receiver to an object.
+ if (receiver == null) receiver = global;
+ else if (!IS_OBJECT(receiver)) receiver = ToObject(receiver);
+
+ %_SetThisFunction(this);
+ %_SetThis(receiver);
+
+ var arguments_length = %_GetArgumentsLength(2);
+
+ // This method has 2 formal arguments so if less are passed, then space has
+ // been made.
+ if (arguments_length < 2)
+ arguments_length = 2;
+
+ // Move some stuff to locals so they don't get overwritten when we start
+ // expanding the args array.
+ var saved_args = args;
+
+ if (arguments_length > length) {
+ // We have too many arguments - we need to squash the frame.
+ %_SquashFrame(arguments_length, length);
+ } else if (arguments_length != length) {
+ // We have too few spaces for arguments - we need to expand the frame.
+ if (!%_ExpandFrame(arguments_length, length)) {
+ throw new $RangeError(
+ "Function.prototype.apply cannot find stack space for " + length + " arguments.");
+ }
+ // GC doesn't like junk in the arguments!
+ for (var i = 0; i < length; i++) {
+ %_SetArgument(i, 0, length);
+ }
+ }
+
+ // Update the number-of-arguments field to keep things looking consistent
+ // for stack traces, and uses of arguments or arguments.length.
+ %_SetArgumentsLength(length);
+
+ // NOTE: For the fast case this should be implemented in assembler,
+ // which would allow us to omit bounds and class checks galore. The
+ // assembler version could fall back to this implementation if
+ // tricky stuff is found, like arrays implemented as dictionaries or
+ // holes in arrays.
+ for (var i = 0; i < length; i++) {
+ %_SetArgument(i, saved_args[i], length);
+ }
+
+ // Replaces the current frame with the new call. This has the added effect
+ // of removing apply from the stack trace entirely, which matches the
+ // behaviour of Firefox.
+ return %_TailCallWithArguments(length);
+}, DONT_ENUM);
+
+
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "debug.h"
+#include "execution.h"
+#include "v8threads.h"
+
+namespace v8 {
+
+// Per-thread TLS slot holding the thread's archived ThreadState, or NULL
+// when the thread has no archived state.
+static internal::Thread::LocalStorageKey thread_state_key =
+ internal::Thread::CreateThreadLocalKey();
+
+// Constructor for the Locker object. Once the Locker is constructed the
+// current thread will be guaranteed to have the big V8 lock.
+Locker::Locker() : has_lock_(false), top_level_(true) {
+ // Get the big lock if necessary.
+ if (!internal::ThreadManager::IsLockedByCurrentThread()) {
+ internal::ThreadManager::Lock();
+ has_lock_ = true;
+ // This may be a locker within an unlocker in which case we have to
+ // get the saved state for this thread and restore it.
+ if (internal::ThreadManager::RestoreThread()) {
+ // State was restored, so this Locker is nested inside an Unlocker;
+ // the destructor must re-archive rather than simply unlock.
+ top_level_ = false;
+ }
+ }
+ ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+}
+
+
+#ifdef DEBUG
+// Debug-only check that the calling thread currently holds the big V8 lock.
+void Locker::AssertIsLocked() {
+ ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+}
+#endif
+
+
+// Destructor: if this Locker acquired the lock, release it. A nested
+// (non-top-level) Locker restored thread state in its constructor, so it
+// must archive that state again before unlocking.
+Locker::~Locker() {
+ ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+ if (has_lock_) {
+ if (!top_level_) {
+ internal::ThreadManager::ArchiveThread();
+ }
+ internal::ThreadManager::Unlock();
+ }
+}
+
+
+// Temporarily exits V8: archives the current thread's VM state and
+// releases the big lock so other threads may enter V8.
+Unlocker::Unlocker() {
+ ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+ internal::ThreadManager::ArchiveThread();
+ internal::ThreadManager::Unlock();
+}
+
+
+// Re-enters V8: reacquires the big lock and restores the state that the
+// constructor archived.
+Unlocker::~Unlocker() {
+ ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
+ internal::ThreadManager::Lock();
+ internal::ThreadManager::RestoreThread();
+}
+
+
+// Starts periodic preemption of the thread running V8, every
+// every_n_ms milliseconds. Thin wrapper over ContextSwitcher.
+void Locker::StartPreemption(int every_n_ms) {
+ v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
+}
+
+
+// Stops periodic preemption previously started by StartPreemption().
+void Locker::StopPreemption() {
+ v8::internal::ContextSwitcher::StopPreemption();
+}
+
+
+namespace internal {
+
+
+// Restores the archived VM state of the current thread, if any, and
+// recycles its ThreadState onto the free list. Returns true if state was
+// restored, false if this thread had nothing archived.
+bool ThreadManager::RestoreThread() {
+ // First check whether the current thread has been 'lazily archived', ie
+ // not archived at all. If that is the case we put the state storage we
+ // had prepared back in the free list, since we didn't need it after all.
+ if (lazily_archived_thread_.IsSelf()) {
+ lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
+ ASSERT(Thread::GetThreadLocal(thread_state_key) ==
+ lazily_archived_thread_state_);
+ lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
+ lazily_archived_thread_state_ = NULL;
+ Thread::SetThreadLocal(thread_state_key, NULL);
+ return true;
+ }
+ // If there is another thread that was lazily archived then we have to really
+ // archive it now.
+ if (lazily_archived_thread_.IsValid()) {
+ EagerlyArchiveThread();
+ }
+ ThreadState* state =
+ reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
+ if (state == NULL) {
+ return false;
+ }
+ // Restore each component in the same order used when archiving.
+ char* from = state->data();
+ from = HandleScopeImplementer::RestoreThread(from);
+ from = Top::RestoreThread(from);
+ from = Debug::RestoreDebug(from);
+ from = StackGuard::RestoreStackGuard(from);
+ Thread::SetThreadLocal(thread_state_key, NULL);
+ // The buffer has been consumed; make the state reusable.
+ state->Unlink();
+ state->LinkInto(ThreadState::FREE_LIST);
+ return true;
+}
+
+
+// Acquires the big V8 lock and records the owner. The owner handle is
+// only written while the mutex is held.
+void ThreadManager::Lock() {
+ mutex_->Lock();
+ mutex_owner_.Initialize(ThreadHandle::SELF);
+ ASSERT(IsLockedByCurrentThread());
+}
+
+
+// Releases the big V8 lock; the owner handle is invalidated before the
+// mutex is released so it is never stale while unlocked.
+void ThreadManager::Unlock() {
+ mutex_owner_.Initialize(ThreadHandle::INVALID);
+ mutex_->Unlock();
+}
+
+
+// Total number of bytes needed to archive one thread: the sum of the
+// per-component sizes, in the same order used when archiving/restoring.
+static int ArchiveSpacePerThread() {
+ return HandleScopeImplementer::ArchiveSpacePerThread() +
+ Top::ArchiveSpacePerThread() +
+ Debug::ArchiveSpacePerThread() +
+ StackGuard::ArchiveSpacePerThread();
+}
+
+
+// Permanent anchor nodes for the circular free and in-use lists; they are
+// allocated once at startup and never freed.
+ThreadState* ThreadState::free_anchor_ = new ThreadState();
+ThreadState* ThreadState::in_use_anchor_ = new ThreadState();
+
+
+// A fresh ThreadState is self-linked, i.e. a circular list of one, which
+// means it is not on either the free or the in-use list.
+ThreadState::ThreadState() : next_(this), previous_(this) {
+}
+
+
+// Allocates this state's archive buffer, sized to hold every component's
+// archived data (see ArchiveSpacePerThread).
+void ThreadState::AllocateSpace() {
+ data_ = NewArray<char>(ArchiveSpacePerThread());
+}
+
+
+// Removes this state from whichever doubly-linked list it is currently on.
+void ThreadState::Unlink() {
+ next_->previous_ = previous_;
+ previous_->next_ = next_;
+}
+
+
+// Inserts this state at the front of the given list, right after that
+// list's anchor node. The caller must have unlinked the state first.
+void ThreadState::LinkInto(List list) {
+ ThreadState* flying_anchor =
+ list == FREE_LIST ? free_anchor_ : in_use_anchor_;
+ next_ = flying_anchor->next_;
+ previous_ = flying_anchor;
+ flying_anchor->next_ = this;
+ next_->previous_ = this;
+}
+
+
+// Returns a ThreadState to archive into: the first entry on the free list
+// when one exists, otherwise a freshly allocated state with its own
+// archive buffer. The returned state is not unlinked from the free list;
+// that is left to the caller.
+ThreadState* ThreadState::GetFree() {
+ ThreadState* candidate = free_anchor_->next_;
+ if (candidate != free_anchor_) return candidate;
+ // Free list holds only its anchor, i.e. it is empty: make a new state.
+ ThreadState* fresh = new ThreadState();
+ fresh->AllocateSpace();
+ return fresh;
+}
+
+
+// Gets the first in the list of archived threads.
+// Returns NULL (via Next()) when the in-use list is empty.
+ThreadState* ThreadState::FirstInUse() {
+ return in_use_anchor_->Next();
+}
+
+
+// Returns the next archived state, or NULL once the traversal wraps back
+// around to the in-use anchor (the end of the list).
+ThreadState* ThreadState::Next() {
+ return (next_ == in_use_anchor_) ? NULL : next_;
+}
+
+
+// Static state of the thread manager. mutex_owner_ tracks which thread
+// holds mutex_; the lazily_archived_* pair describes a thread whose
+// ThreadState has been reserved but whose data has not yet been copied
+// out (see ArchiveThread / EagerlyArchiveThread).
+Mutex* ThreadManager::mutex_ = OS::CreateMutex();
+ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
+ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
+ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;
+
+
+// Lazily archives the current thread: reserves a ThreadState and records
+// it in TLS, but defers copying the actual state until it is needed
+// (EagerlyArchiveThread) or the reservation is cancelled (RestoreThread).
+void ThreadManager::ArchiveThread() {
+ ASSERT(!lazily_archived_thread_.IsValid());
+ ASSERT(Thread::GetThreadLocal(thread_state_key) == NULL);
+ ThreadState* state = ThreadState::GetFree();
+ state->Unlink();
+ Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
+ lazily_archived_thread_.Initialize(ThreadHandle::SELF);
+ lazily_archived_thread_state_ = state;
+}
+
+
+// Completes a deferred archive: copies each component's state into the
+// reserved buffer (same order as ArchiveSpacePerThread and RestoreThread)
+// and moves the state onto the in-use list.
+void ThreadManager::EagerlyArchiveThread() {
+ ThreadState* state = lazily_archived_thread_state_;
+ state->LinkInto(ThreadState::IN_USE_LIST);
+ char* to = state->data();
+ to = HandleScopeImplementer::ArchiveThread(to);
+ to = Top::ArchiveThread(to);
+ to = Debug::ArchiveDebug(to);
+ to = StackGuard::ArchiveStackGuard(to);
+ // The reservation has been consumed; clear it.
+ lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
+ lazily_archived_thread_state_ = NULL;
+}
+
+
+// GC support: visits object pointers stored in all archived thread
+// states. Only the handle scope and Top portions of each buffer are
+// visited; the Debug and StackGuard portions are not.
+void ThreadManager::Iterate(ObjectVisitor* v) {
+ // Expecting no threads during serialization/deserialization
+ for (ThreadState* state = ThreadState::FirstInUse();
+ state != NULL;
+ state = state->Next()) {
+ char* data = state->data();
+ data = HandleScopeImplementer::Iterate(v, data);
+ data = Top::Iterate(v, data);
+ }
+}
+
+
+// GC support: lets each archived Top state prepare for a mark-compact
+// collection. The data pointer is advanced past the handle scope portion
+// of the archive buffer to reach the Top portion.
+void ThreadManager::MarkCompactPrologue() {
+ for (ThreadState* state = ThreadState::FirstInUse();
+ state != NULL;
+ state = state->Next()) {
+ char* data = state->data();
+ data += HandleScopeImplementer::ArchiveSpacePerThread();
+ Top::MarkCompactPrologue(data);
+ }
+}
+
+
+// GC support: counterpart of MarkCompactPrologue, run after a
+// mark-compact collection on each archived Top state.
+void ThreadManager::MarkCompactEpilogue() {
+ for (ThreadState* state = ThreadState::FirstInUse();
+ state != NULL;
+ state = state->Next()) {
+ char* data = state->data();
+ data += HandleScopeImplementer::ArchiveSpacePerThread();
+ Top::MarkCompactEpilogue(data);
+ }
+}
+
+
+// Creates the context-switcher thread (not yet started). The semaphore
+// starts at 0, so Run() blocks in WaitForPreemption() until the running
+// thread acknowledges each preemption request.
+ContextSwitcher::ContextSwitcher(int every_n_ms)
+ : preemption_semaphore_(OS::CreateSemaphore(0)),
+ keep_going_(true),
+ sleep_ms_(every_n_ms) {
+}
+
+
+// Singleton preemption thread; zero-initialized (NULL) until
+// StartPreemption() creates it.
+static v8::internal::ContextSwitcher* switcher;
+
+
+// Starts the singleton switcher thread, or just updates its interval if
+// it is already running. Caller must hold the big V8 lock.
+void ContextSwitcher::StartPreemption(int every_n_ms) {
+ Locker::AssertIsLocked();
+ if (switcher == NULL) {
+ switcher = new ContextSwitcher(every_n_ms);
+ switcher->Start();
+ } else {
+ switcher->sleep_ms_ = every_n_ms;
+ }
+}
+
+
+// Stops the switcher thread (if running), joins it via Stop(), and
+// deletes the singleton. Caller must hold the big V8 lock.
+void ContextSwitcher::StopPreemption() {
+ Locker::AssertIsLocked();
+ if (switcher != NULL) {
+ // Stop() joins the thread, so deleting afterwards is safe.
+ switcher->Stop();
+ delete switcher;
+ switcher = NULL;
+ }
+}
+
+
+// Thread body: repeatedly sleep for the configured interval, ask the
+// running V8 thread to preempt itself via the stack guard, then wait for
+// the acknowledgement (or the Stop() signal) before looping.
+void ContextSwitcher::Run() {
+ while (keep_going_) {
+ OS::Sleep(sleep_ms_);
+ StackGuard::Preempt();
+ WaitForPreemption();
+ }
+}
+
+
+// Tells the switcher thread to exit and joins it. Signalling the
+// semaphore releases a pending WaitForPreemption() so the Run() loop can
+// observe keep_going_ == false and terminate.
+void ContextSwitcher::Stop() {
+ Locker::AssertIsLocked();
+ keep_going_ = false;
+ preemption_semaphore_->Signal();
+ Join();
+}
+
+
+// Blocks until the preempted thread (or Stop()) signals the semaphore.
+void ContextSwitcher::WaitForPreemption() {
+ preemption_semaphore_->Wait();
+}
+
+
+// Called (with the V8 lock held) by the thread that was preempted, to
+// acknowledge the request and let the switcher schedule the next one.
+void ContextSwitcher::PreemptionReceived() {
+ Locker::AssertIsLocked();
+ switcher->preemption_semaphore_->Signal();
+}
+
+
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8THREADS_H_
+#define V8_V8THREADS_H_
+
+namespace v8 { namespace internal {
+
+
+// Storage for one archived thread's VM state. States live on one of two
+// circular doubly-linked lists (free or in-use), each with a permanent
+// anchor node.
+class ThreadState {
+ public:
+ // Iterate over in-use states.
+ static ThreadState* FirstInUse();
+ // Returns NULL after the last one.
+ ThreadState* Next();
+
+ enum List {FREE_LIST, IN_USE_LIST};
+
+ // List management: insert into / remove from the given list.
+ void LinkInto(List list);
+ void Unlink();
+
+ // Returns a state from the free list, allocating a new one if empty.
+ static ThreadState* GetFree();
+
+ // Get data area for archiving a thread.
+ char* data() { return data_; }
+ private:
+ ThreadState();
+
+ // Allocates the archive buffer (sized for all archived components).
+ void AllocateSpace();
+
+ char* data_;
+ ThreadState* next_;
+ ThreadState* previous_;
+ // In the following two lists there is always at least one object on the list.
+ // The first object is a flying anchor that is only there to simplify linking
+ // and unlinking.
+ // Head of linked list of free states.
+ static ThreadState* free_anchor_;
+ // Head of linked list of states in use.
+ static ThreadState* in_use_anchor_;
+};
+
+
+// Manages the big V8 lock and the archiving/restoring of per-thread VM
+// state as threads enter and leave V8.
+class ThreadManager : public AllStatic {
+ public:
+ // Acquire / release the global V8 mutex.
+ static void Lock();
+ static void Unlock();
+
+ // Save / restore the calling thread's VM state.
+ static void ArchiveThread();
+ static bool RestoreThread();
+
+ // GC support over all archived thread states.
+ static void Iterate(ObjectVisitor* v);
+ static void MarkCompactPrologue();
+ static void MarkCompactEpilogue();
+ static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
+ private:
+ // Copies out the state of a thread that was only lazily archived.
+ static void EagerlyArchiveThread();
+
+ static Mutex* mutex_;
+ // Thread currently holding mutex_ (INVALID while unlocked).
+ static ThreadHandle mutex_owner_;
+ // Thread whose archiving has been deferred, and its reserved state.
+ static ThreadHandle lazily_archived_thread_;
+ static ThreadState* lazily_archived_thread_state_;
+};
+
+
+// Background thread that periodically asks the thread running V8 to
+// preempt itself (via StackGuard), enabling round-robin context switches.
+class ContextSwitcher: public Thread {
+ public:
+ // Thread entry point. NOTE(review): presumably overrides Thread::Run --
+ // confirm against the Thread declaration, which is not visible here.
+ void Run();
+ // Start / stop periodic preemption; callers must hold the V8 lock.
+ static void StartPreemption(int every_n_ms);
+ static void StopPreemption();
+ // Called by the preempted thread to acknowledge a preemption request.
+ static void PreemptionReceived();
+ private:
+ explicit ContextSwitcher(int every_n_ms);
+ void WaitForPreemption();
+ void Stop();
+ // Signalled when a preemption request has been acknowledged.
+ Semaphore* preemption_semaphore_;
+ bool keep_going_;
+ // Preemption interval in milliseconds.
+ int sleep_ms_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_V8THREADS_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+#include "variables.h"
+
+namespace v8 { namespace internal {
+
+// ----------------------------------------------------------------------------
+// Implementation UseCount.
+
+// A fresh UseCount has no recorded reads or writes.
+UseCount::UseCount()
+ : nreads_(0),
+ nwrites_(0) {
+}
+
+
+// Records |weight| reads of the variable. The weight lets callers count
+// heavier use, e.g. access inside a loop.
+void UseCount::RecordRead(int weight) {
+ ASSERT(weight > 0);
+ nreads_ += weight;
+ // We must have a positive nreads_ here. Handle
+ // any kind of overflow by setting nreads_ to
+ // some large-ish value.
+ if (nreads_ <= 0) nreads_ = 1000000;
+ // Use logical &&, matching RecordWrite; the original bitwise & happened
+ // to work on bool operands but was an inconsistency, not an intent.
+ ASSERT(is_read() && is_used());
+}
+
+
+// Records |weight| writes of the variable; mirrors RecordRead.
+void UseCount::RecordWrite(int weight) {
+ ASSERT(weight > 0);
+ nwrites_ += weight;
+ // We must have a positive nwrites_ here. Handle
+ // any kind of overflow by setting nwrites_ to
+ // some large-ish value.
+ if (nwrites_ <= 0) nwrites_ = 1000000;
+ ASSERT(is_written() && is_used());
+}
+
+
+// Records a combined read-and-write access (e.g. compound assignment).
+void UseCount::RecordAccess(int weight) {
+ RecordRead(weight);
+ RecordWrite(weight);
+}
+
+
+// Accumulates the reads and writes recorded in |uses| into this counter.
+void UseCount::RecordUses(UseCount* uses) {
+ if (uses->nreads() > 0) RecordRead(uses->nreads());
+ if (uses->nwrites() > 0) RecordWrite(uses->nwrites());
+}
+
+
+#ifdef DEBUG
+// Prints the use counts as "<uses>u = <reads>r + <writes>w".
+void UseCount::Print() {
+ PrintF("%du = %dr + %dw", nuses(), nreads(), nwrites());
+}
+#endif
+
+
+// ----------------------------------------------------------------------------
+// Implementation Variable.
+
+
+// Printing support: maps a variable mode to its name. The switch covers
+// every Mode value; reaching UNREACHABLE() means a new mode was added
+// without updating this function.
+const char* Variable::Mode2String(Mode mode) {
+ switch (mode) {
+ case VAR: return "VAR";
+ case CONST: return "CONST";
+ case DYNAMIC: return "DYNAMIC";
+ case INTERNAL: return "INTERNAL";
+ case TEMPORARY: return "TEMPORARY";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// Returns the rewrite as a Property when the variable has been rewritten
+// to a property access, otherwise NULL.
+Property* Variable::AsProperty() {
+ if (rewrite_ == NULL) return NULL;
+ return rewrite_->AsProperty();
+}
+
+
+// Returns this when the variable is still a genuine variable: either it
+// has not been rewritten, or it was rewritten to a Slot. Otherwise NULL.
+Variable* Variable::AsVariable() {
+ if (rewrite_ == NULL) return this;
+ return (rewrite_->AsSlot() != NULL) ? this : NULL;
+}
+
+
+// Returns the Slot this variable was rewritten to, or NULL when it has
+// no rewrite.
+Slot* Variable::slot() const {
+ if (rewrite_ == NULL) return NULL;
+ return rewrite_->AsSlot();
+}
+
+
+// Creates a variable belonging to |scope|. Variables start unresolved
+// (rewrite_ == NULL) and with no recorded uses.
+Variable::Variable(Scope* scope,
+ Handle<String> name,
+ Mode mode,
+ bool is_valid_LHS,
+ bool is_this)
+ : scope_(scope),
+ name_(name),
+ mode_(mode),
+ is_valid_LHS_(is_valid_LHS),
+ is_this_(is_this),
+ is_accessed_from_inner_scope_(false),
+ rewrite_(NULL) {
+ // names must be canonicalized for fast equality checks
+ ASSERT(name->IsSymbol());
+}
+
+
+// True when the variable lives in the global scope. scope_ may be NULL
+// (see the scope() accessor in variables.h), which counts as not global.
+bool Variable::is_global() const {
+ // Temporaries are never global, they must always be allocated in the
+ // activation frame.
+ return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VARIABLES_H_
+#define V8_VARIABLES_H_
+
+#include "zone.h"
+
+namespace v8 { namespace internal {
+
+// Tracks how often a variable is read and written; weights let callers
+// count heavier use (e.g. access inside a loop) more strongly.
+class UseCount BASE_EMBEDDED {
+ public:
+ UseCount();
+
+ // Inform the node of a "use". The weight can be used to indicate
+ // heavier use, for instance if the variable is accessed inside a loop.
+ void RecordRead(int weight);
+ void RecordWrite(int weight);
+ void RecordAccess(int weight); // records a read & write
+ void RecordUses(UseCount* uses);
+
+ // Accessors; nuses() is the combined read + write count.
+ int nreads() const { return nreads_; }
+ int nwrites() const { return nwrites_; }
+ int nuses() const { return nreads_ + nwrites_; }
+
+ bool is_read() const { return nreads() > 0; }
+ bool is_written() const { return nwrites() > 0; }
+ bool is_used() const { return nuses() > 0; }
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ int nreads_;
+ int nwrites_;
+};
+
+
+// The AST refers to variables via VariableProxies - placeholders for the actual
+// variables. Variables themselves are never directly referred to from the AST,
+// they are maintained by scopes, and referred to from VariableProxies and Slots
+// after binding and variable allocation.
+
+class Variable: public ZoneObject {
+ public:
+ enum Mode {
+ // User declared variables:
+ VAR, // declared via 'var', and 'function' declarations
+ CONST, // declared via 'const' declarations
+
+ // Variables introduced by the compiler:
+ DYNAMIC, // always require dynamic lookup (we don't know the declaration)
+ INTERNAL, // like VAR, but not user-visible (may or may not be in a
+ // context)
+ TEMPORARY // temporary variables (not user-visible), never in a context
+ };
+
+ // Printing support
+ static const char* Mode2String(Mode mode);
+
+ // Type testing & conversion
+ Property* AsProperty();
+ Variable* AsVariable();
+ bool IsValidLeftHandSide() { return is_valid_LHS_; }
+
+ // The source code for an eval() call may refer to a variable that is
+ // in an outer scope about which we don't know anything (it may not
+ // be the global scope). scope() is NULL in that case. Currently the
+ // scope is only used to follow the context chain length.
+ Scope* scope() const { return scope_; }
+ // If this assertion fails it means that some code has tried to
+ // treat the special this variable as an ordinary variable with
+ // the name "this".
+ // NOTE(review): the comment above mentions an assertion, but name()
+ // contains none -- it may refer to a check elsewhere; confirm.
+ Handle<String> name() const { return name_; }
+ Mode mode() const { return mode_; }
+ bool is_accessed_from_inner_scope() const {
+ return is_accessed_from_inner_scope_;
+ }
+ // Use counters, filled in during variable resolution/analysis.
+ UseCount* var_uses() { return &var_uses_; }
+ UseCount* obj_uses() { return &obj_uses_; }
+
+ // True when this (non-'this') variable has the given canonicalized name.
+ bool IsVariable(Handle<String> n) {
+ return !is_this() && name().is_identical_to(n);
+ }
+
+ bool is_global() const;
+ bool is_this() const { return is_this_; }
+
+ Expression* rewrite() const { return rewrite_; }
+ Slot* slot() const;
+
+ private:
+ Variable(Scope* scope, Handle<String> name, Mode mode, bool is_valid_LHS,
+ bool is_this);
+
+ Scope* scope_;
+ Handle<String> name_;
+ Mode mode_;
+ bool is_valid_LHS_;
+ bool is_this_;
+
+ // Usage info.
+ bool is_accessed_from_inner_scope_; // set by variable resolver
+ UseCount var_uses_; // uses of the variable value
+ UseCount obj_uses_; // uses of the object the variable points to
+
+ // Code generation.
+ // rewrite_ is usually a Slot or a Property, but maybe any expression.
+ Expression* rewrite_;
+
+ friend class VariableProxy;
+ friend class Scope;
+ friend class LocalsMap;
+ friend class AstBuildingParser;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_VARIABLES_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ZONE_INL_H_
+#define V8_ZONE_INL_H_
+
+#include "zone.h"
+
+namespace v8 { namespace internal {
+
+
+// Bump-pointer allocation from the current segment; falls back to
+// NewExpand() when the request does not fit.
+inline void* Zone::New(int size) {
+ ASSERT(AssertNoZoneAllocation::allow_allocation());
+ // Round up the requested size to fit the alignment.
+ size = RoundUp(size, kAlignment);
+
+ // Check if the requested size is available without expanding.
+ Address result = position_;
+ // NOTE: position_ is bumped before the limit check; when the request
+ // does not fit, NewExpand() recomputes position_ from a fresh segment,
+ // so the transient overshoot is discarded (Zone is single-threaded).
+ if ((position_ += size) > limit_) result = NewExpand(size);
+
+ // Check that the result has the proper alignment and return it.
+ ASSERT(IsAddressAligned(result, kAlignment, 0));
+ return reinterpret_cast<void*>(result);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ZONE_INL_H_
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "zone-inl.h"
+
+namespace v8 { namespace internal {
+
+
+// Zone allocation state: bump pointer and limit of the current segment.
+// Both are 0 until the first allocation forces a segment to be created.
+Address Zone::position_ = 0;
+Address Zone::limit_ = 0;
+
+// Zone allocation is allowed unless an AssertNoZoneAllocation is in scope.
+bool AssertNoZoneAllocation::allow_allocation_ = true;
+
+
+// Segments represent chunks of memory: They have starting address
+// (encoded in the this pointer) and a size in bytes. Segments are
+// chained together forming a LIFO structure with the newest segment
+// available as Segment::head(). Segments are allocated using malloc()
+// and de-allocated using free().
+
+class Segment {
+ public:
+ Segment* next() const { return next_; }
+ void clear_next() { next_ = NULL; }
+
+ // Total malloc'ed size, and the part usable for zone allocation
+ // (everything after the Segment header itself).
+ int size() const { return size_; }
+ int capacity() const { return size_ - sizeof(Segment); }
+
+ Address start() const { return address(sizeof(Segment)); }
+ Address end() const { return address(size_); }
+
+ static Segment* head() { return head_; }
+ static void set_head(Segment* head) { head_ = head; }
+
+ // Creates a new segment, sets it size, and pushes it to the front
+ // of the segment chain. Returns the new segment.
+ // Returns NULL when the underlying allocation fails.
+ static Segment* New(int size) {
+ Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+ if (result != NULL) {
+ result->next_ = head_;
+ result->size_ = size;
+ head_ = result;
+ }
+ return result;
+ }
+
+ // Deletes the given segment. Does not touch the segment chain.
+ static void Delete(Segment* segment) {
+ Malloced::Delete(segment);
+ }
+
+ private:
+ // Computes the address of the nth byte in this segment.
+ Address address(int n) const {
+ return Address(this) + n;
+ }
+
+ static Segment* head_;
+ Segment* next_;
+ int size_;
+};
+
+
+// Head of the LIFO segment chain; NULL until the first segment is made.
+Segment* Segment::head_ = NULL;
+
+
+// Frees every segment except (at most) one sufficiently small segment,
+// which is kept and reset so future allocations can reuse it without a
+// fresh malloc().
+void Zone::DeleteAll() {
+#ifdef DEBUG
+ // Constant byte value used for zapping dead memory in debug mode.
+ static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+ // Find a segment with a suitable size to keep around.
+ Segment* keep = Segment::head();
+ while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
+ keep = keep->next();
+ }
+
+ // Traverse the chained list of segments, zapping (in debug mode)
+ // and freeing every segment except the one we wish to keep.
+ Segment* current = Segment::head();
+ while (current != NULL) {
+ Segment* next = current->next();
+ if (current == keep) {
+ // Unlink the segment we wish to keep from the list.
+ current->clear_next();
+ } else {
+#ifdef DEBUG
+ // Zap the entire current segment (including the header).
+ memset(current, kZapDeadByte, current->size());
+#endif
+ Segment::Delete(current);
+ }
+ current = next;
+ }
+
+ // If we have found a segment we want to keep, we must recompute the
+ // variables 'position' and 'limit' to prepare for future allocate
+ // attempts. Otherwise, we must clear the position and limit to
+ // force a new segment to be allocated on demand.
+ if (keep != NULL) {
+ Address start = keep->start();
+ position_ = RoundUp(start, kAlignment);
+ limit_ = keep->end();
+#ifdef DEBUG
+ // Zap the contents of the kept segment (but not the header).
+ memset(start, kZapDeadByte, keep->capacity());
+#endif
+ } else {
+ position_ = limit_ = 0;
+ }
+
+ // Update the head segment to be the kept segment (if any).
+ Segment::set_head(keep);
+}
+
+
+// Allocates a new segment large enough for a 'size'-byte request (plus
+// header and alignment slack) and serves the request from it. Called by
+// Zone::New when the current segment is exhausted; aborts on OOM.
+Address Zone::NewExpand(int size) {
+ // Make sure the requested size is already properly aligned and that
+ // there isn't enough room in the Zone to satisfy the request.
+ ASSERT(size == RoundDown(size, kAlignment));
+ ASSERT(position_ + size > limit_);
+
+ // Compute the new segment size. We use a 'high water mark'
+ // strategy, where we increase the segment size every time we expand
+ // except that we employ a maximum segment size when we delete. This
+ // is to avoid excessive malloc() and free() overhead.
+ Segment* head = Segment::head();
+ int old_size = (head == NULL) ? 0 : head->size();
+ int new_size = sizeof(Segment) + kAlignment + size + (old_size << 1);
+ if (new_size < kMinimumSegmentSize) new_size = kMinimumSegmentSize;
+ Segment* segment = Segment::New(new_size);
+ if (segment == NULL) V8::FatalProcessOutOfMemory("Zone");
+
+ // Recompute 'top' and 'limit' based on the new segment.
+ Address result = RoundUp(segment->start(), kAlignment);
+ position_ = result + size;
+ limit_ = segment->end();
+ ASSERT(position_ <= limit_);
+ return result;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ZONE_H_
+#define V8_ZONE_H_
+
+namespace v8 { namespace internal {
+
+
+// The Zone supports very fast allocation of small chunks of
+// memory. The chunks cannot be deallocated individually, but instead
+// the Zone supports deallocating all chunks in one fast
+// operation. The Zone is used to hold temporary data structures like
+// the abstract syntax tree, which is deallocated after compilation.
+
+// Note: There is no need to initialize the Zone; the first time an
+// allocation is attempted, a segment of memory will be requested
+// through a call to malloc().
+
+// Note: The implementation is inherently not thread safe. Do not use
+// from multi-threaded code.
+
+class Zone {
+ public:
+ // Allocate 'size' bytes of memory in the Zone; expands the Zone by
+ // allocating new segments of memory on demand using malloc().
+ static inline void* New(int size);
+
+ // Delete all objects and free all memory allocated in the Zone.
+ static void DeleteAll();
+
+ private:
+ // All pointers returned from New() have this alignment.
+ static const int kAlignment = kPointerSize;
+
+ // Never allocate segments smaller than this size in bytes.
+ static const int kMinimumSegmentSize = 8 * KB;
+
+ // Never keep segments larger than this size in bytes around.
+ static const int kMaximumKeptSegmentSize = 64 * KB;
+
+
+ // The Zone is intentionally a singleton; you should not try to
+ // allocate instances of the class.
+ Zone() { UNREACHABLE(); }
+
+
+ // Expand the Zone to hold at least 'size' more bytes and allocate
+ // the bytes. Returns the address of the newly allocated chunk of
+ // memory in the Zone. Should only be called if there isn't enough
+ // room in the Zone already.
+ static Address NewExpand(int size);
+
+
+ // The free region in the current (front) segment is represented as
+ // the half-open interval [position, limit). The 'position' variable
+ // is guaranteed to be aligned as dictated by kAlignment.
+ static Address position_;
+ static Address limit_;
+};
+
+
+// ZoneObject is an abstraction that helps define classes of objects
+// allocated in the Zone. Use it as a base class; see ast.h.
// ZoneObject is an abstraction that helps define classes of objects
// allocated in the Zone. Use it as a base class; see ast.h.
class ZoneObject {
 public:
  // Allocate a new ZoneObject of 'size' bytes in the Zone; simply
  // forwards to Zone::New, so derived objects live until
  // Zone::DeleteAll() is called.
  void* operator new(size_t size) { return Zone::New(size); }

  // Ideally, the delete operator should be private instead of
  // public, but unfortuately the compiler sometimes synthesizes
  // (unused) destructors for classes derived from ZoneObject, which
  // require the operator to be visible. MSVC requires the delete
  // operator to be public.

  // ZoneObjects should never be deleted individually; use
  // Zone::DeleteAll() to delete all zone objects in one go.
  // Any attempt to delete one is a hard error at runtime.
  void operator delete(void*, size_t) { UNREACHABLE(); }
};
+
+
// Scoped guard that forbids Zone allocation for as long as an
// instance is alive. Guards may nest: the constructor saves the
// previous flag value and the destructor restores it. Zone::New is
// expected to check allow_allocation() before allocating.
class AssertNoZoneAllocation {
 public:
  AssertNoZoneAllocation() : prev_(allow_allocation_) {
    allow_allocation_ = false;
  }
  ~AssertNoZoneAllocation() { allow_allocation_ = prev_; }
  static bool allow_allocation() { return allow_allocation_; }
 private:
  // Flag value in effect before this guard was created; restored on
  // destruction so nested guards unwind correctly.
  bool prev_;
  static bool allow_allocation_;
};
+
+
+// The ZoneListAllocationPolicy is used to specialize the GenericList
+// implementation to allocate ZoneLists and their elements in the
+// Zone.
// The ZoneListAllocationPolicy is used to specialize the GenericList
// implementation to allocate ZoneLists and their elements in the
// Zone.
class ZoneListAllocationPolicy {
 public:
  // Allocate 'size' bytes of memory in the zone.
  static void* New(int size) { return Zone::New(size); }

  // De-allocation attempts are silently ignored: Zone memory is only
  // reclaimed wholesale by Zone::DeleteAll().
  static void Delete(void* p) { }
};
+
+
+// ZoneLists are growable lists with constant-time access to the
+// elements. The list itself and all its elements are allocated in the
+// Zone. ZoneLists cannot be deleted individually; you can delete all
+// objects in the Zone by calling Zone::DeleteAll().
// ZoneLists are growable lists with constant-time access to the
// elements. The list itself and all its elements are allocated in the
// Zone. ZoneLists cannot be deleted individually; you can delete all
// objects in the Zone by calling Zone::DeleteAll().
template<typename T>
class ZoneList: public List<T, ZoneListAllocationPolicy> {
 public:
  // Construct a new ZoneList with the given capacity; the length is
  // always zero. The capacity must be non-negative.
  explicit ZoneList(int capacity)
      : List<T, ZoneListAllocationPolicy>(capacity) { }
};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ZONE_H_
--- /dev/null
+# Copyright 2006-2008 Google Inc. All Rights Reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for converting JavaScript source code into C-style
+# char arrays. It is used for embedded JavaScript code in the V8
+# library.
+
+import os, re, sys, string
+
+
def ToCArray(lines):
  """Convert a string of ASCII source text into a C array initializer.

  Returns a comma-separated list of the character codes followed by a
  terminating "0" (NUL), suitable for a `static const char x[] = {...}`
  declaration. Raises AssertionError on non-ASCII input.
  """
  result = []
  # 'ch' instead of 'chr': don't shadow the builtin chr().
  for ch in lines:
    value = ord(ch)
    assert value < 128, "non-ASCII character in embedded JS source"
    result.append(str(value))
  result.append("0")  # NUL terminator for the C string.
  return ", ".join(result)
+
+
def CompressScript(lines):
  """Strip end-of-line comments and trailing whitespace from JS source.

  Removes stuff from the source that we don't want to appear when
  people print the source code using Function.prototype.toString().
  Note that we could easily compress the scripts more but don't,
  since we want the embedded source to remain readable.
  """
  # Raw strings: '\s' is an invalid escape in a plain string literal
  # on modern Python.
  lines = re.sub(r'//.*\n', '\n', lines)  # end-of-line comments
  lines = re.sub(r'\s+\n+', '\n', lines)  # trailing whitespace
  return lines
+
+
def ReadFile(filename):
  """Return the entire contents of 'filename' as a string.

  The file is opened in text mode and always closed, even if the
  read raises.
  """
  # 'f' instead of 'file': don't shadow the builtin.
  f = open(filename, "rt")
  try:
    return f.read()
  finally:
    f.close()
+
+
def ReadLines(filename):
  """Read 'filename' and return its non-empty lines, comments stripped.

  A '#' starts a comment that runs to the end of the line; lines that
  are empty after stripping comments and whitespace are dropped.
  """
  result = []
  # The original iterated an anonymous open() handle that was never
  # closed; keep a reference and close it in a finally block.
  f = open(filename, "rt")
  try:
    for line in f:
      if '#' in line:
        line = line[:line.index('#')]
      line = line.strip()
      if len(line) > 0:
        result.append(line)
  finally:
    f.close()
  return result
+
+
def LoadConfigFrom(name):
  """Read the INI-style config file 'name' and return the parser object."""
  # The module was renamed from ConfigParser to configparser in
  # Python 3; support both so the build tool runs under either.
  try:
    import configparser
  except ImportError:
    import ConfigParser as configparser
  config = configparser.ConfigParser()
  config.read(name)
  return config
+
+
def ParseValue(string):
  """Parse a config value: '[a b c]' yields ['a', 'b', 'c'], anything
  else yields the value with surrounding whitespace removed."""
  stripped = string.strip()
  if not (stripped.startswith('[') and stripped.endswith(']')):
    return stripped
  return stripped.lstrip('[').rstrip(']').split()
+
+
def MakeVersion(source, target):
  """Generate the C++ file 'target' implementing v8::V8::GetVersion from
  the [VERSION] section of the config file 'source'.

  Values may be raw numbers or keyword-expanded strings such as
  '$Revision: 123 $'; the number is extracted in the latter case.
  """
  TEMPLATE = """
  #include "v8.h"

  void v8::V8::GetVersion(v8::VersionInfo *info) {
    info->major = %(major)s;
    info->minor = %(minor)s;
    info->build_major = %(build_major)s;
    info->build_minor = %(build_minor)s;
    info->revision = %(revision)s;
  }
"""
  # Raw string for the regex; matches e.g. '$Revision: 123 $'.
  PATTERN = re.compile(r'\$[a-zA-Z]+:\s*([0-9]+)\s*\$')
  def VersionToInt(value):
    # 'value' instead of 'str': don't shadow the builtin.
    match = PATTERN.match(value)
    if match: return match.group(1)
    else: return value
  config = LoadConfigFrom(source)
  # 'values' instead of 'map': don't shadow the builtin.
  values = { }
  for key, value in config.items('VERSION'):
    values[key] = VersionToInt(value)
  output = TEMPLATE % values
  # Close the output file even if the write fails.
  out = open(target, "w")
  try:
    out.write(output)
  finally:
    out.close()
+
+
def ExpandConstants(lines, constants):
  """Replace every occurrence of each constant's name in 'lines' with
  its (stringified) value and return the result."""
  expanded = lines
  for name, value in constants.items():
    expanded = expanded.replace(name, str(value))
  return expanded
+
def ExpandMacros(lines, macros):
  """Expand every invocation of the given macros in 'lines'.

  'macros' maps a macro name to an object with an 'args' list and an
  'expand(mapping)' method (TextMacro or PythonMacro). Arguments are
  expanded recursively before substitution.

  Bug fix: the original accumulated arguments through a nested
  add_arg() closure that never advanced arg_index, so every argument
  of a multi-argument macro was bound to the macro's FIRST parameter.
  We now collect the raw argument strings first and map them by
  position with enumerate().
  """
  for name, macro in macros.items():
    start = lines.find(name, 0)
    while start != -1:
      # Scan over the arguments; parens/braces/brackets may nest.
      assert lines[start + len(name)] == '('
      height = 1
      end = start + len(name) + 1
      last_match = end
      raw_args = []
      while end < len(lines) and height > 0:
        # Commas only separate arguments at the outermost level.
        if lines[end] == ',' and height == 1:
          raw_args.append(lines[last_match:end])
          last_match = end + 1
        elif lines[end] in ['(', '{', '[']:
          height = height + 1
        elif lines[end] in [')', '}', ']']:
          height = height - 1
        end = end + 1
      # The last argument ends just before the closing parenthesis.
      raw_args.append(lines[last_match:end-1])
      mapping = { }
      for arg_index, raw_arg in enumerate(raw_args):
        # Remember to expand recursively in the arguments.
        mapping[macro.args[arg_index]] = ExpandMacros(raw_arg.strip(), macros)
      result = macro.expand(mapping)
      # Replace the occurrence of the macro with the expansion and
      # resume scanning AFTER the expansion (the original searched
      # from the stale pre-replacement index).
      lines = lines[:start] + result + lines[end:]
      start = lines.find(name, start + len(result))
  return lines
+
class TextMacro:
  """A macro whose body is literal text; expansion substitutes each
  parameter name occurring in the body with its argument value."""
  def __init__(self, args, body):
    self.args = args
    self.body = body
  def expand(self, mapping):
    expansion = self.body
    for name, value in mapping.items():
      expansion = expansion.replace(name, value)
    return expansion
+
class PythonMacro:
  """A macro implemented by a Python callable; expansion calls the
  function with the argument values (in declared order) and returns
  the stringified result."""
  def __init__(self, args, fun):
    self.args = args
    self.fun = fun
  def expand(self, mapping):
    values = [mapping[arg] for arg in self.args]
    return str(self.fun(*values))
+
# Line grammars for macro files (see ReadMacros). Raw strings: '\s'
# is an invalid escape in a plain string literal on modern Python.
CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
+
def ReadMacros(lines):
  """Parse macro-file lines and return (constants, macros).

  'constants' maps name -> value string; 'macros' maps name -> a
  TextMacro or PythonMacro. Raises Exception on unrecognized lines.
  """
  constants = { }
  macros = { }
  for line in lines:
    # Strip comments; 'comment_start' instead of 'hash' avoids
    # shadowing the builtin.
    comment_start = line.find('#')
    if comment_start != -1: line = line[:comment_start]
    line = line.strip()
    # '==', not 'is': identity comparison of ints is an accident of
    # CPython small-int caching.
    if len(line) == 0: continue
    const_match = CONST_PATTERN.match(line)
    if const_match:
      name = const_match.group(1)
      value = const_match.group(2).strip()
      constants[name] = value
    else:
      macro_match = MACRO_PATTERN.match(line)
      if macro_match:
        name = macro_match.group(1)
        # List comprehension instead of map(string.strip, ...): the
        # string module's strip() is Python-2-only.
        args = [arg.strip() for arg in macro_match.group(2).split(',')]
        body = macro_match.group(3).strip()
        macros[name] = TextMacro(args, body)
      else:
        python_match = PYTHON_MACRO_PATTERN.match(line)
        if python_match:
          name = python_match.group(1)
          args = [arg.strip() for arg in python_match.group(2).split(',')]
          body = python_match.group(3).strip()
          # NOTE(review): eval of the macro body — macro files must be
          # trusted build inputs, never user-supplied data.
          fun = eval("lambda " + ",".join(args) + ': ' + body)
          macros[name] = PythonMacro(args, fun)
        else:
          # Raise a real exception; raising a bare string is illegal
          # in Python 3 (and was deprecated in 2.x).
          raise Exception("Illegal line: " + line)
  return (constants, macros)
+
+
# Skeleton of the generated C++ file; the %(...)s/%(...)i slots are
# filled in by JS2C below.
HEADER_TEMPLATE = """\
// Copyright 2008 Google Inc. All Rights Reserved.

// This file was generated from .js source files by SCons. If you
// want to make changes to this file you should either change the
// javascript source files or the SConstruct script.

#include "v8.h"
#include "natives.h"

namespace v8 {
namespace internal {

%(source_lines)s\

  int Natives::GetBuiltinsCount() {
    return %(builtin_count)i;
  }

  int Natives::GetDelayCount() {
    return %(delay_count)i;
  }

  int Natives::GetIndex(const char* name) {
%(get_index_cases)s\
    return -1;
  }

  Vector<const char> Natives::GetScriptSource(int index) {
%(get_script_source_cases)s\
    return Vector<const char>("", 0);
  }

  Vector<const char> Natives::GetScriptName(int index) {
%(get_script_name_cases)s\
    return Vector<const char>("", 0);
  }

} // internal
} // v8
"""


# One C array declaration per embedded script; 'data' is the output
# of ToCArray (or the literal 0 in the "empty" variant).
SOURCE_DECLARATION = """\
  static const char %(id)s[] = { %(data)s };
"""


# Case line for Natives::GetIndex: script name -> numeric index.
GET_DELAY_INDEX_CASE = """\
    if (strcmp(name, "%(id)s") == 0) return %(i)i;
"""


# Case line for Natives::GetScriptSource: index -> source vector.
GET_DELAY_SCRIPT_SOURCE_CASE = """\
    if (index == %(i)i) return Vector<const char>(%(id)s, %(length)i);
"""


# Case line for Natives::GetScriptName: index -> display name.
GET_DELAY_SCRIPT_NAME_CASE = """\
    if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
"""
+
def JS2C(source, target, env):
  """Convert JavaScript source files into generated C++ sources.

  'source' is a list of input paths; a file named macros.py supplies
  constants and macros applied to all scripts, files ending in
  '-delay.js' are registered as lazily-loaded scripts. Two outputs are
  written: target[0] with the real script data, target[1] an "empty"
  variant where non-delayed scripts have their data replaced by 0.
  'env' is unused (SCons builder signature).
  """
  ids = []        # (id, source length) of eagerly loaded scripts
  delay_ids = []  # (id, source length) of lazily loaded scripts
  modules = []
  # Locate the macros file name.
  consts = {}
  macros = {}
  for s in source:
    if os.path.split(str(s))[1] == 'macros.py':
      (consts, macros) = ReadMacros(ReadLines(str(s)))
    else:
      modules.append(s)

  # Build source code lines.
  source_lines = [ ]
  source_lines_empty = []
  for s in modules:
    delay = str(s).endswith('-delay.js')
    lines = ReadFile(str(s))
    lines = ExpandConstants(lines, consts)
    lines = ExpandMacros(lines, macros)
    lines = CompressScript(lines)
    data = ToCArray(lines)
    id = (os.path.split(str(s))[1])[:-3]  # strip '.js'
    if delay: id = id[:-6]                # strip '-delay' as well
    if delay:
      delay_ids.append((id, len(lines)))
      source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
    else:
      ids.append((id, len(lines)))
      source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
    source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })

  # Build the delay support functions. The two per-kind loops of the
  # original were byte-for-byte duplicates with a manual counter;
  # enumerate over the concatenation instead. Delayed scripts come
  # first so indices 0 .. delay_count-1 refer to delayed scripts.
  get_index_cases = [ ]
  get_script_source_cases = [ ]
  get_script_name_cases = [ ]
  for i, (id, length) in enumerate(delay_ids + ids):
    native_name = "native %s.js" % id
    get_index_cases.append(GET_DELAY_INDEX_CASE % { 'id': id, 'i': i })
    get_script_source_cases.append(GET_DELAY_SCRIPT_SOURCE_CASE % {
      'id': id,
      'length': length,
      'i': i
    })
    get_script_name_cases.append(GET_DELAY_SCRIPT_NAME_CASE % {
      'name': native_name,
      'length': len(native_name),
      'i': i
    })

  # Emit result. The two outputs differ only in the script data.
  def _WriteOutput(path, lines):
    # Render HEADER_TEMPLATE with the given source declarations and
    # write it to 'path', closing the file even on error.
    output = open(str(path), "w")
    try:
      output.write(HEADER_TEMPLATE % {
        'builtin_count': len(ids) + len(delay_ids),
        'delay_count': len(delay_ids),
        'source_lines': "\n".join(lines),
        'get_index_cases': "".join(get_index_cases),
        'get_script_source_cases': "".join(get_script_source_cases),
        'get_script_name_cases': "".join(get_script_name_cases)
      })
    finally:
      output.close()
  _WriteOutput(target[0], source_lines)
  _WriteOutput(target[1], source_lines_empty)
+
def main():
  """Command line: js2c.py <output-file> <input-js-files...>.

  Note: only one output path is accepted here, so only target[0] of
  JS2C's two outputs is produced when run standalone.
  """
  # 'target'/'sources' instead of 'output'/'input': don't shadow the
  # builtin input().
  target = sys.argv[1]
  sources = sys.argv[2:]
  JS2C(sources, [target], None)

if __name__ == "__main__":
  main()