From: kasperl@chromium.org Date: Tue, 7 Dec 2010 11:01:02 +0000 (+0000) Subject: Revert r5920. Will re-land shortly. X-Git-Tag: upstream/4.7.83~20879 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=51b494d09693941767d03487d2e4c89324fce13e;p=platform%2Fupstream%2Fv8.git Revert r5920. Will re-land shortly. git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5921 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- diff --git a/SConstruct b/SConstruct index 63fc33c68..ca63c299c 100644 --- a/SConstruct +++ b/SConstruct @@ -523,8 +523,7 @@ SAMPLE_FLAGS = { 'CCFLAGS': ['-O2'] }, 'mode:debug': { - 'CCFLAGS': ['-g', '-O0'], - 'CPPDEFINES': ['DEBUG'] + 'CCFLAGS': ['-g', '-O0'] }, 'prof:oprofile': { 'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'], @@ -579,14 +578,13 @@ SAMPLE_FLAGS = { 'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752'] }, 'mode:debug': { - 'CCFLAGS': ['/Od'], - 'LINKFLAGS': ['/DEBUG'], - 'CPPDEFINES': ['DEBUG'], + 'CCFLAGS': ['/Od'], + 'LINKFLAGS': ['/DEBUG'], 'msvcrt:static': { - 'CCFLAGS': ['/MTd'] + 'CCFLAGS': ['/MTd'] }, 'msvcrt:shared': { - 'CCFLAGS': ['/MDd'] + 'CCFLAGS': ['/MDd'] } } } diff --git a/include/v8-debug.h b/include/v8-debug.h old mode 100644 new mode 100755 diff --git a/include/v8-testing.h b/include/v8-testing.h deleted file mode 100644 index 4db30a440..000000000 --- a/include/v8-testing.h +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_V8_TEST_H_ -#define V8_V8_TEST_H_ - -#include "v8.h" - -#ifdef _WIN32 -// Setup for Windows DLL export/import. See v8.h in this directory for -// information on how to build/use V8 as a DLL. 
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif
-
-#else  // _WIN32
-
-// Setup for Linux shared library export. See v8.h in this directory for
-// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else  // defined(__GNUC__) && (__GNUC__ >= 4)
-#define V8EXPORT
-#endif  // defined(__GNUC__) && (__GNUC__ >= 4)
-
-#endif  // _WIN32
-
-
-/**
- * Testing support for the V8 JavaScript engine.
- */
-namespace v8 {
-
-class V8EXPORT Testing {
- public:
-  enum StressType {
-    kStressTypeOpt,
-    kStressTypeDeopt
-  };
-
-  /**
-   * Set the type of stressing to do. The default if not set is kStressTypeOpt.
-   */
-  static void SetStressRunType(StressType type);
-
-  /**
-   * Get the number of runs of a given test that is required to get the full
-   * stress coverage.
-   */
-  static int GetStressRuns();
-
-  /**
-   * Indicate the number of the run which is about to start. The value of run
-   * should be between 0 and one less than the result from GetStressRuns()
-   */
-  static void PrepareStressRun(int run);
-};
-
-
-}  // namespace v8
-
-
-#undef V8EXPORT
-
-
-#endif  // V8_V8_TEST_H_
diff --git a/samples/shell.cc b/samples/shell.cc
index 460457552..1a13f5f80 100644
--- a/samples/shell.cc
+++ b/samples/shell.cc
@@ -26,7 +26,6 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <v8.h>
-#include <v8-testing.h>
 #include <fcntl.h>
 #include <string.h>
 #include <stdio.h>
@@ -45,10 +44,10 @@ v8::Handle<v8::Value> Quit(const v8::Arguments& args);
 v8::Handle<v8::Value> Version(const v8::Arguments& args);
 v8::Handle<v8::String> ReadFile(const char* name);
 void ReportException(v8::TryCatch* handler);
-void SetFlagsFromString(const char* flags);
 
 
 int RunMain(int argc, char* argv[]) {
+  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   v8::HandleScope handle_scope;
   // Create a template for the global object.
   v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
@@ -64,11 +63,11 @@ int RunMain(int argc, char* argv[]) {
   global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
   // Create a new execution environment containing the built-in
   // functions
-  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+  v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
+  // Enter the newly created execution environment.
+  v8::Context::Scope context_scope(context);
   bool run_shell = (argc == 1);
   for (int i = 1; i < argc; i++) {
-    // Enter the execution environment before evaluating any code.
-    v8::Context::Scope context_scope(context);
     const char* str = argv[i];
     if (strcmp(str, "--shell") == 0) {
       run_shell = true;
@@ -100,48 +99,12 @@ int RunMain(int argc, char* argv[]) {
     }
   }
   if (run_shell) RunShell(context);
-  context.Dispose();
   return 0;
 }
 
 
 int main(int argc, char* argv[]) {
-  // Figure out if we're requested to stress the optimization
-  // infrastructure by running tests multiple times and forcing
-  // optimization in the last run.
-  bool FLAG_stress_opt = false;
-  bool FLAG_stress_deopt = false;
-  for (int i = 0; i < argc; i++) {
-    if (strcmp(argv[i], "--stress-opt") == 0) {
-      FLAG_stress_opt = true;
-      argv[i] = NULL;
-    } else if (strcmp(argv[i], "--stress-deopt") == 0) {
-      FLAG_stress_deopt = true;
-      argv[i] = NULL;
-    } else if (strcmp(argv[i], "--noalways-opt") == 0) {
-      // No support for stressing if we can't use --always-opt.
-      FLAG_stress_opt = false;
-      FLAG_stress_deopt = false;
-      break;
-    }
-  }
-
-  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
-  int result = 0;
-  if (FLAG_stress_opt || FLAG_stress_deopt) {
-    v8::Testing::SetStressRunType(FLAG_stress_opt
-                                  ? v8::Testing::kStressTypeOpt
-                                  : v8::Testing::kStressTypeDeopt);
-    int stress_runs = v8::Testing::GetStressRuns();
-    for (int i = 0; i < stress_runs && result == 0; i++) {
-      printf("============ Stress %d/%d ============\n",
-             i + 1, stress_runs);
-      v8::Testing::PrepareStressRun(i);
-      result = RunMain(argc, argv);
-    }
-  } else {
-    result = RunMain(argc, argv);
-  }
+  int result = RunMain(argc, argv);
   v8::V8::Dispose();
   return result;
 }
@@ -258,8 +221,6 @@ v8::Handle<v8::String> ReadFile(const char* name) {
 void RunShell(v8::Handle<v8::Context> context) {
   printf("V8 version %s\n", v8::V8::GetVersion());
   static const int kBufferSize = 256;
-  // Enter the execution environment before evaluating any code.
-  v8::Context::Scope context_scope(context);
   while (true) {
     char buffer[kBufferSize];
     printf("> ");
@@ -345,8 +306,3 @@ void ReportException(v8::TryCatch* try_catch) {
     }
   }
 }
-
-
-void SetFlagsFromString(const char* flags) {
-  v8::V8::SetFlagsFromString(flags, strlen(flags));
-}
diff --git a/src/SConscript b/src/SConscript
index 8ab781e80..895369879 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -40,7 +40,6 @@ SOURCES = {
   api.cc
   assembler.cc
   ast.cc
-  atomicops_internals_x86_gcc.cc
   bignum.cc
   bignum-dtoa.cc
   bootstrapper.cc
@@ -60,7 +59,6 @@
   dateparser.cc
   debug-agent.cc
   debug.cc
-  deoptimizer.cc
   disassembler.cc
   diy-fp.cc
   dtoa.cc
@@ -78,13 +76,10 @@
   hashmap.cc
   heap-profiler.cc
   heap.cc
-  hydrogen.cc
-  hydrogen-instructions.cc
   ic.cc
   interpreter-irregexp.cc
   jsregexp.cc
   jump-target.cc
-  lithium-allocator.cc
   liveedit.cc
   log-utils.cc
   log.cc
@@ -104,8 +99,6 @@
   register-allocator.cc
   rewriter.cc
   runtime.cc
-  runtime-profiler.cc
-  safepoint-table.cc
   scanner-base.cc
   scanner.cc
   scopeinfo.cc
@@ -141,14 +134,11 @@
   arm/constants-arm.cc
   arm/cpu-arm.cc
   arm/debug-arm.cc
-  arm/deoptimizer-arm.cc
   arm/disasm-arm.cc
   arm/frames-arm.cc
   arm/full-codegen-arm.cc
   arm/ic-arm.cc
   arm/jump-target-arm.cc
-  arm/lithium-arm.cc
-  arm/lithium-codegen-arm.cc
   arm/macro-assembler-arm.cc
   arm/regexp-macro-assembler-arm.cc
   arm/register-allocator-arm.cc
@@ -182,14 +172,11 @@
   ia32/codegen-ia32.cc
   ia32/cpu-ia32.cc
   ia32/debug-ia32.cc
-  ia32/deoptimizer-ia32.cc
   ia32/disasm-ia32.cc
   ia32/frames-ia32.cc
   ia32/full-codegen-ia32.cc
   ia32/ic-ia32.cc
   ia32/jump-target-ia32.cc
-  ia32/lithium-codegen-ia32.cc
-  ia32/lithium-ia32.cc
   ia32/macro-assembler-ia32.cc
   ia32/regexp-macro-assembler-ia32.cc
   ia32/register-allocator-ia32.cc
@@ -205,7 +192,6 @@
   x64/codegen-x64.cc
   x64/cpu-x64.cc
   x64/debug-x64.cc
-  x64/deoptimizer-x64.cc
   x64/disasm-x64.cc
   x64/frames-x64.cc
   x64/full-codegen-x64.cc
diff --git a/src/accessors.cc b/src/accessors.cc
index 43d54fe47..08ef41b9f 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -28,11 +28,8 @@
 #include "v8.h"
 
 #include "accessors.h"
-#include "ast.h"
-#include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
-#include "safepoint-table.h"
 #include "scopeinfo.h"
 #include "top.h"
@@ -506,9 +503,11 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
     // If the function isn't compiled yet, the length is not computed
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
-    Handle<JSFunction> handle(function);
-    if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
-    return Smi::FromInt(handle->shared()->length());
+    Handle<SharedFunctionInfo> shared(function->shared());
+    if (!CompileLazyShared(shared, KEEP_EXCEPTION)) {
+      return Failure::Exception();
+    }
+    return Smi::FromInt(shared->length());
   } else {
     return Smi::FromInt(function->shared()->length());
   }
@@ -546,208 +545,6 @@ const AccessorDescriptor Accessors::FunctionName = {
 // Accessors::FunctionArguments
 //
 
-static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
-  if (slot_index >= 0) {
-    const int offset = JavaScriptFrameConstants::kLocal0Offset;
-    return frame->fp() + offset - (slot_index * kPointerSize);
-  } else {
-    const int offset = JavaScriptFrameConstants::kReceiverOffset;
-    return frame->caller_sp() + offset + (slot_index * kPointerSize);
-  }
-}
-
-
-// We can't intermix stack decoding and allocations because
-// deoptimization infrastructure is not GC safe.
-// Thus we build a temporary structure in malloced space.
-class SlotRef BASE_EMBEDDED {
- public:
-  enum SlotRepresentation {
-    UNKNOWN,
-    TAGGED,
-    INT32,
-    DOUBLE,
-    LITERAL
-  };
-
-  SlotRef()
-      : addr_(NULL), representation_(UNKNOWN) { }
-
-  SlotRef(Address addr, SlotRepresentation representation)
-      : addr_(addr), representation_(representation) { }
-
-  explicit SlotRef(Object* literal)
-      : literal_(literal), representation_(LITERAL) { }
-
-  Handle<Object> GetValue() {
-    switch (representation_) {
-      case TAGGED:
-        return Handle<Object>(Memory::Object_at(addr_));
-
-      case INT32: {
-        int value = Memory::int32_at(addr_);
-        if (Smi::IsValid(value)) {
-          return Handle<Object>(Smi::FromInt(value));
-        } else {
-          return Factory::NewNumberFromInt(value);
-        }
-      }
-
-      case DOUBLE: {
-        double value = Memory::double_at(addr_);
-        return Factory::NewNumber(value);
-      }
-
-      case LITERAL:
-        return literal_;
-
-      default:
-        UNREACHABLE();
-        return Handle<Object>::null();
-    }
-  }
-
- private:
-  Address addr_;
-  Handle<Object> literal_;
-  SlotRepresentation representation_;
-};
-
-
-static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
-                                          DeoptimizationInputData* data,
-                                          JavaScriptFrame* frame) {
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-
-  switch (opcode) {
-    case Translation::BEGIN:
-    case Translation::FRAME:
-      // Peeled off before getting here.
-      break;
-
-    case Translation::ARGUMENTS_OBJECT:
-      // This can be only emitted for local slots not for argument slots.
-      break;
-
-    case Translation::REGISTER:
-    case Translation::INT32_REGISTER:
-    case Translation::DOUBLE_REGISTER:
-    case Translation::DUPLICATE:
-      // We are at safepoint which corresponds to call.  All registers are
-      // saved by caller so there would be no live registers at this
-      // point. Thus these translation commands should not be used.
- break; - - case Translation::STACK_SLOT: { - int slot_index = iterator->Next(); - Address slot_addr = SlotAddress(frame, slot_index); - return SlotRef(slot_addr, SlotRef::TAGGED); - } - - case Translation::INT32_STACK_SLOT: { - int slot_index = iterator->Next(); - Address slot_addr = SlotAddress(frame, slot_index); - return SlotRef(slot_addr, SlotRef::INT32); - } - - case Translation::DOUBLE_STACK_SLOT: { - int slot_index = iterator->Next(); - Address slot_addr = SlotAddress(frame, slot_index); - return SlotRef(slot_addr, SlotRef::DOUBLE); - } - - case Translation::LITERAL: { - int literal_index = iterator->Next(); - return SlotRef(data->LiteralArray()->get(literal_index)); - } - } - - UNREACHABLE(); - return SlotRef(); -} - - - - - -static void ComputeSlotMappingForArguments(JavaScriptFrame* frame, - int inlined_frame_index, - Vector* args_slots) { - AssertNoAllocation no_gc; - - int deopt_index = AstNode::kNoNumber; - - DeoptimizationInputData* data = - static_cast(frame)->GetDeoptimizationData(&deopt_index); - - TranslationIterator it(data->TranslationByteArray(), - data->TranslationIndex(deopt_index)->value()); - - Translation::Opcode opcode = static_cast(it.Next()); - ASSERT(opcode == Translation::BEGIN); - int frame_count = it.Next(); - - USE(frame_count); - ASSERT(frame_count > inlined_frame_index); - - int frames_to_skip = inlined_frame_index; - while (true) { - opcode = static_cast(it.Next()); - - // Skip over operands to advance to the next opcode. - it.Skip(Translation::NumberOfOperandsFor(opcode)); - - if (opcode == Translation::FRAME) { - if (frames_to_skip == 0) { - // We reached frame corresponding to inlined function in question. - // Process translation commands for arguments. - - // Skip translation command for receiver. - it.Skip(Translation::NumberOfOperandsFor( - static_cast(it.Next()))); - - // Compute slots for arguments. - for (int i = 0; i < args_slots->length(); ++i) { - (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame); - } - - return; - } - - frames_to_skip--; - } - } - - UNREACHABLE(); -} - - -static MaybeObject* ConstructArgumentsObjectForInlinedFunction( - JavaScriptFrame* frame, - Handle inlined_function, - int inlined_frame_index) { - - int args_count = inlined_function->shared()->formal_parameter_count(); - - ScopedVector args_slots(args_count); - - ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots); - - Handle arguments = - Factory::NewArgumentsObject(inlined_function, args_count); - - Handle array = Factory::NewFixedArray(args_count); - for (int i = 0; i < args_count; ++i) { - Handle value = args_slots[i].GetValue(); - array->set(i, *value); - } - arguments->set_elements(*array); - - // Return the freshly allocated arguments object. - return *arguments; -} - MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) { HandleScope scope; @@ -757,50 +554,38 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) { Handle function(holder); // Find the top invocation of the function by traversing frames. - List functions(2); for (JavaScriptFrameIterator it; !it.done(); it.Advance()) { + // Skip all frames that aren't invocations of the given function. JavaScriptFrame* frame = it.frame(); - frame->GetFunctions(&functions); - for (int i = functions.length() - 1; i >= 0; i--) { - // Skip all frames that aren't invocations of the given function. - if (functions[i] != *function) continue; - - if (i > 0) { - // Function in question was inlined. 
- return ConstructArgumentsObjectForInlinedFunction(frame, function, i); - } else { - // If there is an arguments variable in the stack, we return that. - int index = function->shared()->scope_info()-> - StackSlotIndex(Heap::arguments_symbol()); - if (index >= 0) { - Handle arguments = - Handle(frame->GetExpression(index)); - if (!arguments->IsTheHole()) return *arguments; - } - - // If there isn't an arguments variable in the stack, we need to - // find the frame that holds the actual arguments passed to the - // function on the stack. - it.AdvanceToArgumentsFrame(); - frame = it.frame(); - - // Get the number of arguments and construct an arguments object - // mirror for the right frame. - const int length = frame->GetProvidedParametersCount(); - Handle arguments = Factory::NewArgumentsObject(function, - length); - Handle array = Factory::NewFixedArray(length); - - // Copy the parameters to the arguments object. - ASSERT(array->length() == length); - for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i)); - arguments->set_elements(*array); - - // Return the freshly allocated arguments object. - return *arguments; - } + if (frame->function() != *function) continue; + + // If there is an arguments variable in the stack, we return that. + int index = function->shared()->scope_info()-> + StackSlotIndex(Heap::arguments_symbol()); + if (index >= 0) { + Handle arguments = Handle(frame->GetExpression(index)); + if (!arguments->IsTheHole()) return *arguments; } - functions.Rewind(0); + + // If there isn't an arguments variable in the stack, we need to + // find the frame that holds the actual arguments passed to the + // function on the stack. + it.AdvanceToArgumentsFrame(); + frame = it.frame(); + + // Get the number of arguments and construct an arguments object + // mirror for the right frame. + const int length = frame->GetProvidedParametersCount(); + Handle arguments = Factory::NewArgumentsObject(function, length); + Handle array = Factory::NewFixedArray(length); + + // Copy the parameters to the arguments object. + ASSERT(array->length() == length); + for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i)); + arguments->set_elements(*array); + + // Return the freshly allocated arguments object. + return *arguments; } // No frame corresponding to the given function found. Return null. @@ -828,34 +613,19 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) { if (!found_it) return Heap::undefined_value(); Handle function(holder); - List functions(2); + // Find the top invocation of the function by traversing frames. for (JavaScriptFrameIterator it; !it.done(); it.Advance()) { - JavaScriptFrame* frame = it.frame(); - frame->GetFunctions(&functions); - for (int i = functions.length() - 1; i >= 0; i--) { - if (functions[i] == *function) { - // Once we have found the frame, we need to go to the caller - // frame. This may require skipping through a number of top-level - // frames, e.g. frames for scripts not functions. - if (i > 0) { - ASSERT(!functions[i - 1]->shared()->is_toplevel()); - return functions[i - 1]; - } else { - for (it.Advance(); !it.done(); it.Advance()) { - frame = it.frame(); - functions.Rewind(0); - frame->GetFunctions(&functions); - if (!functions.last()->shared()->is_toplevel()) { - return functions.last(); - } - ASSERT(functions.length() == 1); - } - if (it.done()) return Heap::null_value(); - break; - } - } + // Skip all frames that aren't invocations of the given function. 
+ if (it.frame()->function() != *function) continue; + // Once we have found the frame, we need to go to the caller + // frame. This may require skipping through a number of top-level + // frames, e.g. frames for scripts not functions. + while (true) { + it.Advance(); + if (it.done()) return Heap::null_value(); + JSFunction* caller = JSFunction::cast(it.frame()->function()); + if (!caller->shared()->is_toplevel()) return caller; } - functions.Rewind(0); } // No frame corresponding to the given function found. Return null. diff --git a/src/accessors.h b/src/accessors.h index 14ccc8fb8..96d742ef6 100644 --- a/src/accessors.h +++ b/src/accessors.h @@ -78,14 +78,13 @@ class Accessors : public AllStatic { MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object, void*); MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object, - Object* value, - void*); - static MaybeObject* FunctionGetArguments(Object* object, void*); - + Object* value, + void*); private: // Accessor functions only used through the descriptor. static MaybeObject* FunctionGetLength(Object* object, void*); static MaybeObject* FunctionGetName(Object* object, void*); + static MaybeObject* FunctionGetArguments(Object* object, void*); static MaybeObject* FunctionGetCaller(Object* object, void*); MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object, Object* value, void*); diff --git a/src/api.cc b/src/api.cc index 0ec8cf123..42c1db461 100644 --- a/src/api.cc +++ b/src/api.cc @@ -33,7 +33,6 @@ #include "bootstrapper.h" #include "compiler.h" #include "debug.h" -#include "deoptimizer.h" #include "execution.h" #include "global-handles.h" #include "heap-profiler.h" @@ -41,21 +40,18 @@ #include "parser.h" #include "platform.h" #include "profile-generator-inl.h" -#include "runtime-profiler.h" #include "serialize.h" #include "snapshot.h" #include "top.h" #include "v8threads.h" #include "version.h" -#include "vm-state-inl.h" #include "../include/v8-profiler.h" -#include "../include/v8-testing.h" #define LOG_API(expr) LOG(ApiEntryCall(expr)) #ifdef ENABLE_VMSTATE_TRACKING -#define ENTER_V8 ASSERT(i::V8::IsRunning()); i::VMState __state__(i::OTHER) +#define ENTER_V8 i::VMState __state__(i::OTHER) #define LEAVE_V8 i::VMState __state__(i::EXTERNAL) #else #define ENTER_V8 ((void) 0) @@ -101,7 +97,6 @@ namespace v8 { } \ } while (false) - // --- D a t a t h a t i s s p e c i f i c t o a t h r e a d --- @@ -2317,11 +2312,6 @@ bool v8::Object::ForceDelete(v8::Handle key) { HandleScope scope; i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); - - // When turning on access checks for a global object deoptimize all functions - // as optimized code does not always handle access checks. - i::Deoptimizer::DeoptimizeGlobalObject(*self); - EXCEPTION_PREAMBLE(); i::Handle obj = i::ForceDeleteProperty(self, key_obj); has_pending_exception = obj.is_null(); @@ -2608,10 +2598,6 @@ void v8::Object::TurnOnAccessCheck() { HandleScope scope; i::Handle obj = Utils::OpenHandle(this); - // When turning on access checks for a global object deoptimize all functions - // as optimized code does not always handle access checks. 
-  i::Deoptimizer::DeoptimizeGlobalObject(*obj);
-
   i::Handle<i::Map> new_map =
       i::Factory::CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
   new_map->set_is_access_check_needed(true);
@@ -3276,6 +3262,7 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
 
 bool v8::V8::Initialize() {
   if (i::V8::IsRunning()) return true;
+  ENTER_V8;
   HandleScope scope;
   if (i::Snapshot::Initialize()) return true;
   return i::V8::Initialize(NULL);
@@ -3399,7 +3386,6 @@ Persistent<Context> v8::Context::New(
     global_constructor->set_needs_access_check(
         proxy_constructor->needs_access_check());
   }
-  i::RuntimeProfiler::Reset();
   }
 
   // Leave V8.
@@ -4959,66 +4945,6 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
-v8::Testing::StressType internal::Testing::stress_type_ =
-    v8::Testing::kStressTypeOpt;
-
-
-void Testing::SetStressRunType(Testing::StressType type) {
-  internal::Testing::set_stress_type(type);
-}
-
-int Testing::GetStressRuns() {
-#ifdef DEBUG
-  // In debug mode the code runs much slower so stressing will only make two
-  // runs.
-  return 2;
-#else
-  return 5;
-#endif
-}
-
-
-static void SetFlagsFromString(const char* flags) {
-  V8::SetFlagsFromString(flags, i::StrLength(flags));
-}
-
-
-void Testing::PrepareStressRun(int run) {
-  static const char* kLazyOptimizations =
-      "--prepare-always-opt --nolimit-inlining "
-      "--noalways-opt --noopt-eagerly";
-  static const char* kEagerOptimizations = "--opt-eagerly";
-  static const char* kForcedOptimizations = "--always-opt";
-
-  // If deoptimization is stressed, turn on frequent deoptimization. If no
-  // value is specified through --deopt-every-n-times, use a default value.
-  static const char* kDeoptEvery13Times = "--deopt-every-n-times=13";
-  if (internal::Testing::stress_type() == Testing::kStressTypeDeopt &&
-      internal::FLAG_deopt_every_n_times == 0) {
-    SetFlagsFromString(kDeoptEvery13Times);
-  }
-
-#ifdef DEBUG
-  // As stressing in debug mode only makes two runs, skip the deopt
-  // stressing here.
-  if (run == GetStressRuns() - 1) {
-    SetFlagsFromString(kForcedOptimizations);
-  } else {
-    SetFlagsFromString(kEagerOptimizations);
-    SetFlagsFromString(kLazyOptimizations);
-  }
-#else
-  if (run == GetStressRuns() - 1) {
-    SetFlagsFromString(kForcedOptimizations);
-  } else if (run == GetStressRuns() - 2) {
-    SetFlagsFromString(kEagerOptimizations);
-  } else {
-    SetFlagsFromString(kLazyOptimizations);
-  }
-#endif
-}
-
-
 namespace internal {
diff --git a/src/api.h b/src/api.h
index d07d75b91..e36160cf4 100644
--- a/src/api.h
+++ b/src/api.h
@@ -31,8 +31,6 @@
 #include "apiutils.h"
 #include "factory.h"
 
-#include "../include/v8-testing.h"
-
 namespace v8 {
 
 // Constants used in the implementation of the API.  The most natural thing
@@ -491,18 +489,6 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
          (!blocks_.is_empty() && prev_limit != NULL));
 }
 
-
-class Testing {
- public:
-  static v8::Testing::StressType stress_type() { return stress_type_; }
-  static void set_stress_type(v8::Testing::StressType stress_type) {
-    stress_type_ = stress_type;
-  }
-
- private:
-  static v8::Testing::StressType stress_type_;
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_API_H_
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 68d32f1eb..15720c956 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -110,30 +110,6 @@ Address* RelocInfo::target_reference_address() {
 }
 
 
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
-  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
-  Address address = Memory::Address_at(pc_);
-  return Handle<JSGlobalPropertyCell>(
-      reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
-  Address address = Memory::Address_at(pc_);
-  Object* object = HeapObject::FromAddress(
-      address - JSGlobalPropertyCell::kValueOffset);
-  return reinterpret_cast<JSGlobalPropertyCell*>(object);
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
-  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
-  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
-  Memory::Address_at(pc_) = address;
-}
-
-
 Address RelocInfo::call_address() {
   // The 2 instructions offset assumes patched debug break slot or return
   // sequence.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 8fdcf1821..cfdd16496 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -70,7 +70,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
 #endif  // def __arm__
 
 
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
 #ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
@@ -81,7 +81,7 @@
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     supported_ |= CpuFeaturesImpliedByCompiler();
     return;  // No features if we might serialize.
@@ -98,8 +98,6 @@
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
-
-  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
 
 
@@ -320,10 +318,7 @@ static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size)
-    : positions_recorder_(this),
-      allow_peephole_optimization_(false) {
-  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
-  allow_peephole_optimization_ = FLAG_peephole_optimization;
+    : positions_recorder_(this) {
   if (buffer == NULL) {
     // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) { @@ -992,7 +987,6 @@ void Assembler::b(int branch_offset, Condition cond) { void Assembler::bl(int branch_offset, Condition cond) { - positions_recorder()->WriteRecordedPositions(); ASSERT((branch_offset & 3) == 0); int imm24 = branch_offset >> 2; ASSERT(is_int24(imm24)); @@ -1656,10 +1650,9 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) { emit(reinterpret_cast(msg)); #else // def __arm__ #ifdef CAN_USE_ARMV5_INSTRUCTIONS - ASSERT(cond == al); bkpt(0); #else // ndef CAN_USE_ARMV5_INSTRUCTIONS - svc(0x9f0001, cond); + svc(0x9f0001); #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS #endif // def __arm__ } @@ -1833,18 +1826,13 @@ void Assembler::vldr(const DwVfpRegister dst, const Condition cond) { // Ddst = MEM(Rbase + offset). // Instruction details available in ARM DDI 0406A, A8-628. - // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | + // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) | // Vdst(15-12) | 1011(11-8) | offset ASSERT(CpuFeatures::IsEnabled(VFP3)); - int u = 1; - if (offset < 0) { - offset = -offset; - u = 0; - } ASSERT(offset % 4 == 0); ASSERT((offset / 4) < 256); ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | + emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 | 0xB*B8 | ((offset / 4) & 255)); } @@ -1855,20 +1843,15 @@ void Assembler::vldr(const SwVfpRegister dst, const Condition cond) { // Sdst = MEM(Rbase + offset). // Instruction details available in ARM DDI 0406A, A8-628. - // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | + // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | offset ASSERT(CpuFeatures::IsEnabled(VFP3)); - int u = 1; - if (offset < 0) { - offset = -offset; - u = 0; - } ASSERT(offset % 4 == 0); ASSERT((offset / 4) < 256); ASSERT(offset >= 0); int sd, d; dst.split_code(&sd, &d); - emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | + emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); } @@ -1879,18 +1862,13 @@ void Assembler::vstr(const DwVfpRegister src, const Condition cond) { // MEM(Rbase + offset) = Dsrc. // Instruction details available in ARM DDI 0406A, A8-786. - // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) | + // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) | // Vsrc(15-12) | 1011(11-8) | (offset/4) ASSERT(CpuFeatures::IsEnabled(VFP3)); - int u = 1; - if (offset < 0) { - offset = -offset; - u = 0; - } ASSERT(offset % 4 == 0); ASSERT((offset / 4) < 256); ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | + emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 | 0xB*B8 | ((offset / 4) & 255)); } @@ -1901,20 +1879,15 @@ void Assembler::vstr(const SwVfpRegister src, const Condition cond) { // MEM(Rbase + offset) = SSrc. // Instruction details available in ARM DDI 0406A, A8-786. 
- // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | + // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | (offset/4) ASSERT(CpuFeatures::IsEnabled(VFP3)); - int u = 1; - if (offset < 0) { - offset = -offset; - u = 0; - } ASSERT(offset % 4 == 0); ASSERT((offset / 4) < 256); ASSERT(offset >= 0); int sd, d; src.split_code(&sd, &d); - emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | + emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); } @@ -2438,7 +2411,7 @@ void Assembler::RecordDebugBreakSlot() { void Assembler::RecordComment(const char* msg) { - if (FLAG_code_comments) { + if (FLAG_debug_code) { CheckBuffer(); RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast(msg)); } @@ -2496,20 +2469,6 @@ void Assembler::GrowBuffer() { } -void Assembler::db(uint8_t data) { - CheckBuffer(); - *reinterpret_cast(pc_) = data; - pc_ += sizeof(uint8_t); -} - - -void Assembler::dd(uint32_t data) { - CheckBuffer(); - *reinterpret_cast(pc_) = data; - pc_ += sizeof(uint32_t); -} - - void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index 36f7507fe..ee4c9aa52 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -69,39 +69,7 @@ namespace internal { // // Core register struct Register { - static const int kNumRegisters = 16; - static const int kNumAllocatableRegisters = 8; - - static int ToAllocationIndex(Register reg) { - return reg.code(); - } - - static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - return from_code(index); - } - - static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - const char* const names[] = { - "r0", - "r1", - "r2", - "r3", - "r4", - "r5", - "r6", - "r7", - }; - return names[index]; - } - - static Register from_code(int code) { - Register r = { code }; - return r; - } - - bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } + bool is_valid() const { return 0 <= code_ && code_ < 16; } bool is(Register reg) const { return code_ == reg.code_; } int code() const { ASSERT(is_valid()); @@ -164,48 +132,6 @@ struct SwVfpRegister { // Double word VFP register. struct DwVfpRegister { - // d0 has been excluded from allocation. This is following ia32 - // where xmm0 is excluded. This should be revisited. - static const int kNumRegisters = 16; - static const int kNumAllocatableRegisters = 15; - - static int ToAllocationIndex(DwVfpRegister reg) { - ASSERT(reg.code() != 0); - return reg.code() - 1; - } - - static DwVfpRegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - return from_code(index + 1); - } - - static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - const char* const names[] = { - "d1", - "d2", - "d3", - "d4", - "d5", - "d6", - "d7", - "d8", - "d9", - "d10", - "d11", - "d12", - "d13", - "d14", - "d15" - }; - return names[index]; - } - - static DwVfpRegister from_code(int code) { - DwVfpRegister r = { code }; - return r; - } - // Supporting d0 to d15, can be later extended to d31. 
bool is_valid() const { return 0 <= code_ && code_ < 16; } bool is(DwVfpRegister reg) const { return code_ == reg.code_; } @@ -241,9 +167,6 @@ struct DwVfpRegister { }; -typedef DwVfpRegister DoubleRegister; - - // Support for the VFP registers s0 to s31 (d0 to d15). // Note that "s(N):s(N+1)" is the same as "d(N/2)". const SwVfpRegister s0 = { 0 }; @@ -363,9 +286,6 @@ enum Coprocessor { // Condition field in instructions. enum Condition { - // any value < 0 is considered no_condition - no_condition = -1, - eq = 0 << 28, // Z set equal. ne = 1 << 28, // Z clear not equal. nz = 1 << 28, // Z clear not zero. @@ -607,7 +527,7 @@ class CpuFeatures : public AllStatic { public: // Detect features of the target CPU. Set safe defaults if the serializer // is enabled (snapshots must be portable). - static void Probe(bool portable); + static void Probe(); // Check whether a feature is supported by the target CPU. static bool IsSupported(CpuFeature f) { @@ -1228,20 +1148,15 @@ class Assembler : public Malloced { void RecordDebugBreakSlot(); // Record a comment relocation entry that can be used by a disassembler. - // Use --code-comments to enable. + // Use --debug_code to enable. void RecordComment(const char* msg); - // Writes a single byte or word of data in the code stream. Used for - // inline tables, e.g., jump-tables. - void db(uint8_t data); - void dd(uint32_t data); - int pc_offset() const { return pc_ - buffer_; } PositionsRecorder* positions_recorder() { return &positions_recorder_; } bool can_peephole_optimize(int instructions) { - if (!allow_peephole_optimization_) return false; + if (!FLAG_peephole_optimization) return false; if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false; return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize; } @@ -1270,8 +1185,6 @@ class Assembler : public Malloced { static bool IsLdrPcImmediateOffset(Instr instr); static bool IsNop(Instr instr, int type = NON_MARKING_NOP); - // Check if is time to emit a constant pool for pending reloc info entries - void CheckConstPool(bool force_emit, bool require_jump); protected: int buffer_space() const { return reloc_info_writer.pos() - pc_; } @@ -1288,6 +1201,9 @@ class Assembler : public Malloced { // Patch branch instruction at pos to branch to given branch target pos void target_at_put(int pos, int target_pos); + // Check if is time to emit a constant pool for pending reloc info entries + void CheckConstPool(bool force_emit, bool require_jump); + // Block the emission of the constant pool before pc_offset void BlockConstPoolBefore(int pc_offset) { if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset; @@ -1401,7 +1317,6 @@ class Assembler : public Malloced { friend class BlockConstPoolScope; PositionsRecorder positions_recorder_; - bool allow_peephole_optimization_; friend class PositionsRecorder; friend class EnsureSpace; }; diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc index 6480a9162..862ef395f 100644 --- a/src/arm/builtins-arm.cc +++ b/src/arm/builtins-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -31,8 +31,6 @@ #include "codegen-inl.h" #include "debug.h" -#include "deoptimizer.h" -#include "full-codegen.h" #include "runtime.h" namespace v8 { @@ -1091,80 +1089,6 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { } -void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { - // Enter an internal frame. - __ EnterInternalFrame(); - - // Preserve the function. - __ push(r1); - - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyRecompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore saved function. - __ pop(r1); - - // Tear down temporary frame. - __ LeaveInternalFrame(); - - // Do a tail-call of the compiled function. - __ Jump(r2); -} - - -static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, - Deoptimizer::BailoutType type) { - __ EnterInternalFrame(); - // Pass the function and deoptimization type to the runtime system. - __ mov(r0, Operand(Smi::FromInt(static_cast(type)))); - __ push(r0); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - __ LeaveInternalFrame(); - - // Get the full codegen state from the stack and untag it -> r6. - __ ldr(r6, MemOperand(sp, 0 * kPointerSize)); - __ SmiUntag(r6); - // Switch on the state. - Label with_tos_register, unknown_state; - __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS)); - __ b(ne, &with_tos_register); - __ add(sp, sp, Operand(1 * kPointerSize)); // Remove state. - __ Ret(); - - __ bind(&with_tos_register); - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); - __ cmp(r6, Operand(FullCodeGenerator::TOS_REG)); - __ b(ne, &unknown_state); - __ add(sp, sp, Operand(2 * kPointerSize)); // Remove state. - __ Ret(); - - __ bind(&unknown_state); - __ stop("no cases left"); -} - - -void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { - Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); -} - - -void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { - Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); -} - - -void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { - __ stop("builtins-arm.cc: NotifyOSR"); -} - - -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - __ stop("builtins-arm.cc: OnStackReplacement"); -} - - void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 1. Make sure we have at least one argument. // r0: actual number of arguments diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index 3670765a6..b42d627b9 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -82,15 +82,12 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { // write barrier because the allocated object is in new space. 
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); - __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); - // Initialize the code pointer in the function to be the one // found in the shared function info object. @@ -1091,10 +1088,6 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { Label not_heap_number; Register scratch = r7; - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(tos_, ip); - __ b(eq, &false_result); - // HeapNumber => false iff +0, -0, or NaN. __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); @@ -2207,14 +2200,6 @@ Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { } -Handle GetTypeRecordingBinaryOpStub(int key, - TRBinaryOpIC::TypeInfo type_info, - TRBinaryOpIC::TypeInfo result_type_info) { - UNIMPLEMENTED(); - return Handle::null(); -} - - void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Argument is a number and is on stack and in r0. Label runtime_call; @@ -2656,7 +2641,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // r0:r1: result // sp: stack pointer // fp: frame pointer - __ LeaveExitFrame(save_doubles_); + __ LeaveExitFrame(); // check if we should retry or throw exception Label retry; @@ -2705,7 +2690,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { // builtin once. // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(save_doubles_); + __ EnterExitFrame(); // r4: number of arguments (C callee-saved) // r5: pointer to builtin function (C callee-saved) @@ -2793,15 +2778,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Setup frame pointer for the frame to be pushed. __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); -#ifdef ENABLE_LOGGING_AND_PROFILING - // If this is the outermost JS call, set js_entry_sp value. - ExternalReference js_entry_sp(Top::k_js_entry_sp_address); - __ mov(r5, Operand(ExternalReference(js_entry_sp))); - __ ldr(r6, MemOperand(r5)); - __ cmp(r6, Operand(0, RelocInfo::NONE)); - __ str(fp, MemOperand(r5), eq); -#endif - // Call a faked try-block that does the invoke. __ bl(&invoke); @@ -2864,15 +2840,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // No need to restore registers __ add(sp, sp, Operand(StackHandlerConstants::kSize)); -#ifdef ENABLE_LOGGING_AND_PROFILING - // If current FP value is the same as js_entry_sp value, it means that - // the current function is the outermost. - __ mov(r5, Operand(ExternalReference(js_entry_sp))); - __ ldr(r6, MemOperand(r5)); - __ cmp(fp, Operand(r6)); - __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq); - __ str(r6, MemOperand(r5), eq); -#endif __ bind(&exit); // r0 holds result // Restore the top frame descriptors from the stack. 
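
A minimal C++ sketch of the invariant that the two ENABLE_LOGGING_AND_PROFILING blocks deleted from JSEntryStub::GenerateBody above maintain in assembly: js_entry_sp holds the frame pointer of the outermost JS entry frame, is set only when empty, and is cleared only by the frame that set it. The slot and function names below are illustrative stand-ins, not part of the patch:

    #include <cstdint>

    // Illustrative stand-in for the slot behind Top::k_js_entry_sp_address.
    static uintptr_t js_entry_sp = 0;  // 0 while no JS frame is on the stack.

    void OnJSEntry(uintptr_t fp) {
      // Mirrors the deleted entry sequence: store fp only if the slot is
      // still zero, i.e. this is the outermost JS call.
      if (js_entry_sp == 0) js_entry_sp = fp;
    }

    void OnJSExit(uintptr_t fp) {
      // Mirrors the deleted exit sequence: clear the slot only if this
      // frame is the one that set it.
      if (js_entry_sp == fp) js_entry_sp = 0;
    }
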
@@ -3463,95 +3430,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { } -void RegExpConstructResultStub::Generate(MacroAssembler* masm) { - const int kMaxInlineLength = 100; - Label slowcase; - Label done; - __ ldr(r1, MemOperand(sp, kPointerSize * 2)); - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize == 1); - __ tst(r1, Operand(kSmiTagMask)); - __ b(ne, &slowcase); - __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); - __ b(hi, &slowcase); - // Smi-tagging is equivalent to multiplying by 2. - // Allocate RegExpResult followed by FixedArray with size in ebx. - // JSArray: [Map][empty properties][Elements][Length-smi][index][input] - // Elements: [Map][Length][..elements..] - // Size of JSArray with two in-object properties and the header of a - // FixedArray. - int objects_size = - (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; - __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); - __ add(r2, r5, Operand(objects_size)); - __ AllocateInNewSpace( - r2, // In: Size, in words. - r0, // Out: Start of allocation (tagged). - r3, // Scratch register. - r4, // Scratch register. - &slowcase, - static_cast(TAG_OBJECT | SIZE_IN_WORDS)); - // r0: Start of allocated area, object-tagged. - // r1: Number of elements in array, as smi. - // r5: Number of elements, untagged. - - // Set JSArray map to global.regexp_result_map(). - // Set empty properties FixedArray. - // Set elements to point to FixedArray allocated right after the JSArray. - // Interleave operations for better latency. - __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); - __ add(r3, r0, Operand(JSRegExpResult::kSize)); - __ mov(r4, Operand(Factory::empty_fixed_array())); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); - __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); - __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); - __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - - // Set input, index and length fields from arguments. - __ ldr(r1, MemOperand(sp, kPointerSize * 0)); - __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); - __ ldr(r1, MemOperand(sp, kPointerSize * 1)); - __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); - __ ldr(r1, MemOperand(sp, kPointerSize * 2)); - __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); - - // Fill out the elements FixedArray. - // r0: JSArray, tagged. - // r3: FixedArray, tagged. - // r5: Number of elements in array, untagged. - - // Set map. - __ mov(r2, Operand(Factory::fixed_array_map())); - __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); - // Set FixedArray length. - __ mov(r6, Operand(r5, LSL, kSmiTagSize)); - __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); - // Fill contents of fixed-array with the-hole. - __ mov(r2, Operand(Factory::the_hole_value())); - __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - // Fill fixed array elements with hole. - // r0: JSArray, tagged. - // r2: the hole. - // r3: Start of elements in FixedArray. - // r5: Number of elements to fill. - Label loop; - __ tst(r5, Operand(r5)); - __ bind(&loop); - __ b(le, &done); // Jump if r1 is negative or zero. 
- __ sub(r5, r5, Operand(1), SetCC); - __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); - __ jmp(&loop); - - __ bind(&done); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - __ bind(&slowcase); - __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); -} - - void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow; @@ -4844,123 +4722,6 @@ void StringAddStub::Generate(MacroAssembler* masm) { } -void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMIS); - Label miss; - __ orr(r2, r1, r0); - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, &miss); - - if (GetCondition() == eq) { - // For equality we do not care about the sign of the result. - __ sub(r0, r0, r1, SetCC); - } else { - __ sub(r1, r1, r0, SetCC); - // Correct sign of result in case of overflow. - __ rsb(r1, r1, Operand(0), SetCC, vs); - __ mov(r0, r1); - } - __ Ret(); - - __ bind(&miss); - GenerateMiss(masm); -} - - -void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBERS); - - Label generic_stub; - Label unordered; - Label miss; - __ and_(r2, r1, Operand(r0)); - __ tst(r2, Operand(kSmiTagMask)); - __ b(eq, &generic_stub); - - __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); - __ b(ne, &miss); - __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); - __ b(ne, &miss); - - // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or VFP3 is unsupported. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - - // Load left and right operand - __ sub(r2, r1, Operand(kHeapObjectTag)); - __ vldr(d0, r2, HeapNumber::kValueOffset); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vldr(d1, r2, HeapNumber::kValueOffset); - - // Compare operands - __ vcmp(d0, d1); - __ vmrs(pc); // Move vector status bits to normal status bits. - - // Don't base result on status bits when a NaN is involved. - __ b(vs, &unordered); - - // Return a result of -1, 0, or 1, based on status bits. - __ mov(r0, Operand(EQUAL), LeaveCC, eq); - __ mov(r0, Operand(LESS), LeaveCC, lt); - __ mov(r0, Operand(GREATER), LeaveCC, gt); - __ Ret(); - - __ bind(&unordered); - } - - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); - __ bind(&generic_stub); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); - - __ bind(&miss); - GenerateMiss(masm); -} - - -void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECTS); - Label miss; - __ and_(r2, r1, Operand(r0)); - __ tst(r2, Operand(kSmiTagMask)); - __ b(eq, &miss); - - __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); - __ b(ne, &miss); - __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); - __ b(ne, &miss); - - ASSERT(GetCondition() == eq); - __ sub(r0, r0, Operand(r1)); - __ Ret(); - - __ bind(&miss); - GenerateMiss(masm); -} - - -void ICCompareStub::GenerateMiss(MacroAssembler* masm) { - __ Push(r1, r0); - __ push(lr); - - // Call the runtime system in a fresh internal frame. - ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss)); - __ EnterInternalFrame(); - __ Push(r1, r0); - __ mov(ip, Operand(Smi::FromInt(op_))); - __ push(ip); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); - // Compute the entry point of the rewritten stub. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore registers. 
- __ pop(lr); - __ pop(r0); - __ pop(r1); - __ Jump(r2); -} - - #undef __ } } // namespace v8::internal diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h index 8ffca773f..2e07e3b5c 100644 --- a/src/arm/code-stubs-arm.h +++ b/src/arm/code-stubs-arm.h @@ -106,9 +106,9 @@ class GenericBinaryOpStub : public CodeStub { // Minor key encoding in 17 bits. class ModeBits: public BitField {}; class OpBits: public BitField {}; - class TypeInfoBits: public BitField {}; - class RegisterBits: public BitField {}; - class KnownIntBits: public BitField {}; + class TypeInfoBits: public BitField {}; + class RegisterBits: public BitField {}; + class KnownIntBits: public BitField {}; Major MajorKey() { return GenericBinaryOp; } int MinorKey() { @@ -196,10 +196,6 @@ class GenericBinaryOpStub : public CodeStub { const char* GetName(); - virtual void FinishCode(Code* code) { - code->set_binary_op_type(runtime_operands_type_); - } - #ifdef DEBUG void Print() { if (!specialized_on_rhs_) { diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index 2b0b324e5..ea57d2d4e 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -36,7 +36,7 @@ #include "debug.h" #include "ic-inl.h" #include "jsregexp.h" -#include "jump-target-inl.h" +#include "jump-target-light-inl.h" #include "parser.h" #include "regexp-macro-assembler.h" #include "regexp-stack.h" @@ -79,12 +79,12 @@ void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { } -void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { +void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { masm->EnterInternalFrame(); } -void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { +void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { masm->LeaveInternalFrame(); } @@ -165,9 +165,6 @@ void CodeGenerator::Generate(CompilationInfo* info) { int slots = scope()->num_parameters() + scope()->num_stack_slots(); ScopedVector type_info_array(slots); - for (int i = 0; i < slots; i++) { - type_info_array[i] = TypeInfo::Unknown(); - } type_info_ = &type_info_array; ASSERT(allocator_ == NULL); @@ -5419,14 +5416,97 @@ void CodeGenerator::GenerateRegExpExec(ZoneList* args) { void CodeGenerator::GenerateRegExpConstructResult(ZoneList* args) { + // No stub. This code only occurs a few times in regexp.js. + const int kMaxInlineLength = 100; ASSERT_EQ(3, args->length()); - Load(args->at(0)); // Size of array, smi. Load(args->at(1)); // "index" property value. Load(args->at(2)); // "input" property value. - RegExpConstructResultStub stub; - frame_->SpillAll(); - frame_->CallStub(&stub, 3); + { + VirtualFrame::SpilledScope spilled_scope(frame_); + Label slowcase; + Label done; + __ ldr(r1, MemOperand(sp, kPointerSize * 2)); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + __ tst(r1, Operand(kSmiTagMask)); + __ b(ne, &slowcase); + __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); + __ b(hi, &slowcase); + // Smi-tagging is equivalent to multiplying by 2. + // Allocate RegExpResult followed by FixedArray with size in ebx. + // JSArray: [Map][empty properties][Elements][Length-smi][index][input] + // Elements: [Map][Length][..elements..] + // Size of JSArray with two in-object properties and the header of a + // FixedArray. + int objects_size = + (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; + __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); + __ add(r2, r5, Operand(objects_size)); + __ AllocateInNewSpace( + r2, // In: Size, in words. 
+ r0, // Out: Start of allocation (tagged). + r3, // Scratch register. + r4, // Scratch register. + &slowcase, + static_cast(TAG_OBJECT | SIZE_IN_WORDS)); + // r0: Start of allocated area, object-tagged. + // r1: Number of elements in array, as smi. + // r5: Number of elements, untagged. + + // Set JSArray map to global.regexp_result_map(). + // Set empty properties FixedArray. + // Set elements to point to FixedArray allocated right after the JSArray. + // Interleave operations for better latency. + __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ add(r3, r0, Operand(JSRegExpResult::kSize)); + __ mov(r4, Operand(Factory::empty_fixed_array())); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); + __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); + __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); + __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + + // Set input, index and length fields from arguments. + __ ldm(ia_w, sp, static_cast(r2.bit() | r4.bit())); + __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); + __ add(sp, sp, Operand(kPointerSize)); + __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); + __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); + + // Fill out the elements FixedArray. + // r0: JSArray, tagged. + // r3: FixedArray, tagged. + // r5: Number of elements in array, untagged. + + // Set map. + __ mov(r2, Operand(Factory::fixed_array_map())); + __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + // Set FixedArray length. + __ mov(r6, Operand(r5, LSL, kSmiTagSize)); + __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); + // Fill contents of fixed-array with the-hole. + __ mov(r2, Operand(Factory::the_hole_value())); + __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + // Fill fixed array elements with hole. + // r0: JSArray, tagged. + // r2: the hole. + // r3: Start of elements in FixedArray. + // r5: Number of elements to fill. + Label loop; + __ tst(r5, Operand(r5)); + __ bind(&loop); + __ b(le, &done); // Jump if r1 is negative or zero. + __ sub(r5, r5, Operand(1), SetCC); + __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); + __ jmp(&loop); + + __ bind(&slowcase); + __ CallRuntime(Runtime::kRegExpConstructResult, 3); + + __ bind(&done); + } + frame_->Forget(3); frame_->EmitPush(r0); } diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h index 589e704b5..4e672b252 100644 --- a/src/arm/codegen-arm.h +++ b/src/arm/codegen-arm.h @@ -209,9 +209,6 @@ class CodeGenerator: public AstVisitor { Code::Flags flags, CompilationInfo* info); - // Print the code after compiling it. - static void PrintCode(Handle code, CompilationInfo* info); - #ifdef ENABLE_LOGGING_AND_PROFILING static bool ShouldGenerateLog(Expression* type); #endif @@ -308,9 +305,8 @@ class CodeGenerator: public AstVisitor { // Node visitors. 
void VisitStatements(ZoneList* statements); - virtual void VisitSlot(Slot* node); #define DEF_VISIT(type) \ - virtual void Visit##type(type* node); + void Visit##type(type* node); AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT @@ -583,7 +579,6 @@ class CodeGenerator: public AstVisitor { friend class FastCodeGenerator; friend class FullCodeGenerator; friend class FullCodeGenSyntaxChecker; - friend class LCodeGen; DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc index b359dce66..e998b6f59 100644 --- a/src/arm/cpu-arm.cc +++ b/src/arm/cpu-arm.cc @@ -42,10 +42,7 @@ namespace v8 { namespace internal { void CPU::Setup() { - CpuFeatures::Probe(true); - if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) { - V8::DisableCrankshaft(); - } + CpuFeatures::Probe(); } diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc deleted file mode 100644 index e7a669dce..000000000 --- a/src/arm/deoptimizer-arm.cc +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "safepoint-table.h" - -namespace v8 { -namespace internal { - -int Deoptimizer::table_entry_size_ = 16; - -void Deoptimizer::DeoptimizeFunction(JSFunction* function) { - AssertNoAllocation no_allocation; - - if (!function->IsOptimized()) return; - - // Get the optimized code. - Code* code = function->code(); - - // Invalidate the relocation information, as it will become invalid by the - // code patching below, and is not needed any more. - code->InvalidateRelocation(); - - // For each return after a safepoint insert an absolute call to the - // corresponding deoptimization entry. 
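// For reference, the deoptimization entries targeted by the patching loop
// below form a dense table of fixed-size stubs: table_entry_size_ is 16
// bytes, i.e. four ARM instructions, which matches the code emitted per entry
// by TableEntryGenerator::GeneratePrologue further down. Resolving an entry
// is therefore plain pointer arithmetic; a minimal sketch, assuming
// entry_base points at the first stub of the table:
//
//   Address EntryFor(Address entry_base, int deoptimization_index) {
//     return entry_base + deoptimization_index * 16;  // table_entry_size_
//   }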
- unsigned last_pc_offset = 0; - SafepointTable table(function->code()); - for (unsigned i = 0; i < table.length(); i++) { - unsigned pc_offset = table.GetPcOffset(i); - int deoptimization_index = table.GetDeoptimizationIndex(i); - int gap_code_size = table.GetGapCodeSize(i); - // Check that we did not shoot past next safepoint. - // TODO(srdjan): How do we guarantee that safepoint code does not - // overlap other safepoint patching code? - CHECK(pc_offset >= last_pc_offset); -#ifdef DEBUG - // Destroy the code which is not supposed to be run again. - int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize; - CodePatcher destroyer(code->instruction_start() + last_pc_offset, - instructions); - for (int x = 0; x < instructions; x++) { - destroyer.masm()->bkpt(0); - } -#endif - last_pc_offset = pc_offset; - if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { - const int kCallInstructionSizeInWords = 3; - CodePatcher patcher(code->instruction_start() + pc_offset + gap_code_size, - kCallInstructionSizeInWords); - Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry( - deoptimization_index, Deoptimizer::LAZY); - patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE); - last_pc_offset += - gap_code_size + kCallInstructionSizeInWords * Assembler::kInstrSize; - } - } - - -#ifdef DEBUG - // Destroy the code which is not supposed to be run again. - int instructions = - (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize; - CodePatcher destroyer(code->instruction_start() + last_pc_offset, - instructions); - for (int x = 0; x < instructions; x++) { - destroyer.masm()->bkpt(0); - } -#endif - - // Add the deoptimizing code to the list. - DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - node->set_next(deoptimizing_code_list_); - deoptimizing_code_list_ = node; - - // Set the code for the function to non-optimized version. - function->ReplaceCode(function->shared()->code()); - - if (FLAG_trace_deopt) { - PrintF("[forced deoptimization: "); - function->PrintName(); - PrintF(" / %x]\n", reinterpret_cast(function)); - } -} - - -void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo, - Code* replacement_code) { - UNIMPLEMENTED(); -} - - -void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) { - UNIMPLEMENTED(); -} - - -void Deoptimizer::DoComputeOsrOutputFrame() { - UNIMPLEMENTED(); -} - - -// This code is very similar to ia32 code, but relies on register names (fp, sp) -// and how the frame is laid out. -void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, - int frame_index) { - // Read the ast node id, function, and frame height for this output frame. - Translation::Opcode opcode = - static_cast(iterator->Next()); - USE(opcode); - ASSERT(Translation::FRAME == opcode); - int node_id = iterator->Next(); - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating "); - function->PrintName(); - PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes); - } - - // The 'fixed' part of the frame consists of the incoming parameters and - // the part described by JavaScriptFrameConstants. - unsigned fixed_frame_size = ComputeFixedSize(function); - unsigned input_frame_size = input_->GetFrameSize(); - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. 
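// Worked example of the sizes involved, assuming ComputeFixedSize covers the
// incoming parameters plus the four slots synthesized below (caller's pc,
// caller's fp, context and function): for a function with two formal
// parameters (parameter_count == 3 once the receiver is included), a frame
// height of 3 expression-stack slots and kPointerSize == 4:
//
//   fixed_frame_size  = (3 + 4) * 4 = 28 bytes
//   height_in_bytes   = 3 * 4       = 12 bytes
//   output_frame_size = 12 + 28     = 40 bytes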
- FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - - bool is_bottommost = (0 == frame_index); - bool is_topmost = (output_count_ - 1 == frame_index); - ASSERT(frame_index >= 0 && frame_index < output_count_); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address for the bottommost output frame can be computed from - // the input frame pointer and the output frame's height. For all - // subsequent output frames, it can be computed from the previous one's - // top address and the current frame's size. - uint32_t top_address; - if (is_bottommost) { - // 2 = context and function in the frame. - top_address = - input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes; - } else { - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - } - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = function->shared()->formal_parameter_count() + 1; - unsigned output_offset = output_frame_size; - unsigned input_offset = input_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - input_offset -= (parameter_count * kPointerSize); - - // There are no translation commands for the caller's pc and fp, the - // context, and the function. Synthesize their values and set them up - // explicitly. - // - // The caller's pc for the bottommost output frame is the same as in the - // input frame. For all subsequent output frames, it can be read from the - // previous one. This frame's pc can be computed from the non-optimized - // function code and AST id of the bailout. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - uint32_t value; - if (is_bottommost) { - value = input_->GetFrameSlot(input_offset); - } else { - value = output_[frame_index - 1]->GetPc(); - } - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, value); - } - - // The caller's frame pointer for the bottommost output frame is the same - // as in the input frame. For all subsequent output frames, it can be - // read from the previous one. Also compute and set this frame's frame - // pointer. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - if (is_bottommost) { - value = input_->GetFrameSlot(input_offset); - } else { - value = output_[frame_index - 1]->GetFp(); - } - output_frame->SetFrameSlot(output_offset, value); - unsigned fp_value = top_address + output_offset; - ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value); - output_frame->SetFp(fp_value); - if (is_topmost) { - output_frame->SetRegister(fp.code(), fp_value); - } - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - value = reinterpret_cast(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. 
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); - output_frame->SetFrameSlot(output_offset, value); - if (is_topmost) { - output_frame->SetRegister(cp.code(), value); - } - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", - top_address + output_offset, output_offset, value); - } - - // The function was mentioned explicitly in the BEGIN_FRAME. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - value = reinterpret_cast(function); - // The function for the bottommost output frame should also agree with the - // input frame. - ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", - top_address + output_offset, output_offset, value); - } - - // Translate the rest of the frame. - for (unsigned i = 0; i < height; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - ASSERT(0 == output_offset); - - // Compute this frame's PC, state, and continuation. - Code* non_optimized_code = function->shared()->code(); - FixedArray* raw_data = non_optimized_code->deoptimization_data(); - DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data); - Address start = non_optimized_code->instruction_start(); - unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared()); - unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state); - uint32_t pc_value = reinterpret_cast(start + pc_offset); - output_frame->SetPc(pc_value); - if (is_topmost) { - output_frame->SetRegister(pc.code(), pc_value); - } - - FullCodeGenerator::State state = - FullCodeGenerator::StateField::decode(pc_and_state); - output_frame->SetState(Smi::FromInt(state)); - - // Set the continuation for the topmost frame. - if (is_topmost) { - Code* continuation = (bailout_type_ == EAGER) - ? Builtins::builtin(Builtins::NotifyDeoptimized) - : Builtins::builtin(Builtins::NotifyLazyDeoptimized); - output_frame->SetContinuation( - reinterpret_cast(continuation->entry())); - } - - if (output_count_ - 1 == frame_index) iterator->Done(); -} - - -#define __ masm()-> - - -// This code tries to be close to ia32 code so that any changes can be -// easily ported. -void Deoptimizer::EntryGenerator::Generate() { - GeneratePrologue(); - // TOS: bailout-id; TOS+1: return address if not EAGER. - CpuFeatures::Scope scope(VFP3); - // Save all general purpose registers before messing with them. - const int kNumberOfRegisters = Register::kNumRegisters; - - // Everything but pc, lr and ip which will be saved but not restored. - RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit(); - - const int kDoubleRegsSize = - kDoubleSize * DwVfpRegister::kNumAllocatableRegisters; - - // Save all general purpose registers before messing with them. - __ sub(sp, sp, Operand(kDoubleRegsSize)); - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) { - DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i); - int offset = i * kDoubleSize; - __ vstr(vfp_reg, sp, offset); - } - - // Push all 16 registers (needed to populate FrameDescription::registers_). - __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); - - const int kSavedRegistersAreaSize = - (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; - - // Get the bailout id from the stack. 
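// Stack picture at this point, growing upwards from sp (the lazy, non-EAGER
// case has one extra slot for the return address):
//
//   [sp + 0]                        16 saved core registers (16 words)
//   [sp + 16 * kPointerSize]        saved VFP double registers
//   [sp + kSavedRegistersAreaSize]  bailout id
//   [sp + kSavedRegistersAreaSize
//       + kPointerSize]             return address (lazy deoptimization only)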
- __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize)); - - // Get the address of the location in the code object if possible (r3) (return - // address for lazy deoptimization) and compute the fp-to-sp delta in - // register r4. - if (type() == EAGER) { - __ mov(r3, Operand(0)); - // Correct one word for bailout id. - __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); - } else { - __ mov(r3, lr); - // Correct two words for bailout id and return address. - __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); - } - __ sub(r4, fp, r4); - - // Allocate a new deoptimizer object. - // Pass four arguments in r0 to r3 and fifth argument on stack. - __ PrepareCallCFunction(5, r5); - __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ mov(r1, Operand(type())); // bailout type, - // r2: bailout id already loaded. - // r3: code address or 0 already loaded. - __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta. - // Call Deoptimizer::New(). - __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); - - // Preserve "deoptimizer" object in register r0 and get the input - // frame descriptor pointer to r1 (deoptimizer->input_); - __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); - - - // Copy core registers into FrameDescription::registers_[kNumRegisters]. - ASSERT(Register::kNumRegisters == kNumberOfRegisters); - for (int i = 0; i < kNumberOfRegisters; i++) { - int offset = (i * kIntSize) + FrameDescription::registers_offset(); - __ ldr(r2, MemOperand(sp, i * kPointerSize)); - __ str(r2, MemOperand(r1, offset)); - } - - // Copy VFP registers to - // double_registers_[DoubleRegister::kNumAllocatableRegisters] - int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; - __ vldr(d0, sp, src_offset); - __ vstr(d0, r1, dst_offset); - } - - // Remove the bailout id, eventually return address, and the saved registers - // from the stack. - if (type() == EAGER) { - __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); - } else { - __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); - } - - // Compute a pointer to the unwinding limit in register r2; that is - // the first stack slot not part of the input frame. - __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset())); - __ add(r2, r2, sp); - - // Unwind the stack down to - but not including - the unwinding - // limit and copy the contents of the activation frame to the input - // frame description. - __ add(r3, r1, Operand(FrameDescription::frame_content_offset())); - Label pop_loop; - __ bind(&pop_loop); - __ pop(r4); - __ str(r4, MemOperand(r3, 0)); - __ add(r3, r3, Operand(sizeof(uint32_t))); - __ cmp(r2, sp); - __ b(ne, &pop_loop); - - // Compute the output frame in the deoptimizer. - __ push(r0); // Preserve deoptimizer object across call. - // r0: deoptimizer object; r1: scratch. - __ PrepareCallCFunction(1, r1); - // Call Deoptimizer::ComputeOutputFrames(). - __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); - __ pop(r0); // Restore deoptimizer object (class Deoptimizer). - - // Replace the current (input) frame with the output frames. - Label outer_push_loop, inner_push_loop; - // Outer loop state: r0 = current "FrameDescription** output_", - // r1 = one past the last FrameDescription**. 
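// The two assembler loops below correspond to this C-style sketch (the
// frame_size and frame_content accessors are illustrative stand-ins for the
// offsets used in the assembly; contents are pushed from the high end of each
// frame downwards, matching the decrementing r3):
//
//   for (FrameDescription** frame = output_;
//        frame != output_ + output_count_; ++frame) {
//     for (intptr_t offset = (*frame)->frame_size(); offset > 0; ) {
//       offset -= sizeof(uint32_t);
//       push((*frame)->frame_content(offset));
//     }
//   }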
- __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset())); - __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_. - __ add(r1, r0, Operand(r1, LSL, 2)); - __ bind(&outer_push_loop); - // Inner loop state: r2 = current FrameDescription*, r3 = loop index. - __ ldr(r2, MemOperand(r0, 0)); // output_[ix] - __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); - __ bind(&inner_push_loop); - __ sub(r3, r3, Operand(sizeof(uint32_t))); - // __ add(r6, r2, Operand(r3, LSL, 1)); - __ add(r6, r2, Operand(r3)); - __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset())); - __ push(r7); - __ cmp(r3, Operand(0)); - __ b(ne, &inner_push_loop); // test for gt? - __ add(r0, r0, Operand(kPointerSize)); - __ cmp(r0, r1); - __ b(lt, &outer_push_loop); - - // In case of OSR, we have to restore the XMM registers. - if (type() == OSR) { - UNIMPLEMENTED(); - } - - // Push state, pc, and continuation from the last output frame. - if (type() != OSR) { - __ ldr(r6, MemOperand(r2, FrameDescription::state_offset())); - __ push(r6); - } - - __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset())); - __ push(r6); - __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset())); - __ push(r6); - - // Push the registers from the last output frame. - for (int i = kNumberOfRegisters - 1; i >= 0; i--) { - int offset = (i * kIntSize) + FrameDescription::registers_offset(); - __ ldr(r6, MemOperand(r2, offset)); - __ push(r6); - } - - // Restore the registers from the stack. - __ ldm(ia_w, sp, restored_regs); // all but pc registers. - __ pop(ip); // remove sp - __ pop(ip); // remove lr - - // Set up the roots register. - ExternalReference roots_address = ExternalReference::roots_address(); - __ mov(r10, Operand(roots_address)); - - __ pop(ip); // remove pc - __ pop(r7); // get continuation, leave pc on stack - __ pop(lr); - __ Jump(r7); - __ stop("Unreachable."); -} - - -void Deoptimizer::TableEntryGenerator::GeneratePrologue() { - // Create a sequence of deoptimization entries. Note that any - // registers may be still live. - Label done; - for (int i = 0; i < count(); i++) { - int start = masm()->pc_offset(); - USE(start); - if (type() == EAGER) { - __ nop(); - } else { - // Emulate ia32 like call by pushing return address to stack. - __ push(lr); - } - __ mov(ip, Operand(i)); - __ push(ip); - __ b(&done); - ASSERT(masm()->pc_offset() - start == table_entry_size_); - } - __ bind(&done); -} - -#undef __ - -} } // namespace v8::internal diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc index d2726cfcf..b0c099030 100644 --- a/src/arm/frames-arm.cc +++ b/src/arm/frames-arm.cc @@ -38,12 +38,7 @@ namespace internal { Address ExitFrame::ComputeStackPointer(Address fp) { - Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset); - Address sp = fp + ExitFrameConstants::kSPOffset; - if (marker == NULL) { - sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize; - } - return sp; + return fp + ExitFrameConstants::kSPOffset; } diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h index 00c20efa8..5847a6a2a 100644 --- a/src/arm/frames-arm.h +++ b/src/arm/frames-arm.h @@ -74,18 +74,6 @@ static const RegList kCalleeSaved = static const int kNumCalleeSaved = 7 + kR9Available; -// Number of registers for which space is reserved in safepoints. Must be a -// multiple of 8. -// TODO(regis): Only 8 registers may actually be sufficient. Revisit. 
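// Concretely, assuming kNumJSCallerSaved == 4 (r0 through r3): the registers
// actually saved number 4 + (7 + kR9Available), i.e. 11 or 12, which is why
// the reserved block below is padded to 16, the next multiple of 8.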
-static const int kNumSafepointRegisters = 16; - -// Define the list of registers actually saved at safepoints. -// Note that the number of saved registers may be smaller than the reserved -// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters. -static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved; -static const int kNumSafepointSavedRegisters = - kNumJSCallerSaved + kNumCalleeSaved; - // ---------------------------------------------------- @@ -111,9 +99,7 @@ class ExitFrameConstants : public AllStatic { static const int kCodeOffset = -1 * kPointerSize; static const int kSPOffset = -1 * kPointerSize; - // TODO(regis): Use a patched sp value on the stack instead. - // A marker of 0 indicates that double registers are saved. - static const int kMarkerOffset = -2 * kPointerSize; + static const int kSavedRegistersOffset = 0 * kPointerSize; // The caller fields are below the frame pointer on the stack. static const int kCallerFPOffset = +0 * kPointerSize; diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index 7e4a28042..633b5b4d1 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -171,20 +171,21 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { } } - if (FLAG_trace) { - __ CallRuntime(Runtime::kTraceEnter, 0); - } - // Check the stack for overflow or break request. { Comment cmnt(masm_, "[ Stack check"); - PrepareForBailout(info->function(), NO_REGISTERS); - Label ok; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); + __ LoadRoot(r2, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(r2)); StackCheckStub stub; - __ CallStub(&stub); - __ bind(&ok); + __ mov(ip, + Operand(reinterpret_cast(stub.GetCode().location()), + RelocInfo::CODE_TARGET), + LeaveCC, + lo); + __ Call(ip, lo); + } + + if (FLAG_trace) { + __ CallRuntime(Runtime::kTraceEnter, 0); } { Comment cmnt(masm_, "[ Body"); @@ -199,25 +200,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); } EmitReturnSequence(); - - // Force emit the constant pool, so it doesn't get emitted in the middle - // of the stack check table. - masm()->CheckConstPool(true, false); -} - - -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { - Comment cmnt(masm_, "[ Stack check"); - Label ok; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); - StackCheckStub stub; - __ CallStub(&stub); - __ bind(&ok); - PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); - PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS); - RecordStackCheck(stmt->OsrEntryId()); } @@ -293,7 +275,6 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const { void FullCodeGenerator::TestContext::Plug(Slot* slot) const { // For simplicity we always test the accumulator register. 
codegen()->Move(result_register(), slot); - codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL); codegen()->DoTest(true_label_, false_label_, fall_through_); } @@ -316,16 +297,12 @@ void FullCodeGenerator::StackValueContext::Plug( void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const { - codegen()->PrepareForBailoutBeforeSplit(TOS_REG, - true, - true_label_, - false_label_); if (index == Heap::kUndefinedValueRootIndex || index == Heap::kNullValueRootIndex || index == Heap::kFalseValueRootIndex) { - if (false_label_ != fall_through_) __ b(false_label_); + __ b(false_label_); } else if (index == Heap::kTrueValueRootIndex) { - if (true_label_ != fall_through_) __ b(true_label_); + __ b(true_label_); } else { __ LoadRoot(result_register(), index); codegen()->DoTest(true_label_, false_label_, fall_through_); @@ -344,34 +321,29 @@ void FullCodeGenerator::AccumulatorValueContext::Plug( void FullCodeGenerator::StackValueContext::Plug(Handle lit) const { - // Immediates cannot be pushed directly. + // Immediates can be pushed directly. __ mov(result_register(), Operand(lit)); __ push(result_register()); } void FullCodeGenerator::TestContext::Plug(Handle lit) const { - codegen()->PrepareForBailoutBeforeSplit(TOS_REG, - true, - true_label_, - false_label_); ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals. if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { - if (false_label_ != fall_through_) __ b(false_label_); + __ b(false_label_); } else if (lit->IsTrue() || lit->IsJSObject()) { - if (true_label_ != fall_through_) __ b(true_label_); + __ b(true_label_); } else if (lit->IsString()) { if (String::cast(*lit)->length() == 0) { - if (false_label_ != fall_through_) __ b(false_label_); __ b(false_label_); } else { - if (true_label_ != fall_through_) __ b(true_label_); + __ b(true_label_); } } else if (lit->IsSmi()) { if (Smi::cast(*lit)->value() == 0) { - if (false_label_ != fall_through_) __ b(false_label_); + __ b(false_label_); } else { - if (true_label_ != fall_through_) __ b(true_label_); + __ b(true_label_); } } else { // For simplicity we always test the accumulator register. @@ -411,14 +383,13 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, // For simplicity we always test the accumulator register. 
__ Drop(count); __ Move(result_register(), reg); - codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL); codegen()->DoTest(true_label_, false_label_, fall_through_); } void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == materialize_false); + ASSERT_EQ(materialize_true, materialize_false); __ bind(materialize_true); } @@ -453,8 +424,8 @@ void FullCodeGenerator::StackValueContext::Plug( void FullCodeGenerator::TestContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == true_label_); ASSERT(materialize_false == false_label_); + ASSERT(materialize_true == true_label_); } @@ -478,10 +449,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const { void FullCodeGenerator::TestContext::Plug(bool flag) const { - codegen()->PrepareForBailoutBeforeSplit(TOS_REG, - true, - true_label_, - false_label_); if (flag) { if (true_label_ != fall_through_) __ b(true_label_); } else { @@ -562,33 +529,6 @@ void FullCodeGenerator::Move(Slot* dst, } -void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, - bool should_normalize, - Label* if_true, - Label* if_false) { - // Only prepare for bailouts before splits if we're in a test - // context. Otherwise, we let the Visit function deal with the - // preparation to avoid preparing with the same AST id twice. - if (!context()->IsTest() || !info_->IsOptimizable()) return; - - Label skip; - if (should_normalize) __ b(&skip); - - ForwardBailoutStack* current = forward_bailout_stack_; - while (current != NULL) { - PrepareForBailout(current->expr(), state); - current = current->parent(); - } - - if (should_normalize) { - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r0, ip); - Split(eq, if_true, if_false, NULL); - __ bind(&skip); - } -} - - void FullCodeGenerator::EmitDeclaration(Variable* variable, Variable::Mode mode, FunctionLiteral* function) { @@ -711,8 +651,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Keep the switch value on the stack until a case matches. VisitForStackValue(stmt->tag()); - PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); - ZoneList* clauses = stmt->cases(); CaseClause* default_clause = NULL; // Can occur anywhere in the list. @@ -778,7 +716,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { } __ bind(nested_statement.break_target()); - PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); } @@ -893,18 +830,26 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { EmitAssignment(stmt->each()); // Generate code for the body of the loop. + Label stack_limit_hit, stack_check_done; Visit(stmt->body()); + __ StackLimitCheck(&stack_limit_hit); + __ bind(&stack_check_done); + // Generate code for the going to the next element by incrementing // the index (smi) stored on top of the stack. __ bind(loop_statement.continue_target()); __ pop(r0); __ add(r0, r0, Operand(Smi::FromInt(1))); __ push(r0); - - EmitStackCheck(stmt); __ b(&loop); + // Slow case for the stack limit check. + StackCheckStub stack_check_stub; + __ bind(&stack_limit_hit); + __ CallStub(&stack_check_stub); + __ b(&stack_check_done); + // Remove the pointers stored on the stack. __ bind(loop_statement.break_target()); __ Drop(5); @@ -1250,15 +1195,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { // Fall through. 
case ObjectLiteral::Property::COMPUTED: if (key->handle()->IsSymbol()) { + VisitForAccumulatorValue(value); + __ mov(r2, Operand(key->handle())); + __ ldr(r1, MemOperand(sp)); if (property->emit_store()) { - VisitForAccumulatorValue(value); - __ mov(r2, Operand(key->handle())); - __ ldr(r1, MemOperand(sp)); Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); - PrepareForBailoutForId(key->id(), NO_REGISTERS); - } else { - VisitForEffect(value); } break; } @@ -1353,8 +1295,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { // Update the write barrier for the array store with r0 as the scratch // register. __ RecordWrite(r1, Operand(offset), r2, result_register()); - - PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } if (result_saved) { @@ -1401,27 +1341,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { break; case KEYED_PROPERTY: if (expr->is_compound()) { - if (property->is_arguments_access()) { - VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); - __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0)); - __ push(r0); - __ mov(r0, Operand(property->key()->AsLiteral()->handle())); - } else { - VisitForStackValue(property->obj()); - VisitForAccumulatorValue(property->key()); - } + VisitForStackValue(property->obj()); + VisitForAccumulatorValue(property->key()); __ ldr(r1, MemOperand(sp, 0)); __ push(r0); } else { - if (property->is_arguments_access()) { - VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); - __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0)); - __ mov(r0, Operand(property->key()->AsLiteral()->handle())); - __ Push(r1, r0); - } else { - VisitForStackValue(property->obj()); - VisitForStackValue(property->key()); - } + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); } break; } @@ -1441,12 +1367,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } } - // For property compound assignments we need another deoptimization - // point after the property load. - if (property != NULL) { - PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG); - } - Token::Value op = expr->binary_op(); ConstantOperand constant = ShouldInlineSmiCase(op) ? GetConstantOperand(op, expr->target(), expr->value()) @@ -1472,9 +1392,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } else { EmitBinaryOp(op, mode); } - - // Deoptimization point in case the binary operation may have side effects. - PrepareForBailout(expr->binary_operation(), TOS_REG); } else { VisitForAccumulatorValue(expr->value()); } @@ -1759,14 +1676,13 @@ void FullCodeGenerator::VisitProperty(Property* expr) { if (key->IsPropertyName()) { VisitForAccumulatorValue(expr->obj()); EmitNamedPropertyLoad(expr); - context()->Plug(r0); } else { VisitForStackValue(expr->obj()); VisitForAccumulatorValue(expr->key()); __ pop(r1); EmitKeyedPropertyLoad(expr); - context()->Plug(r0); } + context()->Plug(r0); } void FullCodeGenerator::EmitCallWithIC(Call* expr, @@ -1787,7 +1703,6 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr, InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; Handle ic = StubCache::ComputeCallInitialize(arg_count, in_loop); EmitCallIC(ic, mode); - RecordJSReturnSite(expr); // Restore context register. 
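// (cp is the JS context register on ARM; the call above may have installed a
// different context, so it is reloaded from the caller's frame slot.)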
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->Plug(r0); @@ -1821,7 +1736,6 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, Handle ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop); __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key. EmitCallIC(ic, mode); - RecordJSReturnSite(expr); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->DropAndPlug(1, r0); // Drop the key still on the stack. @@ -1842,7 +1756,6 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); __ CallStub(&stub); - RecordJSReturnSite(expr); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->DropAndPlug(1, r0); @@ -1850,12 +1763,6 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { void FullCodeGenerator::VisitCall(Call* expr) { -#ifdef DEBUG - // We want to verify that RecordJSReturnSite gets called on all paths - // through this function. Avoid early returns. - expr->return_is_recorded_ = false; -#endif - Comment cmnt(masm_, "[ Call"); Expression* fun = expr->expression(); Variable* var = fun->AsVariableProxy()->AsVariable(); @@ -1907,7 +1814,6 @@ void FullCodeGenerator::VisitCall(Call* expr) { InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); __ CallStub(&stub); - RecordJSReturnSite(expr); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->DropAndPlug(1, r0); @@ -2012,11 +1918,6 @@ void FullCodeGenerator::VisitCall(Call* expr) { // Emit function call. EmitCallWithStub(expr); } - -#ifdef DEBUG - // RecordJSReturnSite should have been called. 
- ASSERT(expr->return_is_recorded_); -#endif } @@ -2064,9 +1965,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList* args) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ tst(r0, Operand(kSmiTagMask)); - Split(eq, if_true, if_false, fall_through); + __ BranchOnSmi(r0, if_true); + __ b(if_false); context()->Plug(if_true, if_false); } @@ -2084,7 +1984,6 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList* args) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); __ tst(r0, Operand(kSmiTagMask | 0x80000000)); Split(eq, if_true, if_false, fall_through); @@ -2117,7 +2016,6 @@ void FullCodeGenerator::EmitIsObject(ZoneList* args) { __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, if_false); __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(le, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2138,7 +2036,6 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList* args) { __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(ge, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2161,7 +2058,6 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList* args) { __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset)); __ tst(r1, Operand(1 << Map::kIsUndetectable)); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(ne, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2185,7 +2081,6 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only // used in a few functions in runtime.js which should not normally be hit by // this compiler. 
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); __ jmp(if_false); context()->Plug(if_true, if_false); } @@ -2205,7 +2100,6 @@ void FullCodeGenerator::EmitIsFunction(ZoneList* args) { __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(eq, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2226,7 +2120,6 @@ void FullCodeGenerator::EmitIsArray(ZoneList* args) { __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(eq, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2247,7 +2140,6 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList* args) { __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(eq, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2279,7 +2171,6 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList* args) { __ bind(&check_frame_marker); __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset)); __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(eq, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2302,7 +2193,6 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList* args) { __ pop(r1); __ cmp(r0, r1); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(eq, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2756,12 +2646,11 @@ void FullCodeGenerator::EmitCallFunction(ZoneList* args) { void FullCodeGenerator::EmitRegExpConstructResult(ZoneList* args) { - RegExpConstructResultStub stub; ASSERT(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); - __ CallStub(&stub); + __ CallRuntime(Runtime::kRegExpConstructResult, 3); context()->Plug(r0); } @@ -2880,8 +2769,9 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList* args) { __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); __ tst(r0, Operand(String::kContainsCachedArrayIndexMask)); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - Split(eq, if_true, if_false, fall_through); + + __ b(eq, if_true); + __ b(if_false); context()->Plug(if_true, if_false); } @@ -3004,7 +2894,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { // Notice that the labels are swapped. context()->PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); VisitForControl(expr->expression(), if_true, if_false, fall_through); context()->Plug(if_false, if_true); // Labels swapped. 
break; @@ -3124,25 +3013,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ push(r0); EmitNamedPropertyLoad(prop); } else { - if (prop->is_arguments_access()) { - VariableProxy* obj_proxy = prop->obj()->AsVariableProxy(); - __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0)); - __ push(r0); - __ mov(r0, Operand(prop->key()->AsLiteral()->handle())); - } else { - VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - } + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); __ ldr(r1, MemOperand(sp, 0)); __ push(r0); EmitKeyedPropertyLoad(prop); } } - // We need a second deoptimization point after loading the value - // in case evaluating the property load my have a side effect. - PrepareForBailout(expr->increment(), TOS_REG); - // Call ToNumber only if operand is not a smi. Label no_conversion; __ BranchOnSmi(r0, &no_conversion); @@ -3185,10 +3063,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ sub(r0, r0, Operand(Smi::FromInt(count_value))); } __ mov(r1, Operand(Smi::FromInt(count_value))); - - // Record position before stub call. - SetSourcePosition(expr->position()); - GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0); __ CallStub(&stub); __ bind(&done); @@ -3255,7 +3129,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { // Use a regular load, not a contextual load, to avoid a reference // error. EmitCallIC(ic, RelocInfo::CODE_TARGET); - PrepareForBailout(expr, TOS_REG); context()->Plug(r0); } else if (proxy != NULL && proxy->var()->AsSlot() != NULL && @@ -3271,13 +3144,12 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { __ mov(r0, Operand(proxy->name())); __ Push(cp, r0); __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); - PrepareForBailout(expr, TOS_REG); __ bind(&done); context()->Plug(r0); } else { // This expression cannot throw a reference error at the top level. - context()->HandleExpression(expr); + Visit(expr); } } @@ -3302,8 +3174,6 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op, { AccumulatorValueContext context(this); VisitForTypeofValue(left_unary->expression()); } - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - if (check->Equals(Heap::number_symbol())) { __ tst(r0, Operand(kSmiTagMask)); __ b(eq, if_true); @@ -3407,7 +3277,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { case Token::IN: VisitForStackValue(expr->right()); __ InvokeBuiltin(Builtins::IN, CALL_JS); - PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL); __ LoadRoot(ip, Heap::kTrueValueRootIndex); __ cmp(r0, ip); Split(eq, if_true, if_false, fall_through); @@ -3417,7 +3286,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { VisitForStackValue(expr->right()); InstanceofStub stub; __ CallStub(&stub); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); // The stub returns 0 for true. 
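// (Hence the eq condition just below: tst r0, r0 sets the Z flag exactly
// when the stub returned 0, i.e. when the instanceof check succeeded.)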
__ tst(r0, r0); Split(eq, if_true, if_false, fall_through); @@ -3476,7 +3344,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { : NO_COMPARE_FLAGS; CompareStub stub(cc, strict, flags, r1, r0); __ CallStub(&stub); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); __ cmp(r0, Operand(0, RelocInfo::NONE)); Split(cc, if_true, if_false, fall_through); } @@ -3498,7 +3365,6 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { &if_true, &if_false, &fall_through); VisitForAccumulatorValue(expr->expression()); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); __ LoadRoot(r1, Heap::kNullValueRootIndex); __ cmp(r0, r1); if (expr->is_strict()) { diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc index a75d96bfd..ef7cf6af4 100644 --- a/src/arm/ic-arm.cc +++ b/src/arm/ic-arm.cc @@ -907,8 +907,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { // Returns the code marker, or the 0 if the code is not marked. static inline int InlinedICSiteMarker(Address address, Address* inline_end_address) { - if (V8::UseCrankshaft()) return false; - // If the instruction after the call site is not the pseudo instruction nop1 // then this is not related to an inlined in-object property load. The nop1 // instruction is located just after the call to the IC in the deferred code @@ -942,8 +940,6 @@ static inline int InlinedICSiteMarker(Address address, bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) { - if (V8::UseCrankshaft()) return false; - // Find the end of the inlined code for handling the load if this is an // inlined IC call site. Address inline_end_address; @@ -1023,8 +1019,6 @@ bool LoadIC::PatchInlinedContextualLoad(Address address, bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) { - if (V8::UseCrankshaft()) return false; - // Find the end of the inlined code for the store if there is an // inlined version of the store. Address inline_end_address; @@ -1075,8 +1069,6 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) { bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { - if (V8::UseCrankshaft()) return false; - Address inline_end_address; if (InlinedICSiteMarker(address, &inline_end_address) != Assembler::PROPERTY_ACCESS_INLINED) { @@ -1095,8 +1087,6 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { - if (V8::UseCrankshaft()) return false; - // Find the end of the inlined code for handling the store if this is an // inlined IC call site. Address inline_end_address; @@ -1325,7 +1315,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { char_at_generator.GenerateFast(masm); __ Ret(); - StubRuntimeCallHelper call_helper; + ICRuntimeCallHelper call_helper; char_at_generator.GenerateSlow(masm, call_helper); __ bind(&miss); @@ -2317,72 +2307,9 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { } -void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - - __ Push(r1, r2, r0); - - // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1); -} - - #undef __ -Condition CompareIC::ComputeCondition(Token::Value op) { - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - return eq; - case Token::LT: - return lt; - case Token::GT: - // Reverse left and right operands to obtain ECMA-262 conversion order. - return lt; - case Token::LTE: - // Reverse left and right operands to obtain ECMA-262 conversion order. - return ge; - case Token::GTE: - return ge; - default: - UNREACHABLE(); - return no_condition; - } -} - - -void CompareIC::UpdateCaches(Handle x, Handle y) { - HandleScope scope; - Handle rewritten; -#ifdef DEBUG - State previous_state = GetState(); -#endif - State state = TargetState(x, y); - if (state == GENERIC) { - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); - rewritten = stub.GetCode(); - } else { - ICCompareStub stub(op_, state); - rewritten = stub.GetCode(); - } - set_target(*rewritten); - -#ifdef DEBUG - if (FLAG_trace_ic) { - PrintF("[CompareIC (%s->%s)#%s]\n", - GetStateName(previous_state), - GetStateName(state), - Token::Name(op_)); - } -#endif -} - } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc deleted file mode 100644 index 682c448fd..000000000 --- a/src/arm/lithium-arm.cc +++ /dev/null @@ -1,2081 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
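// A note on the DEFINE_COMPILE macro below: it forwards code generation for
// every concrete Lithium instruction to the matching LCodeGen method. For
// example, DEFINE_COMPILE(Goto) expands to:
//
//   void LGoto::CompileToNative(LCodeGen* generator) {
//     generator->DoGoto(this);
//   }
//
// (Goto is used here because LGoto appears elsewhere in this file; the full
// set comes from LITHIUM_CONCRETE_INSTRUCTION_LIST.)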
- -#include "arm/lithium-arm.h" -#include "arm/lithium-codegen-arm.h" - -namespace v8 { -namespace internal { - -#define DEFINE_COMPILE(type) \ - void L##type::CompileToNative(LCodeGen* generator) { \ - generator->Do##type(this); \ - } -LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) -#undef DEFINE_COMPILE - -LOsrEntry::LOsrEntry() { - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { - register_spills_[i] = NULL; - } - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { - double_register_spills_[i] = NULL; - } -} - - -void LOsrEntry::MarkSpilledRegister(int allocation_index, - LOperand* spill_operand) { - ASSERT(spill_operand->IsStackSlot()); - ASSERT(register_spills_[allocation_index] == NULL); - register_spills_[allocation_index] = spill_operand; -} - - -void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index, - LOperand* spill_operand) { - ASSERT(spill_operand->IsDoubleStackSlot()); - ASSERT(double_register_spills_[allocation_index] == NULL); - double_register_spills_[allocation_index] = spill_operand; -} - - -void LInstruction::PrintTo(StringStream* stream) const { - stream->Add("%s ", this->Mnemonic()); - if (HasResult()) { - result()->PrintTo(stream); - stream->Add(" "); - } - PrintDataTo(stream); - - if (HasEnvironment()) { - stream->Add(" "); - environment()->PrintTo(stream); - } - - if (HasPointerMap()) { - stream->Add(" "); - pointer_map()->PrintTo(stream); - } -} - - -void LLabel::PrintDataTo(StringStream* stream) const { - LGap::PrintDataTo(stream); - LLabel* rep = replacement(); - if (rep != NULL) { - stream->Add(" Dead block replaced with B%d", rep->block_id()); - } -} - - -bool LParallelMove::IsRedundant() const { - for (int i = 0; i < move_operands_.length(); ++i) { - if (!move_operands_[i].IsRedundant()) return false; - } - return true; -} - - -void LParallelMove::PrintDataTo(StringStream* stream) const { - for (int i = move_operands_.length() - 1; i >= 0; --i) { - if (!move_operands_[i].IsEliminated()) { - LOperand* from = move_operands_[i].from(); - LOperand* to = move_operands_[i].to(); - if (from->Equals(to)) { - to->PrintTo(stream); - } else { - to->PrintTo(stream); - stream->Add(" = "); - from->PrintTo(stream); - } - stream->Add("; "); - } - } -} - - -bool LGap::IsRedundant() const { - for (int i = 0; i < 4; i++) { - if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { - return false; - } - } - - return true; -} - - -void LGap::PrintDataTo(StringStream* stream) const { - for (int i = 0; i < 4; i++) { - stream->Add("("); - if (parallel_moves_[i] != NULL) { - parallel_moves_[i]->PrintDataTo(stream); - } - stream->Add(") "); - } -} - - -const char* LArithmeticD::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-d"; - case Token::SUB: return "sub-d"; - case Token::MUL: return "mul-d"; - case Token::DIV: return "div-d"; - case Token::MOD: return "mod-d"; - default: - UNREACHABLE(); - return NULL; - } -} - - -const char* LArithmeticT::Mnemonic() const { - switch (op()) { - case Token::ADD: return "add-t"; - case Token::SUB: return "sub-t"; - case Token::MUL: return "mul-t"; - case Token::MOD: return "mod-t"; - case Token::DIV: return "div-t"; - default: - UNREACHABLE(); - return NULL; - } -} - - - -void LBinaryOperation::PrintDataTo(StringStream* stream) const { - stream->Add("= "); - left()->PrintTo(stream); - stream->Add(" "); - right()->PrintTo(stream); -} - - -void LGoto::PrintDataTo(StringStream* stream) const { - stream->Add("B%d", block_id()); -} - - -void LBranch::PrintDataTo(StringStream* 
stream) const { - stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - input()->PrintTo(stream); -} - - -void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("if "); - left()->PrintTo(stream); - stream->Add(" %s ", Token::String(op())); - right()->PrintTo(stream); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsNullAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("if "); - input()->PrintTo(stream); - stream->Add(is_strict() ? " === null" : " == null"); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("if is_smi("); - input()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("if has_instance_type("); - input()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("if has_cached_array_index("); - input()->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - -void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("if class_of_test("); - input()->PrintTo(stream); - stream->Add(", \"%o\") then B%d else B%d", - *hydrogen()->class_name(), - true_block_id(), - false_block_id()); -} - - -void LTypeofIs::PrintDataTo(StringStream* stream) const { - input()->PrintTo(stream); - stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString()); -} - - -void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const { - stream->Add("if typeof "); - input()->PrintTo(stream); - stream->Add(" == \"%s\" then B%d else B%d", - *hydrogen()->type_literal()->ToCString(), - true_block_id(), false_block_id()); -} - - -void LCallConstantFunction::PrintDataTo(StringStream* stream) const { - stream->Add("#%d / ", arity()); -} - - -void LUnaryMathOperation::PrintDataTo(StringStream* stream) const { - stream->Add("/%s ", hydrogen()->OpName()); - input()->PrintTo(stream); -} - - -void LCallKeyed::PrintDataTo(StringStream* stream) const { - stream->Add("[r2] #%d / ", arity()); -} - - -void LCallNamed::PrintDataTo(StringStream* stream) const { - SmartPointer name_string = name()->ToCString(); - stream->Add("%s #%d / ", *name_string, arity()); -} - - -void LCallGlobal::PrintDataTo(StringStream* stream) const { - SmartPointer name_string = name()->ToCString(); - stream->Add("%s #%d / ", *name_string, arity()); -} - - -void LCallKnownGlobal::PrintDataTo(StringStream* stream) const { - stream->Add("#%d / ", arity()); -} - - -void LCallNew::PrintDataTo(StringStream* stream) const { - LUnaryOperation::PrintDataTo(stream); - stream->Add(" #%d / ", arity()); -} - - -void LClassOfTest::PrintDataTo(StringStream* stream) const { - stream->Add("= class_of_test("); - input()->PrintTo(stream); - stream->Add(", \"%o\")", *hydrogen()->class_name()); -} - - -void LUnaryOperation::PrintDataTo(StringStream* stream) const { - stream->Add("= "); - input()->PrintTo(stream); -} - - -void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const { - arguments()->PrintTo(stream); - - stream->Add(" length "); - length()->PrintTo(stream); - - stream->Add(" index "); - index()->PrintTo(stream); -} - - -LChunk::LChunk(HGraph* graph) - : spill_slot_count_(0), - graph_(graph), - instructions_(32), - 
pointer_maps_(8), - inlined_closures_(1) { -} - - -void LChunk::Verify() const { - // TODO(twuerthinger): Implement verification for chunk. -} - - -int LChunk::GetNextSpillIndex(bool is_double) { - // Skip a slot if for a double-width slot. - if (is_double) spill_slot_count_++; - return spill_slot_count_++; -} - - -LOperand* LChunk::GetNextSpillSlot(bool is_double) { - int index = GetNextSpillIndex(is_double); - if (is_double) { - return LDoubleStackSlot::Create(index); - } else { - return LStackSlot::Create(index); - } -} - - -void LChunk::MarkEmptyBlocks() { - HPhase phase("Mark empty blocks", this); - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - int first = block->first_instruction_index(); - int last = block->last_instruction_index(); - LInstruction* first_instr = instructions()->at(first); - LInstruction* last_instr = instructions()->at(last); - - LLabel* label = LLabel::cast(first_instr); - if (last_instr->IsGoto()) { - LGoto* goto_instr = LGoto::cast(last_instr); - if (!goto_instr->include_stack_check() && - label->IsRedundant() && - !label->is_loop_header()) { - bool can_eliminate = true; - for (int i = first + 1; i < last && can_eliminate; ++i) { - LInstruction* cur = instructions()->at(i); - if (cur->IsGap()) { - LGap* gap = LGap::cast(cur); - if (!gap->IsRedundant()) { - can_eliminate = false; - } - } else { - can_eliminate = false; - } - } - - if (can_eliminate) { - label->set_replacement(GetLabel(goto_instr->block_id())); - } - } - } - } -} - - -void LStoreNamed::PrintDataTo(StringStream* stream) const { - object()->PrintTo(stream); - stream->Add("."); - stream->Add(*String::cast(*name())->ToCString()); - stream->Add(" <- "); - value()->PrintTo(stream); -} - - -void LStoreKeyed::PrintDataTo(StringStream* stream) const { - object()->PrintTo(stream); - stream->Add("["); - key()->PrintTo(stream); - stream->Add("] <- "); - value()->PrintTo(stream); -} - - -int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { - LGap* gap = new LGap(block); - int index = -1; - if (instr->IsControl()) { - instructions_.Add(gap); - index = instructions_.length(); - instructions_.Add(instr); - } else { - index = instructions_.length(); - instructions_.Add(instr); - instructions_.Add(gap); - } - if (instr->HasPointerMap()) { - pointer_maps_.Add(instr->pointer_map()); - instr->pointer_map()->set_lithium_position(index); - } - return index; -} - - -LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) { - return LConstantOperand::Create(constant->id()); -} - - -int LChunk::GetParameterStackSlot(int index) const { - // The receiver is at index 0, the first parameter at index 1, so we - // shift all parameter indexes down by the number of parameters, and - // make sure they end up negative so they are distinguishable from - // spill slots. - int result = index - graph()->info()->scope()->num_parameters() - 1; - ASSERT(result < 0); - return result; -} - -// A parameter relative to ebp in the arguments stub. -int LChunk::ParameterAt(int index) { - ASSERT(-1 <= index); // -1 is the receiver. 
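// Worked example for the return expression below, with two declared
// parameters and kPointerSize == 4: the receiver (index -1) is at
// (1 + 2 + 1) * 4 = 16 bytes above the frame pointer, the first parameter
// (index 0) at 12 and the second (index 1) at 8, i.e. parameters sit at
// decreasing positive offsets as the index grows.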
-// A parameter relative to the frame pointer in the arguments stub.
-int LChunk::ParameterAt(int index) {
-  ASSERT(-1 <= index);  // -1 is the receiver.
-  return (1 + graph()->info()->scope()->num_parameters() - index) *
-      kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
-  return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
-  return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
-  while (!IsGapAt(index)) index--;
-  return index;
-}
-
-
-int LChunk::NearestNextGapPos(int index) const {
-  while (!IsGapAt(index)) index++;
-  return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
-  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-class LGapNode: public ZoneObject {
- public:
-  explicit LGapNode(LOperand* operand)
-      : operand_(operand), resolved_(false), visited_id_(-1) { }
-
-  LOperand* operand() const { return operand_; }
-  bool IsResolved() const { return !IsAssigned() || resolved_; }
-  void MarkResolved() {
-    ASSERT(!IsResolved());
-    resolved_ = true;
-  }
-  int visited_id() const { return visited_id_; }
-  void set_visited_id(int id) {
-    ASSERT(id > visited_id_);
-    visited_id_ = id;
-  }
-
-  bool IsAssigned() const { return assigned_from_.is_set(); }
-  LGapNode* assigned_from() const { return assigned_from_.get(); }
-  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
-  LOperand* operand_;
-  SetOncePointer<LGapNode> assigned_from_;
-  bool resolved_;
-  int visited_id_;
-};
-
-
-LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
-                           LOperand* marker_operand)
-    : nodes_(4),
-      identified_cycles_(4),
-      result_(4),
-      marker_operand_(marker_operand),
-      next_visited_id_(0) {
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) RegisterMove(move);
-  }
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
-  for (int i = 0; i < identified_cycles_.length(); ++i) {
-    ResolveCycle(identified_cycles_[i]);
-  }
-
-  int unresolved_nodes;
-  do {
-    unresolved_nodes = 0;
-    for (int j = 0; j < nodes_.length(); j++) {
-      LGapNode* node = nodes_[j];
-      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
-        AddResultMove(node->assigned_from(), node);
-        node->MarkResolved();
-      }
-      if (!node->IsResolved()) ++unresolved_nodes;
-    }
-  } while (unresolved_nodes > 0);
-  return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
-  AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
-  result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start) {
-  ZoneList<LOperand*> circle_operands(8);
-  circle_operands.Add(marker_operand_);
-  LGapNode* cur = start;
-  do {
-    cur->MarkResolved();
-    circle_operands.Add(cur->operand());
-    cur = cur->assigned_from();
-  } while (cur != start);
-  circle_operands.Add(marker_operand_);
-
-  for (int i = circle_operands.length() - 1; i > 0; --i) {
-    LOperand* from = circle_operands[i];
-    LOperand* to = circle_operands[i - 1];
-    AddResultMove(from, to);
-  }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
-  ASSERT(a != b);
-  LGapNode* cur = a;
-  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
-    cur->set_visited_id(visited_id);
-    cur = cur->assigned_from();
-  }
-
-  return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
-  ASSERT(a != b);
-  return CanReach(a, b, next_visited_id_++);
-}
-
-
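The cycle machinery above is easiest to see on the smallest case, a register swap, where a <- b and b <- a form a two-node cycle that ResolveCycle breaks by routing through the marker operand. A simplified standalone sketch (plain ints stand in for LOperand, and -1 for the marker; none of this is the original V8 code):

#include <cstdio>
#include <vector>

int main() {
  const int kMarker = -1;
  // Operand walk for the cycle 0 <- 1, 1 <- 0, bracketed by the marker,
  // mirroring how ResolveCycle fills circle_operands.
  std::vector<int> ops = {kMarker, 0, 1, kMarker};
  // Emit "to <- from" pairs from the back, as the loop in ResolveCycle does.
  for (std::size_t i = ops.size() - 1; i > 0; --i) {
    std::printf("move %d <- %d\n", ops[i - 1], ops[i]);
  }
  // Prints: 1 <- -1, then 0 <- 1, then -1 <- 0. Since the resolver's result
  // list is consumed in reverse order, execution order is marker <- 0,
  // 0 <- 1, 1 <- marker: a swap through the scratch location.
  return 0;
}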
-void LGapResolver::RegisterMove(LMoveOperands move) {
-  if (move.from()->IsConstantOperand()) {
-    // Constant moves should be last in the machine code. Therefore add them
-    // first to the result set.
-    AddResultMove(move.from(), move.to());
-  } else {
-    LGapNode* from = LookupNode(move.from());
-    LGapNode* to = LookupNode(move.to());
-    if (to->IsAssigned() && to->assigned_from() == from) {
-      move.Eliminate();
-      return;
-    }
-    ASSERT(!to->IsAssigned());
-    if (CanReach(from, to)) {
-      // This move introduces a cycle. Save it for later resolution.
-      identified_cycles_.Add(from);
-    }
-    to->set_assigned_from(from);
-  }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
-  for (int i = 0; i < nodes_.length(); ++i) {
-    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
-  }
-
-  // No node found => create a new one.
-  LGapNode* result = new LGapNode(operand);
-  nodes_.Add(result);
-  return result;
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
-  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
-    LConstantOperand* operand) const {
-  return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
-  chunk_ = new LChunk(graph());
-  HPhase phase("Building chunk", chunk_);
-  status_ = BUILDING;
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
-  for (int i = 0; i < blocks->length(); i++) {
-    HBasicBlock* next = NULL;
-    if (i < blocks->length() - 1) next = blocks->at(i + 1);
-    DoBasicBlock(blocks->at(i), next);
-    if (is_aborted()) return NULL;
-  }
-  status_ = DONE;
-  return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* format, ...) {
-  if (FLAG_trace_bailout) {
-    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
-    PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
-    va_list arguments;
-    va_start(arguments, format);
-    OS::VPrint(format, arguments);
-    va_end(arguments);
-    PrintF("\n");
-  }
-  status_ = ABORTED;
-}
-
-
-LRegister* LChunkBuilder::ToOperand(Register reg) {
-  return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new LUnallocated(LUnallocated::FIXED_REGISTER,
-                          Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
-  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                          DoubleRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
-  return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
-  return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
-  return Use(value,
-             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                              LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE,
-                                     LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
-  return value->IsConstant()
-      ?
chunk_->DefineConstantOperand(HConstant::cast(value)) - : Use(value); -} - - -LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseAtStart(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegister(value); -} - - -LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() - ? chunk_->DefineConstantOperand(HConstant::cast(value)) - : UseRegisterAtStart(value); -} - - -LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { - if (value->EmitAtUses()) { - HInstruction* instr = HInstruction::cast(value); - VisitInstruction(instr); - } - allocator_->RecordUse(value, operand); - return operand; -} - - -LInstruction* LChunkBuilder::Define(LInstruction* instr) { - return Define(instr, new LUnallocated(LUnallocated::NONE)); -} - - -LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) { - return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); -} - - -LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) { - return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index)); -} - - -LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) { - return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) { - return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); -} - - -LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr, - DoubleRegister reg) { - return Define(instr, ToUnallocated(reg)); -} - - -LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { - HEnvironment* hydrogen_env = current_block_->last_environment(); - instr->set_environment(CreateEnvironment(hydrogen_env)); - return instr; -} - - -LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment( - LInstruction* instr, int ast_id) { - ASSERT(instructions_pending_deoptimization_environment_ == NULL); - ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber); - instructions_pending_deoptimization_environment_ = instr; - pending_deoptimization_ast_id_ = ast_id; - return instr; -} - - -void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() { - instructions_pending_deoptimization_environment_ = NULL; - pending_deoptimization_ast_id_ = AstNode::kNoNumber; -} - - -LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize) { - allocator_->MarkAsCall(); - instr = AssignPointerMap(instr); - - if (hinstr->HasSideEffects()) { - ASSERT(hinstr->next()->IsSimulate()); - HSimulate* sim = HSimulate::cast(hinstr->next()); - instr = SetInstructionPendingDeoptimizationEnvironment( - instr, sim->ast_id()); - } - - // If instruction does not have side-effects lazy deoptimization - // after the call will try to deoptimize to the point before the call. - // Thus we still need to attach environment to this call even if - // call sequence can not deoptimize eagerly. 
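-  // (Illustrative note, not in the original: a call with side effects is
-  // followed by an HSimulate, handled above, which provides the environment
-  // for lazy bailout after the call; a side-effect-free call instead keeps
-  // the environment describing the state before the call, which is the
-  // point lazy deoptimization returns to.)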
- bool needs_environment = - (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects(); - if (needs_environment && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - - return instr; -} - - -LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - ASSERT(!instr->HasPointerMap()); - instr->set_pointer_map(new LPointerMap(position_)); - return instr; -} - - -LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) { - allocator_->RecordDefinition(current_instruction_, result); - instr->set_result(result); - return instr; -} - - -LOperand* LChunkBuilder::Temp() { - LUnallocated* operand = new LUnallocated(LUnallocated::NONE); - allocator_->RecordTemporary(operand); - return operand; -} - - -LUnallocated* LChunkBuilder::TempRegister() { - LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - allocator_->RecordTemporary(operand); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(Register reg) { - LUnallocated* operand = ToUnallocated(reg); - allocator_->RecordTemporary(operand); - return operand; -} - - -LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { - LUnallocated* operand = ToUnallocated(reg); - allocator_->RecordTemporary(operand); - return operand; -} - - -LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { - return new LLabel(instr->block()); -} - - -LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { - return AssignEnvironment(new LDeoptimize); -} - - -LInstruction* LChunkBuilder::DoBit(Token::Value op, - HBitwiseBinaryOperation* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); - - LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); - LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); - return DefineSameAsFirst(new LBitI(op, left, right)); -} - - -LInstruction* LChunkBuilder::DoShift(Token::Value op, - HBitwiseBinaryOperation* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->OperandAt(0)->representation().IsInteger32()); - ASSERT(instr->OperandAt(1)->representation().IsInteger32()); - LOperand* left = UseRegisterAtStart(instr->OperandAt(0)); - - HValue* right_value = instr->OperandAt(1); - LOperand* right = NULL; - int constant_value = 0; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - } else { - right = UseRegister(right_value); - } - - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. 
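-  // (Illustrative: in JavaScript, -1 >>> 0 evaluates to 4294967295, which
-  // does not fit in a signed int32. If every use of the shift truncates the
-  // result back to int32 the representation is still safe; otherwise the
-  // instruction must be able to deoptimize, which is what the check below
-  // decides.)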
- bool can_deopt = (op == Token::SHR && constant_value == 0); - if (can_deopt) { - bool can_truncate = true; - for (int i = 0; i < instr->uses()->length(); i++) { - if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) { - can_truncate = false; - break; - } - } - can_deopt = !can_truncate; - } - - LInstruction* result = - DefineSameAsFirst(new LShiftI(op, left, right, can_deopt)); - if (can_deopt) AssignEnvironment(result); - return result; -} - - -LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LArithmeticD* result = new LArithmeticD(op, left, right); - return DefineSameAsFirst(result); -} - - -LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr) { - ASSERT(op == Token::ADD || - op == Token::DIV || - op == Token::MOD || - op == Token::MUL || - op == Token::SUB); - HValue* left = instr->left(); - HValue* right = instr->right(); - ASSERT(left->representation().IsTagged()); - ASSERT(right->representation().IsTagged()); - LOperand* left_operand = UseFixed(left, r1); - LOperand* right_operand = UseFixed(right, r0); - LInstruction* result = new LArithmeticT(op, left_operand, right_operand); - return MarkAsCall(DefineFixed(result, r0), instr); -} - -void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - ASSERT(is_building()); - current_block_ = block; - next_block_ = next_block; - if (block->IsStartBlock()) { - block->UpdateEnvironment(graph_->start_environment()); - argument_count_ = 0; - } else if (block->predecessors()->length() == 1) { - // We have a single predecessor => copy environment and outgoing - // argument count from the predecessor. - ASSERT(block->phis()->length() == 0); - HBasicBlock* pred = block->predecessors()->at(0); - HEnvironment* last_environment = pred->last_environment(); - ASSERT(last_environment != NULL); - // Only copy the environment, if it is later used again. - if (pred->end()->SecondSuccessor() == NULL) { - ASSERT(pred->end()->FirstSuccessor() == block); - } else { - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { - last_environment = last_environment->Copy(); - } - } - block->UpdateEnvironment(last_environment); - ASSERT(pred->argument_count() >= 0); - argument_count_ = pred->argument_count(); - } else { - // We are at a state join => process phis. - HBasicBlock* pred = block->predecessors()->at(0); - // No need to copy the environment, it cannot be used later. - HEnvironment* last_environment = pred->last_environment(); - for (int i = 0; i < block->phis()->length(); ++i) { - HPhi* phi = block->phis()->at(i); - last_environment->SetValueAt(phi->merged_index(), phi); - } - for (int i = 0; i < block->deleted_phis()->length(); ++i) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); - } - block->UpdateEnvironment(last_environment); - // Pick up the outgoing argument count of one of the predecessors. 
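-    // (Illustrative: all predecessors of a join must agree on the number of
-    // outgoing arguments pushed, so reading the count from the first
-    // predecessor is as good as any.)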
- argument_count_ = pred->argument_count(); - } - HInstruction* current = block->first(); - int start = chunk_->instructions()->length(); - while (current != NULL && !is_aborted()) { - if (FLAG_trace_environment) { - PrintF("Process instruction %d\n", current->id()); - } - // Code for constants in registers is generated lazily. - if (!current->EmitAtUses()) { - VisitInstruction(current); - } - current = current->next(); - } - int end = chunk_->instructions()->length() - 1; - if (end >= start) { - block->set_first_instruction_index(start); - block->set_last_instruction_index(end); - } - block->set_argument_count(argument_count_); - next_block_ = NULL; - current_block_ = NULL; -} - - -void LChunkBuilder::VisitInstruction(HInstruction* current) { - HInstruction* old_current = current_instruction_; - current_instruction_ = current; - allocator_->BeginInstruction(); - if (current->has_position()) position_ = current->position(); - LInstruction* instr = current->CompileToLithium(this); - - if (instr != NULL) { - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - if (current->IsBranch()) { - instr->set_hydrogen_value(HBranch::cast(current)->value()); - } else { - instr->set_hydrogen_value(current); - } - - int index = chunk_->AddInstruction(instr, current_block_); - allocator_->SummarizeInstruction(index); - } else { - // This instruction should be omitted. - allocator_->OmitInstruction(); - } - current_instruction_ = old_current; -} - - -void LEnvironment::WriteTranslation(LCodeGen* cgen, - Translation* translation) const { - if (this == NULL) return; - - // The translation includes one command per value in the environment. - int translation_size = values()->length(); - // The output frame height does not include the parameters. - int height = translation_size - parameter_count(); - - outer()->WriteTranslation(cgen, translation); - int closure_id = cgen->DefineDeoptimizationLiteral(closure()); - translation->BeginFrame(ast_id(), closure_id, height); - for (int i = 0; i < translation_size; ++i) { - LOperand* value = values()->at(i); - // spilled_registers_ and spilled_double_registers_ are either - // both NULL or both set. 
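-    // (Illustrative: when a value also lives in a spilled register, the
-    // translation records both the spill location and the original operand,
-    // with MarkDuplicate() tying them together so the deoptimizer
-    // materializes the value only once.)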
- if (spilled_registers_ != NULL && value != NULL) { - if (value->IsRegister() && - spilled_registers_[value->index()] != NULL) { - translation->MarkDuplicate(); - cgen->AddToTranslation(translation, - spilled_registers_[value->index()], - HasTaggedValueAt(i)); - } else if (value->IsDoubleRegister() && - spilled_double_registers_[value->index()] != NULL) { - translation->MarkDuplicate(); - cgen->AddToTranslation(translation, - spilled_double_registers_[value->index()], - false); - } - } - - cgen->AddToTranslation(translation, value, HasTaggedValueAt(i)); - } -} - - -void LEnvironment::PrintTo(StringStream* stream) const { - stream->Add("[id=%d|", ast_id()); - stream->Add("[parameters=%d|", parameter_count()); - stream->Add("[arguments_stack_height=%d|", arguments_stack_height()); - for (int i = 0; i < values_.length(); ++i) { - if (i != 0) stream->Add(";"); - if (values_[i] == NULL) { - stream->Add("[hole]"); - } else { - values_[i]->PrintTo(stream); - } - } - stream->Add("]"); -} - - -LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { - if (hydrogen_env == NULL) return NULL; - - LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); - int ast_id = hydrogen_env->ast_id(); - ASSERT(ast_id != AstNode::kNoNumber); - int value_count = hydrogen_env->values()->length(); - LEnvironment* result = new LEnvironment(hydrogen_env->closure(), - ast_id, - hydrogen_env->parameter_count(), - argument_count_, - value_count, - outer); - int argument_index = 0; - for (int i = 0; i < value_count; ++i) { - HValue* value = hydrogen_env->values()->at(i); - LOperand* op = NULL; - if (value->IsArgumentsObject()) { - op = NULL; - } else if (value->IsPushArgument()) { - op = new LArgument(argument_index++); - } else { - op = UseOrConstant(value); - if (op->IsUnallocated()) { - LUnallocated* unalloc = LUnallocated::cast(op); - unalloc->set_policy(LUnallocated::ANY); - } - } - result->AddValue(op, value->representation()); - } - - return result; -} - - -LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(), - instr->include_stack_check()); - if (instr->include_stack_check()) result = AssignPointerMap(result); - return result; -} - - -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* v = instr->value(); - HBasicBlock* first = instr->FirstSuccessor(); - HBasicBlock* second = instr->SecondSuccessor(); - ASSERT(first != NULL && second != NULL); - int first_id = first->block_id(); - int second_id = second->block_id(); - - if (v->EmitAtUses()) { - if (v->IsClassOfTest()) { - HClassOfTest* compare = HClassOfTest::cast(v); - ASSERT(compare->value()->representation().IsTagged()); - - return new LClassOfTestAndBranch(UseTempRegister(compare->value()), - TempRegister(), - TempRegister(), - first_id, - second_id); - } else if (v->IsCompare()) { - HCompare* compare = HCompare::cast(v); - Token::Value op = compare->token(); - HValue* left = compare->left(); - HValue* right = compare->right(); - if (left->representation().IsInteger32()) { - ASSERT(right->representation().IsInteger32()); - return new LCmpIDAndBranch(op, - UseRegisterAtStart(left), - UseOrConstantAtStart(right), - first_id, - second_id, - false); - } else if (left->representation().IsDouble()) { - ASSERT(right->representation().IsDouble()); - return new LCmpIDAndBranch(op, - UseRegisterAtStart(left), - UseRegisterAtStart(right), - first_id, - second_id, - true); - } else { - ASSERT(left->representation().IsTagged()); - 
ASSERT(right->representation().IsTagged()); - bool reversed = op == Token::GT || op == Token::LTE; - LOperand* left_operand = UseFixed(left, reversed ? r0 : r1); - LOperand* right_operand = UseFixed(right, reversed ? r1 : r0); - LInstruction* result = new LCmpTAndBranch(left_operand, - right_operand, - first_id, - second_id); - return MarkAsCall(result, instr); - } - } else if (v->IsIsSmi()) { - HIsSmi* compare = HIsSmi::cast(v); - ASSERT(compare->value()->representation().IsTagged()); - - return new LIsSmiAndBranch(Use(compare->value()), - first_id, - second_id); - } else if (v->IsHasInstanceType()) { - HHasInstanceType* compare = HHasInstanceType::cast(v); - ASSERT(compare->value()->representation().IsTagged()); - - return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()), - TempRegister(), - first_id, - second_id); - } else if (v->IsHasCachedArrayIndex()) { - HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v); - ASSERT(compare->value()->representation().IsTagged()); - - return new LHasCachedArrayIndexAndBranch( - UseRegisterAtStart(compare->value()), first_id, second_id); - } else if (v->IsIsNull()) { - HIsNull* compare = HIsNull::cast(v); - ASSERT(compare->value()->representation().IsTagged()); - - // We only need a temp register for non-strict compare. - LOperand* temp = compare->is_strict() ? NULL : TempRegister(); - return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), - compare->is_strict(), - temp, - first_id, - second_id); - } else if (v->IsCompareJSObjectEq()) { - HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v); - return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()), - UseRegisterAtStart(compare->right()), - first_id, - second_id); - } else if (v->IsInstanceOf()) { - HInstanceOf* instance_of = HInstanceOf::cast(v); - LInstruction* result = - new LInstanceOfAndBranch(Use(instance_of->left()), - Use(instance_of->right()), - first_id, - second_id); - return MarkAsCall(result, instr); - } else if (v->IsTypeofIs()) { - HTypeofIs* typeof_is = HTypeofIs::cast(v); - return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()), - first_id, - second_id); - } else { - if (v->IsConstant()) { - if (HConstant::cast(v)->handle()->IsTrue()) { - return new LGoto(first_id); - } else if (HConstant::cast(v)->handle()->IsFalse()) { - return new LGoto(second_id); - } - } - Abort("Undefined compare before branch"); - return NULL; - } - } - return new LBranch(UseRegisterAtStart(v), first_id, second_id); -} - - -LInstruction* LChunkBuilder::DoCompareMapAndBranch( - HCompareMapAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - HBasicBlock* first = instr->FirstSuccessor(); - HBasicBlock* second = instr->SecondSuccessor(); - return new LCmpMapAndBranch(value, - instr->map(), - first->block_id(), - second->block_id()); -} - - -LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { - return DefineAsRegister(new LArgumentsLength(Use(length->value()))); -} - - -LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { - return DefineAsRegister(new LArgumentsElements); -} - - -LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { - LInstruction* result = - new LInstanceOf(Use(instr->left()), Use(instr->right())); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - LOperand* function = UseFixed(instr->function(), r1); - LOperand* 
receiver = UseFixed(instr->receiver(), r0); - LOperand* length = UseRegisterAtStart(instr->length()); - LOperand* elements = UseRegisterAtStart(instr->elements()); - LInstruction* result = new LApplyArguments(function, - receiver, - length, - elements); - return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY); -} - - -LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - ++argument_count_; - LOperand* argument = Use(instr->argument()); - return new LPushArgument(argument); -} - - -LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) { - return DefineAsRegister(new LGlobalObject); -} - - -LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) { - return DefineAsRegister(new LGlobalReceiver); -} - - -LInstruction* LChunkBuilder::DoCallConstantFunction( - HCallConstantFunction* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new LCallConstantFunction, r0), instr); -} - - -LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - MathFunctionId op = instr->op(); - LOperand* input = UseRegisterAtStart(instr->value()); - LInstruction* result = new LUnaryMathOperation(input); - switch (op) { - case kMathAbs: - return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); - case kMathFloor: - return AssignEnvironment(DefineAsRegister(result)); - case kMathSqrt: - return DefineSameAsFirst(result); - default: - UNREACHABLE(); - return NULL; - } -} - - -LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { - ASSERT(instr->key()->representation().IsTagged()); - argument_count_ -= instr->argument_count(); - UseFixed(instr->key(), r2); - return MarkAsCall(DefineFixed(new LCallKeyed, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new LCallNamed, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new LCallKnownGlobal, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { - LOperand* constructor = UseFixed(instr->constructor(), r1); - argument_count_ -= instr->argument_count(); - LInstruction* result = new LCallNew(constructor); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new LCallFunction, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new LCallRuntime, r0), instr); -} - - -LInstruction* LChunkBuilder::DoShr(HShr* instr) { - return DoShift(Token::SHR, instr); -} - - -LInstruction* LChunkBuilder::DoSar(HSar* instr) { - return DoShift(Token::SAR, instr); -} - - -LInstruction* LChunkBuilder::DoShl(HShl* instr) { - return DoShift(Token::SHL, instr); -} - - -LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) { - return DoBit(Token::BIT_AND, instr); -} - - -LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) { - ASSERT(instr->value()->representation().IsInteger32()); - ASSERT(instr->representation().IsInteger32()); - return DefineSameAsFirst(new 
LBitNotI(UseRegisterAtStart(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
-  if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::DIV, instr);
-  } else if (instr->representation().IsInteger32()) {
-    // The temporary operand is necessary to ensure that right is not
-    // allocated into r1.
-    FixedTemp(r1);
-    LOperand* value = UseFixed(instr->left(), r0);
-    LOperand* divisor = UseRegister(instr->right());
-    return AssignEnvironment(DefineFixed(new LDivI(value, divisor), r0));
-  } else {
-    return DoArithmeticT(Token::DIV, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-    // The temporary operand is necessary to ensure that right is not
-    // allocated into r1, which holds the result.
-    FixedTemp(r1);
-    LOperand* value = UseFixed(instr->left(), r0);
-    LOperand* divisor = UseRegister(instr->right());
-    LInstruction* result = DefineFixed(new LModI(value, divisor), r1);
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
-        instr->CheckFlag(HValue::kCanBeDivByZero)) {
-      result = AssignEnvironment(result);
-    }
-    return result;
-  } else if (instr->representation().IsTagged()) {
-    return DoArithmeticT(Token::MOD, instr);
-  } else {
-    ASSERT(instr->representation().IsDouble());
-    // We call a C function for double modulo. It can't trigger a GC.
-    // We need to use fixed result register for the call.
-    // TODO(fschneider): Allow any register as input registers.
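-    // (Illustrative: the operands are fixed because the register allocator
-    // cannot keep values in arbitrary registers across a C call; the d1/d2
-    // inputs and the d1 result below simply mirror those fixed-register
-    // constraints.)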
- LOperand* left = UseFixedDouble(instr->left(), d1); - LOperand* right = UseFixedDouble(instr->right(), d2); - LArithmeticD* result = new LArithmeticD(Token::MOD, left, right); - return MarkAsCall(DefineFixedDouble(result, d1), instr); - } -} - - -LInstruction* LChunkBuilder::DoMul(HMul* instr) { - if (instr->representation().IsInteger32()) { - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); - LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); - LOperand* right = UseOrConstant(instr->MostConstantOperand()); - LOperand* temp = NULL; - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { - temp = TempRegister(); - } - LMulI* mul = new LMulI(left, right, temp); - return AssignEnvironment(DefineSameAsFirst(mul)); - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); - } else { - return DoArithmeticT(Token::MUL, instr); - } -} - - -LInstruction* LChunkBuilder::DoSub(HSub* instr) { - if (instr->representation().IsInteger32()) { - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); - LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); - LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); - LSubI* sub = new LSubI(left, right); - LInstruction* result = DefineSameAsFirst(sub); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::SUB, instr); - } else { - return DoArithmeticT(Token::SUB, instr); - } -} - - -LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { - if (instr->representation().IsInteger32()) { - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); - LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); - LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); - LAddI* add = new LAddI(left, right); - LInstruction* result = DefineSameAsFirst(add); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; - } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::ADD, instr); - } else { - ASSERT(instr->representation().IsTagged()); - return DoArithmeticT(Token::ADD, instr); - } -} - - -LInstruction* LChunkBuilder::DoCompare(HCompare* instr) { - Token::Value op = instr->token(); - if (instr->left()->representation().IsInteger32()) { - ASSERT(instr->right()->representation().IsInteger32()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); - return DefineAsRegister(new LCmpID(op, left, right, false)); - } else if (instr->left()->representation().IsDouble()) { - ASSERT(instr->right()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return DefineAsRegister(new LCmpID(op, left, right, true)); - } else { - bool reversed = (op == Token::GT || op == Token::LTE); - LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1); - LOperand* right = UseFixed(instr->right(), reversed ? 
r1 : r0); - LInstruction* result = new LCmpT(left, right); - return MarkAsCall(DefineFixed(result, r0), instr); - } -} - - -LInstruction* LChunkBuilder::DoCompareJSObjectEq( - HCompareJSObjectEq* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LInstruction* result = new LCmpJSObjectEq(left, right); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) { - ASSERT(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - - return DefineAsRegister(new LIsNull(value, - instr->is_strict())); -} - - -LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) { - ASSERT(instr->value()->representation().IsTagged()); - LOperand* value = UseAtStart(instr->value()); - - return DefineAsRegister(new LIsSmi(value)); -} - - -LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) { - ASSERT(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); - - return DefineAsRegister(new LHasInstanceType(value)); -} - - -LInstruction* LChunkBuilder::DoHasCachedArrayIndex( - HHasCachedArrayIndex* instr) { - ASSERT(instr->value()->representation().IsTagged()); - LOperand* value = UseRegister(instr->value()); - - return DefineAsRegister(new LHasCachedArrayIndex(value)); -} - - -LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) { - ASSERT(instr->value()->representation().IsTagged()); - LOperand* value = UseTempRegister(instr->value()); - - return DefineSameAsFirst(new LClassOfTest(value, TempRegister())); -} - - -LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) { - LOperand* array = NULL; - LOperand* temporary = NULL; - - if (instr->value()->IsLoadElements()) { - array = UseRegisterAtStart(instr->value()); - } else { - array = UseRegister(instr->value()); - temporary = TempRegister(); - } - - LInstruction* result = new LArrayLength(array, temporary); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { - LOperand* object = UseRegister(instr->value()); - LInstruction* result = new LValueOf(object, TempRegister()); - return AssignEnvironment(DefineSameAsFirst(result)); -} - - -LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()), - Use(instr->length()))); -} - - -LInstruction* LChunkBuilder::DoThrow(HThrow* instr) { - LOperand* value = UseFixed(instr->value(), r0); - return MarkAsCall(new LThrow(value), instr); -} - - -LInstruction* LChunkBuilder::DoChange(HChange* instr) { - Representation from = instr->from(); - Representation to = instr->to(); - if (from.IsTagged()) { - if (to.IsDouble()) { - LOperand* value = UseRegister(instr->value()); - LInstruction* res = new LNumberUntagD(value); - return AssignEnvironment(DefineAsRegister(res)); - } else { - ASSERT(to.IsInteger32()); - LOperand* value = UseRegister(instr->value()); - bool needs_check = !instr->value()->type().IsSmi(); - LInstruction* res = NULL; - if (needs_check) { - res = DefineSameAsFirst(new LTaggedToI(value, FixedTemp(d1))); - } else { - res = DefineSameAsFirst(new LSmiUntag(value, needs_check)); - } - if (needs_check) { - res = AssignEnvironment(res); - } - return res; - } - } else if (from.IsDouble()) { - if (to.IsTagged()) { - LOperand* value = UseRegister(instr->value()); - LOperand* temp = TempRegister(); - - // Make sure that temp and result_temp are 
different registers. - LUnallocated* result_temp = TempRegister(); - LInstruction* result = new LNumberTagD(value, temp); - Define(result, result_temp); - return AssignPointerMap(result); - } else { - ASSERT(to.IsInteger32()); - LOperand* value = UseRegister(instr->value()); - LInstruction* res = new LDoubleToI(value); - return AssignEnvironment(DefineAsRegister(res)); - } - } else if (from.IsInteger32()) { - if (to.IsTagged()) { - HValue* val = instr->value(); - LOperand* value = UseRegister(val); - if (val->HasRange() && val->range()->IsInSmiRange()) { - return DefineSameAsFirst(new LSmiTag(value)); - } else { - LInstruction* result = new LNumberTagI(value); - return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); - } - } else { - ASSERT(to.IsDouble()); - LOperand* value = Use(instr->value()); - return DefineAsRegister(new LInteger32ToDouble(value)); - } - } - UNREACHABLE(); - return NULL; -} - - -LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new LCheckSmi(value, eq)); -} - - -LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - LInstruction* result = new LCheckInstanceType(value, temp); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { - LOperand* temp = TempRegister(); - LInstruction* result = - new LCheckPrototypeMaps(temp, - instr->holder(), - instr->receiver_map()); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new LCheckSmi(value, ne)); -} - - -LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new LCheckFunction(value)); -} - - -LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new LCheckMap(value); - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - return new LReturn(UseFixed(instr->value(), r0)); -} - - -LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { - Representation r = instr->representation(); - if (r.IsInteger32()) { - int32_t value = instr->Integer32Value(); - return DefineAsRegister(new LConstantI(value)); - } else if (r.IsDouble()) { - double value = instr->DoubleValue(); - return DefineAsRegister(new LConstantD(value)); - } else if (r.IsTagged()) { - return DefineAsRegister(new LConstantT(instr->handle())); - } else { - Abort("unsupported constant of type double"); - return NULL; - } -} - - -LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) { - LInstruction* result = new LLoadGlobal(); - return instr->check_hole_value() - ? 
AssignEnvironment(DefineAsRegister(result)) - : DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) { - return new LStoreGlobal(UseRegisterAtStart(instr->value())); -} - - -LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - return DefineAsRegister( - new LLoadNamedField(UseRegisterAtStart(instr->object()))); -} - - -LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { - LOperand* object = UseFixed(instr->object(), r0); - LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - return DefineSameAsFirst(new LLoadElements(input)); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedFastElement( - HLoadKeyedFastElement* instr) { - Representation r = instr->representation(); - LOperand* obj = UseRegisterAtStart(instr->object()); - ASSERT(instr->key()->representation().IsInteger32()); - LOperand* key = UseRegisterAtStart(instr->key()); - LOperand* load_result = NULL; - // Double needs an extra temp, because the result is converted from heap - // number to a double register. - if (r.IsDouble()) load_result = TempRegister(); - LInstruction* result = new LLoadKeyedFastElement(obj, - key, - load_result); - if (r.IsDouble()) { - result = DefineAsRegister(result); - } else { - result = DefineSameAsFirst(result); - } - return AssignEnvironment(result); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { - LOperand* object = UseFixed(instr->object(), r1); - LOperand* key = UseFixed(instr->key(), r0); - - LInstruction* result = - DefineFixed(new LLoadKeyedGeneric(object, key), r0); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoStoreKeyedFastElement( - HStoreKeyedFastElement* instr) { - bool needs_write_barrier = instr->NeedsWriteBarrier(); - ASSERT(instr->value()->representation().IsTagged()); - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32()); - - LOperand* obj = UseTempRegister(instr->object()); - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - LOperand* key = needs_write_barrier - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - - return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val)); -} - - -LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { - LOperand* obj = UseFixed(instr->object(), r2); - LOperand* key = UseFixed(instr->key(), r1); - LOperand* val = UseFixed(instr->value(), r0); - - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsTagged()); - ASSERT(instr->value()->representation().IsTagged()); - - return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr); -} - - -LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { - bool needs_write_barrier = !instr->value()->type().IsSmi(); - - LOperand* obj = needs_write_barrier - ? UseTempRegister(instr->object()) - : UseRegisterAtStart(instr->object()); - - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegister(instr->value()); - - // We only need a scratch register if we have a write barrier or we - // have a store into the properties array (not in-object-property). 
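-  // (Illustrative: the write barrier needs a spare register for its address
-  // computation, and a store to the out-of-line properties array first needs
-  // a register to load that array; an in-object store of a smi needs
-  // neither.)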
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier) - ? TempRegister() : NULL; - - return new LStoreNamedField(obj, - instr->name(), - val, - instr->is_in_object(), - instr->offset(), - temp, - needs_write_barrier, - instr->transition()); -} - - -LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { - LOperand* obj = UseFixed(instr->object(), r1); - LOperand* val = UseFixed(instr->value(), r0); - - LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val); - return MarkAsCall(result, instr); -} - - -LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { - return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr); -} - - -LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { - return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr); -} - - -LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { - return MarkAsCall(DefineFixed(new LRegExpLiteral, r0), instr); -} - - -LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { - return MarkAsCall(DefineFixed(new LFunctionLiteral, r0), instr); -} - - -LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { - LInstruction* result = new LDeleteProperty(Use(instr->object()), - UseOrConstant(instr->key())); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - allocator_->MarkAsOsrEntry(); - current_block_->last_environment()->set_ast_id(instr->ast_id()); - return AssignEnvironment(new LOsrEntry); -} - - -LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(new LParameter, spill_index); -} - - -LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width. - return DefineAsSpilled(new LUnknownOSRValue, spill_index); -} - - -LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new LCallStub, r0), instr); -} - - -LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object (we bail out in all other - // cases). 
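-  // (Illustrative: returning NULL emits no code for the arguments object;
-  // CreateEnvironment above records a NULL operand for it, and accesses go
-  // through LAccessArgumentsAt against the real frame instead.)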
- return NULL; -} - - -LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - LOperand* arguments = UseRegister(instr->arguments()); - LOperand* length = UseTempRegister(instr->length()); - LOperand* index = Use(instr->index()); - LInstruction* result = new LAccessArgumentsAt(arguments, length, index); - return DefineAsRegister(AssignEnvironment(result)); -} - - -LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LInstruction* result = new LTypeof(Use(instr->value())); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) { - return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value()))); -} - -LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { - HEnvironment* env = current_block_->last_environment(); - ASSERT(env != NULL); - - env->set_ast_id(instr->ast_id()); - - env->Drop(instr->pop_count()); - for (int i = 0; i < instr->values()->length(); ++i) { - HValue* value = instr->values()->at(i); - if (instr->HasAssignedIndexAt(i)) { - env->Bind(instr->GetAssignedIndexAt(i), value); - } else { - env->Push(value); - } - } - - if (FLAG_trace_environment) { - PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n", - instr->ast_id(), - instr->id()); - env->PrintToStd(); - } - ASSERT(env->values()->length() == instr->environment_height()); - - // If there is an instruction pending deoptimization environment create a - // lazy bailout instruction to capture the environment. - if (pending_deoptimization_ast_id_ == instr->ast_id()) { - LInstruction* result = new LLazyBailout; - result = AssignEnvironment(result); - instructions_pending_deoptimization_environment_-> - set_deoptimization_environment(result->environment()); - ClearInstructionPendingDeoptimizationEnvironment(); - return result; - } - - return NULL; -} - - -LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - return MarkAsCall(new LStackCheck, instr); -} - - -LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { - HEnvironment* outer = current_block_->last_environment(); - HConstant* undefined = graph()->GetConstantUndefined(); - HEnvironment* inner = outer->CopyForInlining(instr->closure(), - instr->function(), - false, - undefined); - current_block_->UpdateEnvironment(inner); - chunk_->AddInlinedClosure(instr->closure()); - return NULL; -} - - -LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - HEnvironment* outer = current_block_->last_environment()->outer(); - current_block_->UpdateEnvironment(outer); - return NULL; -} - - -void LPointerMap::RecordPointer(LOperand* op) { - // Do not record arguments as pointers. - if (op->IsStackSlot() && op->index() < 0) return; - ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); - pointer_operands_.Add(op); -} - - -void LPointerMap::PrintTo(StringStream* stream) const { - stream->Add("{"); - for (int i = 0; i < pointer_operands_.length(); ++i) { - if (i != 0) stream->Add(";"); - pointer_operands_[i]->PrintTo(stream); - } - stream->Add("} @%d", position()); -} - -} } // namespace v8::internal diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h deleted file mode 100644 index 0d5ba0f73..000000000 --- a/src/arm/lithium-arm.h +++ /dev/null @@ -1,2068 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_ARM_LITHIUM_ARM_H_ -#define V8_ARM_LITHIUM_ARM_H_ - -#include "hydrogen.h" -#include "lithium-allocator.h" -#include "safepoint-table.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class LCodeGen; -class LEnvironment; -class Translation; - - -// Type hierarchy: -// -// LInstruction -// LAccessArgumentsAt -// LArgumentsElements -// LArgumentsLength -// LBinaryOperation -// LAddI -// LApplyArguments -// LArithmeticD -// LArithmeticT -// LBitI -// LBoundsCheck -// LCmpID -// LCmpIDAndBranch -// LCmpJSObjectEq -// LCmpJSObjectEqAndBranch -// LCmpT -// LDivI -// LInstanceOf -// LInstanceOfAndBranch -// LLoadKeyedFastElement -// LLoadKeyedGeneric -// LModI -// LMulI -// LShiftI -// LSubI -// LCallConstantFunction -// LCallFunction -// LCallGlobal -// LCallKeyed -// LCallKnownGlobal -// LCallNamed -// LCallRuntime -// LCallStub -// LConstant -// LConstantD -// LConstantI -// LConstantT -// LDeoptimize -// LFunctionLiteral -// LGlobalObject -// LGlobalReceiver -// LLabel -// LLayzBailout -// LLoadGlobal -// LMaterializedLiteral -// LArrayLiteral -// LObjectLiteral -// LRegExpLiteral -// LOsrEntry -// LParameter -// LStackCheck -// LStoreKeyed -// LStoreKeyedFastElement -// LStoreKeyedGeneric -// LStoreNamed -// LStoreNamedField -// LStoreNamedGeneric -// LUnaryOperation -// LArrayLength -// LBitNotI -// LBranch -// LCallNew -// LCheckFunction -// LCheckInstanceType -// LCheckMap -// LCheckPrototypeMaps -// LCheckSmi -// LClassOfTest -// LClassOfTestAndBranch -// LDeleteProperty -// LDoubleToI -// LHasCachedArrayIndex -// LHasCachedArrayIndexAndBranch -// LHasInstanceType -// LHasInstanceTypeAndBranch -// LInteger32ToDouble -// LIsNull -// LIsNullAndBranch -// LIsSmi -// LIsSmiAndBranch -// LLoadNamedField -// LLoadNamedGeneric -// LNumberTagD -// LNumberTagI -// LPushArgument -// LReturn -// LSmiTag -// LStoreGlobal -// LTaggedToI -// LThrow -// LTypeof -// LTypeofIs -// LTypeofIsAndBranch -// LUnaryMathOperation -// LValueOf -// LUnknownOSRValue - -#define 
LITHIUM_ALL_INSTRUCTION_LIST(V) \ - V(BinaryOperation) \ - V(Constant) \ - V(Call) \ - V(MaterializedLiteral) \ - V(StoreKeyed) \ - V(StoreNamed) \ - V(UnaryOperation) \ - LITHIUM_CONCRETE_INSTRUCTION_LIST(V) - - -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(ArrayLength) \ - V(ArrayLiteral) \ - V(BitI) \ - V(BitNotI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallConstantFunction) \ - V(CallFunction) \ - V(CallGlobal) \ - V(CallKeyed) \ - V(CallKnownGlobal) \ - V(CallNamed) \ - V(CallNew) \ - V(CallRuntime) \ - V(CallStub) \ - V(CheckFunction) \ - V(CheckInstanceType) \ - V(CheckMap) \ - V(CheckPrototypeMaps) \ - V(CheckSmi) \ - V(CmpID) \ - V(CmpIDAndBranch) \ - V(CmpJSObjectEq) \ - V(CmpJSObjectEqAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(CmpTAndBranch) \ - V(ConstantD) \ - V(ConstantI) \ - V(ConstantT) \ - V(DeleteProperty) \ - V(Deoptimize) \ - V(DivI) \ - V(DoubleToI) \ - V(FunctionLiteral) \ - V(Gap) \ - V(GlobalObject) \ - V(GlobalReceiver) \ - V(Goto) \ - V(InstanceOf) \ - V(InstanceOfAndBranch) \ - V(Integer32ToDouble) \ - V(IsNull) \ - V(IsNullAndBranch) \ - V(IsSmi) \ - V(IsSmiAndBranch) \ - V(HasInstanceType) \ - V(HasInstanceTypeAndBranch) \ - V(HasCachedArrayIndex) \ - V(HasCachedArrayIndexAndBranch) \ - V(ClassOfTest) \ - V(ClassOfTestAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadElements) \ - V(LoadGlobal) \ - V(LoadKeyedFastElement) \ - V(LoadKeyedGeneric) \ - V(LoadNamedField) \ - V(LoadNamedGeneric) \ - V(ModI) \ - V(MulI) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberUntagD) \ - V(ObjectLiteral) \ - V(OsrEntry) \ - V(Parameter) \ - V(PushArgument) \ - V(RegExpLiteral) \ - V(Return) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreGlobal) \ - V(StoreKeyedFastElement) \ - V(StoreKeyedGeneric) \ - V(StoreNamedField) \ - V(StoreNamedGeneric) \ - V(SubI) \ - V(TaggedToI) \ - V(Throw) \ - V(Typeof) \ - V(TypeofIs) \ - V(TypeofIsAndBranch) \ - V(UnaryMathOperation) \ - V(UnknownOSRValue) \ - V(ValueOf) - - -#define DECLARE_INSTRUCTION(type) \ - virtual bool Is##type() const { return true; } \ - static L##type* cast(LInstruction* instr) { \ - ASSERT(instr->Is##type()); \ - return reinterpret_cast(instr); \ - } - - -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - virtual void CompileToNative(LCodeGen* generator); \ - virtual const char* Mnemonic() const { return mnemonic; } \ - DECLARE_INSTRUCTION(type) - - -#define DECLARE_HYDROGEN_ACCESSOR(type) \ - H##type* hydrogen() const { \ - return H##type::cast(hydrogen_value()); \ - } - - -class LInstruction: public ZoneObject { - public: - LInstruction() - : hydrogen_value_(NULL) { } - virtual ~LInstruction() { } - - virtual void CompileToNative(LCodeGen* generator) = 0; - virtual const char* Mnemonic() const = 0; - virtual void PrintTo(StringStream* stream) const; - virtual void PrintDataTo(StringStream* stream) const { } - - // Declare virtual type testers. 
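-  // (Illustrative expansion: DECLARE_DO(Gap) below produces
-  //   virtual bool IsGap() const { return false; }
-  // and the DECLARE_INSTRUCTION macro above overrides the same tester to
-  // return true in the concrete class.)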
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-  virtual bool IsControl() const { return false; }
-
-  void set_environment(LEnvironment* env) { environment_.set(env); }
-  LEnvironment* environment() const { return environment_.get(); }
-  bool HasEnvironment() const { return environment_.is_set(); }
-
-  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
-  LPointerMap* pointer_map() const { return pointer_map_.get(); }
-  bool HasPointerMap() const { return pointer_map_.is_set(); }
-
-  void set_result(LOperand* operand) { result_.set(operand); }
-  LOperand* result() const { return result_.get(); }
-  bool HasResult() const { return result_.is_set(); }
-
-  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
-  HValue* hydrogen_value() const { return hydrogen_value_; }
-
-  void set_deoptimization_environment(LEnvironment* env) {
-    deoptimization_environment_.set(env);
-  }
-  LEnvironment* deoptimization_environment() const {
-    return deoptimization_environment_.get();
-  }
-  bool HasDeoptimizationEnvironment() const {
-    return deoptimization_environment_.is_set();
-  }
-
- private:
-  SetOncePointer<LEnvironment> environment_;
-  SetOncePointer<LPointerMap> pointer_map_;
-  SetOncePointer<LOperand> result_;
-  HValue* hydrogen_value_;
-  SetOncePointer<LEnvironment> deoptimization_environment_;
-};
-
-
-class LGapNode;
-
-
-class LGapResolver BASE_EMBEDDED {
- public:
-  LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
-  const ZoneList<LMoveOperands>* ResolveInReverseOrder();
-
- private:
-  LGapNode* LookupNode(LOperand* operand);
-  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
-  bool CanReach(LGapNode* a, LGapNode* b);
-  void RegisterMove(LMoveOperands move);
-  void AddResultMove(LOperand* from, LOperand* to);
-  void AddResultMove(LGapNode* from, LGapNode* to);
-  void ResolveCycle(LGapNode* start);
-
-  ZoneList<LGapNode*> nodes_;
-  ZoneList<LGapNode*> identified_cycles_;
-  ZoneList<LMoveOperands> result_;
-  LOperand* marker_operand_;
-  int next_visited_id_;
-  int bailout_after_ast_id_;
-};
-
-
-class LParallelMove : public ZoneObject {
- public:
-  LParallelMove() : move_operands_(4) { }
-
-  void AddMove(LOperand* from, LOperand* to) {
-    move_operands_.Add(LMoveOperands(from, to));
-  }
-
-  bool IsRedundant() const;
-
-  const ZoneList<LMoveOperands>* move_operands() const {
-    return &move_operands_;
-  }
-
-  void PrintDataTo(StringStream* stream) const;
-
- private:
-  ZoneList<LMoveOperands> move_operands_;
-};
-
-
-class LGap: public LInstruction {
- public:
-  explicit LGap(HBasicBlock* block)
-      : block_(block) {
-    parallel_moves_[BEFORE] = NULL;
-    parallel_moves_[START] = NULL;
-    parallel_moves_[END] = NULL;
-    parallel_moves_[AFTER] = NULL;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
-  virtual void PrintDataTo(StringStream* stream) const;
-
-  bool IsRedundant() const;
-
-  HBasicBlock* block() const { return block_; }
-
-  enum InnerPosition {
-    BEFORE,
-    START,
-    END,
-    AFTER,
-    FIRST_INNER_POSITION = BEFORE,
-    LAST_INNER_POSITION = AFTER
-  };
-
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
-    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
-    return parallel_moves_[pos];
-  }
-
-  LParallelMove* GetParallelMove(InnerPosition pos) {
-    return parallel_moves_[pos];
-  }
-
- private:
-  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
-  HBasicBlock* block_;
-};
-
-
-class LGoto: public LInstruction {
- public:
-  LGoto(int block_id, bool include_stack_check = false)
-      : block_id_(block_id), include_stack_check_(include_stack_check) { }
-
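-  // (Illustrative: include_stack_check mirrors HGoto::include_stack_check();
-  // such gotos get a pointer map in DoGoto and are never folded away by
-  // LChunk::MarkEmptyBlocks.)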
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int block_id() const { return block_id_; } - bool include_stack_check() const { return include_stack_check_; } - - private: - int block_id_; - bool include_stack_check_; -}; - - -class LLazyBailout: public LInstruction { - public: - LLazyBailout() : gap_instructions_size_(0) { } - - DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") - - void set_gap_instructions_size(int gap_instructions_size) { - gap_instructions_size_ = gap_instructions_size; - } - int gap_instructions_size() { return gap_instructions_size_; } - - private: - int gap_instructions_size_; -}; - - -class LDeoptimize: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") -}; - - -class LLabel: public LGap { - public: - explicit LLabel(HBasicBlock* block) - : LGap(block), replacement_(NULL) { } - - DECLARE_CONCRETE_INSTRUCTION(Label, "label") - - virtual void PrintDataTo(StringStream* stream) const; - - int block_id() const { return block()->block_id(); } - bool is_loop_header() const { return block()->IsLoopHeader(); } - Label* label() { return &label_; } - LLabel* replacement() const { return replacement_; } - void set_replacement(LLabel* label) { replacement_ = label; } - bool HasReplacement() const { return replacement_ != NULL; } - - private: - Label label_; - LLabel* replacement_; -}; - - -class LParameter: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") -}; - - -class LCallStub: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub") - DECLARE_HYDROGEN_ACCESSOR(CallStub) -}; - - -class LUnknownOSRValue: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") -}; - - -class LUnaryOperation: public LInstruction { - public: - explicit LUnaryOperation(LOperand* input) : input_(input) { } - - DECLARE_INSTRUCTION(UnaryOperation) - - LOperand* input() const { return input_; } - - virtual void PrintDataTo(StringStream* stream) const; - - private: - LOperand* input_; -}; - - -class LBinaryOperation: public LInstruction { - public: - LBinaryOperation(LOperand* left, LOperand* right) - : left_(left), right_(right) { } - - DECLARE_INSTRUCTION(BinaryOperation) - - LOperand* left() const { return left_; } - LOperand* right() const { return right_; } - virtual void PrintDataTo(StringStream* stream) const; - - private: - LOperand* left_; - LOperand* right_; -}; - - -class LApplyArguments: public LBinaryOperation { - public: - LApplyArguments(LOperand* function, - LOperand* receiver, - LOperand* length, - LOperand* elements) - : LBinaryOperation(function, receiver), - length_(length), - elements_(elements) { } - - DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") - - LOperand* function() const { return left(); } - LOperand* receiver() const { return right(); } - LOperand* length() const { return length_; } - LOperand* elements() const { return elements_; } - - private: - LOperand* length_; - LOperand* elements_; -}; - - -class LAccessArgumentsAt: public LInstruction { - public: - LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) - : arguments_(arguments), length_(length), index_(index) { } - - DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") - - LOperand* arguments() const { return arguments_; } - LOperand* length() const { return length_; } - LOperand* index() const { 
return index_; } - - virtual void PrintDataTo(StringStream* stream) const; - - private: - LOperand* arguments_; - LOperand* length_; - LOperand* index_; -}; - - -class LArgumentsLength: public LUnaryOperation { - public: - explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {} - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") -}; - - -class LArgumentsElements: public LInstruction { - public: - LArgumentsElements() { } - - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") -}; - - -class LModI: public LBinaryOperation { - public: - LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { } - - DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") - DECLARE_HYDROGEN_ACCESSOR(Mod) -}; - - -class LDivI: public LBinaryOperation { - public: - LDivI(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } - - DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") - DECLARE_HYDROGEN_ACCESSOR(Div) -}; - - -class LMulI: public LBinaryOperation { - public: - LMulI(LOperand* left, LOperand* right, LOperand* temp) - : LBinaryOperation(left, right), temp_(temp) { } - - DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") - DECLARE_HYDROGEN_ACCESSOR(Mul) - - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; -}; - - -class LCmpID: public LBinaryOperation { - public: - LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double) - : LBinaryOperation(left, right), op_(op), is_double_(is_double) { } - - Token::Value op() const { return op_; } - bool is_double() const { return is_double_; } - - DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id") - - private: - Token::Value op_; - bool is_double_; -}; - - -class LCmpIDAndBranch: public LCmpID { - public: - LCmpIDAndBranch(Token::Value op, - LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id, - bool is_double) - : LCmpID(op, left, right, is_double), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LUnaryMathOperation: public LUnaryOperation { - public: - explicit LUnaryMathOperation(LOperand* value) - : LUnaryOperation(value) { } - - DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation") - DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) - - virtual void PrintDataTo(StringStream* stream) const; - MathFunctionId op() const { return hydrogen()->op(); } -}; - - -class LCmpJSObjectEq: public LBinaryOperation { - public: - LCmpJSObjectEq(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) {} - - DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq") -}; - - -class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq { - public: - LCmpJSObjectEqAndBranch(LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id) - : LCmpJSObjectEq(left, right), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch, - "cmp-jsobject-eq-and-branch") - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LIsNull: public LUnaryOperation { - 
public: - LIsNull(LOperand* value, bool is_strict) - : LUnaryOperation(value), is_strict_(is_strict) {} - - DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null") - - bool is_strict() const { return is_strict_; } - - private: - bool is_strict_; -}; - - -class LIsNullAndBranch: public LIsNull { - public: - LIsNullAndBranch(LOperand* value, - bool is_strict, - LOperand* temp, - int true_block_id, - int false_block_id) - : LIsNull(value, is_strict), - temp_(temp), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; - int true_block_id_; - int false_block_id_; -}; - - -class LIsSmi: public LUnaryOperation { - public: - explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {} - - DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi") - DECLARE_HYDROGEN_ACCESSOR(IsSmi) -}; - - -class LIsSmiAndBranch: public LIsSmi { - public: - LIsSmiAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LIsSmi(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LHasInstanceType: public LUnaryOperation { - public: - explicit LHasInstanceType(LOperand* value) - : LUnaryOperation(value) { } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceType) - - InstanceType TestType(); // The type to test against when generating code. - Condition BranchCondition(); // The branch condition for 'true'. 
-}; - - -class LHasInstanceTypeAndBranch: public LHasInstanceType { - public: - LHasInstanceTypeAndBranch(LOperand* value, - LOperand* temporary, - int true_block_id, - int false_block_id) - : LHasInstanceType(value), - temp_(temporary), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, - "has-instance-type-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - LOperand* temp() { return temp_; } - - private: - LOperand* temp_; - int true_block_id_; - int false_block_id_; -}; - - -class LHasCachedArrayIndex: public LUnaryOperation { - public: - explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {} - - DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index") - DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex) -}; - - -class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex { - public: - LHasCachedArrayIndexAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LHasCachedArrayIndex(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch, - "has-cached-array-index-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LClassOfTest: public LUnaryOperation { - public: - LClassOfTest(LOperand* value, LOperand* temp) - : LUnaryOperation(value), temporary_(temp) {} - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTest) - - virtual void PrintDataTo(StringStream* stream) const; - - LOperand* temporary() { return temporary_; } - - private: - LOperand *temporary_; -}; - - -class LClassOfTestAndBranch: public LClassOfTest { - public: - LClassOfTestAndBranch(LOperand* value, - LOperand* temporary, - LOperand* temporary2, - int true_block_id, - int false_block_id) - : LClassOfTest(value, temporary), - temporary2_(temporary2), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, - "class-of-test-and-branch") - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - LOperand* temporary2() { return temporary2_; } - - private: - LOperand* temporary2_; - int true_block_id_; - int false_block_id_; -}; - - -class LCmpT: public LBinaryOperation { - public: - LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {} - - DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(Compare) - - Token::Value op() const { return hydrogen()->token(); } -}; - - -class LCmpTAndBranch: public LCmpT { - public: - LCmpTAndBranch(LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id) - : LCmpT(left, right), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch") - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return 
false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LInstanceOf: public LBinaryOperation { - public: - LInstanceOf(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } - - DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of") -}; - - -class LInstanceOfAndBranch: public LInstanceOf { - public: - LInstanceOfAndBranch(LOperand* left, - LOperand* right, - int true_block_id, - int false_block_id) - : LInstanceOf(left, right), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch") - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LBoundsCheck: public LBinaryOperation { - public: - LBoundsCheck(LOperand* index, LOperand* length) - : LBinaryOperation(index, length) { } - - LOperand* index() const { return left(); } - LOperand* length() const { return right(); } - - DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") -}; - - -class LBitI: public LBinaryOperation { - public: - LBitI(Token::Value op, LOperand* left, LOperand* right) - : LBinaryOperation(left, right), op_(op) { } - - Token::Value op() const { return op_; } - - DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") - - private: - Token::Value op_; -}; - - -class LShiftI: public LBinaryOperation { - public: - LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) - : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { } - - Token::Value op() const { return op_; } - - bool can_deopt() const { return can_deopt_; } - - DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") - - private: - Token::Value op_; - bool can_deopt_; -}; - - -class LSubI: public LBinaryOperation { - public: - LSubI(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } - - DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - -class LConstant: public LInstruction { - DECLARE_INSTRUCTION(Constant) -}; - - -class LConstantI: public LConstant { - public: - explicit LConstantI(int32_t value) : value_(value) { } - int32_t value() const { return value_; } - - DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") - - private: - int32_t value_; -}; - - -class LConstantD: public LConstant { - public: - explicit LConstantD(double value) : value_(value) { } - double value() const { return value_; } - - DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") - - private: - double value_; -}; - - -class LConstantT: public LConstant { - public: - explicit LConstantT(Handle value) : value_(value) { } - Handle value() const { return value_; } - - DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") - - private: - Handle value_; -}; - - -class LBranch: public LUnaryOperation { - public: - LBranch(LOperand* input, int true_block_id, int false_block_id) - : LUnaryOperation(input), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Value) - - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LCmpMapAndBranch: public LUnaryOperation { - public: - LCmpMapAndBranch(LOperand* value, - Handle map, - int 
true_block_id, - int false_block_id) - : LUnaryOperation(value), - map_(map), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") - - virtual bool IsControl() const { return true; } - - Handle map() const { return map_; } - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - Handle map_; - int true_block_id_; - int false_block_id_; -}; - - -class LArrayLength: public LUnaryOperation { - public: - LArrayLength(LOperand* input, LOperand* temporary) - : LUnaryOperation(input), temporary_(temporary) { } - - LOperand* temporary() const { return temporary_; } - - DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length") - DECLARE_HYDROGEN_ACCESSOR(ArrayLength) - - private: - LOperand* temporary_; -}; - - -class LValueOf: public LUnaryOperation { - public: - LValueOf(LOperand* input, LOperand* temporary) - : LUnaryOperation(input), temporary_(temporary) { } - - LOperand* temporary() const { return temporary_; } - - DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of") - DECLARE_HYDROGEN_ACCESSOR(ValueOf) - - private: - LOperand* temporary_; -}; - - -class LThrow: public LUnaryOperation { - public: - explicit LThrow(LOperand* value) : LUnaryOperation(value) { } - - DECLARE_CONCRETE_INSTRUCTION(Throw, "throw") -}; - - -class LBitNotI: public LUnaryOperation { - public: - explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { } - - DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i") -}; - - -class LAddI: public LBinaryOperation { - public: - LAddI(LOperand* left, LOperand* right) - : LBinaryOperation(left, right) { } - - DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") - DECLARE_HYDROGEN_ACCESSOR(Add) -}; - - -class LArithmeticD: public LBinaryOperation { - public: - LArithmeticD(Token::Value op, LOperand* left, LOperand* right) - : LBinaryOperation(left, right), op_(op) { } - - Token::Value op() const { return op_; } - - virtual void CompileToNative(LCodeGen* generator); - virtual const char* Mnemonic() const; - - private: - Token::Value op_; -}; - - -class LArithmeticT: public LBinaryOperation { - public: - LArithmeticT(Token::Value op, LOperand* left, LOperand* right) - : LBinaryOperation(left, right), op_(op) { } - - virtual void CompileToNative(LCodeGen* generator); - virtual const char* Mnemonic() const; - - Token::Value op() const { return op_; } - - private: - Token::Value op_; -}; - - -class LReturn: public LUnaryOperation { - public: - explicit LReturn(LOperand* use) : LUnaryOperation(use) { } - - DECLARE_CONCRETE_INSTRUCTION(Return, "return") -}; - - -class LLoadNamedField: public LUnaryOperation { - public: - explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) -}; - - -class LLoadNamedGeneric: public LUnaryOperation { - public: - explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) - - LOperand* object() const { return input(); } - Handle name() const { return hydrogen()->name(); } -}; - - -class LLoadElements: public LUnaryOperation { - public: - explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { } - - DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements") -}; - - -class LLoadKeyedFastElement: public LBinaryOperation { - public: - 
LLoadKeyedFastElement(LOperand* elements, - LOperand* key, - LOperand* load_result) - : LBinaryOperation(elements, key), - load_result_(load_result) { } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) - - LOperand* elements() const { return left(); } - LOperand* key() const { return right(); } - LOperand* load_result() const { return load_result_; } - - private: - LOperand* load_result_; -}; - - -class LLoadKeyedGeneric: public LBinaryOperation { - public: - LLoadKeyedGeneric(LOperand* obj, LOperand* key) - : LBinaryOperation(obj, key) { } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") - - LOperand* object() const { return left(); } - LOperand* key() const { return right(); } -}; - - -class LLoadGlobal: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global") - DECLARE_HYDROGEN_ACCESSOR(LoadGlobal) -}; - - -class LStoreGlobal: public LUnaryOperation { - public: - explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {} - - DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global") - DECLARE_HYDROGEN_ACCESSOR(StoreGlobal) -}; - - -class LPushArgument: public LUnaryOperation { - public: - explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {} - - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") -}; - - -class LGlobalObject: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") -}; - - -class LGlobalReceiver: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver") -}; - - -class LCallConstantFunction: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function") - DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction) - - virtual void PrintDataTo(StringStream* stream) const; - - Handle function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallKeyed: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") - DECLARE_HYDROGEN_ACCESSOR(CallKeyed) - - virtual void PrintDataTo(StringStream* stream) const; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallNamed: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named") - DECLARE_HYDROGEN_ACCESSOR(CallNamed) - - virtual void PrintDataTo(StringStream* stream) const; - - Handle name() const { return hydrogen()->name(); } - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallFunction: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") - DECLARE_HYDROGEN_ACCESSOR(CallFunction) - - int arity() const { return hydrogen()->argument_count() - 2; } -}; - - -class LCallGlobal: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global") - DECLARE_HYDROGEN_ACCESSOR(CallGlobal) - - virtual void PrintDataTo(StringStream* stream) const; - - Handle name() const {return hydrogen()->name(); } - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallKnownGlobal: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global") - DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal) - - virtual void PrintDataTo(StringStream* stream) const; - - Handle target() const { return hydrogen()->target(); } - int arity() const { return 
hydrogen()->argument_count() - 1; } -}; - - -class LCallNew: public LUnaryOperation { - public: - explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { } - - DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new") - DECLARE_HYDROGEN_ACCESSOR(CallNew) - - virtual void PrintDataTo(StringStream* stream) const; - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - -class LCallRuntime: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") - DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - - Runtime::Function* function() const { return hydrogen()->function(); } - int arity() const { return hydrogen()->argument_count(); } -}; - - -class LInteger32ToDouble: public LUnaryOperation { - public: - explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { } - - DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") -}; - - -class LNumberTagI: public LUnaryOperation { - public: - explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") -}; - - -class LNumberTagD: public LUnaryOperation { - public: - explicit LNumberTagD(LOperand* value, LOperand* temp) - : LUnaryOperation(value), temp_(temp) { } - - DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") - - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; -}; - - -// Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI: public LUnaryOperation { - public: - explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { } - - DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } -}; - - -// Truncating conversion from a tagged value to an int32. 
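The tagging instructions in this stretch of the hunk (and LSmiTag/LSmiUntag, whose declarations continue directly below along with LTaggedToI) rely on V8's 32-bit smi encoding: a small integer keeps its payload in the upper 31 bits with a zero low tag bit, so tagging, untagging, and the smi test are each a single ALU operation. A minimal sketch of the encoding, assuming the 32-bit layout; this is not code from this file:

    #include <stdint.h>

    const int32_t kSmiTagSize = 1;
    const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // low bit: 0 means smi

    inline int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    inline int32_t SmiUntag(int32_t smi)  { return smi >> kSmiTagSize; }
    inline bool IsSmi(int32_t tagged)     { return (tagged & kSmiTagMask) == 0; }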
-class LTaggedToI: public LUnaryOperation { - public: - LTaggedToI(LOperand* value, LOperand* temp) - : LUnaryOperation(value), temp_(temp) { } - - DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(Change) - - bool truncating() { return hydrogen()->CanTruncateToInt32(); } - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; -}; - - -class LSmiTag: public LUnaryOperation { - public: - explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { } - - DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") -}; - - -class LNumberUntagD: public LUnaryOperation { - public: - explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { } - - DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") -}; - - -class LSmiUntag: public LUnaryOperation { - public: - LSmiUntag(LOperand* use, bool needs_check) - : LUnaryOperation(use), needs_check_(needs_check) { } - - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - - bool needs_check() const { return needs_check_; } - - private: - bool needs_check_; -}; - - -class LStoreNamed: public LInstruction { - public: - LStoreNamed(LOperand* obj, Handle name, LOperand* val) - : object_(obj), name_(name), value_(val) { } - - DECLARE_INSTRUCTION(StoreNamed) - - virtual void PrintDataTo(StringStream* stream) const; - - LOperand* object() const { return object_; } - Handle name() const { return name_; } - LOperand* value() const { return value_; } - - private: - LOperand* object_; - Handle name_; - LOperand* value_; -}; - - -class LStoreNamedField: public LStoreNamed { - public: - LStoreNamedField(LOperand* obj, - Handle name, - LOperand* val, - bool in_object, - int offset, - LOperand* temp, - bool needs_write_barrier, - Handle transition) - : LStoreNamed(obj, name, val), - is_in_object_(in_object), - offset_(offset), - temp_(temp), - needs_write_barrier_(needs_write_barrier), - transition_(transition) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - - bool is_in_object() { return is_in_object_; } - int offset() { return offset_; } - LOperand* temp() { return temp_; } - bool needs_write_barrier() { return needs_write_barrier_; } - Handle transition() const { return transition_; } - void set_transition(Handle map) { transition_ = map; } - - private: - bool is_in_object_; - int offset_; - LOperand* temp_; - bool needs_write_barrier_; - Handle transition_; -}; - - -class LStoreNamedGeneric: public LStoreNamed { - public: - LStoreNamedGeneric(LOperand* obj, - Handle name, - LOperand* val) - : LStoreNamed(obj, name, val) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") -}; - - -class LStoreKeyed: public LInstruction { - public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) - : object_(obj), key_(key), value_(val) { } - - DECLARE_INSTRUCTION(StoreKeyed) - - virtual void PrintDataTo(StringStream* stream) const; - - LOperand* object() const { return object_; } - LOperand* key() const { return key_; } - LOperand* value() const { return value_; } - - private: - LOperand* object_; - LOperand* key_; - LOperand* value_; -}; - - -class LStoreKeyedFastElement: public LStoreKeyed { - public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) -}; - - -class LStoreKeyedGeneric: public LStoreKeyed { - public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* 
val) - : LStoreKeyed(obj, key, val) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") -}; - - -class LCheckFunction: public LUnaryOperation { - public: - explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { } - - DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function") - DECLARE_HYDROGEN_ACCESSOR(CheckFunction) -}; - - -class LCheckInstanceType: public LUnaryOperation { - public: - LCheckInstanceType(LOperand* use, LOperand* temp) - : LUnaryOperation(use), temp_(temp) { } - - DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") - DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) - - LOperand* temp() const { return temp_; } - - private: - LOperand* temp_; -}; - - -class LCheckMap: public LUnaryOperation { - public: - explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { } - - DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map") - DECLARE_HYDROGEN_ACCESSOR(CheckMap) -}; - - -class LCheckPrototypeMaps: public LInstruction { - public: - LCheckPrototypeMaps(LOperand* temp, - Handle holder, - Handle receiver_map) - : temp_(temp), - holder_(holder), - receiver_map_(receiver_map) { } - - DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps") - - LOperand* temp() const { return temp_; } - Handle holder() const { return holder_; } - Handle receiver_map() const { return receiver_map_; } - - private: - LOperand* temp_; - Handle holder_; - Handle receiver_map_; -}; - - -class LCheckSmi: public LUnaryOperation { - public: - LCheckSmi(LOperand* use, Condition condition) - : LUnaryOperation(use), condition_(condition) { } - - Condition condition() const { return condition_; } - - virtual void CompileToNative(LCodeGen* generator); - virtual const char* Mnemonic() const { - return (condition_ == eq) ? 
"check-non-smi" : "check-smi"; - } - - private: - Condition condition_; -}; - - -class LMaterializedLiteral: public LInstruction { - public: - DECLARE_INSTRUCTION(MaterializedLiteral) -}; - - -class LArrayLiteral: public LMaterializedLiteral { - public: - DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") - DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) -}; - - -class LObjectLiteral: public LMaterializedLiteral { - public: - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal") - DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral) -}; - - -class LRegExpLiteral: public LMaterializedLiteral { - public: - DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal") - DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral) -}; - - -class LFunctionLiteral: public LInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") - DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) - - Handle shared_info() { return hydrogen()->shared_info(); } -}; - - -class LTypeof: public LUnaryOperation { - public: - explicit LTypeof(LOperand* input) : LUnaryOperation(input) { } - - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") -}; - - -class LTypeofIs: public LUnaryOperation { - public: - explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { } - virtual void PrintDataTo(StringStream* stream) const; - - DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is") - DECLARE_HYDROGEN_ACCESSOR(TypeofIs) - - Handle type_literal() { return hydrogen()->type_literal(); } -}; - - -class LTypeofIsAndBranch: public LTypeofIs { - public: - LTypeofIsAndBranch(LOperand* value, - int true_block_id, - int false_block_id) - : LTypeofIs(value), - true_block_id_(true_block_id), - false_block_id_(false_block_id) { } - - DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - - virtual void PrintDataTo(StringStream* stream) const; - virtual bool IsControl() const { return true; } - - int true_block_id() const { return true_block_id_; } - int false_block_id() const { return false_block_id_; } - - private: - int true_block_id_; - int false_block_id_; -}; - - -class LDeleteProperty: public LBinaryOperation { - public: - LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {} - - DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property") - - LOperand* object() const { return left(); } - LOperand* key() const { return right(); } -}; - - -class LOsrEntry: public LInstruction { - public: - LOsrEntry(); - - DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") - - LOperand** SpilledRegisterArray() { return register_spills_; } - LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; } - - void MarkSpilledRegister(int allocation_index, LOperand* spill_operand); - void MarkSpilledDoubleRegister(int allocation_index, - LOperand* spill_operand); - - private: - // Arrays of spill slot operands for registers with an assigned spill - // slot, i.e., that must also be restored to the spill slot on OSR entry. - // NULL if the register has no assigned spill slot. Indexed by allocation - // index. 
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LInstruction {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
-};
-
-
-class LPointerMap: public ZoneObject {
- public:
-  explicit LPointerMap(int position)
-      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
-  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
-  int position() const { return position_; }
-  int lithium_position() const { return lithium_position_; }
-
-  void set_lithium_position(int pos) {
-    ASSERT(lithium_position_ == -1);
-    lithium_position_ = pos;
-  }
-
-  void RecordPointer(LOperand* op);
-  void PrintTo(StringStream* stream) const;
-
- private:
-  ZoneList<LOperand*> pointer_operands_;
-  int position_;
-  int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
-  LEnvironment(Handle<JSFunction> closure,
-               int ast_id,
-               int parameter_count,
-               int argument_count,
-               int value_count,
-               LEnvironment* outer)
-      : closure_(closure),
-        arguments_stack_height_(argument_count),
-        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
-        translation_index_(-1),
-        ast_id_(ast_id),
-        parameter_count_(parameter_count),
-        values_(value_count),
-        representations_(value_count),
-        spilled_registers_(NULL),
-        spilled_double_registers_(NULL),
-        outer_(outer) {
-  }
-
-  Handle<JSFunction> closure() const { return closure_; }
-  int arguments_stack_height() const { return arguments_stack_height_; }
-  int deoptimization_index() const { return deoptimization_index_; }
-  int translation_index() const { return translation_index_; }
-  int ast_id() const { return ast_id_; }
-  int parameter_count() const { return parameter_count_; }
-  const ZoneList<LOperand*>* values() const { return &values_; }
-  LEnvironment* outer() const { return outer_; }
-
-  void AddValue(LOperand* operand, Representation representation) {
-    values_.Add(operand);
-    representations_.Add(representation);
-  }
-
-  bool HasTaggedValueAt(int index) const {
-    return representations_[index].IsTagged();
-  }
-
-  void Register(int deoptimization_index, int translation_index) {
-    ASSERT(!HasBeenRegistered());
-    deoptimization_index_ = deoptimization_index;
-    translation_index_ = translation_index;
-  }
-  bool HasBeenRegistered() const {
-    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
-  }
-
-  void SetSpilledRegisters(LOperand** registers,
-                           LOperand** double_registers) {
-    spilled_registers_ = registers;
-    spilled_double_registers_ = double_registers;
-  }
-
-  // Emit frame translation commands for this environment.
-  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
-
-  void PrintTo(StringStream* stream) const;
-
- private:
-  Handle<JSFunction> closure_;
-  int arguments_stack_height_;
-  int deoptimization_index_;
-  int translation_index_;
-  int ast_id_;
-  int parameter_count_;
-  ZoneList<LOperand*> values_;
-  ZoneList<Representation> representations_;
-
-  // Allocation index indexed arrays of spill slot operands for registers
-  // that are also in spill slots at an OSR entry. NULL for environments
-  // that do not correspond to an OSR entry.
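Note the registration invariant this class maintains (the spilled-register members follow directly below): deoptimization_index_ starts at the kNoDeoptimizationIndex sentinel, Register() moves it off the sentinel at most once, and HasBeenRegistered() tests the sentinel. A hypothetical caller, with assumed names; the codegen file later in this diff does the equivalent inside RegisterEnvironmentForDeoptimization:

    // Hypothetical helper (names assumed): assign table indices exactly once.
    void EnsureRegistered(LEnvironment* env,
                          int deopt_index,
                          int translation_index) {
      if (!env->HasBeenRegistered()) {
        env->Register(deopt_index, translation_index);
      }
    }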
- LOperand** spilled_registers_; - LOperand** spilled_double_registers_; - - LEnvironment* outer_; -}; - -class LChunkBuilder; -class LChunk: public ZoneObject { - public: - explicit LChunk(HGraph* graph); - - int AddInstruction(LInstruction* instruction, HBasicBlock* block); - LConstantOperand* DefineConstantOperand(HConstant* constant); - Handle LookupLiteral(LConstantOperand* operand) const; - Representation LookupLiteralRepresentation(LConstantOperand* operand) const; - - int GetNextSpillIndex(bool is_double); - LOperand* GetNextSpillSlot(bool is_double); - - int ParameterAt(int index); - int GetParameterStackSlot(int index) const; - int spill_slot_count() const { return spill_slot_count_; } - HGraph* graph() const { return graph_; } - const ZoneList* instructions() const { return &instructions_; } - void AddGapMove(int index, LOperand* from, LOperand* to); - LGap* GetGapAt(int index) const; - bool IsGapAt(int index) const; - int NearestGapPos(int index) const; - int NearestNextGapPos(int index) const; - void MarkEmptyBlocks(); - const ZoneList* pointer_maps() const { return &pointer_maps_; } - LLabel* GetLabel(int block_id) const { - HBasicBlock* block = graph_->blocks()->at(block_id); - int first_instruction = block->first_instruction_index(); - return LLabel::cast(instructions_[first_instruction]); - } - int LookupDestination(int block_id) const { - LLabel* cur = GetLabel(block_id); - while (cur->replacement() != NULL) { - cur = cur->replacement(); - } - return cur->block_id(); - } - Label* GetAssemblyLabel(int block_id) const { - LLabel* label = GetLabel(block_id); - ASSERT(!label->HasReplacement()); - return label->label(); - } - - const ZoneList >* inlined_closures() const { - return &inlined_closures_; - } - - void AddInlinedClosure(Handle closure) { - inlined_closures_.Add(closure); - } - - void Verify() const; - - private: - int spill_slot_count_; - HGraph* const graph_; - ZoneList instructions_; - ZoneList pointer_maps_; - ZoneList > inlined_closures_; -}; - - -class LChunkBuilder BASE_EMBEDDED { - public: - LChunkBuilder(HGraph* graph, LAllocator* allocator) - : chunk_(NULL), - graph_(graph), - status_(UNUSED), - current_instruction_(NULL), - current_block_(NULL), - next_block_(NULL), - argument_count_(0), - allocator_(allocator), - position_(RelocInfo::kNoPosition), - instructions_pending_deoptimization_environment_(NULL), - pending_deoptimization_ast_id_(AstNode::kNoNumber) { } - - // Build the sequence for the graph. - LChunk* Build(); - - // Declare methods that deal with the individual node types. -#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); - HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) -#undef DECLARE_DO - - private: - enum Status { - UNUSED, - BUILDING, - DONE, - ABORTED - }; - - LChunk* chunk() const { return chunk_; } - HGraph* graph() const { return graph_; } - - bool is_unused() const { return status_ == UNUSED; } - bool is_building() const { return status_ == BUILDING; } - bool is_done() const { return status_ == DONE; } - bool is_aborted() const { return status_ == ABORTED; } - - void Abort(const char* format, ...); - - // Methods for getting operands for Use / Define / Temp. - LRegister* ToOperand(Register reg); - LUnallocated* ToUnallocated(Register reg); - LUnallocated* ToUnallocated(DoubleRegister reg); - - // Methods for setting up define-use relationships. 
- LOperand* Use(HValue* value, LUnallocated* operand); - LOperand* UseFixed(HValue* value, Register fixed_register); - LOperand* UseFixedDouble(HValue* value, DoubleRegister fixed_register); - - // A value that is guaranteed to be allocated to a register. - // Operand created by UseRegister is guaranteed to be live until the end of - // instruction. This means that register allocator will not reuse it's - // register for any other operand inside instruction. - // Operand created by UseRegisterAtStart is guaranteed to be live only at - // instruction start. Register allocator is free to assign the same register - // to some other operand used inside instruction (i.e. temporary or - // output). - LOperand* UseRegister(HValue* value); - LOperand* UseRegisterAtStart(HValue* value); - - // A value in a register that may be trashed. - LOperand* UseTempRegister(HValue* value); - LOperand* Use(HValue* value); - LOperand* UseAtStart(HValue* value); - LOperand* UseOrConstant(HValue* value); - LOperand* UseOrConstantAtStart(HValue* value); - LOperand* UseRegisterOrConstant(HValue* value); - LOperand* UseRegisterOrConstantAtStart(HValue* value); - - // Methods for setting up define-use relationships. - // Return the same instruction that they are passed. - LInstruction* Define(LInstruction* instr, LUnallocated* result); - LInstruction* Define(LInstruction* instr); - LInstruction* DefineAsRegister(LInstruction* instr); - LInstruction* DefineAsSpilled(LInstruction* instr, int index); - LInstruction* DefineSameAsAny(LInstruction* instr); - LInstruction* DefineSameAsFirst(LInstruction* instr); - LInstruction* DefineFixed(LInstruction* instr, Register reg); - LInstruction* DefineFixedDouble(LInstruction* instr, DoubleRegister reg); - LInstruction* AssignEnvironment(LInstruction* instr); - LInstruction* AssignPointerMap(LInstruction* instr); - - enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; - - // By default we assume that instruction sequences generated for calls - // cannot deoptimize eagerly and we do not attach environment to this - // instruction. - LInstruction* MarkAsCall( - LInstruction* instr, - HInstruction* hinstr, - CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - - LInstruction* SetInstructionPendingDeoptimizationEnvironment( - LInstruction* instr, int ast_id); - void ClearInstructionPendingDeoptimizationEnvironment(); - - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); - - // Temporary operand that may be a memory location. - LOperand* Temp(); - // Temporary operand that must be in a register. 
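The liveness contract spelled out above is the heart of this interface: UseRegisterAtStart frees the allocator to reuse an input's register for the result, which DefineSameAsFirst then requests explicitly. A hypothetical integer-add builder written against this interface (a sketch under the stated contract, not the deleted DoAdd; the temp helpers continue directly below):

    LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
      // Inputs are needed only at instruction start, so the result may
      // share the first input's register.
      LOperand* left = UseRegisterAtStart(instr->left());
      LOperand* right = UseOrConstantAtStart(instr->right());
      LInstruction* result = DefineSameAsFirst(new LAddI(left, right));
      // An add that can overflow needs an environment to bail out into
      // (flag name assumed).
      if (instr->CheckFlag(HValue::kCanOverflow)) {
        result = AssignEnvironment(result);
      }
      return result;
    }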
- LUnallocated* TempRegister(); - LOperand* FixedTemp(Register reg); - LOperand* FixedTemp(DoubleRegister reg); - - void VisitInstruction(HInstruction* current); - - void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); - LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); - LInstruction* DoArithmeticD(Token::Value op, - HArithmeticBinaryOperation* instr); - LInstruction* DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr); - - LChunk* chunk_; - HGraph* const graph_; - Status status_; - HInstruction* current_instruction_; - HBasicBlock* current_block_; - HBasicBlock* next_block_; - int argument_count_; - LAllocator* allocator_; - int position_; - LInstruction* instructions_pending_deoptimization_environment_; - int pending_deoptimization_ast_id_; - - DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); -}; - -#undef DECLARE_HYDROGEN_ACCESSOR -#undef DECLARE_INSTRUCTION -#undef DECLARE_CONCRETE_INSTRUCTION - -} } // namespace v8::internal - -#endif // V8_ARM_LITHIUM_ARM_H_ diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc deleted file mode 100644 index db8037a62..000000000 --- a/src/arm/lithium-codegen-arm.cc +++ /dev/null @@ -1,2146 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
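Before the code-generator body begins: the entry point declared in the header above, LChunkBuilder::Build(), is what produces the LChunk that LCodeGen below consumes. A plausible outline given those declarations (a sketch; the deleted lithium-arm.cc implementation may differ in detail):

    LChunk* LChunkBuilder::Build() {
      ASSERT(is_unused());
      chunk_ = new LChunk(graph());
      status_ = BUILDING;
      const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
      for (int i = 0; i < blocks->length(); i++) {
        HBasicBlock* next =
            (i < blocks->length() - 1) ? blocks->at(i + 1) : NULL;
        DoBasicBlock(blocks->at(i), next);  // visits each instruction in order
        if (is_aborted()) return NULL;
      }
      status_ = DONE;
      return chunk_;
    }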
- -#include "arm/lithium-codegen-arm.h" -#include "code-stubs.h" -#include "stub-cache.h" - -namespace v8 { -namespace internal { - - -class SafepointGenerator : public PostCallGenerator { - public: - SafepointGenerator(LCodeGen* codegen, - LPointerMap* pointers, - int deoptimization_index) - : codegen_(codegen), - pointers_(pointers), - deoptimization_index_(deoptimization_index) { } - virtual ~SafepointGenerator() { } - - virtual void Generate() { - codegen_->RecordSafepoint(pointers_, deoptimization_index_); - } - - private: - LCodeGen* codegen_; - LPointerMap* pointers_; - int deoptimization_index_; -}; - - -#define __ masm()-> - -bool LCodeGen::GenerateCode() { - HPhase phase("Code generation", chunk()); - ASSERT(is_unused()); - status_ = GENERATING; - CpuFeatures::Scope scope1(VFP3); - CpuFeatures::Scope scope2(ARMv7); - return GeneratePrologue() && - GenerateBody() && - GenerateDeferredCode() && - GenerateSafepointTable(); -} - - -void LCodeGen::FinishCode(Handle code) { - ASSERT(is_done()); - code->set_stack_slots(StackSlotCount()); - code->set_safepoint_table_start(safepoints_.GetCodeOffset()); - PopulateDeoptimizationData(code); -} - - -void LCodeGen::Abort(const char* format, ...) { - if (FLAG_trace_bailout) { - SmartPointer debug_name = graph()->debug_name()->ToCString(); - PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); - va_list arguments; - va_start(arguments, format); - OS::VPrint(format, arguments); - va_end(arguments); - PrintF("\n"); - } - status_ = ABORTED; -} - - -void LCodeGen::Comment(const char* format, ...) { - if (!FLAG_code_comments) return; - char buffer[4 * KB]; - StringBuilder builder(buffer, ARRAY_SIZE(buffer)); - va_list arguments; - va_start(arguments, format); - builder.AddFormattedList(format, arguments); - va_end(arguments); - - // Copy the string before recording it in the assembler to avoid - // issues when the stack allocated buffer goes out of scope. - size_t length = builder.position(); - Vector copy = Vector::New(length + 1); - memcpy(copy.start(), builder.Finalize(), copy.length()); - masm()->RecordComment(copy.start()); -} - - -bool LCodeGen::GeneratePrologue() { - ASSERT(is_generating()); - -#ifdef DEBUG - if (strlen(FLAG_stop_at) > 0 && - info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { - __ stop("stop_at"); - } -#endif - - // r1: Callee's JS function. - // cp: Callee's context. - // fp: Caller's frame pointer. - // lr: Caller's pc. - - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); - __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. - - // Reserve space for the stack slots needed by the code. - int slots = StackSlotCount(); - if (slots > 0) { - if (FLAG_debug_code) { - __ mov(r0, Operand(slots)); - __ mov(r2, Operand(kSlotsZapValue)); - Label loop; - __ bind(&loop); - __ push(r2); - __ sub(r0, r0, Operand(1)); - __ b(ne, &loop); - } else { - __ sub(sp, sp, Operand(slots * kPointerSize)); - } - } - - // Trace the call. 
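The prologue above pins down the ARM frame shape used throughout this file. The stm db_w sp, {r1, cp, fp, lr} pushes the four registers with lr at the highest address, and fp is then set two words above the new sp, so the frame reads (offsets in bytes, derived from the code above):

    fp + 4  : lr  (return address in the caller)
    fp + 0  : caller's fp
    fp - 4  : cp  (context)
    fp - 8  : r1  (the JSFunction being called)
    fp - 12 : first of the StackSlotCount() spill slots

This is also why ToMemOperand, further down in this file, addresses stack slot i at fp - (i + 3) * kPointerSize. (The runtime trace call follows directly below.)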
- if (FLAG_trace) { - __ CallRuntime(Runtime::kTraceEnter, 0); - } - return !is_aborted(); -} - - -bool LCodeGen::GenerateBody() { - ASSERT(is_generating()); - bool emit_instructions = true; - for (current_instruction_ = 0; - !is_aborted() && current_instruction_ < instructions_->length(); - current_instruction_++) { - LInstruction* instr = instructions_->at(current_instruction_); - if (instr->IsLabel()) { - LLabel* label = LLabel::cast(instr); - emit_instructions = !label->HasReplacement(); - } - - if (emit_instructions) { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); - instr->CompileToNative(this); - } - } - return !is_aborted(); -} - - -LInstruction* LCodeGen::GetNextInstruction() { - if (current_instruction_ < instructions_->length() - 1) { - return instructions_->at(current_instruction_ + 1); - } else { - return NULL; - } -} - - -bool LCodeGen::GenerateDeferredCode() { - ASSERT(is_generating()); - for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { - LDeferredCode* code = deferred_[i]; - __ bind(code->entry()); - code->Generate(); - __ jmp(code->exit()); - } - - // Deferred code is the last part of the instruction sequence. Mark - // the generated code as done unless we bailed out. - if (!is_aborted()) status_ = DONE; - return !is_aborted(); -} - - -bool LCodeGen::GenerateSafepointTable() { - ASSERT(is_done()); - safepoints_.Emit(masm(), StackSlotCount()); - return !is_aborted(); -} - - -Register LCodeGen::ToRegister(int index) const { - return Register::FromAllocationIndex(index); -} - - -DoubleRegister LCodeGen::ToDoubleRegister(int index) const { - return DoubleRegister::FromAllocationIndex(index); -} - - -Register LCodeGen::ToRegister(LOperand* op) const { - ASSERT(op->IsRegister()); - return ToRegister(op->index()); -} - - -Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { - if (op->IsRegister()) { - return ToRegister(op->index()); - } else if (op->IsConstantOperand()) { - __ mov(scratch, ToOperand(op)); - return scratch; - } else if (op->IsStackSlot() || op->IsArgument()) { - __ ldr(scratch, ToMemOperand(op)); - return scratch; - } - UNREACHABLE(); - return scratch; -} - - -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - ASSERT(op->IsDoubleRegister()); - return ToDoubleRegister(op->index()); -} - - -DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch) { - if (op->IsDoubleRegister()) { - return ToDoubleRegister(op->index()); - } else if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - Handle literal = chunk_->LookupLiteral(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); - __ mov(ip, Operand(static_cast(literal->Number()))); - __ vmov(flt_scratch, ip); - __ vcvt_f64_s32(dbl_scratch, flt_scratch); - return dbl_scratch; - } else if (r.IsDouble()) { - Abort("unsupported double immediate"); - } else if (r.IsTagged()) { - Abort("unsupported tagged immediate"); - } - } else if (op->IsStackSlot() || op->IsArgument()) { - // TODO(regis): Why is vldr not taking a MemOperand? 
- // __ vldr(dbl_scratch, ToMemOperand(op)); - MemOperand mem_op = ToMemOperand(op); - __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); - return dbl_scratch; - } - UNREACHABLE(); - return dbl_scratch; -} - - -int LCodeGen::ToInteger32(LConstantOperand* op) const { - Handle value = chunk_->LookupLiteral(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); - ASSERT(static_cast(static_cast(value->Number())) == - value->Number()); - return static_cast(value->Number()); -} - - -Operand LCodeGen::ToOperand(LOperand* op) { - if (op->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(op); - Handle literal = chunk_->LookupLiteral(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); - return Operand(static_cast(literal->Number())); - } else if (r.IsDouble()) { - Abort("ToOperand Unsupported double immediate."); - } - ASSERT(r.IsTagged()); - return Operand(literal); - } else if (op->IsRegister()) { - return Operand(ToRegister(op)); - } else if (op->IsDoubleRegister()) { - Abort("ToOperand IsDoubleRegister unimplemented"); - return Operand(0); - } - // Stack slots not implemented, use ToMemOperand instead. - UNREACHABLE(); - return Operand(0); -} - - -MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - // TODO(regis): Revisit. - ASSERT(!op->IsRegister()); - ASSERT(!op->IsDoubleRegister()); - ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); - int index = op->index(); - if (index >= 0) { - // Local or spill slot. Skip the frame pointer, function, and - // context in the fixed part of the frame. - return MemOperand(fp, -(index + 3) * kPointerSize); - } else { - // Incoming parameter. Skip the return address. - return MemOperand(fp, -(index - 1) * kPointerSize); - } -} - - -void LCodeGen::AddToTranslation(Translation* translation, - LOperand* op, - bool is_tagged) { - if (op == NULL) { - // TODO(twuerthinger): Introduce marker operands to indicate that this value - // is not present and must be reconstructed from the deoptimizer. Currently - // this is only used for the arguments object. 
- translation->StoreArgumentsObject(); - } else if (op->IsStackSlot()) { - if (is_tagged) { - translation->StoreStackSlot(op->index()); - } else { - translation->StoreInt32StackSlot(op->index()); - } - } else if (op->IsDoubleStackSlot()) { - translation->StoreDoubleStackSlot(op->index()); - } else if (op->IsArgument()) { - ASSERT(is_tagged); - int src_index = StackSlotCount() + op->index(); - translation->StoreStackSlot(src_index); - } else if (op->IsRegister()) { - Register reg = ToRegister(op); - if (is_tagged) { - translation->StoreRegister(reg); - } else { - translation->StoreInt32Register(reg); - } - } else if (op->IsDoubleRegister()) { - DoubleRegister reg = ToDoubleRegister(op); - translation->StoreDoubleRegister(reg); - } else if (op->IsConstantOperand()) { - Handle literal = chunk()->LookupLiteral(LConstantOperand::cast(op)); - int src_index = DefineDeoptimizationLiteral(literal); - translation->StoreLiteral(src_index); - } else { - UNREACHABLE(); - } -} - - -void LCodeGen::CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr) { - if (instr != NULL) { - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); - __ Call(code, mode); - RegisterLazyDeoptimization(instr); - } else { - LPointerMap no_pointers(0); - RecordPosition(no_pointers.position()); - __ Call(code, mode); - RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex); - } -} - - -void LCodeGen::CallRuntime(Runtime::Function* function, - int num_arguments, - LInstruction* instr) { - ASSERT(instr != NULL); - LPointerMap* pointers = instr->pointer_map(); - ASSERT(pointers != NULL); - RecordPosition(pointers->position()); - - __ CallRuntime(function, num_arguments); - // Runtime calls to Throw are not supposed to ever return at the - // call site, so don't register lazy deoptimization for these. We do - // however have to record a safepoint since throwing exceptions can - // cause garbage collections. - if (!instr->IsThrow()) { - RegisterLazyDeoptimization(instr); - } else { - RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex); - } -} - - -void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) { - // Create the environment to bailout to. If the call has side effects - // execution has to continue after the call otherwise execution can continue - // from a previous bailout point repeating the call. - LEnvironment* deoptimization_environment; - if (instr->HasDeoptimizationEnvironment()) { - deoptimization_environment = instr->deoptimization_environment(); - } else { - deoptimization_environment = instr->environment(); - } - - RegisterEnvironmentForDeoptimization(deoptimization_environment); - RecordSafepoint(instr->pointer_map(), - deoptimization_environment->deoptimization_index()); -} - - -void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) { - if (!environment->HasBeenRegistered()) { - // Physical stack frame layout: - // -x ............. -4 0 ..................................... y - // [incoming arguments] [spill slots] [pushed outgoing arguments] - - // Layout of the environment: - // 0 ..................................................... size-1 - // [parameters] [locals] [expression stack including arguments] - - // Layout of the translation: - // 0 ........................................................ 
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
-  if (!environment->HasBeenRegistered()) {
-    // Physical stack frame layout:
-    //   -x ............. -4  0 ..................................... y
-    //     [incoming arguments] [spill slots] [pushed outgoing arguments]
-
-    // Layout of the environment:
-    //   0 ..................................................... size-1
-    //     [parameters] [locals] [expression stack including arguments]
-
-    // Layout of the translation:
-    //   0 ........................................................ size - 1 + 4
-    //     [expression stack including arguments] [locals] [4 words] [parameters]
-    //     |>------------  translation_size ------------<|
-
-    int frame_count = 0;
-    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
-      ++frame_count;
-    }
-    Translation translation(&translations_, frame_count);
-    environment->WriteTranslation(this, &translation);
-    int deoptimization_index = deoptimizations_.length();
-    environment->Register(deoptimization_index, translation.index());
-    deoptimizations_.Add(environment);
-  }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
-  RegisterEnvironmentForDeoptimization(environment);
-  ASSERT(environment->HasBeenRegistered());
-  int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
-  ASSERT(entry != NULL);
-  if (entry == NULL) {
-    Abort("bailout was not prepared");
-    return;
-  }
-
-  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.
-
-  if (FLAG_deopt_every_n_times == 1 &&
-      info_->shared_info()->opt_count() == id) {
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-    return;
-  }
-
-  if (cc == no_condition) {
-    if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-  } else {
-    if (FLAG_trap_on_deopt) {
-      Label done;
-      __ b(&done, NegateCondition(cc));
-      __ stop("trap_on_deopt");
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-      __ bind(&done);
-    } else {
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
-    }
-  }
-}
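For context, a hypothetical model (not V8 code; all constants invented) of the lookup DeoptimizeIf above relies on: when every eager deopt entry is a fixed-size stub in one contiguous table, turning a deoptimization index into a jump target is pure arithmetic.

#include <cassert>
#include <cstdint>

typedef uintptr_t Address;

static const Address kTableBase = 0x10000;  // Invented table start address.
static const int kEntrySize = 12;           // Invented bytes per entry stub.

// Maps a deoptimization index to the address of its entry stub.
Address GetDeoptimizationEntry(int id) {
  assert(id >= 0);
  return kTableBase + static_cast<Address>(id) * kEntrySize;
}

int main() {
  assert(GetDeoptimizationEntry(0) == kTableBase);
  assert(GetDeoptimizationEntry(3) == kTableBase + 3 * kEntrySize);
  return 0;
}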
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
-  int length = deoptimizations_.length();
-  if (length == 0) return;
-  ASSERT(FLAG_deopt);
-  Handle<DeoptimizationInputData> data =
-      Factory::NewDeoptimizationInputData(length, TENURED);
-
-  data->SetTranslationByteArray(*translations_.CreateByteArray());
-  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
-  Handle<FixedArray> literals =
-      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
-  for (int i = 0; i < deoptimization_literals_.length(); i++) {
-    literals->set(i, *deoptimization_literals_[i]);
-  }
-  data->SetLiteralArray(*literals);
-
-  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
-  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
-  // Populate the deoptimization entries.
-  for (int i = 0; i < length; i++) {
-    LEnvironment* env = deoptimizations_[i];
-    data->SetAstId(i, Smi::FromInt(env->ast_id()));
-    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
-    data->SetArgumentsStackHeight(i,
-                                  Smi::FromInt(env->arguments_stack_height()));
-  }
-  code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
-  int result = deoptimization_literals_.length();
-  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
-    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
-  }
-  deoptimization_literals_.Add(literal);
-  return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
-
-  const ZoneList<Handle<JSFunction> >* inlined_closures =
-      chunk()->inlined_closures();
-
-  for (int i = 0, length = inlined_closures->length();
-       i < length;
-       i++) {
-    DefineDeoptimizationLiteral(inlined_closures->at(i));
-  }
-
-  inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-                                                    deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    }
-  }
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
-                                            int arguments,
-                                            int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepointWithRegisters(
-          masm(), arguments, deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister()) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  // Register cp always contains a pointer to the context.
-  safepoint.DefinePointerRegister(cp);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
-  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
-  masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
-  if (label->is_loop_header()) {
-    Comment(";;; B%d - LOOP entry", label->block_id());
-  } else {
-    Comment(";;; B%d", label->block_id());
-  }
-  __ bind(label->label());
-  current_block_ = label->block_id();
-  LCodeGen::DoGap(label);
-}
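The gap resolver driving DoParallelMove below has to sequence moves that conceptually happen at once. A standalone sketch (not part of the patch, and much simpler than LGapResolver) of the underlying algorithm, with locations modeled as indices into an array and the last slot reserved as scratch:

#include <cassert>
#include <vector>

struct Move { int from, to; };

// Sequence 'moves' so every destination ends up with its source's original
// value; cycles are broken by parking one value in the scratch slot.
void ResolveParallelMoves(std::vector<Move> moves, std::vector<int>* regs) {
  const int kScratch = static_cast<int>(regs->size()) - 1;  // Reserved slot.
  while (!moves.empty()) {
    bool progress = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      bool blocked = false;  // Is moves[i].to still needed as a source?
      for (size_t j = 0; j < moves.size(); ++j) {
        if (j != i && moves[j].from == moves[i].to) blocked = true;
      }
      if (!blocked) {
        (*regs)[moves[i].to] = (*regs)[moves[i].from];
        moves.erase(moves.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Every remaining move is blocked: a cycle. Park one source in the
      // scratch slot and redirect its readers there.
      (*regs)[kScratch] = (*regs)[moves[0].from];
      for (size_t j = 1; j < moves.size(); ++j) {
        if (moves[j].from == moves[0].from) moves[j].from = kScratch;
      }
      moves[0].from = kScratch;
    }
  }
}

int main() {
  // regs[3] is the scratch slot; swap r0 and r1 (a two-element cycle).
  std::vector<int> regs = {10, 20, 30, 0};
  std::vector<Move> moves = {{0, 1}, {1, 0}};
  ResolveParallelMoves(moves, &regs);
  assert(regs[0] == 20 && regs[1] == 10);
  return 0;
}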
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
-  // d0 must always be a scratch register.
-  DoubleRegister dbl_scratch = d0;
-  LUnallocated marker_operand(LUnallocated::NONE);
-
-  Register core_scratch = r9;
-  bool destroys_core_scratch = false;
-
-  LGapResolver resolver(move->move_operands(), &marker_operand);
-  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
-  for (int i = moves->length() - 1; i >= 0; --i) {
-    LMoveOperands move = moves->at(i);
-    LOperand* from = move.from();
-    LOperand* to = move.to();
-    ASSERT(!from->IsDoubleRegister() ||
-           !ToDoubleRegister(from).is(dbl_scratch));
-    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
-    ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
-    ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
-    if (from == &marker_operand) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), core_scratch);
-        ASSERT(destroys_core_scratch);
-      } else if (to->IsStackSlot()) {
-        __ str(core_scratch, ToMemOperand(to));
-        ASSERT(destroys_core_scratch);
-      } else if (to->IsDoubleRegister()) {
-        __ vmov(ToDoubleRegister(to), dbl_scratch);
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        // TODO(regis): Why is vstr not taking a MemOperand?
-        // __ vstr(dbl_scratch, ToMemOperand(to));
-        MemOperand to_operand = ToMemOperand(to);
-        __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
-      }
-    } else if (to == &marker_operand) {
-      if (from->IsRegister() || from->IsConstantOperand()) {
-        __ mov(core_scratch, ToOperand(from));
-        destroys_core_scratch = true;
-      } else if (from->IsStackSlot()) {
-        __ ldr(core_scratch, ToMemOperand(from));
-        destroys_core_scratch = true;
-      } else if (from->IsDoubleRegister()) {
-        __ vmov(dbl_scratch, ToDoubleRegister(from));
-      } else {
-        ASSERT(from->IsDoubleStackSlot());
-        // TODO(regis): Why is vldr not taking a MemOperand?
-        // __ vldr(dbl_scratch, ToMemOperand(from));
-        MemOperand from_operand = ToMemOperand(from);
-        __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
-      }
-    } else if (from->IsConstantOperand()) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), ToOperand(from));
-      } else {
-        ASSERT(to->IsStackSlot());
-        __ mov(ip, ToOperand(from));
-        __ str(ip, ToMemOperand(to));
-      }
-    } else if (from->IsRegister()) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), ToOperand(from));
-      } else {
-        ASSERT(to->IsStackSlot());
-        __ str(ToRegister(from), ToMemOperand(to));
-      }
-    } else if (to->IsRegister()) {
-      ASSERT(from->IsStackSlot());
-      __ ldr(ToRegister(to), ToMemOperand(from));
-    } else if (from->IsStackSlot()) {
-      ASSERT(to->IsStackSlot());
-      __ ldr(ip, ToMemOperand(from));
-      __ str(ip, ToMemOperand(to));
-    } else if (from->IsDoubleRegister()) {
-      if (to->IsDoubleRegister()) {
-        __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        // TODO(regis): Why is vstr not taking a MemOperand?
-        // __ vstr(dbl_scratch, ToMemOperand(to));
-        MemOperand to_operand = ToMemOperand(to);
-        __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
-      }
-    } else if (to->IsDoubleRegister()) {
-      ASSERT(from->IsDoubleStackSlot());
-      // TODO(regis): Why is vldr not taking a MemOperand?
-      // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
-      MemOperand from_operand = ToMemOperand(from);
-      __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
-    } else {
-      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
-      // TODO(regis): Why is vldr not taking a MemOperand?
-      // __ vldr(dbl_scratch, ToMemOperand(from));
-      MemOperand from_operand = ToMemOperand(from);
-      __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
-      // TODO(regis): Why is vstr not taking a MemOperand?
-      // __ vstr(dbl_scratch, ToMemOperand(to));
-      MemOperand to_operand = ToMemOperand(to);
-      __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
-    }
-  }
-
-  if (destroys_core_scratch) {
-    __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
-  }
-
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
-  for (int i = LGap::FIRST_INNER_POSITION;
-       i <= LGap::LAST_INNER_POSITION;
-       i++) {
-    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
-    LParallelMove* move = gap->GetParallelMove(inner_pos);
-    if (move != NULL) DoParallelMove(move);
-  }
-
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
-  // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  Abort("DoCallStub unimplemented.");
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  Abort("DoModI unimplemented.");
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
-  Abort("DoDivI unimplemented.");
-}
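DoMulI below guards multiplications that can overflow by computing the full 64-bit product with smull and checking the high word against the sign-extension of the low word. A standalone sketch (not part of the patch) of that check in portable C++:

#include <cassert>
#include <cstdint>

// Returns true if a*b does not fit in int32; *result receives the low word.
bool MulOverflows(int32_t a, int32_t b, int32_t* result) {
  int64_t product = static_cast<int64_t>(a) * b;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  *result = lo;
  return hi != (lo >> 31);  // Arithmetic shift gives 0 or -1, like "ASR 31".
}

int main() {
  int32_t r;
  assert(!MulOverflows(1 << 15, 1 << 15, &r) && r == (1 << 30));
  assert(MulOverflows(1 << 16, 1 << 16, &r));       // 2^32 does not fit.
  assert(!MulOverflows(-1, INT32_MIN + 1, &r));     // INT32_MAX fits.
  return 0;
}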
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
-  Register left = ToRegister(instr->left());
-  Register scratch = r9;
-  Register right = EmitLoadRegister(instr->right(), scratch);
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
-      !instr->right()->IsConstantOperand()) {
-    __ orr(ToRegister(instr->temp()), left, right);
-  }
-
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    // scratch:left = left * right.
-    __ smull(scratch, left, left, right);
-    __ mov(ip, Operand(left, ASR, 31));
-    __ cmp(ip, Operand(scratch));
-    DeoptimizeIf(ne, instr->environment());
-  } else {
-    __ mul(left, left, right);
-  }
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    // Bail out if the result is supposed to be negative zero.
-    Label done;
-    __ tst(left, Operand(left));
-    __ b(ne, &done);
-    if (instr->right()->IsConstantOperand()) {
-      if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) {
-        DeoptimizeIf(no_condition, instr->environment());
-      }
-    } else {
-      // Test the non-zero operand for negative sign.
-      __ cmp(ToRegister(instr->temp()), Operand(0));
-      DeoptimizeIf(mi, instr->environment());
-    }
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
-  Register result = ToRegister(left);
-  Register right_reg = EmitLoadRegister(right, ip);
-  switch (instr->op()) {
-    case Token::BIT_AND:
-      __ and_(result, ToRegister(left), Operand(right_reg));
-      break;
-    case Token::BIT_OR:
-      __ orr(result, ToRegister(left), Operand(right_reg));
-      break;
-    case Token::BIT_XOR:
-      __ eor(result, ToRegister(left), Operand(right_reg));
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
-  Register result = ToRegister(left);
-  if (right->IsRegister()) {
-    // Mask the right operand.
-    __ and_(r9, ToRegister(right), Operand(0x1F));
-    switch (instr->op()) {
-      case Token::SAR:
-        __ mov(result, Operand(result, ASR, r9));
-        break;
-      case Token::SHR:
-        if (instr->can_deopt()) {
-          __ mov(result, Operand(result, LSR, r9), SetCC);
-          DeoptimizeIf(mi, instr->environment());
-        } else {
-          __ mov(result, Operand(result, LSR, r9));
-        }
-        break;
-      case Token::SHL:
-        __ mov(result, Operand(result, LSL, r9));
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  } else {
-    int value = ToInteger32(LConstantOperand::cast(right));
-    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
-    switch (instr->op()) {
-      case Token::SAR:
-        if (shift_count != 0) {
-          __ mov(result, Operand(result, ASR, shift_count));
-        }
-        break;
-      case Token::SHR:
-        if (shift_count == 0 && instr->can_deopt()) {
-          __ tst(result, Operand(0x80000000));
-          DeoptimizeIf(ne, instr->environment());
-        } else {
-          __ mov(result, Operand(result, LSR, shift_count));
-        }
-        break;
-      case Token::SHL:
-        if (shift_count != 0) {
-          __ mov(result, Operand(result, LSL, shift_count));
-        }
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
-  Register left = ToRegister(instr->left());
-  Register right = EmitLoadRegister(instr->right(), ip);
-  ASSERT(instr->left()->Equals(instr->result()));
-  __ sub(left, left, right, SetCC);
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(vs, instr->environment());
-  }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
-  Abort("DoConstantD unimplemented.");
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
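A standalone sketch (not part of the patch) of the JavaScript shift semantics DoShiftI above implements: the count is masked to five bits, and an unsigned shift by an effective count of zero is the one case whose result may not fit a signed 32-bit integer, which is what the can_deopt() path catches.

#include <cassert>
#include <cstdint>

int32_t ShiftRightLogical(int32_t value, int32_t count, bool* fits_int32) {
  uint32_t shift = static_cast<uint32_t>(count) & 0x1F;  // Mask like the ARM code.
  uint32_t result = static_cast<uint32_t>(value) >> shift;
  // Only a zero shift can leave bit 31 set; such a result would need a
  // tagged heap-number representation instead of an int32.
  *fits_int32 = (result & 0x80000000u) == 0;
  return static_cast<int32_t>(result);
}

int main() {
  bool fits;
  ShiftRightLogical(-1, 1, &fits);   // 0x7FFFFFFF: representable.
  assert(fits);
  ShiftRightLogical(-1, 32, &fits);  // 32 & 0x1F == 0: value unchanged.
  assert(!fits);
  return 0;
}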
-
-
-void LCodeGen::DoArrayLength(LArrayLength* instr) {
-  Register result = ToRegister(instr->result());
-
-  if (instr->hydrogen()->value()->IsLoadElements()) {
-    // We load the length directly from the elements array.
-    Register elements = ToRegister(instr->input());
-    __ ldr(result, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  } else {
-    // Check that the receiver really is an array.
-    Register array = ToRegister(instr->input());
-    Register temporary = ToRegister(instr->temporary());
-    __ CompareObjectType(array, temporary, temporary, JS_ARRAY_TYPE);
-    DeoptimizeIf(ne, instr->environment());
-
-    // Load length directly from the array.
-    __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
-  }
-  Abort("DoArrayLength untested.");
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
-  Abort("DoValueOf unimplemented.");
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
-  LOperand* input = instr->input();
-  ASSERT(input->Equals(instr->result()));
-  __ mvn(ToRegister(input), Operand(ToRegister(input)));
-  Abort("DoBitNotI untested.");
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
-  Register input_reg = EmitLoadRegister(instr->input(), ip);
-  __ push(input_reg);
-  CallRuntime(Runtime::kThrow, 1, instr);
-
-  if (FLAG_debug_code) {
-    __ stop("Unreachable code.");
-  }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-
-  Register right_reg = EmitLoadRegister(right, ip);
-  __ add(ToRegister(left), ToRegister(left), Operand(right_reg), SetCC);
-
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(vs, instr->environment());
-  }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  DoubleRegister left = ToDoubleRegister(instr->left());
-  DoubleRegister right = ToDoubleRegister(instr->right());
-  switch (instr->op()) {
-    case Token::ADD:
-      __ vadd(left, left, right);
-      break;
-    case Token::SUB:
-      __ vsub(left, left, right);
-      break;
-    case Token::MUL:
-      __ vmul(left, left, right);
-      break;
-    case Token::DIV:
-      __ vdiv(left, left, right);
-      break;
-    case Token::MOD: {
-      Abort("DoArithmeticD unimplemented for MOD.");
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->left()).is(r1));
-  ASSERT(ToRegister(instr->right()).is(r0));
-  ASSERT(ToRegister(instr->result()).is(r0));
-
-  // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
-  // GenericBinaryOpStub:
-  // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
-  GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
-  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
-    LLabel* label = chunk_->GetLabel(i);
-    if (!label->HasReplacement()) return i;
-  }
-  return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
-  int next_block = GetNextEmittedBlock(current_block_);
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
-
-  if (right_block == left_block) {
-    EmitGoto(left_block);
-  } else if (left_block == next_block) {
-    __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
-  } else if (right_block == next_block) {
-    __ b(cc, chunk_->GetAssemblyLabel(left_block));
-  } else {
-    __ b(cc, chunk_->GetAssemblyLabel(left_block));
-    __ b(chunk_->GetAssemblyLabel(right_block));
-  }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Representation r = instr->hydrogen()->representation();
-  if (r.IsInteger32()) {
-    Register reg = ToRegister(instr->input());
-    __ cmp(reg, Operand(0));
-    EmitBranch(true_block, false_block, nz);
-  } else if (r.IsDouble()) {
-    DoubleRegister reg = ToDoubleRegister(instr->input());
-    __ vcmp(reg, 0.0);
-    EmitBranch(true_block, false_block, ne);
-  } else {
-    ASSERT(r.IsTagged());
-    Register reg = ToRegister(instr->input());
-    if (instr->hydrogen()->type().IsBoolean()) {
-      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-      __ cmp(reg, ip);
-      EmitBranch(true_block, false_block, eq);
-    } else {
-      Label* true_label = chunk_->GetAssemblyLabel(true_block);
-      Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      __ cmp(reg, ip);
-      __ b(eq, false_label);
-      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-      __ cmp(reg, ip);
-      __ b(eq, true_label);
-      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-      __ cmp(reg, ip);
-      __ b(eq, false_label);
-      __ cmp(reg, Operand(0));
-      __ b(eq, false_label);
-      __ tst(reg, Operand(kSmiTagMask));
-      __ b(eq, true_label);
-
-      // Test for double values. Zero is false.
-      Label call_stub;
-      DoubleRegister dbl_scratch = d0;
-      Register core_scratch = r9;
-      ASSERT(!reg.is(core_scratch));
-      __ ldr(core_scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-      __ cmp(core_scratch, Operand(ip));
-      __ b(ne, &call_stub);
-      __ sub(ip, reg, Operand(kHeapObjectTag));
-      __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
-      __ vcmp(dbl_scratch, 0.0);
-      __ b(eq, false_label);
-      __ b(true_label);
-
-      // The conversion stub doesn't cause garbage collections so it's
-      // safe to not record a safepoint after the call.
-      __ bind(&call_stub);
-      ToBooleanStub stub(reg);
-      RegList saved_regs = kJSCallerSaved | kCalleeSaved;
-      __ stm(db_w, sp, saved_regs);
-      __ CallStub(&stub);
-      __ cmp(reg, Operand(0));
-      __ ldm(ia_w, sp, saved_regs);
-      EmitBranch(true_block, false_block, nz);
-    }
-  }
-}
-
-
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
-  // TODO(srdjan): Perform stack overflow check if this goto needs it
-  // before jumping.
-  block = chunk_->LookupDestination(block);
-  int next_block = GetNextEmittedBlock(current_block_);
-  if (block != next_block) {
-    __ jmp(chunk_->GetAssemblyLabel(block));
-  }
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  UNIMPLEMENTED();
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
-  // TODO(srdjan): Implement deferred stack check.
-  EmitGoto(instr->block_id(), NULL);
-}
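EmitBranch above leans on block layout: when one successor is the next block to be emitted, a branch can be dropped or inverted so each split costs at most one conditional branch plus one jump. A standalone sketch (not part of the patch; the mnemonics are illustrative) of the same case analysis:

#include <cstdio>
#include <string>

std::string EmitBranch(int left, int right, int next, const char* cc,
                       const char* negated_cc) {
  if (left == right) {  // Both edges go to the same block.
    return (left == next) ? "" : "b B" + std::to_string(left) + "\n";
  }
  if (left == next) {   // Fall through to the true block.
    return std::string("b.") + negated_cc + " B" + std::to_string(right) + "\n";
  }
  if (right == next) {  // Fall through to the false block.
    return std::string("b.") + cc + " B" + std::to_string(left) + "\n";
  }
  return std::string("b.") + cc + " B" + std::to_string(left) + "\n" +
         "b B" + std::to_string(right) + "\n";
}

int main() {
  // True block 2 follows immediately: only the inverted branch is needed.
  printf("%s", EmitBranch(2, 5, 2, "eq", "ne").c_str());  // b.ne B5
  // Neither successor follows: conditional branch plus unconditional jump.
  printf("%s", EmitBranch(3, 5, 2, "eq", "ne").c_str());  // b.eq B3 / b B5
  return 0;
}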
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
-  Condition cond = no_condition;
-  switch (op) {
-    case Token::EQ:
-    case Token::EQ_STRICT:
-      cond = eq;
-      break;
-    case Token::LT:
-      cond = is_unsigned ? lo : lt;
-      break;
-    case Token::GT:
-      cond = is_unsigned ? hi : gt;
-      break;
-    case Token::LTE:
-      cond = is_unsigned ? ls : le;
-      break;
-    case Token::GTE:
-      cond = is_unsigned ? hs : ge;
-      break;
-    case Token::IN:
-    case Token::INSTANCEOF:
-    default:
-      UNREACHABLE();
-  }
-  return cond;
-}
-
-
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
-  __ cmp(ToRegister(left), ToOperand(right));
-  Abort("EmitCmpI untested.");
-}
-
-
-void LCodeGen::DoCmpID(LCmpID* instr) {
-  Abort("DoCmpID unimplemented.");
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
-  Abort("DoCmpIDAndBranch unimplemented.");
-}
-
-
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
-  Register left = ToRegister(instr->left());
-  Register right = ToRegister(instr->right());
-  Register result = ToRegister(instr->result());
-
-  __ cmp(left, Operand(right));
-  __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-  Abort("DoCmpJSObjectEq untested.");
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
-  Abort("DoCmpJSObjectEqAndBranch unimplemented.");
-}
-
-
-void LCodeGen::DoIsNull(LIsNull* instr) {
-  Abort("DoIsNull unimplemented.");
-}
-
-
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
-  Register reg = ToRegister(instr->input());
-
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Jump to the false block.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(reg, ip);
-  if (instr->is_strict()) {
-    EmitBranch(true_block, false_block, eq);
-  } else {
-    Label* true_label = chunk_->GetAssemblyLabel(true_block);
-    Label* false_label = chunk_->GetAssemblyLabel(false_block);
-    __ b(eq, true_label);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(reg, ip);
-    __ b(eq, true_label);
-    __ tst(reg, Operand(kSmiTagMask));
-    __ b(eq, false_label);
-    // Check for undetectable objects by looking in the bit field in
-    // the map. The object has already been smi checked.
-    Register scratch = ToRegister(instr->temp());
-    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
-    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
-    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
-    EmitBranch(true_block, false_block, ne);
-  }
-}
-
-
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
-  Register result = ToRegister(instr->result());
-  Register input_reg = EmitLoadRegister(instr->input(), ip);
-  __ tst(input_reg, Operand(kSmiTagMask));
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  Label done;
-  __ b(eq, &done);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Register input_reg = EmitLoadRegister(instr->input(), ip);
-  __ tst(input_reg, Operand(kSmiTagMask));
-  EmitBranch(true_block, false_block, eq);
-}
-
-
-InstanceType LHasInstanceType::TestType() {
-  InstanceType from = hydrogen()->from();
-  InstanceType to = hydrogen()->to();
-  if (from == FIRST_TYPE) return to;
-  ASSERT(from == to || to == LAST_TYPE);
-  return from;
-}
-
-
-Condition LHasInstanceType::BranchCondition() {
-  InstanceType from = hydrogen()->from();
-  InstanceType to = hydrogen()->to();
-  if (from == to) return eq;
-  if (to == LAST_TYPE) return hs;
-  if (from == FIRST_TYPE) return ls;
-  UNREACHABLE();
-  return eq;
-}
-
-
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
-  Abort("DoHasInstanceType unimplemented.");
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
-  Register input = ToRegister(instr->input());
-  Register temp = ToRegister(instr->temp());
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  __ tst(input, Operand(kSmiTagMask));
-  __ b(eq, false_label);
-
-  __ CompareObjectType(input, temp, temp, instr->TestType());
-  EmitBranch(true_block, false_block, instr->BranchCondition());
-}
-
-
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
-  Abort("DoHasCachedArrayIndex unimplemented.");
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Abort("DoHasCachedArrayIndexAndBranch unimplemented.");
-}
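A standalone sketch (not part of the patch) of the trick behind TestType and BranchCondition above: a supported [from, to] instance-type interval is checked with a single comparison by picking the right boundary and condition. The enum values here are invented; only the interval logic matters.

#include <cassert>

enum InstanceType { FIRST_TYPE = 0, STRING_TYPE = 5, LAST_TYPE = 20 };
enum Condition { kEqual, kUnsignedGreaterEqual, kUnsignedLessEqual };

// Which boundary to compare the object's instance type against.
InstanceType TestType(InstanceType from, InstanceType to) {
  if (from == FIRST_TYPE) return to;    // [FIRST, to]: compare against to.
  assert(from == to || to == LAST_TYPE);
  return from;                          // [from, from] or [from, LAST].
}

// Which condition makes that single comparison mean "in the interval".
Condition BranchCondition(InstanceType from, InstanceType to) {
  if (from == to) return kEqual;                      // type == from
  if (to == LAST_TYPE) return kUnsignedGreaterEqual;  // type >= from
  assert(from == FIRST_TYPE);
  return kUnsignedLessEqual;                          // type <= to
}

int main() {
  assert(BranchCondition(STRING_TYPE, STRING_TYPE) == kEqual);
  assert(BranchCondition(STRING_TYPE, LAST_TYPE) == kUnsignedGreaterEqual);
  assert(BranchCondition(FIRST_TYPE, STRING_TYPE) == kUnsignedLessEqual);
  return 0;
}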
-
-
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
-void LCodeGen::EmitClassOfTest(Label* is_true,
-                               Label* is_false,
-                               Handle<String> class_name,
-                               Register input,
-                               Register temp,
-                               Register temp2) {
-  Abort("EmitClassOfTest unimplemented.");
-}
-
-
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
-  Abort("DoClassOfTest unimplemented.");
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
-  Abort("DoClassOfTestAndBranch unimplemented.");
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
-  Abort("DoCmpMapAndBranch unimplemented.");
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  Abort("DoInstanceOf unimplemented.");
-}
-
-
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
-  Abort("DoInstanceOfAndBranch unimplemented.");
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
-  Token::Value op = instr->op();
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  Condition condition = ComputeCompareCondition(op);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
-  __ cmp(r0, Operand(0));
-  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex,
-              condition);
-  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex,
-              NegateCondition(condition));
-}
-
-
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
-  Abort("DoCmpTAndBranch unimplemented.");
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
-    // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns its parameter in r0.
-    __ push(r0);
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
-  int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  __ add(sp, sp, Operand(sp_delta));
-  __ Jump(lr);
-}
-
-
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
-  Register result = ToRegister(instr->result());
-  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
-  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-  if (instr->hydrogen()->check_hole_value()) {
-    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(result, ip);
-    DeoptimizeIf(eq, instr->environment());
-  }
-}
-
-
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
-  Register value = ToRegister(instr->input());
-  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
-  __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
-  Abort("DoLoadNamedField unimplemented.");
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(r0));
-  ASSERT(ToRegister(instr->result()).is(r0));
-
-  // Name is always in r2.
-  __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
-  Abort("DoLoadElements unimplemented.");
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
-  Abort("DoAccessArgumentsAt unimplemented.");
-}
-
-
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
-  Abort("DoLoadKeyedFastElement unimplemented.");
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(r1));
-  ASSERT(ToRegister(instr->key()).is(r0));
-
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
-  Abort("DoArgumentsElements unimplemented.");
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
-  Abort("DoArgumentsLength unimplemented.");
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
-  Abort("DoApplyArguments unimplemented.");
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
-  LOperand* argument = instr->input();
-  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
-    Abort("DoPushArgument not implemented for double type.");
-  } else {
-    Register argument_reg = EmitLoadRegister(argument, ip);
-    __ push(argument_reg);
-  }
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
-  Register result = ToRegister(instr->result());
-  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
-  Register result = ToRegister(instr->result());
-  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
-                                 int arity,
-                                 LInstruction* instr) {
-  // Change context if needed.
-  bool change_context =
-      (graph()->info()->closure()->context() != function->context()) ||
-      scope()->contains_with() ||
-      (scope()->num_heap_slots() > 0);
-  if (change_context) {
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-  }
-
-  // Set r0 to the arguments count if adaptation is not needed. Assumes that
-  // r0 is available to write to at this point.
-  if (!function->NeedsArgumentsAdaption()) {
-    __ mov(r0, Operand(arity));
-  }
-
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-
-  // Invoke function.
-  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  __ Call(ip);
-
-  // Set up deoptimization.
-  RegisterLazyDeoptimization(instr);
-
-  // Restore context.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
-  Abort("DoCallConstantFunction unimplemented.");
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
-  Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented.");
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
-  Abort("LUnaryMathOperation unimplemented.");
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  Abort("DoMathFloor unimplemented.");
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  Abort("DoMathSqrt unimplemented.");
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
-  ASSERT(instr->op() == kMathFloor ||
-         instr->op() == kMathAbs);
-
-  switch (instr->op()) {
-    case kMathAbs:
-      DoMathAbs(instr);
-      break;
-    case kMathFloor:
-      DoMathFloor(instr);
-      break;
-    case kMathSqrt:
-      DoMathSqrt(instr);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
-  Abort("DoCallKeyed unimplemented.");
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
-  ASSERT(ToRegister(instr->result()).is(r0));
-
-  int arity = instr->arity();
-  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
-  __ mov(r2, Operand(instr->name()));
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  Abort("DoCallFunction unimplemented.");
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
-  Abort("DoCallGlobal unimplemented.");
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
-  ASSERT(ToRegister(instr->result()).is(r0));
-  __ mov(r1, Operand(instr->target()));
-  CallKnownFunction(instr->target(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->input()).is(r1));
-  ASSERT(ToRegister(instr->result()).is(r0));
-
-  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
-  __ mov(r0, Operand(instr->arity()));
-  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
-  Abort("DoStoreNamedField unimplemented.");
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(r1));
-  ASSERT(ToRegister(instr->value()).is(r0));
-
-  // Name is always in r2.
-  __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  Abort("DoBoundsCheck unimplemented.");
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
-  Abort("DoStoreKeyedFastElement unimplemented.");
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(r2));
-  ASSERT(ToRegister(instr->key()).is(r1));
-  ASSERT(ToRegister(instr->value()).is(r0));
-
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  Abort("DoInteger32ToDouble unimplemented.");
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI: public LDeferredCode {
-   public:
-    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
-   private:
-    LNumberTagI* instr_;
-  };
-
-  LOperand* input = instr->input();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
-  Register reg = ToRegister(input);
-
-  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
-  __ SmiTag(reg, SetCC);
-  __ b(vs, deferred->entry());
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
-  Label slow;
-  Register reg = ToRegister(instr->input());
-  DoubleRegister dbl_scratch = d0;
-  SwVfpRegister flt_scratch = s0;
-
-  // Preserve the value of all registers.
-  __ PushSafepointRegisters();
-
-  // There was overflow, so bits 30 and 31 of the original integer
-  // disagree. Try to allocate a heap number in new space and store
-  // the value in there. If that fails, call the runtime system.
-  Label done;
-  __ SmiUntag(reg);
-  __ eor(reg, reg, Operand(0x80000000));
-  __ vmov(flt_scratch, reg);
-  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
-  if (FLAG_inline_new) {
-    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
-    if (!reg.is(r5)) __ mov(reg, r5);
-    __ b(&done);
-  }
-
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
-
-  // TODO(3095996): Put a valid pointer value in the stack slot where the
-  // result register is stored, as this register is in the pointer map, but
-  // contains an integer value.
-  __ mov(ip, Operand(0));
-  int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
-  __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize));
-
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  if (!reg.is(r0)) __ mov(reg, r0);
-
-  // Done. Put the value in dbl_scratch into the value of the allocated heap
-  // number.
-  __ bind(&done);
-  __ sub(ip, reg, Operand(kHeapObjectTag));
-  __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
-  __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize));
-  __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD: public LDeferredCode {
-   public:
-    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
-   private:
-    LNumberTagD* instr_;
-  };
-
-  DoubleRegister input_reg = ToDoubleRegister(instr->input());
-  Register reg = ToRegister(instr->result());
-  Register tmp = ToRegister(instr->temp());
-  Register scratch = r9;
-
-  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
-  if (FLAG_inline_new) {
-    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(reg, tmp, ip, scratch, deferred->entry());
-  } else {
-    __ jmp(deferred->entry());
-  }
-  __ bind(deferred->exit());
-  __ sub(ip, reg, Operand(kHeapObjectTag));
-  __ vstr(input_reg, ip, HeapNumber::kValueOffset);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  Register reg = ToRegister(instr->result());
-  __ mov(reg, Operand(0));
-
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
-  __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize));
-  __ PopSafepointRegisters();
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
-  LOperand* input = instr->input();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
-  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
-  __ SmiTag(ToRegister(input));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
-  Abort("DoSmiUntag unimplemented.");
-}
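A standalone sketch (not part of the patch) of the smi arithmetic behind DoSmiTag and DoDeferredNumberTagI above: tagging is a left shift by one, overflow means bits 30 and 31 of the original value disagree, and the deferred path recovers the original value by untagging and flipping the sign bit back, which is what the eor with 0x80000000 above does.

#include <cassert>
#include <cstdint>

bool SmiTagOverflows(int32_t value) {
  // Bits 30 and 31 disagree iff the value has no 31-bit representation.
  return ((value >> 30) & 1) != ((value >> 31) & 1);
}

int32_t RecoverFromOverflowedTag(int32_t tagged) {
  int32_t untagged = tagged >> 1;  // SmiUntag: arithmetic shift right by 1.
  return untagged ^ static_cast<int32_t>(0x80000000);  // eor 0x80000000.
}

int main() {
  int32_t value = 1 << 30;  // Needs more than 31 bits: tagging overflows.
  assert(SmiTagOverflows(value));
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  assert(RecoverFromOverflowedTag(tagged) == value);
  assert(!SmiTagOverflows(42));  // Small values tag losslessly.
  return 0;
}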
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                DoubleRegister result_reg,
-                                LEnvironment* env) {
-  Register core_scratch = r9;
-  ASSERT(!input_reg.is(core_scratch));
-  SwVfpRegister flt_scratch = s0;
-  ASSERT(!result_reg.is(d0));
-
-  Label load_smi, heap_number, done;
-
-  // Smi check.
-  __ tst(input_reg, Operand(kSmiTagMask));
-  __ b(eq, &load_smi);
-
-  // Heap number map check.
-  __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(core_scratch, Operand(ip));
-  __ b(eq, &heap_number);
-
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(input_reg, Operand(ip));
-  DeoptimizeIf(ne, env);
-
-  // Convert undefined to NaN.
-  __ LoadRoot(ip, Heap::kNanValueRootIndex);
-  __ sub(ip, ip, Operand(kHeapObjectTag));
-  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
-  __ jmp(&done);
-
-  // Heap number to double register conversion.
-  __ bind(&heap_number);
-  __ sub(ip, input_reg, Operand(kHeapObjectTag));
-  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
-  __ jmp(&done);
-
-  // Smi to double register conversion.
-  __ bind(&load_smi);
-  __ SmiUntag(input_reg);  // Untag smi before converting to float.
-  __ vmov(flt_scratch, input_reg);
-  __ vcvt_f64_s32(result_reg, flt_scratch);
-  __ SmiTag(input_reg);  // Retag smi.
-  __ bind(&done);
-}
-
-
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  Label done;
-  Register input_reg = ToRegister(instr->input());
-  Register core_scratch = r9;
-  ASSERT(!input_reg.is(core_scratch));
-  DoubleRegister dbl_scratch = d0;
-  SwVfpRegister flt_scratch = s0;
-  DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp());
-
-  // Heap number map check.
-  __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(core_scratch, Operand(ip));
-
-  if (instr->truncating()) {
-    Label heap_number;
-    __ b(eq, &heap_number);
-    // Check for undefined. Undefined is converted to zero for truncating
-    // conversions.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(input_reg, Operand(ip));
-    DeoptimizeIf(ne, instr->environment());
-    __ mov(input_reg, Operand(0));
-    __ b(&done);
-
-    __ bind(&heap_number);
-    __ sub(ip, input_reg, Operand(kHeapObjectTag));
-    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
-    __ vcmp(dbl_tmp, 0.0);  // Sets overflow bit if NaN.
-    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
-    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
-    __ vmrs(pc);  // Move vector status bits to normal status bits.
-    // Overflow bit is set if dbl_tmp is NaN.
-    __ cmn(input_reg, Operand(1), vc);  // 0x7fffffff + 1 -> overflow.
-    __ cmp(input_reg, Operand(1), vc);  // 0x80000000 - 1 -> overflow.
-    DeoptimizeIf(vs, instr->environment());  // Saturation may have occurred.
-
-  } else {
-    // Deoptimize if we don't have a heap number.
-    DeoptimizeIf(ne, instr->environment());
-
-    __ sub(ip, input_reg, Operand(kHeapObjectTag));
-    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
-    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
-    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
-    // Non-truncating conversion means that we cannot lose bits, so we convert
-    // back to check; note that using non-overlapping s and d regs would be
-    // slightly faster.
-    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
-    __ vcmp(dbl_scratch, dbl_tmp);
-    __ vmrs(pc);  // Move vector status bits to normal status bits.
-    DeoptimizeIf(ne, instr->environment());  // Not equal or unordered.
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ tst(input_reg, Operand(input_reg));
-      __ b(ne, &done);
-      __ vmov(lr, ip, dbl_tmp);
-      __ tst(ip, Operand(1 << 31));  // Test sign bit.
-      DeoptimizeIf(ne, instr->environment());
-    }
-  }
-  __ bind(&done);
-}
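The non-truncating arm of DoDeferredTaggedToI above converts the double to int32 and back, and deoptimizes if the round trip is inexact. A standalone sketch (not part of the patch) of that predicate; note that -0.0 survives the round trip, which is why the code above still tests the sign bit separately under kBailoutOnMinusZero.

#include <cassert>
#include <cstdint>
#include <limits>

bool DoubleToInt32Exact(double value, int32_t* result) {
  // The comparison is false for NaN, so NaN is rejected here too.
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;
  *result = static_cast<int32_t>(value);
  return static_cast<double>(*result) == value;  // Round trip must be exact.
}

int main() {
  int32_t r;
  assert(DoubleToInt32Exact(-5.0, &r) && r == -5);
  assert(!DoubleToInt32Exact(0.5, &r));           // Fraction would be lost.
  assert(!DoubleToInt32Exact(4294967296.0, &r));  // Out of int32 range.
  assert(!DoubleToInt32Exact(std::numeric_limits<double>::quiet_NaN(), &r));
  return 0;
}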
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  LOperand* input = instr->input();
-  ASSERT(input->IsRegister());
-  ASSERT(input->Equals(instr->result()));
-
-  Register input_reg = ToRegister(input);
-
-  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
-
-  // Smi check.
-  __ tst(input_reg, Operand(kSmiTagMask));
-  __ b(ne, deferred->entry());
-
-  // Smi to int32 conversion.
-  __ SmiUntag(input_reg);  // Untag smi.
-
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
-  LOperand* input = instr->input();
-  ASSERT(input->IsRegister());
-  LOperand* result = instr->result();
-  ASSERT(result->IsDoubleRegister());
-
-  Register input_reg = ToRegister(input);
-  DoubleRegister result_reg = ToDoubleRegister(result);
-
-  EmitNumberUntagD(input_reg, result_reg, instr->environment());
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
-  Abort("DoDoubleToI unimplemented.");
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
-  LOperand* input = instr->input();
-  ASSERT(input->IsRegister());
-  __ tst(ToRegister(input), Operand(kSmiTagMask));
-  DeoptimizeIf(instr->condition(), instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
-  Abort("DoCheckInstanceType unimplemented.");
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->input()->IsRegister());
-  Register reg = ToRegister(instr->input());
-  __ cmp(reg, Operand(instr->hydrogen()->target()));
-  DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
-  LOperand* input = instr->input();
-  ASSERT(input->IsRegister());
-  Register reg = ToRegister(input);
-  __ ldr(r9, FieldMemOperand(reg, HeapObject::kMapOffset));
-  __ cmp(r9, Operand(instr->hydrogen()->map()));
-  DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::LoadPrototype(Register result,
-                             Handle<JSObject> prototype) {
-  Abort("LoadPrototype unimplemented.");
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
-  Abort("DoCheckPrototypeMaps unimplemented.");
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Abort("DoArrayLiteral unimplemented.");
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  Abort("DoObjectLiteral unimplemented.");
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  Abort("DoRegExpLiteral unimplemented.");
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  Abort("DoFunctionLiteral unimplemented.");
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
-  Abort("DoTypeof unimplemented.");
-}
-
-
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
-  Abort("DoTypeofIs unimplemented.");
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
-  Register input = ToRegister(instr->input());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
-                                                  input,
-                                                  instr->type_literal());
-
-  EmitBranch(true_block, false_block, final_branch_condition);
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
-                                 Label* false_label,
-                                 Register input,
-                                 Handle<String> type_name) {
-  Condition final_branch_condition = no_condition;
-  Register core_scratch = r9;
-  ASSERT(!input.is(core_scratch));
-  if (type_name->Equals(Heap::number_symbol())) {
-    __ tst(input, Operand(kSmiTagMask));
-    __ b(eq, true_label);
-    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-    __ cmp(input, Operand(ip));
-    final_branch_condition = eq;
-
-  } else if (type_name->Equals(Heap::string_symbol())) {
-    __ tst(input, Operand(kSmiTagMask));
-    __ b(eq, false_label);
-    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
-    __ ldrb(ip,
-            FieldMemOperand(input, Map::kBitFieldOffset));
-    __ tst(ip, Operand(1 << Map::kIsUndetectable));
-    __ b(ne, false_label);
-    __ CompareInstanceType(input, core_scratch, FIRST_NONSTRING_TYPE);
-    final_branch_condition = lo;
-
-  } else if (type_name->Equals(Heap::boolean_symbol())) {
-    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-    __ cmp(input, ip);
-    __ b(eq, true_label);
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(input, ip);
-    final_branch_condition = eq;
-
-  } else if (type_name->Equals(Heap::undefined_symbol())) {
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(input, ip);
-    __ b(eq, true_label);
-    __ tst(input, Operand(kSmiTagMask));
-    __ b(eq, false_label);
-    // Check for undetectable objects => true.
-    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
-    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
-    __ tst(ip, Operand(1 << Map::kIsUndetectable));
-    final_branch_condition = ne;
-
-  } else if (type_name->Equals(Heap::function_symbol())) {
-    __ tst(input, Operand(kSmiTagMask));
-    __ b(eq, false_label);
-    __ CompareObjectType(input, input, core_scratch, JS_FUNCTION_TYPE);
-    __ b(eq, true_label);
-    // Regular expressions => 'function' (they are callable).
-    __ CompareInstanceType(input, core_scratch, JS_REGEXP_TYPE);
-    final_branch_condition = eq;
-
-  } else if (type_name->Equals(Heap::object_symbol())) {
-    __ tst(input, Operand(kSmiTagMask));
-    __ b(eq, false_label);
-    __ LoadRoot(ip, Heap::kNullValueRootIndex);
-    __ cmp(input, ip);
-    __ b(eq, true_label);
-    // Regular expressions => 'function', not 'object'.
-    __ CompareObjectType(input, input, core_scratch, JS_REGEXP_TYPE);
-    __ b(eq, false_label);
-    // Check for undetectable objects => false.
-    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
-    __ tst(ip, Operand(1 << Map::kIsUndetectable));
-    __ b(ne, false_label);
-    // Check for JS objects => true.
-    __ CompareInstanceType(input, core_scratch, FIRST_JS_OBJECT_TYPE);
-    __ b(lo, false_label);
-    __ CompareInstanceType(input, core_scratch, LAST_JS_OBJECT_TYPE);
-    final_branch_condition = ls;
-
-  } else {
-    final_branch_condition = ne;
-    __ b(false_label);
-    // A dead branch instruction will be generated after this point.
-  }
-
-  return final_branch_condition;
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  // No code for lazy bailout instruction. Used to capture environment after a
-  // call for populating the safepoint data with deoptimization data.
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  DeoptimizeIf(no_condition, instr->environment());
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
-  Abort("DoDeleteProperty unimplemented.");
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  // Perform stack overflow check.
-  Label ok;
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  __ cmp(sp, Operand(ip));
-  __ b(hs, &ok);
-  StackCheckStub stub;
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ bind(&ok);
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
-  Abort("DoOsrEntry unimplemented.");
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
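A standalone sketch (not part of the patch) mirroring the less obvious answers hard-wired into EmitTypeofIs above, on an invented value model: undetectable objects report "undefined", and regular expressions report "function" because they are callable.

#include <cassert>
#include <cstring>

struct Value {  // Invented model; real values carry this in their map.
  bool is_smi, is_heap_number, is_string, is_undetectable, is_regexp,
       is_function, is_null;
};

const char* TypeOf(const Value& v) {
  if (v.is_smi || v.is_heap_number) return "number";
  if (v.is_undetectable) return "undefined";  // e.g. document.all.
  if (v.is_string) return "string";
  if (v.is_function || v.is_regexp) return "function";  // Both are callable.
  return "object";  // Includes null.
}

int main() {
  Value regexp = {};
  regexp.is_regexp = true;
  assert(strcmp(TypeOf(regexp), "function") == 0);

  Value undetectable = {};
  undetectable.is_undetectable = true;
  assert(strcmp(TypeOf(undetectable), "undefined") == 0);
  return 0;
}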
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
deleted file mode 100644
index 846acaccc..000000000
--- a/src/arm/lithium-codegen-arm.h
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
-
-#include "arm/lithium-arm.h"
-
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-
-class LCodeGen BASE_EMBEDDED {
- public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
-      : chunk_(chunk),
-        masm_(assembler),
-        info_(info),
-        current_block_(-1),
-        current_instruction_(-1),
-        instructions_(chunk->instructions()),
-        deoptimizations_(4),
-        deoptimization_literals_(8),
-        inlined_function_count_(0),
-        scope_(chunk->graph()->info()->scope()),
-        status_(UNUSED),
-        deferred_(8),
-        osr_pc_offset_(-1) {
-    PopulateDeoptimizationLiteralsWithInlinedFunctions();
-  }
-
-  // Try to generate code for the entire chunk, but it may fail if the
-  // chunk contains constructs we cannot handle. Returns true if the
-  // code generation attempt succeeded.
-  bool GenerateCode();
-
-  // Finish the code by setting stack height, safepoint, and bailout
-  // information on it.
-  void FinishCode(Handle<Code> code);
-
-  // Deferred code support.
-  void DoDeferredNumberTagD(LNumberTagD* instr);
-  void DoDeferredNumberTagI(LNumberTagI* instr);
-  void DoDeferredTaggedToI(LTaggedToI* instr);
-  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
-  void DoDeferredStackCheck(LGoto* instr);
-
-  // Parallel move support.
-  void DoParallelMove(LParallelMove* move);
-
-  // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
-  enum Status {
-    UNUSED,
-    GENERATING,
-    DONE,
-    ABORTED
-  };
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_generating() const { return status_ == GENERATING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  LChunk* chunk() const { return chunk_; }
-  Scope* scope() const { return scope_; }
-  HGraph* graph() const { return chunk_->graph(); }
-  MacroAssembler* masm() const { return masm_; }
-
-  int GetNextEmittedBlock(int block);
-  LInstruction* GetNextInstruction();
-
-  void EmitClassOfTest(Label* if_true,
-                       Label* if_false,
-                       Handle<String> class_name,
-                       Register input,
-                       Register temporary,
-                       Register temporary2);
-
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
-
-  void Abort(const char* format, ...);
-  void Comment(const char* format, ...);
-
-  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
-
-  // Code generation passes. Returns true if code generation should
-  // continue.
-  bool GeneratePrologue();
-  bool GenerateBody();
-  bool GenerateDeferredCode();
-  bool GenerateSafepointTable();
-
-  void CallCode(Handle<Code> code,
-                RelocInfo::Mode mode,
-                LInstruction* instr);
-  void CallRuntime(Runtime::Function* function,
-                   int num_arguments,
-                   LInstruction* instr);
-  void CallRuntime(Runtime::FunctionId id,
-                   int num_arguments,
-                   LInstruction* instr) {
-    Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, num_arguments, instr);
-  }
-
-  // Generate a direct call to a known function. Expects the function
-  // to be in r1.
-  void CallKnownFunction(Handle<JSFunction> function,
-                         int arity,
-                         LInstruction* instr);
-
-  void LoadPrototype(Register result, Handle<JSObject> prototype);
-
-  void RegisterLazyDeoptimization(LInstruction* instr);
-  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
-  void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
-  void AddToTranslation(Translation* translation,
-                        LOperand* op,
-                        bool is_tagged);
-  void PopulateDeoptimizationData(Handle<Code> code);
-  int DefineDeoptimizationLiteral(Handle<Object> literal);
-
-  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
-  Register ToRegister(int index) const;
-  DoubleRegister ToDoubleRegister(int index) const;
-
-  // LOperand must be a register.
-  Register ToRegister(LOperand* op) const;
-
-  // LOperand is loaded into scratch, unless already a register.
-  Register EmitLoadRegister(LOperand* op, Register scratch);
-
-  // LOperand must be a double register.
-  DoubleRegister ToDoubleRegister(LOperand* op) const;
-
-  // LOperand is loaded into dbl_scratch, unless already a double register.
-  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
-                                        SwVfpRegister flt_scratch,
-                                        DoubleRegister dbl_scratch);
-
-  int ToInteger32(LConstantOperand* op) const;
-  Operand ToOperand(LOperand* op);
-  MemOperand ToMemOperand(LOperand* op) const;
-
-  // Specific math operations - used from DoUnaryMathOperation.
-  void DoMathAbs(LUnaryMathOperation* instr);
-  void DoMathFloor(LUnaryMathOperation* instr);
-  void DoMathSqrt(LUnaryMathOperation* instr);
-
-  // Support for recording safepoint and position information.
-  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
-  void RecordSafepointWithRegisters(LPointerMap* pointers,
-                                    int arguments,
-                                    int deoptimization_index);
-  void RecordPosition(int position);
-
-  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
-  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
-  void EmitBranch(int left_block, int right_block, Condition cc);
-  void EmitCmpI(LOperand* left, LOperand* right);
-  void EmitNumberUntagD(Register input,
-                        DoubleRegister result,
-                        LEnvironment* env);
-
-  // Emits optimized code for typeof x == "y". Modifies input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label, Label* false_label,
-                         Register input, Handle<String> type_name);
-
-  LChunk* const chunk_;
-  MacroAssembler* const masm_;
-  CompilationInfo* const info_;
-
-  int current_block_;
-  int current_instruction_;
-  const ZoneList<LInstruction*>* instructions_;
-  ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Handle<Object> > deoptimization_literals_;
-  int inlined_function_count_;
-  Scope* const scope_;
-  Status status_;
-  TranslationBuffer translations_;
-  ZoneList<LDeferredCode*> deferred_;
-  int osr_pc_offset_;
-
-  // Builder that keeps track of safepoints in the code. The table
-  // itself is emitted at the end of the generated code.
-  SafepointTableBuilder safepoints_;
-
-  friend class LDeferredCode;
-  friend class LEnvironment;
-  friend class SafepointGenerator;
-  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
-  explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
-    codegen->AddDeferredCode(this);
-  }
-
-  virtual ~LDeferredCode() { }
-  virtual void Generate() = 0;
-
-  void SetExit(Label* exit) { external_exit_ = exit; }
-  Label* entry() { return &entry_; }
-  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-
- protected:
-  LCodeGen* codegen() const { return codegen_; }
-  MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
-  LCodeGen* codegen_;
-  Label entry_;
-  Label exit_;
-  Label* external_exit_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6ad8918f1..afd7e2cae 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -171,6 +171,13 @@ void MacroAssembler::Ret(Condition cond) {
 }
 
 
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+  LoadRoot(ip, Heap::kStackLimitRootIndex);
+  cmp(sp, Operand(ip));
+  b(lo, on_stack_overflow);
+}
+
+
 void MacroAssembler::Drop(int count, Condition cond) {
   if (count > 0) {
     add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
   }
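Before the remaining macro-assembler hunks, a standalone sketch (not part of the patch) of the deferred-code pattern behind the LDeferredCode class in the header above: slow paths are queued while the main body is emitted, then generated out of line at the end so the hot path falls straight through. "Emission" is modeled here as string concatenation.

#include <cstdio>
#include <string>
#include <vector>

struct DeferredCode {
  std::string body;  // Emitted after the main body, like Generate().
};

int main() {
  std::vector<DeferredCode> deferred;
  std::string code;

  // Main body: the unlikely case branches forward into deferred code, and
  // deferred code jumps back to the exit label, as with entry()/exit().
  code += "  cmp r0, #0\n  beq deferred_0\nexit_0:\n  ...\n";
  DeferredCode d;
  d.body = "deferred_0:\n  bl SlowPath\n  b exit_0\n";
  deferred.push_back(d);

  // Like GenerateDeferredCode(): emit every queued section at the end.
  for (size_t i = 0; i < deferred.size(); ++i) code += deferred[i].body;
  printf("%s", code.c_str());
  return 0;
}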
-void MacroAssembler::PushSafepointRegisters() { - // Safepoints expect a block of contiguous register values starting with r0: - ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); - // Safepoints expect a block of kNumSafepointRegisters values on the - // stack, so adjust the stack for unsaved registers. - const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; - ASSERT(num_unsaved >= 0); - sub(sp, sp, Operand(num_unsaved * kPointerSize)); - stm(db_w, sp, kSafepointSavedRegisters); -} - - -void MacroAssembler::PopSafepointRegisters() { - const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; - ldm(ia_w, sp, kSafepointSavedRegisters); - add(sp, sp, Operand(num_unsaved * kPointerSize)); -} - - -int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { - // The registers are pushed starting with the highest encoding, - // which means that lowest encodings are closest to the stack pointer. - ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); - return reg_code; -} - - void MacroAssembler::Ldrd(Register dst1, Register dst2, const MemOperand& src, Condition cond) { ASSERT(src.rm().is(no_reg)); @@ -536,8 +515,12 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { } -void MacroAssembler::EnterExitFrame(bool save_doubles) { +void MacroAssembler::EnterExitFrame() { + // Compute the argv pointer and keep it in a callee-saved register. // r0 is argc. + add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); + sub(r6, r6, Operand(kPointerSize)); + // Compute callee's stack pointer before making changes and save it as // ip register so that it is restored as sp register on exit, thereby // popping the args. @@ -545,9 +528,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { // ip = sp + kPointerSize * #args; add(ip, sp, Operand(r0, LSL, kPointerSizeLog2)); - // Compute the argv pointer and keep it in a callee-saved register. - sub(r6, ip, Operand(kPointerSize)); - // Prepare the stack to be aligned when calling into C. After this point there // are 5 pushes before the call into C, so the stack needs to be aligned after // 5 pushes. @@ -578,28 +558,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { // Setup argc and the builtin function in callee-saved registers. mov(r4, Operand(r0)); mov(r5, Operand(r1)); - - // Optionally save all double registers. - if (save_doubles) { - // TODO(regis): Use vstrm instruction. - // The stack alignment code above made sp unaligned, so add space for one - // more double register and use aligned addresses. - ASSERT(kDoubleSize == frame_alignment); - // Mark the frame as containing doubles by pushing a non-valid return - // address, i.e. 0. - ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize); - mov(ip, Operand(0)); // Marker and alignment word. - push(ip); - int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize; - sub(sp, sp, Operand(space)); - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - vstr(reg, sp, i * kDoubleSize + kPointerSize); - } - // Note that d0 will be accessible at fp - 2*kPointerSize - - // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the - // alignment word were pushed after the fp. - } } @@ -634,18 +592,7 @@ int MacroAssembler::ActivationFrameAlignment() { } -void MacroAssembler::LeaveExitFrame(bool save_doubles) { - // Optionally restore all double registers. - if (save_doubles) { - // TODO(regis): Use vldrm instruction. 
-    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
-      DwVfpRegister reg = DwVfpRegister::from_code(i);
-      // Register d15 is just below the marker.
-      const int offset = ExitFrameConstants::kMarkerOffset;
-      vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
-    }
-  }
-
+void MacroAssembler::LeaveExitFrame() {
   // Clear top frame.
   mov(r3, Operand(0, RelocInfo::NONE));
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -809,15 +756,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
   // Invoke the cached code.
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    InvokeCode(r3, expected, actual, flag);
-  } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
-  }
+  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
 }


@@ -1575,16 +1514,6 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
 }


-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
-  Runtime::Function* function = Runtime::FunctionForId(id);
-  mov(r0, Operand(function->nargs));
-  mov(r1, Operand(ExternalReference(function)));
-  CEntryStub stub(1);
-  stub.SaveDoubles();
-  CallStub(&stub);
-}
-
-
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                            int num_arguments) {
   mov(r0, Operand(num_arguments));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 3da872677..8bd134c38 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -224,12 +224,6 @@ class MacroAssembler: public Assembler {
     }
   }

-  // Push and pop the registers that can hold pointers, as defined by the
-  // RegList constant kSafepointSavedRegisters.
-  void PushSafepointRegisters();
-  void PopSafepointRegisters();
-  static int SafepointRegisterStackIndex(int reg_code);
-
   // Load two consecutive registers with two consecutive memory locations.
   void Ldrd(Register dst1,
             Register dst2,
             const MemOperand& src,
             Condition cond = al);
@@ -242,6 +236,11 @@ class MacroAssembler: public Assembler {
             const MemOperand& dst,
             Condition cond = al);

+  // ---------------------------------------------------------------------------
+  // Stack limit support
+
+  void StackLimitCheck(Label* on_stack_limit_hit);
+
   // ---------------------------------------------------------------------------
   // Activation frames

@@ -255,10 +254,10 @@ class MacroAssembler: public Assembler {
   // Expects the number of arguments in register r0 and
   // the builtin function to call in register r1. Exits with argc in
   // r4, argv in r6, and the builtin function to call in r5.
-  void EnterExitFrame(bool save_doubles);
+  void EnterExitFrame();

   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame(bool save_doubles);
+  void LeaveExitFrame();

   // Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment(); @@ -576,7 +575,6 @@ class MacroAssembler: public Assembler { // Call a runtime routine. void CallRuntime(Runtime::Function* f, int num_arguments); - void CallRuntimeSaveDoubles(Runtime::FunctionId id); // Convenience function: Same as above, but takes the fid instead. void CallRuntime(Runtime::FunctionId fid, int num_arguments); @@ -667,14 +665,6 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Smi utilities - void SmiTag(Register reg, SBit s = LeaveCC) { - add(reg, reg, Operand(reg), s); - } - - void SmiUntag(Register reg) { - mov(reg, Operand(reg, ASR, kSmiTagSize)); - } - // Jump if either of the registers contain a non-smi. void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); // Jump if either of the registers contain a smi. @@ -776,17 +766,6 @@ class CodePatcher { #endif // ENABLE_DEBUGGER_SUPPORT -// Helper class for generating code or data associated with the code -// right after a call instruction. As an example this can be used to -// generate safepoint data after calls for crankshaft. -class PostCallGenerator { - public: - PostCallGenerator() { } - virtual ~PostCallGenerator() { } - virtual void Generate() = 0; -}; - - // ----------------------------------------------------------------------------- // Static helper functions. diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc index 143b83936..3ec5f449d 100644 --- a/src/arm/simulator-arm.cc +++ b/src/arm/simulator-arm.cc @@ -74,7 +74,6 @@ class Debugger { Simulator* sim_; int32_t GetRegisterValue(int regnum); - double GetVFPDoubleRegisterValue(int regnum); bool GetValue(const char* desc, int32_t* value); bool GetVFPSingleValue(const char* desc, float* value); bool GetVFPDoubleValue(const char* desc, double* value); @@ -169,11 +168,6 @@ int32_t Debugger::GetRegisterValue(int regnum) { } -double Debugger::GetVFPDoubleRegisterValue(int regnum) { - return sim_->get_double_from_d_register(regnum); -} - - bool Debugger::GetValue(const char* desc, int32_t* value) { int regnum = Registers::Number(desc); if (regnum != kNoRegister) { @@ -315,11 +309,6 @@ void Debugger::Debug() { value = GetRegisterValue(i); PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value); } - for (int i = 0; i < kNumVFPDoubleRegisters; i++) { - dvalue = GetVFPDoubleRegisterValue(i); - PrintF("%3s: %f\n", - VFPRegisters::Name(i, true), dvalue); - } } else { if (GetValue(arg1, &value)) { PrintF("%s: 0x%08x %d \n", arg1, value, value); @@ -848,11 +837,6 @@ void Simulator::set_pc(int32_t value) { } -bool Simulator::has_bad_pc() const { - return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc)); -} - - // Raw access to the PC register without the special adjustment when reading. int32_t Simulator::get_pc() const { return registers_[pc]; @@ -1526,8 +1510,7 @@ void Simulator::HandleRList(Instr* instr, bool load) { typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1, int32_t arg2, - int32_t arg3, - int32_t arg4); + int32_t arg3); typedef double (*SimulatorRuntimeFPCall)(int32_t arg0, int32_t arg1, int32_t arg2, @@ -1550,8 +1533,6 @@ void Simulator::SoftwareInterrupt(Instr* instr) { int32_t arg1 = get_register(r1); int32_t arg2 = get_register(r2); int32_t arg3 = get_register(r3); - int32_t* stack_pointer = reinterpret_cast(get_register(sp)); - int32_t arg4 = *stack_pointer; // This is dodgy but it works because the C entry stubs are never moved. 
// See comment in codegen-arm.cc and bug 1242173. int32_t saved_lr = get_register(lr); @@ -1580,20 +1561,19 @@ void Simulator::SoftwareInterrupt(Instr* instr) { reinterpret_cast(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { PrintF( - "Call to host function at %p args %08x, %08x, %08x, %08x, %0xc", + "Call to host function at %p with args %08x, %08x, %08x, %08x", FUNCTION_ADDR(target), arg0, arg1, arg2, - arg3, - arg4); + arg3); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } PrintF("\n"); } CHECK(stack_aligned); - int64_t result = target(arg0, arg1, arg2, arg3, arg4); + int64_t result = target(arg0, arg1, arg2, arg3); int32_t lo_res = static_cast(result); int32_t hi_res = static_cast(result >> 32); if (::v8::internal::FLAG_trace_sim) { @@ -1928,12 +1908,9 @@ void Simulator::DecodeType01(Instr* instr) { set_register(lr, old_pc + Instr::kInstrSize); break; } - case BKPT: { - Debugger dbg(this); - PrintF("Simulator hit BKPT.\n"); - dbg.Debug(); + case BKPT: + v8::internal::OS::DebugBreak(); break; - } default: UNIMPLEMENTED(); } diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h index 7bfe76ac3..c37b3f715 100644 --- a/src/arm/simulator-arm.h +++ b/src/arm/simulator-arm.h @@ -186,10 +186,6 @@ class Simulator { // ICache checking. static void FlushICache(void* start, size_t size); - // Returns true if pc register contains one of the 'special_values' defined - // below (bad_lr, end_sim_pc). - bool has_bad_pc() const; - private: enum special_values { // Known bad pc value to ensure that the simulator does not execute diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index 74ffd3b6f..0a5eac27f 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -874,34 +874,6 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell( return cell; } -// Calls GenerateCheckPropertyCell for each global object in the prototype chain -// from object to (but not including) holder. -MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells( - MacroAssembler* masm, - JSObject* object, - JSObject* holder, - String* name, - Register scratch, - Label* miss) { - JSObject* current = object; - while (current != holder) { - if (current->IsGlobalObject()) { - // Returns a cell or a failure. - MaybeObject* result = GenerateCheckPropertyCell( - masm, - GlobalObject::cast(current), - name, - scratch, - miss); - if (result->IsFailure()) return result; - } - ASSERT(current->IsJSObject()); - current = JSObject::cast(current->GetPrototype()); - } - return NULL; -} - - #undef __ #define __ ACCESS_MASM(masm()) @@ -939,19 +911,18 @@ Register StubCompiler::CheckPrototypes(JSObject* object, // checks are allowed in stubs. ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); - ASSERT(current->GetPrototype()->IsJSObject()); JSObject* prototype = JSObject::cast(current->GetPrototype()); if (!current->HasFastProperties() && !current->IsJSGlobalObject() && !current->IsJSGlobalProxy()) { if (!name->IsSymbol()) { - MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name); - Object* lookup_result = NULL; // Initialization to please compiler. 
- if (!maybe_lookup_result->ToObject(&lookup_result)) { - set_failure(Failure::cast(maybe_lookup_result)); + MaybeObject* lookup_result = Heap::LookupSymbol(name); + if (lookup_result->IsFailure()) { + set_failure(Failure::cast(lookup_result)); return reg; + } else { + name = String::cast(lookup_result->ToObjectUnchecked()); } - name = String::cast(lookup_result); } ASSERT(current->property_dictionary()->FindEntry(name) == StringDictionary::kNotFound); @@ -965,7 +936,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); reg = holder_reg; // from now the object is in holder_reg __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); - } else if (Heap::InNewSpace(prototype)) { + } else { // Get the map of the current object. __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); __ cmp(scratch1, Operand(Handle(current->map()))); @@ -985,24 +956,14 @@ Register StubCompiler::CheckPrototypes(JSObject* object, } reg = holder_reg; // from now the object is in holder_reg - // The prototype is in new space; we cannot store a reference - // to it in the code. Load it from the map. - __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); - } else { - // Check the map of the current object. - __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); - __ cmp(scratch1, Operand(Handle(current->map()))); - // Branch on the result of the map check. - __ b(ne, miss); - // Check access rights to the global object. This has to happen - // after the map check so that we know that the object is - // actually a global object. - if (current->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(reg, scratch1, miss); + if (Heap::InNewSpace(prototype)) { + // The prototype is in new space; we cannot store a reference + // to it in the code. Load it from the map. + __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + } else { + // The prototype is in old space; load it directly. + __ mov(reg, Operand(Handle(prototype))); } - // The prototype is in old space; load it directly. - reg = holder_reg; // from now the object is in holder_reg - __ mov(reg, Operand(Handle(prototype))); } if (save_at_depth == depth) { @@ -1021,22 +982,32 @@ Register StubCompiler::CheckPrototypes(JSObject* object, // Log the check depth. LOG(IntEvent("check-maps-depth", depth + 1)); - // Perform security check for access to the global object. - ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); - if (holder->IsJSGlobalProxy()) { + // Perform security check for access to the global object and return + // the holder register. + ASSERT(current == holder); + ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); + if (current->IsJSGlobalProxy()) { __ CheckAccessGlobalProxy(reg, scratch1, miss); - }; + } // If we've skipped any global objects, it's not enough to verify // that their maps haven't changed. We also need to check that the // property cell for the property is still empty. 
- MaybeObject* result = GenerateCheckPropertyCells(masm(), - object, - holder, - name, - scratch1, - miss); - if (result->IsFailure()) set_failure(Failure::cast(result)); + current = object; + while (current != holder) { + if (current->IsGlobalObject()) { + MaybeObject* cell = GenerateCheckPropertyCell(masm(), + GlobalObject::cast(current), + name, + scratch1, + miss); + if (cell->IsFailure()) { + set_failure(Failure::cast(cell)); + return reg; + } + } + current = JSObject::cast(current->GetPrototype()); + } // Return the register containing the holder. return reg; @@ -1681,7 +1652,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall( __ Drop(argc + 1); __ Ret(); - StubRuntimeCallHelper call_helper; + ICRuntimeCallHelper call_helper; char_code_at_generator.GenerateSlow(masm(), call_helper); __ bind(&index_out_of_range); @@ -1758,7 +1729,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall( __ Drop(argc + 1); __ Ret(); - StubRuntimeCallHelper call_helper; + ICRuntimeCallHelper call_helper; char_at_generator.GenerateSlow(masm(), call_helper); __ bind(&index_out_of_range); @@ -1833,7 +1804,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall( __ Drop(argc + 1); __ Ret(); - StubRuntimeCallHelper call_helper; + ICRuntimeCallHelper call_helper; char_from_code_generator.GenerateSlow(masm(), call_helper); // Tail call the full function. We do not have to patch the receiver @@ -2359,16 +2330,8 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object, ASSERT(function->is_compiled()); Handle code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); - if (V8::UseCrankshaft()) { - // TODO(kasperl): For now, we always call indirectly through the - // code field in the function to allow recompilation to take effect - // without changing any of the call sites. - __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); - __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION); - } else { - __ InvokeCode(code, expected, arguments(), - RelocInfo::CODE_TARGET, JUMP_FUNCTION); - } + __ InvokeCode(code, expected, arguments(), + RelocInfo::CODE_TARGET, JUMP_FUNCTION); // Handle call cache miss. __ bind(&miss); @@ -2901,62 +2864,13 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { } -MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - - // Check that the receiver isn't a smi. - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &miss); - - // Check that the map matches. - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ cmp(r2, Operand(Handle(receiver->map()))); - __ b(ne, &miss); - - // Check that the key is a smi. - __ tst(r0, Operand(kSmiTagMask)); - __ b(ne, &miss); - - // Get the elements array. - __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ AssertFastElements(r2); - - // Check that the key is within bounds. - __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); - __ cmp(r0, Operand(r3)); - __ b(hs, &miss); - - // Load the result and make sure it's not the hole. 
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ ldr(r4, - MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r4, ip); - __ b(eq, &miss); - __ mov(r0, r4); - __ Ret(); - - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(NORMAL, NULL); -} - - MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, int index, Map* transition, String* name) { // ----------- S t a t e ------------- // -- r0 : value - // -- r1 : name + // -- r1 : key // -- r2 : receiver // -- lr : return address // ----------------------------------- @@ -2988,76 +2902,6 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, } -MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized( - JSObject* receiver) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // -- r3 : scratch - // -- r4 : scratch (elements) - // ----------------------------------- - Label miss; - - Register value_reg = r0; - Register key_reg = r1; - Register receiver_reg = r2; - Register scratch = r3; - Register elements_reg = r4; - - // Check that the receiver isn't a smi. - __ tst(receiver_reg, Operand(kSmiTagMask)); - __ b(eq, &miss); - - // Check that the map matches. - __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); - __ cmp(scratch, Operand(Handle(receiver->map()))); - __ b(ne, &miss); - - // Check that the key is a smi. - __ tst(key_reg, Operand(kSmiTagMask)); - __ b(ne, &miss); - - // Get the elements array and make sure it is a fast element array, not 'cow'. - __ ldr(elements_reg, - FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset)); - __ cmp(scratch, Operand(Handle(Factory::fixed_array_map()))); - __ b(ne, &miss); - - // Check that the key is within bounds. - if (receiver->IsJSArray()) { - __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); - } else { - __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); - } - // Compare smis. - __ cmp(key_reg, scratch); - __ b(hs, &miss); - - __ add(scratch, - elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ str(value_reg, - MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ RecordWrite(scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize), - receiver_reg , elements_reg); - - // value_reg (r0) is preserved. - // Done. - __ Ret(); - - __ bind(&miss); - Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. 
-  return GetCode(NORMAL, NULL);
-}
-
-
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   // -- r0 : argc
diff --git a/src/assembler.cc b/src/assembler.cc
index d71a35a4a..101eece8c 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -35,12 +35,10 @@
 #include "v8.h"

 #include "arguments.h"
-#include "deoptimizer.h"
 #include "execution.h"
 #include "ic-inl.h"
 #include "factory.h"
 #include "runtime.h"
-#include "runtime-profiler.h"
 #include "serialize.h"
 #include "stub-cache.h"
 #include "regexp-stack.h"
@@ -64,10 +62,6 @@ namespace v8 {
 namespace internal {


-const double DoubleConstant::min_int = kMinInt;
-const double DoubleConstant::one_half = 0.5;
-
-
 // -----------------------------------------------------------------------------
 // Implementation of Label

@@ -216,7 +210,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
 #endif
   Counters::reloc_info_count.Increment();
   ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
+  ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
   RelocInfo::Mode rmode = rinfo->rmode();
@@ -392,7 +386,7 @@ void RelocIterator::next() {
 RelocIterator::RelocIterator(Code* code, int mode_mask) {
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
-  // Relocation info is read backwards.
+  // relocation info is read backwards
   pos_ = code->relocation_start() + code->relocation_size();
   end_ = code->relocation_start();
   done_ = false;
@@ -405,7 +399,7 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
 RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
   rinfo_.pc_ = desc.buffer;
   rinfo_.data_ = 0;
-  // Relocation info is read backwards.
+  // relocation info is read backwards
   pos_ = desc.buffer + desc.buffer_size;
   end_ = pos_ - desc.reloc_size;
   done_ = false;
@@ -437,8 +431,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
       return "debug break";
     case RelocInfo::CODE_TARGET:
       return "code target";
-    case RelocInfo::GLOBAL_PROPERTY_CELL:
-      return "global property cell";
     case RelocInfo::RUNTIME_ENTRY:
       return "runtime entry";
     case RelocInfo::JS_RETURN:
@@ -484,13 +476,6 @@ void RelocInfo::Print() {
     PrintF(" (%s) (%p)", Code::Kind2String(code->kind()), target_address());
   } else if (IsPosition(rmode_)) {
     PrintF(" (%" V8_PTR_PREFIX "d)", data());
-  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
-    // Deoptimization bailouts are stored as runtime entries.
- int id = Deoptimizer::GetDeoptimizationId( - target_address(), Deoptimizer::EAGER); - if (id != Deoptimizer::kNotDeoptimizationEntry) { - PrintF(" (deoptimization bailout %d)", id); - } } PrintF("\n"); @@ -504,9 +489,6 @@ void RelocInfo::Verify() { case EMBEDDED_OBJECT: Object::VerifyPointer(target_object()); break; - case GLOBAL_PROPERTY_CELL: - Object::VerifyPointer(target_cell()); - break; case DEBUG_BREAK: #ifndef ENABLE_DEBUGGER_SUPPORT UNREACHABLE(); @@ -613,23 +595,6 @@ ExternalReference ExternalReference::transcendental_cache_array_address() { } -ExternalReference ExternalReference::new_deoptimizer_function() { - return ExternalReference( - Redirect(FUNCTION_ADDR(Deoptimizer::New))); -} - - -ExternalReference ExternalReference::compute_output_frames_function() { - return ExternalReference( - Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames))); -} - - -ExternalReference ExternalReference::global_contexts_list() { - return ExternalReference(Heap::global_contexts_list_address()); -} - - ExternalReference ExternalReference::keyed_lookup_cache_keys() { return ExternalReference(KeyedLookupCache::keys_address()); } @@ -710,18 +675,6 @@ ExternalReference ExternalReference::scheduled_exception_address() { } -ExternalReference ExternalReference::address_of_min_int() { - return ExternalReference(reinterpret_cast( - const_cast(&DoubleConstant::min_int))); -} - - -ExternalReference ExternalReference::address_of_one_half() { - return ExternalReference(reinterpret_cast( - const_cast(&DoubleConstant::one_half))); -} - - #ifndef V8_INTERPRETED_REGEXP ExternalReference ExternalReference::re_check_stack_guard_state() { diff --git a/src/assembler.cc.rej b/src/assembler.cc.rej deleted file mode 100644 index fbc576bd9..000000000 --- a/src/assembler.cc.rej +++ /dev/null @@ -1,30 +0,0 @@ ---- src/assembler.cc (revision 757) -+++ src/assembler.cc (working copy) -@@ -392,12 +392,16 @@ - RelocIterator::RelocIterator(Code* code, int mode_mask) { - rinfo_.pc_ = code->instruction_start(); - rinfo_.data_ = 0; -- // relocation info is read backwards -+ // Relocation info is read backwards. - pos_ = code->relocation_start() + code->relocation_size(); - end_ = code->relocation_start(); - done_ = false; - mode_mask_ = mode_mask; -- if (mode_mask_ == 0) pos_ = end_; -+ // Skip all relocation information if the mask is zero or if the -+ // code has been deoptimized and thereby destructively patched. -+ if (mode_mask_ == 0 || code->kind() == Code::DEOPTIMIZED_FUNCTION) { -+ pos_ = end_; -+ } - next(); - } - -@@ -405,7 +409,7 @@ - RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) { - rinfo_.pc_ = desc.buffer; - rinfo_.data_ = 0; -- // relocation info is read backwards -+ // Relocation info is read backwards. - pos_ = desc.buffer + desc.buffer_size; - end_ = pos_ - desc.reloc_size; - done_ = false; diff --git a/src/assembler.h b/src/assembler.h index 82c9fc24c..da4ab2135 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -38,21 +38,12 @@ #include "runtime.h" #include "top.h" #include "token.h" +#include "objects.h" namespace v8 { namespace internal { -// ----------------------------------------------------------------------------- -// Common double constants. - -class DoubleConstant: public AllStatic { - public: - static const double min_int; - static const double one_half; -}; - - // ----------------------------------------------------------------------------- // Labels represent pc locations; they are typically jump or call targets. 
// After declaration, a label can be freely used to denote known or (yet) @@ -183,8 +174,6 @@ class RelocInfo BASE_EMBEDDED { CODE_TARGET, // Code target which is not any of the above. EMBEDDED_OBJECT, - GLOBAL_PROPERTY_CELL, - // Everything after runtime_entry (inclusive) is not GC'ed. RUNTIME_ENTRY, JS_RETURN, // Marks start of the ExitJSFrame code. @@ -265,10 +254,6 @@ class RelocInfo BASE_EMBEDDED { INLINE(Handle target_object_handle(Assembler* origin)); INLINE(Object** target_object_address()); INLINE(void set_target_object(Object* target)); - INLINE(JSGlobalPropertyCell* target_cell()); - INLINE(Handle target_cell_handle()); - INLINE(void set_target_cell(JSGlobalPropertyCell* cell)); - // Read the address of the word containing the target_address in an // instruction stream. What this means exactly is architecture-independent. @@ -499,11 +484,6 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference transcendental_cache_array_address(); static ExternalReference delete_handle_scope_extensions(); - // Deoptimization support. - static ExternalReference new_deoptimizer_function(); - static ExternalReference compute_output_frames_function(); - static ExternalReference global_contexts_list(); - // Static data in the keyed lookup cache. static ExternalReference keyed_lookup_cache_keys(); static ExternalReference keyed_lookup_cache_field_offsets(); @@ -546,10 +526,6 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference scheduled_exception_address(); - // Static variables containing common double constants. - static ExternalReference address_of_min_int(); - static ExternalReference address_of_one_half(); - Address address() const {return reinterpret_cast
(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/src/ast-inl.h b/src/ast-inl.h index e88156d6e..f0a25c171 100644 --- a/src/ast-inl.h +++ b/src/ast-inl.h @@ -25,17 +25,18 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#ifndef V8_AST_INL_H_ -#define V8_AST_INL_H_ - #include "v8.h" #include "ast.h" -#include "jump-target-inl.h" namespace v8 { namespace internal { +BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type) + : labels_(labels), type_(type) { + ASSERT(labels == NULL || labels->length() > 0); +} + SwitchStatement::SwitchStatement(ZoneStringList* labels) : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), @@ -43,42 +44,17 @@ SwitchStatement::SwitchStatement(ZoneStringList* labels) } -Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block) - : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY), - statements_(capacity), - is_initializer_block_(is_initializer_block) { -} - - -BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type) - : labels_(labels), - type_(type), - entry_id_(GetNextId()), - exit_id_(GetNextId()) { - ASSERT(labels == NULL || labels->length() > 0); -} - - IterationStatement::IterationStatement(ZoneStringList* labels) : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL), - continue_target_(JumpTarget::BIDIRECTIONAL), - osr_entry_id_(GetNextId()) { + continue_target_(JumpTarget::BIDIRECTIONAL) { } -DoWhileStatement::DoWhileStatement(ZoneStringList* labels) - : IterationStatement(labels), - cond_(NULL), - condition_position_(-1), - next_id_(GetNextId()) { -} - - -WhileStatement::WhileStatement(ZoneStringList* labels) - : IterationStatement(labels), - cond_(NULL), - may_have_function_literal_(true) { +Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block) + : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY), + statements_(capacity), + is_initializer_block_(is_initializer_block) { } @@ -88,8 +64,7 @@ ForStatement::ForStatement(ZoneStringList* labels) cond_(NULL), next_(NULL), may_have_function_literal_(true), - loop_variable_(NULL), - next_id_(GetNextId()) { + loop_variable_(NULL) { } @@ -98,6 +73,8 @@ ForInStatement::ForInStatement(ZoneStringList* labels) } -} } // namespace v8::internal +DoWhileStatement::DoWhileStatement(ZoneStringList* labels) + : IterationStatement(labels), cond_(NULL), condition_position_(-1) { +} -#endif // V8_AST_INL_H_ +} } // namespace v8::internal diff --git a/src/ast.cc b/src/ast.cc index c1ea0a8b3..bb445c4d2 100644 --- a/src/ast.cc +++ b/src/ast.cc @@ -28,16 +28,16 @@ #include "v8.h" #include "ast.h" -#include "jump-target-inl.h" #include "parser.h" #include "scopes.h" #include "string-stream.h" +#include "ast-inl.h" +#include "jump-target-inl.h" namespace v8 { namespace internal { -unsigned AstNode::current_id_ = 0; -unsigned AstNode::count_ = 0; + VariableProxySentinel VariableProxySentinel::this_proxy_(true); VariableProxySentinel VariableProxySentinel::identifier_proxy_(false); ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_; @@ -48,8 +48,6 @@ Call Call::sentinel_(NULL, NULL, 0); // ---------------------------------------------------------------------------- // All the Accept member functions for each syntax tree node type. 
-void Slot::Accept(AstVisitor* v) { v->VisitSlot(this); } - #define DECL_ACCEPT(type) \ void type::Accept(AstVisitor* v) { v->Visit##type(this); } AST_NODE_LIST(DECL_ACCEPT) @@ -117,29 +115,6 @@ void VariableProxy::BindTo(Variable* var) { } -Assignment::Assignment(Token::Value op, - Expression* target, - Expression* value, - int pos) - : op_(op), - target_(target), - value_(value), - pos_(pos), - compound_bailout_id_(kNoNumber), - block_start_(false), - block_end_(false), - is_monomorphic_(false), - receiver_types_(NULL) { - ASSERT(Token::IsAssignmentOp(op)); - binary_operation_ = is_compound() - ? new BinaryOperation(binary_op(), target, value, pos + 1) - : NULL; - if (is_compound()) { - compound_bailout_id_ = GetNextId(); - } -} - - Token::Value Assignment::binary_op() const { switch (op_) { case Token::ASSIGN_BIT_OR: return Token::BIT_OR; @@ -164,12 +139,6 @@ bool FunctionLiteral::AllowsLazyCompilation() { } -bool FunctionLiteral::AllowOptimize() { - // We can't deal with heap-allocated locals. - return scope()->num_heap_slots() == 0; -} - - ObjectLiteral::Property::Property(Literal* key, Expression* value) { emit_store_ = true; key_ = key; @@ -403,265 +372,6 @@ BinaryOperation::BinaryOperation(Assignment* assignment) { } -// ---------------------------------------------------------------------------- -// Inlining support - -bool Block::IsInlineable() const { - const int count = statements_.length(); - for (int i = 0; i < count; ++i) { - if (!statements_[i]->IsInlineable()) return false; - } - return true; -} - - -bool ExpressionStatement::IsInlineable() const { - return expression()->IsInlineable(); -} - - -bool IfStatement::IsInlineable() const { - return condition()->IsInlineable() && then_statement()->IsInlineable() && - else_statement()->IsInlineable(); -} - - -bool ReturnStatement::IsInlineable() const { - return expression()->IsInlineable(); -} - - -bool Conditional::IsInlineable() const { - return condition()->IsInlineable() && then_expression()->IsInlineable() && - else_expression()->IsInlineable(); -} - - -bool VariableProxy::IsInlineable() const { - return var()->is_global() || var()->IsStackAllocated(); -} - - -bool Assignment::IsInlineable() const { - return target()->IsInlineable() && value()->IsInlineable(); -} - - -bool Property::IsInlineable() const { - return obj()->IsInlineable() && key()->IsInlineable(); -} - - -bool Call::IsInlineable() const { - if (!expression()->IsInlineable()) return false; - const int count = arguments()->length(); - for (int i = 0; i < count; ++i) { - if (!arguments()->at(i)->IsInlineable()) return false; - } - return true; -} - - -bool CallNew::IsInlineable() const { - if (!expression()->IsInlineable()) return false; - const int count = arguments()->length(); - for (int i = 0; i < count; ++i) { - if (!arguments()->at(i)->IsInlineable()) return false; - } - return true; -} - - -bool CallRuntime::IsInlineable() const { - const int count = arguments()->length(); - for (int i = 0; i < count; ++i) { - if (!arguments()->at(i)->IsInlineable()) return false; - } - return true; -} - - -bool UnaryOperation::IsInlineable() const { - return expression()->IsInlineable(); -} - - -bool BinaryOperation::IsInlineable() const { - return left()->IsInlineable() && right()->IsInlineable(); -} - - -bool CompareOperation::IsInlineable() const { - return left()->IsInlineable() && right()->IsInlineable(); -} - - -bool CompareToNull::IsInlineable() const { - return expression()->IsInlineable(); -} - - -bool CountOperation::IsInlineable() const { - return 
expression()->IsInlineable(); -} - - -// ---------------------------------------------------------------------------- -// Recording of type feedback - -void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - // Record type feedback from the oracle in the AST. - is_monomorphic_ = oracle->LoadIsMonomorphic(this); - if (key()->IsPropertyName()) { - if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) { - is_array_length_ = true; - } else { - Literal* lit_key = key()->AsLiteral(); - ASSERT(lit_key != NULL && lit_key->handle()->IsString()); - Handle name = Handle::cast(lit_key->handle()); - ZoneMapList* types = oracle->LoadReceiverTypes(this, name); - receiver_types_ = types; - } - } else if (is_monomorphic_) { - monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this); - } -} - - -void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - Property* prop = target()->AsProperty(); - ASSERT(prop != NULL); - is_monomorphic_ = oracle->StoreIsMonomorphic(this); - if (prop->key()->IsPropertyName()) { - Literal* lit_key = prop->key()->AsLiteral(); - ASSERT(lit_key != NULL && lit_key->handle()->IsString()); - Handle name = Handle::cast(lit_key->handle()); - ZoneMapList* types = oracle->StoreReceiverTypes(this, name); - receiver_types_ = types; - } else if (is_monomorphic_) { - // Record receiver type for monomorphic keyed loads. - monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this); - } -} - - -void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - TypeInfo info = oracle->SwitchType(this); - if (info.IsSmi()) { - compare_type_ = SMI_ONLY; - } else if (info.IsNonPrimitive()) { - compare_type_ = OBJECT_ONLY; - } else { - ASSERT(compare_type_ == NONE); - } -} - - -static bool CallWithoutIC(Handle target, int arity) { - if (target->NeedsArgumentsAdaption()) { - // If the number of formal parameters of the target function - // does not match the number of arguments we're passing, we - // don't want to deal with it. - return target->shared()->formal_parameter_count() == arity; - } else { - // If the target doesn't need arguments adaption, we can call - // it directly, but we avoid to do so if it has a custom call - // generator, because that is likely to generate better code. - return !target->shared()->HasCustomCallGenerator(); - } -} - - -bool Call::ComputeTarget(Handle type, Handle name) { - holder_ = Handle::null(); - while (true) { - LookupResult lookup; - type->LookupInDescriptors(NULL, *name, &lookup); - // If the function wasn't found directly in the map, we start - // looking upwards through the prototype chain. - if (!lookup.IsFound() && type->prototype()->IsJSObject()) { - holder_ = Handle(JSObject::cast(type->prototype())); - type = Handle(holder()->map()); - } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) { - target_ = Handle(lookup.GetConstantFunctionFromMap(*type)); - return CallWithoutIC(target_, arguments()->length()); - } else { - return false; - } - } -} - - -bool Call::ComputeGlobalTarget(Handle global, - Handle name) { - target_ = Handle::null(); - cell_ = Handle::null(); - LookupResult lookup; - global->Lookup(*name, &lookup); - if (lookup.IsProperty() && lookup.type() == NORMAL) { - cell_ = Handle(global->GetPropertyCell(&lookup)); - if (cell_->value()->IsJSFunction()) { - Handle candidate(JSFunction::cast(cell_->value())); - // If the function is in new space we assume it's more likely to - // change and thus prefer the general IC code. 
- if (!Heap::InNewSpace(*candidate) - && CallWithoutIC(candidate, arguments()->length())) { - target_ = candidate; - return true; - } - } - } - return false; -} - - -void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - Property* property = expression()->AsProperty(); - ASSERT(property != NULL); - // Specialize for the receiver types seen at runtime. - Literal* key = property->key()->AsLiteral(); - ASSERT(key != NULL && key->handle()->IsString()); - Handle name = Handle::cast(key->handle()); - receiver_types_ = oracle->CallReceiverTypes(this, name); -#ifdef DEBUG - if (FLAG_enable_slow_asserts) { - if (receiver_types_ != NULL) { - int length = receiver_types_->length(); - for (int i = 0; i < length; i++) { - Handle map = receiver_types_->at(i); - ASSERT(!map.is_null() && *map != NULL); - } - } - } -#endif - if (receiver_types_ != NULL && receiver_types_->length() > 0) { - Handle type = receiver_types_->at(0); - is_monomorphic_ = oracle->CallIsMonomorphic(this); - if (is_monomorphic_) is_monomorphic_ = ComputeTarget(type, name); - } -} - - -void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT); - TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT); - is_smi_only_ = left.IsSmi() && right.IsSmi(); -} - - -void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT); - TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT); - if (left.IsSmi() && right.IsSmi()) { - compare_type_ = SMI_ONLY; - } else if (left.IsNonPrimitive() && right.IsNonPrimitive()) { - compare_type_ = OBJECT_ONLY; - } else { - ASSERT(compare_type_ == NONE); - } -} - - // ---------------------------------------------------------------------------- // Implementation of AstVisitor @@ -1032,12 +742,15 @@ RegExpAlternative::RegExpAlternative(ZoneList* nodes) } -CaseClause::CaseClause(Expression* label, - ZoneList* statements, - int pos) - : label_(label), - statements_(statements), - position_(pos), - compare_type_(NONE) {} +WhileStatement::WhileStatement(ZoneStringList* labels) + : IterationStatement(labels), + cond_(NULL), + may_have_function_literal_(true) { +} + + +CaseClause::CaseClause(Expression* label, ZoneList* statements) + : label_(label), statements_(statements) { +} } } // namespace v8::internal diff --git a/src/ast.h b/src/ast.h index cdf456f67..0846dbc53 100644 --- a/src/ast.h +++ b/src/ast.h @@ -75,6 +75,7 @@ namespace internal { V(FunctionLiteral) \ V(SharedFunctionInfoLiteral) \ V(Conditional) \ + V(Slot) \ V(VariableProxy) \ V(Literal) \ V(RegExpLiteral) \ @@ -101,11 +102,10 @@ namespace internal { EXPRESSION_NODE_LIST(V) // Forward declarations -class BitVector; -class DefinitionInfo; -class MaterializedLiteral; class TargetCollector; -class TypeFeedbackOracle; +class MaterializedLiteral; +class DefinitionInfo; +class BitVector; #define DEF_FORWARD_DECLARATION(type) class type; AST_NODE_LIST(DEF_FORWARD_DECLARATION) @@ -133,10 +133,6 @@ class AstNode: public ZoneObject { }; #undef DECLARE_TYPE_ENUM - static const int kNoNumber = -1; - - AstNode() : id_(GetNextId()) { count_++; } - virtual ~AstNode() { } virtual void Accept(AstVisitor* v) = 0; @@ -154,27 +150,6 @@ class AstNode: public ZoneObject { virtual BreakableStatement* AsBreakableStatement() { return NULL; } virtual IterationStatement* AsIterationStatement() { return NULL; } virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; } - 
virtual Slot* AsSlot() { return NULL; } - - // True if the node is simple enough for us to inline calls containing it. - virtual bool IsInlineable() const { return false; } - - static int Count() { return count_; } - static void ResetIds() { current_id_ = 0; } - unsigned id() const { return id_; } - - protected: - static unsigned GetNextId() { return current_id_++; } - static unsigned ReserveIdRange(int n) { - unsigned tmp = current_id_; - current_id_ += n; - return tmp; - } - - private: - static unsigned current_id_; - static unsigned count_; - unsigned id_; }; @@ -199,18 +174,6 @@ class Statement: public AstNode { class Expression: public AstNode { public: - enum Context { - // Not assigned a context yet, or else will not be visited during - // code generation. - kUninitialized, - // Evaluated for its side effects. - kEffect, - // Evaluated for its value (and side effects). - kValue, - // Evaluated for control flow (and side effects). - kTest - }; - Expression() : bitfields_(0) {} virtual Expression* AsExpression() { return this; } @@ -218,10 +181,6 @@ class Expression: public AstNode { virtual bool IsTrivial() { return false; } virtual bool IsValidLeftHandSide() { return false; } - // Helpers for ToBoolean conversion. - virtual bool ToBooleanIsTrue() { return false; } - virtual bool ToBooleanIsFalse() { return false; } - // Symbols that cannot be parsed as array indices are considered property // names. We do not treat symbols that can be array indexes as property // names because [] for string objects is handled only by keyed ICs. @@ -239,24 +198,6 @@ class Expression: public AstNode { // True iff the expression is a literal represented as a smi. virtual bool IsSmiLiteral() { return false; } - // Type feedback information for assignments and properties. - virtual bool IsMonomorphic() { - UNREACHABLE(); - return false; - } - virtual bool IsArrayLength() { - UNREACHABLE(); - return false; - } - virtual ZoneMapList* GetReceiverTypes() { - UNREACHABLE(); - return NULL; - } - virtual Handle GetMonomorphicReceiverType() { - UNREACHABLE(); - return Handle(); - } - // Static type information for this expression. StaticType* type() { return &type_; } @@ -360,10 +301,6 @@ class BreakableStatement: public Statement { // Testers. bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; } - // Bailout support. - int EntryId() const { return entry_id_; } - int ExitId() const { return exit_id_; } - protected: inline BreakableStatement(ZoneStringList* labels, Type type); @@ -371,8 +308,6 @@ class BreakableStatement: public Statement { ZoneStringList* labels_; Type type_; BreakTarget break_target_; - int entry_id_; - int exit_id_; }; @@ -392,8 +327,6 @@ class Block: public BreakableStatement { return statements_[0]->StatementAsCountOperation(); } - virtual bool IsInlineable() const; - void AddStatement(Statement* statement) { statements_.Add(statement); } ZoneList* statements() { return &statements_; } @@ -437,10 +370,6 @@ class IterationStatement: public BreakableStatement { Statement* body() const { return body_; } void set_body(Statement* stmt) { body_ = stmt; } - // Bailout support. 
- int OsrEntryId() const { return osr_entry_id_; } - virtual int ContinueId() const = 0; - // Code generation BreakTarget* continue_target() { return &continue_target_; } @@ -454,7 +383,6 @@ class IterationStatement: public BreakableStatement { private: Statement* body_; BreakTarget continue_target_; - int osr_entry_id_; }; @@ -476,19 +404,15 @@ class DoWhileStatement: public IterationStatement { int condition_position() { return condition_position_; } void set_condition_position(int pos) { condition_position_ = pos; } - // Bailout support. - virtual int ContinueId() const { return next_id_; } - private: Expression* cond_; int condition_position_; - int next_id_; }; class WhileStatement: public IterationStatement { public: - explicit inline WhileStatement(ZoneStringList* labels); + explicit WhileStatement(ZoneStringList* labels); DECLARE_NODE_TYPE(WhileStatement) @@ -505,9 +429,6 @@ class WhileStatement: public IterationStatement { may_have_function_literal_ = value; } - // Bailout support. - virtual int ContinueId() const { return EntryId(); } - private: Expression* cond_; // True if there is a function literal subexpression in the condition. @@ -545,9 +466,6 @@ class ForStatement: public IterationStatement { may_have_function_literal_ = value; } - // Bailout support. - virtual int ContinueId() const { return next_id_; } - bool is_fast_smi_loop() { return loop_variable_ != NULL; } Variable* loop_variable() { return loop_variable_; } void set_loop_variable(Variable* var) { loop_variable_ = var; } @@ -559,7 +477,6 @@ class ForStatement: public IterationStatement { // True if there is a function literal subexpression in the condition. bool may_have_function_literal_; Variable* loop_variable_; - int next_id_; }; @@ -578,9 +495,6 @@ class ForInStatement: public IterationStatement { Expression* each() const { return each_; } Expression* enumerable() const { return enumerable_; } - // Bailout support. - virtual int ContinueId() const { return EntryId(); } - private: Expression* each_; Expression* enumerable_; @@ -594,13 +508,11 @@ class ExpressionStatement: public Statement { DECLARE_NODE_TYPE(ExpressionStatement) - virtual bool IsInlineable() const; - virtual Assignment* StatementAsSimpleAssignment(); virtual CountOperation* StatementAsCountOperation(); void set_expression(Expression* e) { expression_ = e; } - Expression* expression() const { return expression_; } + Expression* expression() { return expression_; } private: Expression* expression_; @@ -642,8 +554,7 @@ class ReturnStatement: public Statement { DECLARE_NODE_TYPE(ReturnStatement) - Expression* expression() const { return expression_; } - virtual bool IsInlineable() const; + Expression* expression() { return expression_; } private: Expression* expression_; @@ -677,7 +588,7 @@ class WithExitStatement: public Statement { class CaseClause: public ZoneObject { public: - CaseClause(Expression* label, ZoneList* statements, int pos); + CaseClause(Expression* label, ZoneList* statements); bool is_default() const { return label_ == NULL; } Expression* label() const { @@ -687,21 +598,10 @@ class CaseClause: public ZoneObject { JumpTarget* body_target() { return &body_target_; } ZoneList* statements() const { return statements_; } - int position() { return position_; } - void set_position(int pos) { position_ = pos; } - - // Type feedback information. 
- void RecordTypeFeedback(TypeFeedbackOracle* oracle); - bool IsSmiCompare() { return compare_type_ == SMI_ONLY; } - bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; } - private: Expression* label_; JumpTarget body_target_; ZoneList* statements_; - int position_; - enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY }; - CompareTypeFeedback compare_type_; }; @@ -741,8 +641,6 @@ class IfStatement: public Statement { DECLARE_NODE_TYPE(IfStatement) - virtual bool IsInlineable() const; - bool HasThenStatement() const { return !then_statement()->IsEmpty(); } bool HasElseStatement() const { return !else_statement()->IsEmpty(); } @@ -846,8 +744,6 @@ class DebuggerStatement: public Statement { class EmptyStatement: public Statement { public: DECLARE_NODE_TYPE(EmptyStatement) - - virtual bool IsInlineable() const { return true; } }; @@ -858,7 +754,6 @@ class Literal: public Expression { DECLARE_NODE_TYPE(Literal) virtual bool IsTrivial() { return true; } - virtual bool IsInlineable() const { return true; } virtual bool IsSmiLiteral() { return handle_->IsSmi(); } // Check if this literal is identical to the other literal. @@ -874,14 +769,6 @@ class Literal: public Expression { return false; } - Handle AsPropertyName() { - ASSERT(IsPropertyName()); - return Handle::cast(handle_); - } - - virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); } - virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); } - // Identity testers. bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); } bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); } @@ -1019,21 +906,16 @@ class ArrayLiteral: public MaterializedLiteral { int depth) : MaterializedLiteral(literal_index, is_simple, depth), constant_elements_(constant_elements), - values_(values), - first_element_id_(ReserveIdRange(values->length())) {} + values_(values) {} DECLARE_NODE_TYPE(ArrayLiteral) Handle constant_elements() const { return constant_elements_; } ZoneList* values() const { return values_; } - // Return an AST id for an element that is used in simulate instructions. - int GetIdForElement(int i) { return first_element_id_ + i; } - private: Handle constant_elements_; ZoneList* values_; - int first_element_id_; }; @@ -1085,8 +967,6 @@ class VariableProxy: public Expression { return is_this_ || is_trivial_; } - virtual bool IsInlineable() const; - bool IsVariable(Handle n) { return !is_this() && name().is_identical_to(n); } @@ -1164,9 +1044,7 @@ class Slot: public Expression { ASSERT(var != NULL); } - virtual void Accept(AstVisitor* v); - - virtual Slot* AsSlot() { return this; } + DECLARE_NODE_TYPE(Slot) bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; } @@ -1191,41 +1069,17 @@ class Property: public Expression { // of the resolved Reference. 
enum Type { NORMAL, SYNTHETIC }; Property(Expression* obj, Expression* key, int pos, Type type = NORMAL) - : obj_(obj), - key_(key), - pos_(pos), - type_(type), - is_monomorphic_(false), - receiver_types_(NULL), - is_array_length_(false), - is_arguments_access_(false) { } + : obj_(obj), key_(key), pos_(pos), type_(type) { } DECLARE_NODE_TYPE(Property) virtual bool IsValidLeftHandSide() { return true; } - virtual bool IsInlineable() const; Expression* obj() const { return obj_; } Expression* key() const { return key_; } int position() const { return pos_; } bool is_synthetic() const { return type_ == SYNTHETIC; } - // Marks that this is actually an argument rewritten to a keyed property - // accessing the argument through the arguments shadow object. - void set_is_arguments_access(bool is_arguments_access) { - is_arguments_access_ = is_arguments_access; - } - bool is_arguments_access() const { return is_arguments_access_; } - - // Type feedback information. - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - virtual bool IsMonomorphic() { return is_monomorphic_; } - virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; } - virtual bool IsArrayLength() { return is_array_length_; } - virtual Handle GetMonomorphicReceiverType() { - return monomorphic_receiver_type_; - } - // Returns a property singleton property access on 'this'. Used // during preparsing. static Property* this_property() { return &this_property_; } @@ -1236,12 +1090,6 @@ class Property: public Expression { int pos_; Type type_; - bool is_monomorphic_; - ZoneMapList* receiver_types_; - bool is_array_length_; - bool is_arguments_access_; - Handle monomorphic_receiver_type_; - // Dummy property used during preparsing. static Property this_property_; }; @@ -1250,55 +1098,21 @@ class Property: public Expression { class Call: public Expression { public: Call(Expression* expression, ZoneList* arguments, int pos) - : expression_(expression), - arguments_(arguments), - pos_(pos), - is_monomorphic_(false), - receiver_types_(NULL), - return_id_(GetNextId()) { - } + : expression_(expression), arguments_(arguments), pos_(pos) { } DECLARE_NODE_TYPE(Call) - virtual bool IsInlineable() const; - Expression* expression() const { return expression_; } ZoneList* arguments() const { return arguments_; } int position() { return pos_; } - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; } - virtual bool IsMonomorphic() { return is_monomorphic_; } - Handle target() { return target_; } - Handle holder() { return holder_; } - Handle cell() { return cell_; } - - bool ComputeTarget(Handle type, Handle name); - bool ComputeGlobalTarget(Handle global, Handle name); - - // Bailout support. - int ReturnId() const { return return_id_; } - static Call* sentinel() { return &sentinel_; } -#ifdef DEBUG - // Used to assert that the FullCodeGenerator records the return site. 
- bool return_is_recorded_; -#endif - private: Expression* expression_; ZoneList* arguments_; int pos_; - bool is_monomorphic_; - ZoneMapList* receiver_types_; - Handle target_; - Handle holder_; - Handle cell_; - - int return_id_; - static Call sentinel_; }; @@ -1310,8 +1124,6 @@ class CallNew: public Expression { DECLARE_NODE_TYPE(CallNew) - virtual bool IsInlineable() const; - Expression* expression() const { return expression_; } ZoneList* arguments() const { return arguments_; } int position() { return pos_; } @@ -1336,8 +1148,6 @@ class CallRuntime: public Expression { DECLARE_NODE_TYPE(CallRuntime) - virtual bool IsInlineable() const; - Handle name() const { return name_; } Runtime::Function* function() const { return function_; } ZoneList* arguments() const { return arguments_; } @@ -1359,8 +1169,6 @@ class UnaryOperation: public Expression { DECLARE_NODE_TYPE(UnaryOperation) - virtual bool IsInlineable() const; - virtual bool ResultOverwriteAllowed(); Token::Value op() const { return op_; } @@ -1378,7 +1186,7 @@ class BinaryOperation: public Expression { Expression* left, Expression* right, int pos) - : op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) { + : op_(op), left_(left), right_(right), pos_(pos) { ASSERT(Token::IsBinaryOp(op)); } @@ -1387,8 +1195,6 @@ class BinaryOperation: public Expression { DECLARE_NODE_TYPE(BinaryOperation) - virtual bool IsInlineable() const; - virtual bool ResultOverwriteAllowed(); Token::Value op() const { return op_; } @@ -1396,16 +1202,11 @@ class BinaryOperation: public Expression { Expression* right() const { return right_; } int position() const { return pos_; } - // Type feedback information. - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - bool IsSmiOnly() const { return is_smi_only_; } - private: Token::Value op_; Expression* left_; Expression* right_; int pos_; - bool is_smi_only_; }; @@ -1450,8 +1251,6 @@ class CountOperation: public Expression { virtual void MarkAsStatement() { is_prefix_ = true; } - virtual bool IsInlineable() const; - private: bool is_prefix_; IncrementOperation* increment_; @@ -1465,7 +1264,7 @@ class CompareOperation: public Expression { Expression* left, Expression* right, int pos) - : op_(op), left_(left), right_(right), pos_(pos), compare_type_(NONE) { + : op_(op), left_(left), right_(right), pos_(pos) { ASSERT(Token::IsCompareOp(op)); } @@ -1476,21 +1275,11 @@ class CompareOperation: public Expression { Expression* right() const { return right_; } int position() const { return pos_; } - virtual bool IsInlineable() const; - - // Type feedback information. - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - bool IsSmiCompare() { return compare_type_ == SMI_ONLY; } - bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; } - private: Token::Value op_; Expression* left_; Expression* right_; int pos_; - - enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY }; - CompareTypeFeedback compare_type_; }; @@ -1501,8 +1290,6 @@ class CompareToNull: public Expression { DECLARE_NODE_TYPE(CompareToNull) - virtual bool IsInlineable() const; - bool is_strict() const { return is_strict_; } Token::Value op() const { return is_strict_ ? 
Token::EQ_STRICT : Token::EQ; } Expression* expression() const { return expression_; } @@ -1528,8 +1315,6 @@ class Conditional: public Expression { DECLARE_NODE_TYPE(Conditional) - virtual bool IsInlineable() const; - Expression* condition() const { return condition_; } Expression* then_expression() const { return then_expression_; } Expression* else_expression() const { return else_expression_; } @@ -1548,12 +1333,14 @@ class Conditional: public Expression { class Assignment: public Expression { public: - Assignment(Token::Value op, Expression* target, Expression* value, int pos); + Assignment(Token::Value op, Expression* target, Expression* value, int pos) + : op_(op), target_(target), value_(value), pos_(pos), + block_start_(false), block_end_(false) { + ASSERT(Token::IsAssignmentOp(op)); + } DECLARE_NODE_TYPE(Assignment) - virtual bool IsInlineable() const; - Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; } Token::Value binary_op() const; @@ -1562,8 +1349,6 @@ class Assignment: public Expression { Expression* target() const { return target_; } Expression* value() const { return value_; } int position() { return pos_; } - BinaryOperation* binary_operation() const { return binary_operation_; } - // This check relies on the definition order of token in token.h. bool is_compound() const { return op() > Token::ASSIGN; } @@ -1576,31 +1361,13 @@ class Assignment: public Expression { void mark_block_start() { block_start_ = true; } void mark_block_end() { block_end_ = true; } - // Type feedback information. - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - virtual bool IsMonomorphic() { return is_monomorphic_; } - virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; } - virtual Handle GetMonomorphicReceiverType() { - return monomorphic_receiver_type_; - } - - // Bailout support. - int compound_bailout_id() const { return compound_bailout_id_; } - private: Token::Value op_; Expression* target_; Expression* value_; int pos_; - BinaryOperation* binary_operation_; - int compound_bailout_id_; - bool block_start_; bool block_end_; - - bool is_monomorphic_; - ZoneMapList* receiver_types_; - Handle monomorphic_receiver_type_; }; @@ -1650,7 +1417,11 @@ class FunctionLiteral: public Expression { function_token_position_(RelocInfo::kNoPosition), inferred_name_(Heap::empty_string()), try_full_codegen_(false), - pretenure_(false) { } + pretenure_(false) { +#ifdef DEBUG + already_compiled_ = false; +#endif + } DECLARE_NODE_TYPE(FunctionLiteral) @@ -1675,7 +1446,6 @@ class FunctionLiteral: public Expression { int num_parameters() { return num_parameters_; } bool AllowsLazyCompilation(); - bool AllowOptimize(); Handle debug_name() const { if (name_->length() > 0) return name_; @@ -1693,6 +1463,13 @@ class FunctionLiteral: public Expression { bool pretenure() { return pretenure_; } void set_pretenure(bool value) { pretenure_ = value; } +#ifdef DEBUG + void mark_as_compiled() { + ASSERT(!already_compiled_); + already_compiled_ = true; + } +#endif + private: Handle name_; Scope* scope_; @@ -1710,6 +1487,9 @@ class FunctionLiteral: public Expression { Handle inferred_name_; bool try_full_codegen_; bool pretenure_; +#ifdef DEBUG + bool already_compiled_; +#endif }; @@ -2114,12 +1894,8 @@ class AstVisitor BASE_EMBEDDED { // node, calling SetStackOverflow will make sure that the visitor // bails out without visiting more nodes. 
void SetStackOverflow() { stack_overflow_ = true; } - void ClearStackOverflow() { stack_overflow_ = false; } - - // Nodes not appearing in the AST, including slots. - virtual void VisitSlot(Slot* node) { UNREACHABLE(); } - // Individual AST nodes. + // Individual nodes #define DEF_VISIT(type) \ virtual void Visit##type(type* node) = 0; AST_NODE_LIST(DEF_VISIT) diff --git a/src/atomicops.h b/src/atomicops.h deleted file mode 100644 index 72a0d0fb5..000000000 --- a/src/atomicops.h +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The routines exported by this module are subtle. If you use them, even if -// you get the code right, it will depend on careful reasoning about atomicity -// and memory ordering; it will be less readable, and harder to maintain. If -// you plan to use these routines, you should have a good reason, such as solid -// evidence that performance would otherwise suffer, or there being no -// alternative. You should assume only properties explicitly guaranteed by the -// specifications in this file. You are almost certainly _not_ writing code -// just for the x86; if you assume x86 semantics, x86 hardware bugs and -// implementations on other architectures will cause your code to break. If you -// do not know what you are doing, avoid these routines, and use a Mutex. -// -// It is incorrect to make direct assignments to/from an atomic variable. -// You should use one of the Load or Store routines. The NoBarrier -// versions are provided when no barriers are needed: -// NoBarrier_Store() -// NoBarrier_Load() -// Although there is currently no compiler enforcement, you are encouraged -// to use these. -// - -#ifndef V8_ATOMICOPS_H_ -#define V8_ATOMICOPS_H_ - -#include "../include/v8.h" -#include "globals.h" - -namespace v8 { -namespace internal { - -typedef int32_t Atomic32; -#ifdef V8_HOST_ARCH_64_BIT -// We need to be able to go between Atomic64 and AtomicWord implicitly.
This -// means Atomic64 and AtomicWord should be the same type on 64-bit. -#if defined(__APPLE__) -// MacOS is an exception to the implicit conversion rule above, -// because it uses long for intptr_t. -typedef int64_t Atomic64; -#else -typedef intptr_t Atomic64; -#endif -#endif - -// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or -// Atomic64 routines below, depending on your architecture. -typedef intptr_t AtomicWord; - -// Atomically execute: -// result = *ptr; -// if (*ptr == old_value) -// *ptr = new_value; -// return result; -// -// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". -// Always returns the old value of "*ptr". -// -// This routine implies no memory barriers. -Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value); - -// Atomically store new_value into *ptr, returning the previous value held in -// *ptr. This routine implies no memory barriers. -Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value); - -// Atomically increment *ptr by "increment". Returns the new value of -// *ptr with the increment applied. This routine implies no memory barriers. -Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment); - -Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment); - -// The following lower-level operations are typically useful only to people -// implementing higher-level synchronization operations like spinlocks, -// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or -// a store with appropriate memory-ordering instructions. "Acquire" operations -// ensure that no later memory access can be reordered ahead of the operation. -// "Release" operations ensure that no previous memory access can be reordered -// after the operation. "Barrier" operations have both "Acquire" and "Release" -// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory -// access. -Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value); -Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value); - -void MemoryBarrier(); -void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value); -void Acquire_Store(volatile Atomic32* ptr, Atomic32 value); -void Release_Store(volatile Atomic32* ptr, Atomic32 value); - -Atomic32 NoBarrier_Load(volatile const Atomic32* ptr); -Atomic32 Acquire_Load(volatile const Atomic32* ptr); -Atomic32 Release_Load(volatile const Atomic32* ptr); - -// 64-bit atomic operations (only available on 64-bit processors).
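To make the acquire/release contract spelled out above concrete, here is a minimal spin-lock sketch built on these primitives. It is an editorial illustration only, not part of this patch: SpinLock is a hypothetical class, and the sketch assumes the Atomic32 declarations above are in scope.

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Acquire semantics: accesses inside the critical section cannot be
    // reordered ahead of the successful compare-and-swap, so this thread
    // sees everything the previous holder wrote before unlocking.
    while (v8::internal::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Lock is held by another thread; retry until it stores 0 again.
    }
  }

  void Unlock() {
    // Release semantics: writes made inside the critical section cannot be
    // reordered after this store, so they are published to the next locker.
    v8::internal::Release_Store(&state_, 0);
  }

 private:
  volatile v8::internal::Atomic32 state_;  // 0 = unlocked, 1 = locked.
};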
-#ifdef V8_HOST_ARCH_64_BIT -Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value); -Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value); -Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); -Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); - -Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value); -Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value); -void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value); -void Acquire_Store(volatile Atomic64* ptr, Atomic64 value); -void Release_Store(volatile Atomic64* ptr, Atomic64 value); -Atomic64 NoBarrier_Load(volatile const Atomic64* ptr); -Atomic64 Acquire_Load(volatile const Atomic64* ptr); -Atomic64 Release_Load(volatile const Atomic64* ptr); -#endif // V8_HOST_ARCH_64_BIT - -} } // namespace v8::internal - -// Include our platform specific implementation. -#if defined(_MSC_VER) && \ - (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)) -#include "atomicops_internals_x86_msvc.h" -#elif defined(__APPLE__) && \ - (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)) -#include "atomicops_internals_x86_macosx.h" -#elif defined(__GNUC__) && \ - (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)) -#include "atomicops_internals_x86_gcc.h" -#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM) -#include "atomicops_internals_arm_gcc.h" -#else -#error "Atomic operations are not supported on your platform" -#endif - -#endif // V8_ATOMICOPS_H_ diff --git a/src/atomicops_internals_arm_gcc.h b/src/atomicops_internals_arm_gcc.h deleted file mode 100644 index 6c30256d9..000000000 --- a/src/atomicops_internals_arm_gcc.h +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. 
-// -// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. - -#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ -#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ - -namespace v8 { -namespace internal { - -// 0xffff0fc0 is the hard coded address of a function provided by -// the kernel which implements an atomic compare-exchange. On older -// ARM architecture revisions (pre-v6) this may be implemented using -// a syscall. This address is stable, and in active use (hard coded) -// by at least glibc-2.7 and the Android C library. -typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, - Atomic32 new_value, - volatile Atomic32* ptr); -LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = - (LinuxKernelCmpxchgFunc) 0xffff0fc0; - -typedef void (*LinuxKernelMemoryBarrierFunc)(void); -LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = - (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; - - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value = *ptr; - do { - if (!pLinuxKernelCmpxchg(old_value, new_value, - const_cast(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - Atomic32 old_value; - do { - old_value = *ptr; - } while (pLinuxKernelCmpxchg(old_value, new_value, - const_cast(ptr))); - return old_value; -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return Barrier_AtomicIncrement(ptr, increment); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - for (;;) { - // Atomic exchange the old value with an incremented one. - Atomic32 old_value = *ptr; - Atomic32 new_value = old_value + increment; - if (pLinuxKernelCmpxchg(old_value, new_value, - const_cast(ptr)) == 0) { - // The exchange took place as expected. - return new_value; - } - // Otherwise, *ptr changed mid-loop and we need to retry. - } -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void MemoryBarrier() { - pLinuxKernelMemoryBarrier(); -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ diff --git a/src/atomicops_internals_x86_gcc.cc b/src/atomicops_internals_x86_gcc.cc deleted file mode 100644 index a57256476..000000000 --- a/src/atomicops_internals_x86_gcc.cc +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This module gets enough CPU information to optimize the -// atomicops module on x86. - -#include - -#include "atomicops.h" - -// This file only makes sense with atomicops_internals_x86_gcc.h -- it -// depends on structs that are defined in that file. If atomicops.h -// doesn't sub-include that file, then we aren't needed, and shouldn't -// try to do anything. -#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ - -// Inline cpuid instruction. In PIC compilations, %ebx contains the address -// of the global offset table. To avoid breaking such executables, this code -// must preserve that register's value across cpuid instructions. -#if defined(__i386__) -#define cpuid(a, b, c, d, inp) \ - asm("mov %%ebx, %%edi\n" \ - "cpuid\n" \ - "xchg %%edi, %%ebx\n" \ - : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) -#elif defined(__x86_64__) -#define cpuid(a, b, c, d, inp) \ - asm("mov %%rbx, %%rdi\n" \ - "cpuid\n" \ - "xchg %%rdi, %%rbx\n" \ - : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) -#endif - -#if defined(cpuid) // initialize the struct only on x86 - -// Set the flags so that code will run correctly and conservatively, so even -// if we haven't been initialized yet, we're probably single threaded, and our -// default values should hopefully be pretty safe. -struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = { - false, // bug can't exist before process spawns multiple threads - false, // no SSE2 -}; - -// Initialize the AtomicOps_Internalx86CPUFeatures struct. 
-static void AtomicOps_Internalx86CPUFeaturesInit() { - uint32_t eax; - uint32_t ebx; - uint32_t ecx; - uint32_t edx; - - // Get vendor string (issue CPUID with eax = 0) - cpuid(eax, ebx, ecx, edx, 0); - char vendor[13]; - memcpy(vendor, &ebx, 4); - memcpy(vendor + 4, &edx, 4); - memcpy(vendor + 8, &ecx, 4); - vendor[12] = 0; - - // get feature flags in ecx/edx, and family/model in eax - cpuid(eax, ebx, ecx, edx, 1); - - int family = (eax >> 8) & 0xf; // family and model fields - int model = (eax >> 4) & 0xf; - if (family == 0xf) { // use extended family and model fields - family += (eax >> 20) & 0xff; - model += ((eax >> 16) & 0xf) << 4; - } - - // Opteron Rev E has a bug in which on very rare occasions a locked - // instruction doesn't act as a read-acquire barrier if followed by a - // non-locked read-modify-write instruction. Rev F has this bug in - // pre-release versions, but not in versions released to customers, - // so we test only for Rev E, which is family 15, model 32..63 inclusive. - if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD - family == 15 && - 32 <= model && model <= 63) { - AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true; - } else { - AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false; - } - - // edx bit 26 is SSE2 which we use to tell us whether we can use mfence - AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1); -} - -namespace { - -class AtomicOpsx86Initializer { - public: - AtomicOpsx86Initializer() { - AtomicOps_Internalx86CPUFeaturesInit(); - } -}; - -// A global to get us initialized on startup via static initialization :/ -AtomicOpsx86Initializer g_initer; - -} // namespace - -#endif // if x86 - -#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ diff --git a/src/atomicops_internals_x86_gcc.h b/src/atomicops_internals_x86_gcc.h deleted file mode 100644 index 3f17fa0dc..000000000 --- a/src/atomicops_internals_x86_gcc.h +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
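As a worked example of the family/model decoding in AtomicOps_Internalx86CPUFeaturesInit() above, using a hypothetical CPUID signature chosen to land in the affected Opteron Rev E range:

// Illustrative decode of a made-up signature eax = 0x00020F71:
//   base family = (eax >> 8) & 0xf   = 0xf = 15
//   base model  = (eax >> 4) & 0xf   = 0x7 = 7
// Because family == 0xf, the extended fields are added in:
//   family += (eax >> 20) & 0xff        -> 15 + 0  = 15
//   model  += ((eax >> 16) & 0xf) << 4  -> 7  + 32 = 39
// Family 15 with model 39 falls inside 32..63, so an "AuthenticAMD" part
// reporting this signature would have has_amd_lock_mb_bug set to true.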
- -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ -#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_ - -// This struct is not part of the public API of this module; clients may not -// use it. -// Features of this x86. Values may not be correct before main() is run, -// but are set conservatively. -struct AtomicOps_x86CPUFeatureStruct { - bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence - // after acquire compare-and-swap. - bool has_sse2; // Processor has SSE2. -}; -extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; - -#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") - -namespace v8 { -namespace internal { - -// 32-bit low-level operations on any platform. - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev; - __asm__ __volatile__("lock; cmpxchgl %1,%2" - : "=a" (prev) - : "q" (new_value), "m" (*ptr), "0" (old_value) - : "memory"); - return prev; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg. - : "=r" (new_value) - : "m" (*ptr), "0" (new_value) - : "memory"); - return new_value; // Now it's the previous value. -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - Atomic32 temp = increment; - __asm__ __volatile__("lock; xaddl %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now holds the old value of *ptr - return temp + increment; -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - Atomic32 temp = increment; - __asm__ __volatile__("lock; xaddl %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now holds the old value of *ptr - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return temp + increment; -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return x; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -#if defined(__x86_64__) - -// 64-bit implementations of memory barrier can be simpler, because -// "mfence" is guaranteed to exist.
-inline void MemoryBarrier() { - __asm__ __volatile__("mfence" : : : "memory"); -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -#else - -inline void MemoryBarrier() { - if (AtomicOps_Internalx86CPUFeatures.has_sse2) { - __asm__ __volatile__("mfence" : : : "memory"); - } else { // mfence is faster but not present on PIII - Atomic32 x = 0; - NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII - } -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - if (AtomicOps_Internalx86CPUFeatures.has_sse2) { - *ptr = value; - __asm__ __volatile__("mfence" : : : "memory"); - } else { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier on PIII - } -} -#endif - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - ATOMICOPS_COMPILER_BARRIER(); - *ptr = value; // An x86 store acts as a release barrier. - // See comments in Atomic64 version of Release_Store(), below. -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; // An x86 load acts as an acquire barrier. - // See comments in Atomic64 version of Release_Store(), below. - ATOMICOPS_COMPILER_BARRIER(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -#if defined(__x86_64__) - -// 64-bit low-level operations on 64-bit platform. - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev; - __asm__ __volatile__("lock; cmpxchgq %1,%2" - : "=a" (prev) - : "q" (new_value), "m" (*ptr), "0" (old_value) - : "memory"); - return prev; -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg. - : "=r" (new_value) - : "m" (*ptr), "0" (new_value) - : "memory"); - return new_value; // Now it's the previous value. -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - Atomic64 temp = increment; - __asm__ __volatile__("lock; xaddq %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now contains the previous value of *ptr - return temp + increment; -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - Atomic64 temp = increment; - __asm__ __volatile__("lock; xaddq %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now contains the previous value of *ptr - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return temp + increment; -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - ATOMICOPS_COMPILER_BARRIER(); - - *ptr = value; // An x86 store acts as a release barrier - // for current AMD/Intel chips as of Jan 2008. - // See also Acquire_Load(), below. - - // When new chips come out, check: - // IA-32 Intel Architecture Software Developer's Manual, Volume 3: - // System Programming Guide, Chapter 7: Multiple-processor management, - // Section 7.2, Memory Ordering.
- Last seen at: - http://developer.intel.com/design/pentium4/manuals/index_new.htm - // - // x86 stores/loads fail to act as barriers for a few instructions (clflush - // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are - // not generated by the compiler, and are rare. Users of these instructions - // need to know about cache behaviour in any case since all of these involve - // either flushing cache lines or non-temporal cache hints. -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return *ptr; -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - Atomic64 value = *ptr; // An x86 load acts as an acquire barrier, - // for current AMD/Intel chips as of Jan 2008. - // See also Release_Store(), above. - ATOMICOPS_COMPILER_BARRIER(); - return value; -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return x; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -#endif // defined(__x86_64__) - -} } // namespace v8::internal - -#undef ATOMICOPS_COMPILER_BARRIER - -#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_ diff --git a/src/atomicops_internals_x86_macosx.h b/src/atomicops_internals_x86_macosx.h deleted file mode 100644 index 2bac006bd..000000000 --- a/src/atomicops_internals_x86_macosx.h +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead.
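The Release_Store()/Acquire_Load() pairing that these comments describe is the usual way to publish data from one thread to another. A minimal sketch, assuming the declarations from atomicops.h; the payload and flag names are invented for illustration:

int payload = 0;                   // Ordinary, non-atomic data being published.
v8::internal::Atomic32 ready = 0;  // Guard flag; nonzero means payload is set.

void Producer() {
  payload = 42;  // 1. Write the data first.
  // 2. Release store: the payload write cannot be reordered after this.
  v8::internal::Release_Store(&ready, 1);
}

void Consumer() {
  // 3. Acquire load: if this observes 1, the payload write is visible too.
  if (v8::internal::Acquire_Load(&ready) == 1) {
    // payload is guaranteed to read as 42 here.
  }
}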
- -#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ -#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ - -#include - -namespace v8 { -namespace internal { - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value; - do { - if (OSAtomicCompareAndSwap32(old_value, new_value, - const_cast(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, - Atomic32 new_value) { - Atomic32 old_value; - do { - old_value = *ptr; - } while (!OSAtomicCompareAndSwap32(old_value, new_value, - const_cast(ptr))); - return old_value; -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr, - Atomic32 increment) { - return OSAtomicAdd32(increment, const_cast(ptr)); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr, - Atomic32 increment) { - return OSAtomicAdd32Barrier(increment, const_cast(ptr)); -} - -inline void MemoryBarrier() { - OSMemoryBarrier(); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value; - do { - if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, - const_cast(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, - Atomic32 old_value, - Atomic32 new_value) { - return Acquire_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { - Atomic32 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { - MemoryBarrier(); - return *ptr; -} - -#ifdef __LP64__ - -// 64-bit implementation on 64-bit platform - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev_value; - do { - if (OSAtomicCompareAndSwap64(old_value, new_value, - const_cast(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, - Atomic64 new_value) { - Atomic64 old_value; - do { - old_value = *ptr; - } while (!OSAtomicCompareAndSwap64(old_value, new_value, - const_cast(ptr))); - return old_value; -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, - Atomic64 increment) { - return OSAtomicAdd64(increment, const_cast(ptr)); -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr, - Atomic64 increment) { - return OSAtomicAdd64Barrier(increment, const_cast(ptr)); -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev_value; - do { - if (OSAtomicCompareAndSwap64Barrier(old_value, new_value, - const_cast(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, - Atomic64 old_value, 
- Atomic64 new_value) { - // The lib kern interface does not distinguish between - // Acquire and Release memory barriers; they are equivalent. - return Acquire_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return *ptr; -} - -inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { - Atomic64 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { - MemoryBarrier(); - return *ptr; -} - -#endif // defined(__LP64__) - -// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different -// on the Mac, even when they are the same size. We need to explicitly cast -// from AtomicWord to Atomic32/64 to implement the AtomicWord interface. -#ifdef __LP64__ -#define AtomicWordCastType Atomic64 -#else -#define AtomicWordCastType Atomic32 -#endif - -inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, - AtomicWord old_value, - AtomicWord new_value) { - return NoBarrier_CompareAndSwap( - reinterpret_cast(ptr), - old_value, new_value); -} - -inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, - AtomicWord new_value) { - return NoBarrier_AtomicExchange( - reinterpret_cast(ptr), new_value); -} - -inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr, - AtomicWord increment) { - return NoBarrier_AtomicIncrement( - reinterpret_cast(ptr), increment); -} - -inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr, - AtomicWord increment) { - return Barrier_AtomicIncrement( - reinterpret_cast(ptr), increment); -} - -inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, - AtomicWord old_value, - AtomicWord new_value) { - return v8::internal::Acquire_CompareAndSwap( - reinterpret_cast(ptr), - old_value, new_value); -} - -inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, - AtomicWord old_value, - AtomicWord new_value) { - return v8::internal::Release_CompareAndSwap( - reinterpret_cast(ptr), - old_value, new_value); -} - -inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { - NoBarrier_Store( - reinterpret_cast(ptr), value); -} - -inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { - return v8::internal::Acquire_Store( - reinterpret_cast(ptr), value); -} - -inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { - return v8::internal::Release_Store( - reinterpret_cast(ptr), value); -} - -inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) { - return NoBarrier_Load( - reinterpret_cast(ptr)); -} - -inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { - return v8::internal::Acquire_Load( - reinterpret_cast(ptr)); -} - -inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { - return v8::internal::Release_Load( - reinterpret_cast(ptr)); -} - -#undef AtomicWordCastType - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ diff --git a/src/atomicops_internals_x86_msvc.h b/src/atomicops_internals_x86_msvc.h deleted file mode 100644 index a7753e489..000000000 --- a/src/atomicops_internals_x86_msvc.h +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2010 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ -#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ - -#include "win32-headers.h" - -namespace v8 { -namespace internal { - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - LONG result = InterlockedCompareExchange( - reinterpret_cast(ptr), - static_cast(new_value), - static_cast(old_value)); - return static_cast(result); -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - LONG result = InterlockedExchange( - reinterpret_cast(ptr), - static_cast(new_value)); - return static_cast(result); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return InterlockedExchangeAdd( - reinterpret_cast(ptr), - static_cast(increment)) + increment; -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return Barrier_AtomicIncrement(ptr, increment); -} - -#if !(defined(_MSC_VER) && _MSC_VER >= 1400) -#error "We require at least vs2005 for MemoryBarrier" -#endif -inline void MemoryBarrier() { - // We use MemoryBarrier from WinNT.h - ::MemoryBarrier(); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier in this implementation -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; // works w/o barrier for current Intel chips as of June 2005 - // 
See comments in Atomic64 version of Release_Store() below. -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -#if defined(_WIN64) - -// 64-bit low-level operations on 64-bit platform. - -STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID)); - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - PVOID result = InterlockedCompareExchangePointer( - reinterpret_cast(ptr), - reinterpret_cast(new_value), reinterpret_cast(old_value)); - return reinterpret_cast(result); -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - PVOID result = InterlockedExchangePointer( - reinterpret_cast(ptr), - reinterpret_cast(new_value)); - return reinterpret_cast(result); -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return InterlockedExchangeAdd64( - reinterpret_cast(ptr), - static_cast(increment)) + increment; -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return Barrier_AtomicIncrement(ptr, increment); -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier in this implementation -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; // works w/o barrier for current Intel chips as of June 2005 - - // When new chips come out, check: - // IA-32 Intel Architecture Software Developer's Manual, Volume 3: - // System Programming Guide, Chapter 7: Multiple-processor management, - // Section 7.2, Memory Ordering. - // Last seen at: - // http://developer.intel.com/design/pentium4/manuals/index_new.htm -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return *ptr; -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - Atomic64 value = *ptr; - return value; -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - - -#endif // defined(_WIN64) - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc index 800c4376c..f60a975dc 100644 --- a/src/bootstrapper.cc +++ b/src/bootstrapper.cc @@ -500,24 +500,6 @@ Handle Genesis::CreateEmptyFunction() { } -static void AddToWeakGlobalContextList(Context* context) { - ASSERT(context->IsGlobalContext()); -#ifdef DEBUG - { // NOLINT - ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined()); - // Check that context is not in the list yet.
- for (Object* current = Heap::global_contexts_list(); - !current->IsUndefined(); - current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) { - ASSERT(current != context); - } - } -#endif - context->set(Context::NEXT_CONTEXT_LINK, Heap::global_contexts_list()); - Heap::set_global_contexts_list(context); -} - - void Genesis::CreateRoots() { // Allocate the global context FixedArray first and then patch the // closure and extension object later (we need the empty function @@ -526,7 +508,6 @@ void Genesis::CreateRoots() { global_context_ = Handle::cast( GlobalHandles::Create(*Factory::NewGlobalContext())); - AddToWeakGlobalContextList(*global_context_); Top::set_context(*global_context()); // Allocate the message listeners object. @@ -1615,7 +1596,7 @@ bool Genesis::InstallJSBuiltins(Handle builtins) { = Handle(function->shared()); if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false; // Set the code object on the function object. - function->ReplaceCode(function->shared()->code()); + function->set_code(function->shared()->code()); builtins->set_javascript_builtin_code(id, shared->code()); } return true; @@ -1803,7 +1784,6 @@ Genesis::Genesis(Handle global_object, if (!new_context.is_null()) { global_context_ = Handle::cast(GlobalHandles::Create(*new_context)); - AddToWeakGlobalContextList(*global_context_); Top::set_context(*global_context_); i::Counters::contexts_created_by_snapshot.Increment(); result_ = global_context_; @@ -1839,6 +1819,11 @@ Genesis::Genesis(Handle global_object, i::Counters::contexts_created_from_scratch.Increment(); } + // Add this context to the weak list of global contexts. + (*global_context_)->set(Context::NEXT_CONTEXT_LINK, + Heap::global_contexts_list()); + Heap::set_global_contexts_list(*global_context_); + result_ = global_context_; } diff --git a/src/builtins.cc b/src/builtins.cc index a833119e8..e88ef6f0e 100644 --- a/src/builtins.cc +++ b/src/builtins.cc @@ -32,7 +32,6 @@ #include "bootstrapper.h" #include "builtins.h" #include "ic-inl.h" -#include "vm-state-inl.h" namespace v8 { namespace internal { @@ -1032,7 +1031,9 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper( { // Leaving JavaScript. VMState state(EXTERNAL); - ExternalCallbackScope call_scope(v8::ToCData
<Address>(callback_obj)); +#ifdef ENABLE_LOGGING_AND_PROFILING + state.set_external_callback(v8::ToCData<Address>
(callback_obj)); +#endif value = callback(new_args); } if (value.IsEmpty()) { @@ -1102,7 +1103,9 @@ BUILTIN(FastHandleApiCall) { { // Leaving JavaScript. VMState state(EXTERNAL); - ExternalCallbackScope call_scope(v8::ToCData<Address>
(callback_obj)); +#ifdef ENABLE_LOGGING_AND_PROFILING + state.set_external_callback(v8::ToCData<Address>
(callback_obj)); +#endif v8::InvocationCallback callback = v8::ToCData<v8::InvocationCallback>(callback_obj); @@ -1166,7 +1169,9 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor( { // Leaving JavaScript. VMState state(EXTERNAL); - ExternalCallbackScope call_scope(v8::ToCData<Address>
(callback_obj)); +#ifdef ENABLE_LOGGING_AND_PROFILING + state.set_external_callback(v8::ToCData<Address>
(callback_obj)); +#endif value = callback(new_args); } if (value.IsEmpty()) { @@ -1327,11 +1332,6 @@ static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) { } -static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) { - StoreIC::GenerateGlobalProxy(masm); -} - - static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) { KeyedStoreIC::GenerateGeneric(masm); } @@ -1581,5 +1581,4 @@ const char* Builtins::Lookup(byte* pc) { return NULL; } - } } // namespace v8::internal diff --git a/src/builtins.h b/src/builtins.h index d2b4be2f7..b5e8c4e8f 100644 --- a/src/builtins.h +++ b/src/builtins.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2006-2008 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -71,10 +71,6 @@ enum BuiltinExtraArguments { V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \ V(LazyCompile, BUILTIN, UNINITIALIZED) \ - V(LazyRecompile, BUILTIN, UNINITIALIZED) \ - V(NotifyDeoptimized, BUILTIN, UNINITIALIZED) \ - V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED) \ - V(NotifyOSR, BUILTIN, UNINITIALIZED) \ \ V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \ @@ -106,7 +102,6 @@ enum BuiltinExtraArguments { V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \ V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \ V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \ - V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC) \ \ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \ V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \ @@ -125,9 +120,7 @@ enum BuiltinExtraArguments { V(ArrayCode, BUILTIN, UNINITIALIZED) \ V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \ \ - V(StringConstructCode, BUILTIN, UNINITIALIZED) \ - \ - V(OnStackReplacement, BUILTIN, UNINITIALIZED) + V(StringConstructCode, BUILTIN, UNINITIALIZED) #ifdef ENABLE_DEBUGGER_SUPPORT @@ -263,10 +256,6 @@ class Builtins : public AllStatic { static void Generate_JSEntryTrampoline(MacroAssembler* masm); static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm); static void Generate_LazyCompile(MacroAssembler* masm); - static void Generate_LazyRecompile(MacroAssembler* masm); - static void Generate_NotifyDeoptimized(MacroAssembler* masm); - static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm); - static void Generate_NotifyOSR(MacroAssembler* masm); static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm); static void Generate_FunctionCall(MacroAssembler* masm); @@ -276,8 +265,6 @@ class Builtins : public AllStatic { static void Generate_ArrayConstructCode(MacroAssembler* masm); static void Generate_StringConstructCode(MacroAssembler* masm); - - static void Generate_OnStackReplacement(MacroAssembler* masm); }; } } // namespace v8::internal diff --git a/src/checks.h b/src/checks.h index aa557f00b..d49f97f13 100644 --- a/src/checks.h +++ b/src/checks.h @@ -281,7 +281,7 @@ bool EnableSlowAsserts(); // safely enabled in release mode. Moreover, the ((void) 0) expression // obeys different syntax rules than typedef's, e.g. it can't appear // inside class declaration, this leads to inconsistency between debug -// and release compilation modes behavior. +// and release compilation modes behaviour. 
#define STATIC_ASSERT(test) STATIC_CHECK(test) #define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p) diff --git a/src/code-stubs.cc b/src/code-stubs.cc index 1b0d8b0b6..8b9198fb9 100644 --- a/src/code-stubs.cc +++ b/src/code-stubs.cc @@ -103,7 +103,6 @@ Handle CodeStub::GetCode() { GetICState()); Handle new_object = Factory::NewCode(desc, flags, masm.CodeObject()); RecordCodeGeneration(*new_object, &masm); - FinishCode(*new_object); // Update the dictionary and the root in Heap. Handle dict = @@ -143,7 +142,6 @@ MaybeObject* CodeStub::TryGetCode() { } code = Code::cast(new_object); RecordCodeGeneration(code, &masm); - FinishCode(code); // Try to update the code cache but do not fail if unable. MaybeObject* maybe_new_object = @@ -172,29 +170,4 @@ const char* CodeStub::MajorName(CodeStub::Major major_key, } -int ICCompareStub::MinorKey() { - return OpField::encode(op_ - Token::EQ) | StateField::encode(state_); -} - - -void ICCompareStub::Generate(MacroAssembler* masm) { - switch (state_) { - case CompareIC::UNINITIALIZED: - GenerateMiss(masm); - break; - case CompareIC::SMIS: - GenerateSmis(masm); - break; - case CompareIC::HEAP_NUMBERS: - GenerateHeapNumbers(masm); - break; - case CompareIC::OBJECTS: - GenerateObjects(masm); - break; - default: - UNREACHABLE(); - } -} - - } } // namespace v8::internal diff --git a/src/code-stubs.h b/src/code-stubs.h index 1010e9512..b156647d5 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -29,6 +29,7 @@ #define V8_CODE_STUBS_H_ #include "globals.h" +#include "macro-assembler.h" namespace v8 { namespace internal { @@ -38,15 +39,11 @@ namespace internal { #define CODE_STUB_LIST_ALL_PLATFORMS(V) \ V(CallFunction) \ V(GenericBinaryOp) \ - V(TypeRecordingBinaryOp) \ V(StringAdd) \ - V(StringCharAt) \ V(SubString) \ V(StringCompare) \ V(SmiOp) \ V(Compare) \ - V(CompareIC) \ - V(MathPow) \ V(RecordWrite) \ V(ConvertToDouble) \ V(WriteInt32ToHeapNumber) \ @@ -63,7 +60,6 @@ namespace internal { V(CounterOp) \ V(ArgumentsAccess) \ V(RegExpExec) \ - V(RegExpConstructResult) \ V(NumberToString) \ V(CEntry) \ V(JSEntry) \ @@ -129,7 +125,7 @@ class CodeStub BASE_EMBEDDED { virtual ~CodeStub() {} protected: - static const int kMajorBits = 6; + static const int kMajorBits = 5; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits; private: @@ -147,9 +143,6 @@ class CodeStub BASE_EMBEDDED { // initially generated. void RecordCodeGeneration(Code* code, MacroAssembler* masm); - // Finish the code object after it has been generated. - virtual void FinishCode(Code* code) { } - // Returns information for computing the number key. virtual Major MajorKey() = 0; virtual int MinorKey() = 0; @@ -223,11 +216,11 @@ namespace v8 { namespace internal { -// RuntimeCallHelper implementation used in stubs: enters/leaves a +// RuntimeCallHelper implementation used in IC stubs: enters/leaves a // newly created internal frame before/after the runtime call. 
-class StubRuntimeCallHelper : public RuntimeCallHelper { +class ICRuntimeCallHelper : public RuntimeCallHelper { public: - StubRuntimeCallHelper() {} + ICRuntimeCallHelper() {} virtual void BeforeCall(MacroAssembler* masm) const; @@ -383,61 +376,9 @@ class GenericUnaryOpStub : public CodeStub { }; -class MathPowStub: public CodeStub { - public: - MathPowStub() {} - virtual void Generate(MacroAssembler* masm); - - private: - virtual CodeStub::Major MajorKey() { return MathPow; } - virtual int MinorKey() { return 0; } - - const char* GetName() { return "MathPowStub"; } -}; - - -class StringCharAtStub: public CodeStub { - public: - StringCharAtStub() {} - - private: - Major MajorKey() { return StringCharAt; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - -class ICCompareStub: public CodeStub { - public: - ICCompareStub(Token::Value op, CompareIC::State state) - : op_(op), state_(state) { - ASSERT(Token::IsCompareOp(op)); - } - - virtual void Generate(MacroAssembler* masm); - - private: - class OpField: public BitField { }; - class StateField: public BitField { }; - - virtual void FinishCode(Code* code) { code->set_compare_state(state_); } - - virtual CodeStub::Major MajorKey() { return CompareIC; } - virtual int MinorKey(); - - virtual int GetCodeKind() { return Code::COMPARE_IC; } - - void GenerateSmis(MacroAssembler* masm); - void GenerateHeapNumbers(MacroAssembler* masm); - void GenerateObjects(MacroAssembler* masm); - void GenerateMiss(MacroAssembler* masm); - - bool strict() const { return op_ == Token::EQ_STRICT; } - Condition GetCondition() const { return CompareIC::ComputeCondition(op_); } - - Token::Value op_; - CompareIC::State state_; +enum NaNInformation { + kBothCouldBeNaN, + kCantBothBeNaN }; @@ -450,12 +391,6 @@ enum CompareFlags { }; -enum NaNInformation { - kBothCouldBeNaN, - kCantBothBeNaN -}; - - class CompareStub: public CodeStub { public: CompareStub(Condition cc, @@ -463,7 +398,7 @@ class CompareStub: public CodeStub { CompareFlags flags, Register lhs, Register rhs) : - cc_(cc), + cc_(cc), strict_(strict), never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0), include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0), @@ -505,7 +440,6 @@ class CompareStub: public CodeStub { // Register holding the left hand side of the comparison if the stub gives // a choice, no_reg otherwise. - Register lhs_; // Register holding the right hand side of the comparison if the stub gives // a choice, no_reg otherwise. @@ -523,11 +457,6 @@ class CompareStub: public CodeStub { int MinorKey(); - virtual int GetCodeKind() { return Code::COMPARE_IC; } - virtual void FinishCode(Code* code) { - code->set_compare_state(CompareIC::GENERIC); - } - // Branch to the label if the given object isn't a symbol. void BranchIfNonSymbol(MacroAssembler* masm, Label* label, @@ -561,11 +490,9 @@ class CompareStub: public CodeStub { class CEntryStub : public CodeStub { public: - explicit CEntryStub(int result_size) - : result_size_(result_size), save_doubles_(false) { } + explicit CEntryStub(int result_size) : result_size_(result_size) { } void Generate(MacroAssembler* masm); - void SaveDoubles() { save_doubles_ = true; } private: void GenerateCore(MacroAssembler* masm, @@ -581,9 +508,10 @@ class CEntryStub : public CodeStub { // Number of pointers/values returned. const int result_size_; - bool save_doubles_; Major MajorKey() { return CEntry; } + // Minor key must differ if different result_size_ values mean different + // code is generated.
int MinorKey(); const char* GetName() { return "CEntryStub"; } @@ -669,26 +597,6 @@ class RegExpExecStub: public CodeStub { }; -class RegExpConstructResultStub: public CodeStub { - public: - RegExpConstructResultStub() { } - - private: - Major MajorKey() { return RegExpConstructResult; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "RegExpConstructResultStub"; } - -#ifdef DEBUG - void Print() { - PrintF("RegExpConstructResultStub\n"); - } -#endif -}; - - class CallFunctionStub: public CodeStub { public: CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags) diff --git a/src/codegen.cc b/src/codegen.cc index 8a64d77b7..fb8c5cd4a 100644 --- a/src/codegen.cc +++ b/src/codegen.cc @@ -139,16 +139,6 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) { print_source = FLAG_print_source; print_ast = FLAG_print_ast; print_json_ast = FLAG_print_json_ast; - Vector filter = CStrVector(FLAG_hydrogen_filter); - if (print_source && !filter.is_empty()) { - print_source = info->function()->name()->IsEqualTo(filter); - } - if (print_ast && !filter.is_empty()) { - print_ast = info->function()->name()->IsEqualTo(filter); - } - if (print_json_ast && !filter.is_empty()) { - print_json_ast = info->function()->name()->IsEqualTo(filter); - } ftype = "user-defined"; } @@ -184,24 +174,14 @@ Handle CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm, masm->GetCode(&desc); Handle code = Factory::NewCode(desc, flags, masm->CodeObject()); - if (!code.is_null()) { - Counters::total_compiled_code_size.Increment(code->instruction_size()); - } - return code; -} - - -void CodeGenerator::PrintCode(Handle code, CompilationInfo* info) { #ifdef ENABLE_DISASSEMBLER bool print_code = Bootstrapper::IsActive() ? FLAG_print_builtin_code - : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code)); - Vector filter = CStrVector(FLAG_hydrogen_filter); - FunctionLiteral* function = info->function(); - bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter); - if (print_code && match) { + : FLAG_print_code; + if (print_code) { // Print the source code if available. Handle