From: sgjesse@chromium.org Date: Thu, 4 Feb 2010 20:36:58 +0000 (+0000) Subject: MIPS port initial commit X-Git-Tag: upstream/4.7.83~22537 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a6a7c75ae0bb0220725df642ce1e38fad4edf4b9;p=platform%2Fupstream%2Fv8.git MIPS port initial commit This is the first step in the MIPS port of V8. It adds assembler, disassembler and simulator for the MIPS32 architecture. Contains stubbed out implementation of all the compiler/code generator infrastructure to make it all build. Patch by Alexandre Rames from Sigma Designs Inc. This is the landing of http://codereview.chromium.org/543161. Review URL: http://codereview.chromium.org/561072 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3799 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- diff --git a/AUTHORS b/AUTHORS index 5d712fc27..9128ba3e7 100644 --- a/AUTHORS +++ b/AUTHORS @@ -4,6 +4,7 @@ # Name/Organization Google Inc. +Sigma Designs Inc. Alexander Botero-Lowry Alexandre Vassalotti diff --git a/SConstruct b/SConstruct index c9c5a55c8..0b038039e 100644 --- a/SConstruct +++ b/SConstruct @@ -191,6 +191,17 @@ LIBRARY_FLAGS = { 'armvariant:arm': { 'CPPDEFINES': ['V8_ARM_VARIANT_ARM'] }, + 'arch:mips': { + 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'], + 'simulator:none': { + 'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'], + 'LDFLAGS': ['-EL'] + } + }, + 'simulator:mips': { + 'CCFLAGS': ['-m32'], + 'LINKFLAGS': ['-m32'] + }, 'arch:x64': { 'CPPDEFINES': ['V8_TARGET_ARCH_X64'], 'CCFLAGS': ['-m64'], @@ -293,6 +304,9 @@ V8_EXTRA_FLAGS = { # used by the arm simulator. 'WARNINGFLAGS': ['/wd4996'] }, + 'arch:mips': { + 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'], + }, 'disassembler:on': { 'CPPDEFINES': ['ENABLE_DISASSEMBLER'] } @@ -458,10 +472,22 @@ SAMPLE_FLAGS = { 'CCFLAGS': ['-m64'], 'LINKFLAGS': ['-m64'] }, + 'arch:mips': { + 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'], + 'simulator:none': { + 'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'], + 'LINKFLAGS': ['-EL'], + 'LDFLAGS': ['-EL'] + } + }, 'simulator:arm': { 'CCFLAGS': ['-m32'], 'LINKFLAGS': ['-m32'] }, + 'simulator:mips': { + 'CCFLAGS': ['-m32'], + 'LINKFLAGS': ['-m32'] + }, 'mode:release': { 'CCFLAGS': ['-O2'] }, @@ -602,7 +628,7 @@ SIMPLE_OPTIONS = { 'help': 'the os to build for (' + OS_GUESS + ')' }, 'arch': { - 'values':['arm', 'ia32', 'x64'], + 'values':['arm', 'ia32', 'x64', 'mips'], 'default': ARCH_GUESS, 'help': 'the architecture to build for (' + ARCH_GUESS + ')' }, @@ -652,7 +678,7 @@ SIMPLE_OPTIONS = { 'help': 'use Microsoft Visual C++ link-time code generation' }, 'simulator': { - 'values': ['arm', 'none'], + 'values': ['arm', 'mips', 'none'], 'default': 'none', 'help': 'build with simulator' }, @@ -872,6 +898,11 @@ def PostprocessOptions(options): options['armvariant'] = 'arm' if (options['armvariant'] != 'none' and options['arch'] != 'arm'): options['armvariant'] = 'none' + if options['arch'] == 'mips': + if ('regexp' in ARGUMENTS) and options['regexp'] == 'native': + # Print a warning if native regexp is specified for mips + print "Warning: forcing regexp to interpreted for mips" + options['regexp'] = 'interpreted' def ParseEnvOverrides(arg, imports): diff --git a/src/SConscript b/src/SConscript index ebda77ac2..b5a894fcf 100755 --- a/src/SConscript +++ b/src/SConscript @@ -131,6 +131,24 @@ SOURCES = { 'armvariant:thumb2': Split(""" arm/assembler-thumb2.cc """), + 'arch:mips': Split(""" + mips/assembler-mips.cc + mips/builtins-mips.cc + mips/codegen-mips.cc + mips/constants-mips.cc + mips/cpu-mips.cc + 
mips/debug-mips.cc + mips/disasm-mips.cc + mips/fast-codegen-mips.cc + mips/full-codegen-mips.cc + mips/frames-mips.cc + mips/ic-mips.cc + mips/jump-target-mips.cc + mips/macro-assembler-mips.cc + mips/register-allocator-mips.cc + mips/stub-cache-mips.cc + mips/virtual-frame-mips.cc + """), 'arch:ia32': Split(""" ia32/assembler-ia32.cc ia32/builtins-ia32.cc @@ -168,6 +186,7 @@ SOURCES = { x64/virtual-frame-x64.cc """), 'simulator:arm': ['arm/simulator-arm.cc'], + 'simulator:mips': ['mips/simulator-mips.cc'], 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'], 'os:linux': ['platform-linux.cc', 'platform-posix.cc'], diff --git a/src/assembler.h b/src/assembler.h index ec47d5712..942ce476d 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -506,8 +506,10 @@ static inline bool is_intn(int x, int n) { return -(1 << (n-1)) <= x && x < (1 << (n-1)); } -static inline bool is_int24(int x) { return is_intn(x, 24); } static inline bool is_int8(int x) { return is_intn(x, 8); } +static inline bool is_int16(int x) { return is_intn(x, 16); } +static inline bool is_int18(int x) { return is_intn(x, 18); } +static inline bool is_int24(int x) { return is_intn(x, 24); } static inline bool is_uintn(int x, int n) { return (x & -(1 << n)) == 0; @@ -519,9 +521,20 @@ static inline bool is_uint4(int x) { return is_uintn(x, 4); } static inline bool is_uint5(int x) { return is_uintn(x, 5); } static inline bool is_uint6(int x) { return is_uintn(x, 6); } static inline bool is_uint8(int x) { return is_uintn(x, 8); } +static inline bool is_uint10(int x) { return is_uintn(x, 10); } static inline bool is_uint12(int x) { return is_uintn(x, 12); } static inline bool is_uint16(int x) { return is_uintn(x, 16); } static inline bool is_uint24(int x) { return is_uintn(x, 24); } +static inline bool is_uint26(int x) { return is_uintn(x, 26); } +static inline bool is_uint28(int x) { return is_uintn(x, 28); } + +static inline int NumberOfBitsSet(uint32_t x) { + unsigned int num_bits_set; + for (num_bits_set = 0; x; x >>= 1) { + num_bits_set += x & 1; + } + return num_bits_set; +} } } // namespace v8::internal diff --git a/src/codegen-inl.h b/src/codegen-inl.h index 465cf68c9..ea9bf0a49 100644 --- a/src/codegen-inl.h +++ b/src/codegen-inl.h @@ -39,6 +39,8 @@ #include "x64/codegen-x64-inl.h" #elif V8_TARGET_ARCH_ARM #include "arm/codegen-arm-inl.h" +#elif V8_TARGET_ARCH_MIPS +#include "mips/codegen-mips-inl.h" #else #error Unsupported target architecture. #endif diff --git a/src/codegen.h b/src/codegen.h index d0be5f1b1..5a143bc27 100644 --- a/src/codegen.h +++ b/src/codegen.h @@ -86,6 +86,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; #include "x64/codegen-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/codegen-arm.h" +#elif V8_TARGET_ARCH_MIPS +#include "mips/codegen-mips.h" #else #error Unsupported target architecture. 
 #endif
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index b57f2cb62..07b2aa502 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -218,7 +218,7 @@ DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
 // rewriter.cc
 DEFINE_bool(optimize_ast, true, "optimize the ast")
 
-// simulator-arm.cc
+// simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "trace simulator execution")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
diff --git a/src/frames-inl.h b/src/frames-inl.h
index c5f2f1a33..722185132 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -36,6 +36,8 @@
 #include "x64/frames-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/frames-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/frames-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/globals.h b/src/globals.h
index 39f6bcb26..369aafa68 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -46,6 +46,9 @@ namespace internal {
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
+#elif defined(_MIPS_ARCH_MIPS32R2)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
 #else
 #error Your host architecture was not detected as supported by v8
 #endif
@@ -53,6 +56,7 @@ namespace internal {
 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
 #define V8_TARGET_CAN_READ_UNALIGNED 1
 #elif V8_TARGET_ARCH_ARM
+#elif V8_TARGET_ARCH_MIPS
 #else
 #error Your target architecture is not supported by v8
 #endif
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 0fe432823..e33148ce4 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -86,6 +86,13 @@ enum AllocationFlags {
 #endif
 #include "code.h"  // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "mips/macro-assembler-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
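The range predicates added to src/assembler.h above (is_int16, is_uint16, and friends) are what the new MIPS emitters lean on to validate immediates. A small standalone sketch, reusing the same definitions, of why both the signed and the unsigned 16-bit check matter on MIPS, where addiu sign-extends its immediate while ori/andi/xori zero-extend theirs:

#include <cstdint>
#include <cstdio>

// Same definitions as the helpers added to src/assembler.h above.
static inline bool is_intn(int x, int n) {
  return -(1 << (n - 1)) <= x && x < (1 << (n - 1));
}
static inline bool is_uintn(int x, int n) { return (x & -(1 << n)) == 0; }

int main() {
  printf("%d\n", is_intn(-32768, 16));   // 1: fits an addiu immediate
  printf("%d\n", is_uintn(0xffff, 16));  // 1: fits an ori immediate
  printf("%d\n", is_intn(40000, 16));    // 0: needs a lui/ori pair
  return 0;
}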
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
new file mode 100644
index 000000000..2e634617c
--- /dev/null
+++ b/src/mips/assembler-mips-inl.h
@@ -0,0 +1,215 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+
+#include "mips/assembler-mips.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Condition
+
+Condition NegateCondition(Condition cc) {
+  ASSERT(cc != cc_always);
+  return static_cast<Condition>(cc ^ 1);
+}
+
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
+  rm_ = no_reg;
+  imm32_ = immediate;
+  rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(const char* s) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(s);
+  rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE;
+}
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+}
+
+bool Operand::is_reg() const {
+  return rm_.is_valid();
+}
+
+
+
+// -----------------------------------------------------------------------------
+// RelocInfo
+
+void RelocInfo::apply(intptr_t delta) {
+  // On MIPS we do not use pc relative addressing, so we don't need to patch
+  // the code here.
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Handle<Object>(reinterpret_cast<Object**>(
+      Assembler::target_address_at(pc_)));
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsPatchedReturnSequence());
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+#ifdef DEBUG
+  PrintF("%s - %d - %s : Checking for jal(r)",
+         __FILE__, __LINE__, __func__);
+#endif
+  return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
+         (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
+          ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+}
+
+
+// -----------------------------------------------------------------------------
+// Assembler
+
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
+
+
+void Assembler::emit(Instr x) {
+  CheckBuffer();
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
new file mode 100644
index 000000000..4a91624ed
--- /dev/null
+++ b/src/mips/assembler-mips.cc
@@ -0,0 +1,1208 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2010 the V8 project authors. All rights reserved. + + +#include "v8.h" +#include "mips/assembler-mips-inl.h" +#include "serialize.h" + + +namespace v8 { +namespace internal { + + + +const Register no_reg = { -1 }; + +const Register zero_reg = { 0 }; +const Register at = { 1 }; +const Register v0 = { 2 }; +const Register v1 = { 3 }; +const Register a0 = { 4 }; +const Register a1 = { 5 }; +const Register a2 = { 6 }; +const Register a3 = { 7 }; +const Register t0 = { 8 }; +const Register t1 = { 9 }; +const Register t2 = { 10 }; +const Register t3 = { 11 }; +const Register t4 = { 12 }; +const Register t5 = { 13 }; +const Register t6 = { 14 }; +const Register t7 = { 15 }; +const Register s0 = { 16 }; +const Register s1 = { 17 }; +const Register s2 = { 18 }; +const Register s3 = { 19 }; +const Register s4 = { 20 }; +const Register s5 = { 21 }; +const Register s6 = { 22 }; +const Register s7 = { 23 }; +const Register t8 = { 24 }; +const Register t9 = { 25 }; +const Register k0 = { 26 }; +const Register k1 = { 27 }; +const Register gp = { 28 }; +const Register sp = { 29 }; +const Register s8_fp = { 30 }; +const Register ra = { 31 }; + + +const FPURegister no_creg = { -1 }; + +const FPURegister f0 = { 0 }; +const FPURegister f1 = { 1 }; +const FPURegister f2 = { 2 }; +const FPURegister f3 = { 3 }; +const FPURegister f4 = { 4 }; +const FPURegister f5 = { 5 }; +const FPURegister f6 = { 6 }; +const FPURegister f7 = { 7 }; +const FPURegister f8 = { 8 }; +const FPURegister f9 = { 9 }; +const FPURegister f10 = { 10 }; +const FPURegister f11 = { 11 }; +const FPURegister f12 = { 12 }; +const FPURegister f13 = { 13 }; +const FPURegister f14 = { 14 }; +const FPURegister f15 = { 15 }; +const FPURegister f16 = { 16 }; +const FPURegister f17 = { 17 }; +const FPURegister f18 = { 18 }; +const FPURegister f19 = { 19 }; +const FPURegister f20 = { 20 }; +const FPURegister f21 = { 21 }; +const FPURegister f22 = { 22 }; +const FPURegister f23 = { 23 }; +const FPURegister f24 = { 24 }; +const FPURegister f25 = { 25 }; +const FPURegister f26 = { 26 }; +const FPURegister f27 = { 27 }; +const FPURegister f28 = { 28 }; +const FPURegister f29 = { 29 }; +const FPURegister f30 = { 30 }; +const FPURegister f31 = { 31 }; + +int ToNumber(Register reg) { + ASSERT(reg.is_valid()); + const int kNumbers[] = { + 0, // zero_reg + 1, // at + 2, // v0 + 3, // v1 + 4, // a0 + 5, // a1 + 6, // a2 + 7, // a3 + 8, // t0 + 9, // t1 + 10, // t2 + 11, // t3 + 12, // t4 + 13, // t5 + 14, // t6 + 15, // t7 + 16, // s0 + 17, // s1 + 18, // s2 + 19, // s3 + 20, // s4 + 21, // s5 + 22, // s6 + 23, // s7 + 24, // t8 + 25, // t9 + 26, // k0 + 27, // k1 + 28, // gp + 29, // sp + 30, // s8_fp + 31, // ra + }; + return kNumbers[reg.code()]; +} + +Register ToRegister(int num) { + ASSERT(num >= 0 && num < kNumRegisters); + const Register kRegisters[] = { + zero_reg, + at, + v0, v1, + a0, a1, a2, a3, + t0, t1, t2, t3, t4, t5, t6, t7, + s0, s1, s2, s3, s4, s5, s6, s7, + t8, t9, + k0, k1, + gp, + sp, + s8_fp, + ra + }; + return kRegisters[num]; +} + + +// ----------------------------------------------------------------------------- +// Implementation of RelocInfo. + +const int RelocInfo::kApplyMask = 0; + +// Patch the code at the current address with the supplied instructions. 
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Patch the code at the current address with a call to the target.
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-mips-inl.h for inlined constructors.
+
+Operand::Operand(Handle<Object> handle) {
+  rm_ = no_reg;
+  // Verify all Objects referred by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    imm32_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // No relocation needed.
+    imm32_ = reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE;
+  }
+}
+
+MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+  offset_ = offset;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler.
+
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // Do our own buffer management.
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+
+  } else {
+    // Use externally provided buffer instead.
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // Setup buffer pointers.
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  // Setup code descriptor.
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
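The label machinery threads unresolved branches together through their own immediate fields: each linked branch's imm16 holds the position of the previous link, and a sentinel (kEndOfChain below) terminates the list. A toy model, with positions as plain ints rather than encoded instructions, of the walk that bind_to() and target_at_put() perform in the code that follows:

#include <cstdio>
#include <vector>

int main() {
  const int kEndOfChain = -4;  // same sentinel as in assembler-mips.cc
  // link_at[pos] = position of the previous unresolved branch, or the sentinel.
  std::vector<int> link_at = { kEndOfChain, 0, 1 };  // chain: 2 -> 1 -> 0 -> end
  int head = 2;  // the label is currently linked at position 2
  for (int pos = head; pos != kEndOfChain; ) {
    int next = link_at[pos];
    printf("patch branch at %d to the bound target\n", pos);
    pos = next;
  }
  return 0;
}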
+
+
+// The link chain is terminated by a negative code position (must be aligned).
+const int kEndOfChain = -4;
+
+bool Assembler::is_branch(Instr instr) {
+  uint32_t opcode   = ((instr & kOpcodeMask));
+  uint32_t rt_field = ((instr & kRtFieldMask));
+  uint32_t rs_field = ((instr & kRsFieldMask));
+  // Checks if the instruction is a branch.
+  return opcode == BEQ ||
+      opcode == BNE ||
+      opcode == BLEZ ||
+      opcode == BGTZ ||
+      opcode == BEQL ||
+      opcode == BNEL ||
+      opcode == BLEZL ||
+      opcode == BGTZL ||
+      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
+                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
+      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
+}
+
+
+int Assembler::target_at(int32_t pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    // Emitted label constant, not part of a branch.
+    return instr - (Code::kHeaderSize - kHeapObjectTag);
+  }
+  // Check we have a branch instruction.
+  ASSERT(is_branch(instr));
+  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+  // the compiler uses arithmetic shifts for signed integers.
+  int32_t imm18 = ((instr &
+                    static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+
+  return pos + kBranchPCOffset + imm18;
+}
+
+
+void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted label constant, not part of a branch.
+    // Make label relative to Code* of generated Code object.
+    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+    return;
+  }
+
+  ASSERT(is_branch(instr));
+  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+  ASSERT((imm18 & 3) == 0);
+
+  instr &= ~kImm16Mask;
+  int32_t imm16 = imm18 >> 2;
+  ASSERT(is_int16(imm16));
+
+  instr_at_put(pos, instr | (imm16 & kImm16Mask));
+}
+
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      PrintF("@ %d ", l.pos());
+      Instr instr = instr_at(l.pos());
+      if ((instr & ~kImm16Mask) == 0) {
+        PrintF("value\n");
+      } else {
+        PrintF("%d\n", instr);
+      }
+      next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    int32_t fixup_pos = L->pos();
+    next(L);  // call next before overwriting link with target at fixup_pos
+    target_at_put(fixup_pos, pos);
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_)
+    last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // Append appendix to L's list.
+ int fixup_pos; + int link = L->pos(); + do { + fixup_pos = link; + link = target_at(fixup_pos); + } while (link > 0); + ASSERT(link == kEndOfChain); + target_at_put(fixup_pos, appendix->pos()); + } else { + // L is empty, simply use appendix + *L = *appendix; + } + } + appendix->Unuse(); // appendix should not be used anymore +} + + +void Assembler::bind(Label* L) { + ASSERT(!L->is_bound()); // label can only be bound once + bind_to(L, pc_offset()); +} + + +void Assembler::next(Label* L) { + ASSERT(L->is_linked()); + int link = target_at(L->pos()); + if (link > 0) { + L->link_to(link); + } else { + ASSERT(link == kEndOfChain); + L->Unuse(); + } +} + + +// We have to use a temporary register for things that can be relocated even +// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction +// space. There is no guarantee that the relocated location can be similarly +// encoded. +bool Assembler::MustUseAt(RelocInfo::Mode rmode) { + if (rmode == RelocInfo::EXTERNAL_REFERENCE) { + return Serializer::enabled(); + } else if (rmode == RelocInfo::NONE) { + return false; + } + return true; +} + + +void Assembler::GenInstrRegister(Opcode opcode, + Register rs, + Register rt, + Register rd, + uint16_t sa, + SecondaryField func) { + ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) + | (rd.code() << kRdShift) | (sa << kSaShift) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, + SecondaryField fmt, + FPURegister ft, + FPURegister fs, + FPURegister fd, + SecondaryField func) { + ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); + Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift) + | (fd.code() << 6) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, + SecondaryField fmt, + Register rt, + FPURegister fs, + FPURegister fd, + SecondaryField func) { + ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); + Instr instr = opcode | fmt | (rt.code() << kRtShift) + | (fs.code() << kFsShift) | (fd.code() << 6) | func; + emit(instr); +} + + +// Instructions with immediate value. +// Registers are in the order of the instruction encoding, from left to right. +void Assembler::GenInstrImmediate(Opcode opcode, + Register rs, + Register rt, + int32_t j) { + ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) + | (j & kImm16Mask); + emit(instr); +} + + +void Assembler::GenInstrImmediate(Opcode opcode, + Register rs, + SecondaryField SF, + int32_t j) { + ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); + emit(instr); +} + + +void Assembler::GenInstrImmediate(Opcode opcode, + Register rs, + FPURegister ft, + int32_t j) { + ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) + | (j & kImm16Mask); + emit(instr); +} + + +// Registers are in the order of the instruction encoding, from left to right. 
+void Assembler::GenInstrJump(Opcode opcode, + uint32_t address) { + ASSERT(is_uint26(address)); + Instr instr = opcode | address; + emit(instr); +} + + +int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link + } else { + target_pos = kEndOfChain; + } + L->link_to(pc_offset()); + } + + int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); + return offset; +} + + +void Assembler::label_at_put(Label* L, int at_offset) { + int target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link + } else { + target_pos = kEndOfChain; + } + L->link_to(at_offset); + instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + } +} + + +//------- Branch and jump instructions -------- + +void Assembler::b(int16_t offset) { + beq(zero_reg, zero_reg, offset); +} + + +void Assembler::bal(int16_t offset) { + bgezal(zero_reg, offset); +} + + +void Assembler::beq(Register rs, Register rt, int16_t offset) { + GenInstrImmediate(BEQ, rs, rt, offset); +} + + +void Assembler::bgez(Register rs, int16_t offset) { + GenInstrImmediate(REGIMM, rs, BGEZ, offset); +} + + +void Assembler::bgezal(Register rs, int16_t offset) { + GenInstrImmediate(REGIMM, rs, BGEZAL, offset); +} + + +void Assembler::bgtz(Register rs, int16_t offset) { + GenInstrImmediate(BGTZ, rs, zero_reg, offset); +} + + +void Assembler::blez(Register rs, int16_t offset) { + GenInstrImmediate(BLEZ, rs, zero_reg, offset); +} + + +void Assembler::bltz(Register rs, int16_t offset) { + GenInstrImmediate(REGIMM, rs, BLTZ, offset); +} + + +void Assembler::bltzal(Register rs, int16_t offset) { + GenInstrImmediate(REGIMM, rs, BLTZAL, offset); +} + + +void Assembler::bne(Register rs, Register rt, int16_t offset) { + GenInstrImmediate(BNE, rs, rt, offset); +} + + +void Assembler::j(int32_t target) { + ASSERT(is_uint28(target) && ((target & 3) == 0)); + GenInstrJump(J, target >> 2); +} + + +void Assembler::jr(Register rs) { + GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); +} + + +void Assembler::jal(int32_t target) { + ASSERT(is_uint28(target) && ((target & 3) == 0)); + GenInstrJump(JAL, target >> 2); +} + + +void Assembler::jalr(Register rs, Register rd) { + GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); +} + + +//-------Data-processing-instructions--------- + +// Arithmetic. 
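All of the arithmetic emitters that follow funnel through the GenInstrRegister/GenInstrImmediate helpers above. A worked encoding of one R-type instruction, assuming the standard MIPS32 field layout that constants-mips.h is expected to encode (opcode 31..26, rs 25..21, rt 20..16, rd 15..11, sa 10..6, funct 5..0):

#include <cstdint>
#include <cstdio>

int main() {
  // Field shifts per the standard MIPS32 R-type layout.
  const uint32_t kRsShift = 21, kRtShift = 16, kRdShift = 11, kSaShift = 6;
  const uint32_t SPECIAL = 0 << 26;  // R-type opcode
  const uint32_t ADDU = 0x21;        // funct for addu
  uint32_t rs = 4, rt = 5, rd = 2;   // a0, a1, v0 register numbers
  uint32_t instr = SPECIAL | (rs << kRsShift) | (rt << kRtShift)
                 | (rd << kRdShift) | (0 << kSaShift) | ADDU;
  printf("addu v0, a0, a1 -> 0x%08x\n", instr);  // 0x00851021
  return 0;
}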
+ +void Assembler::add(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD); +} + + +void Assembler::addu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); +} + + +void Assembler::addi(Register rd, Register rs, int32_t j) { + GenInstrImmediate(ADDI, rs, rd, j); +} + + +void Assembler::addiu(Register rd, Register rs, int32_t j) { + GenInstrImmediate(ADDIU, rs, rd, j); +} + + +void Assembler::sub(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB); +} + + +void Assembler::subu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU); +} + + +void Assembler::mul(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); +} + + +void Assembler::mult(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); +} + + +void Assembler::multu(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); +} + + +void Assembler::div(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV); +} + + +void Assembler::divu(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); +} + + +// Logical. + +void Assembler::and_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND); +} + + +void Assembler::andi(Register rt, Register rs, int32_t j) { + GenInstrImmediate(ANDI, rs, rt, j); +} + + +void Assembler::or_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR); +} + + +void Assembler::ori(Register rt, Register rs, int32_t j) { + GenInstrImmediate(ORI, rs, rt, j); +} + + +void Assembler::xor_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR); +} + + +void Assembler::xori(Register rt, Register rs, int32_t j) { + GenInstrImmediate(XORI, rs, rt, j); +} + + +void Assembler::nor(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR); +} + + +// Shifts. 
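The shift emitters below use the sa field rather than rs. One consequence worth seeing in the raw bits: sll zero_reg, zero_reg, 0 encodes to all zero bits, which is why Assembler::nop() later in this patch is defined as exactly that shift. A minimal check:

#include <cstdint>
#include <cstdio>

int main() {
  // sll is SPECIAL (opcode 0) with funct 0; all operand fields are zero here.
  const uint32_t kRtShift = 16, kRdShift = 11, kSaShift = 6, SLL = 0x0;
  uint32_t rt = 0, rd = 0, sa = 0;   // sll zero_reg, zero_reg, 0
  uint32_t nop = (rt << kRtShift) | (rd << kRdShift) | (sa << kSaShift) | SLL;
  printf("nop -> 0x%08x\n", nop);    // 0x00000000
  return 0;
}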
+void Assembler::sll(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL); +} + + +void Assembler::sllv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV); +} + + +void Assembler::srl(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL); +} + + +void Assembler::srlv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV); +} + + +void Assembler::sra(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA); +} + + +void Assembler::srav(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV); +} + + +//------------Memory-instructions------------- + +void Assembler::lb(Register rd, const MemOperand& rs) { + GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); +} + + +void Assembler::lbu(Register rd, const MemOperand& rs) { + GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); +} + + +void Assembler::lw(Register rd, const MemOperand& rs) { + GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); +} + + +void Assembler::sb(Register rd, const MemOperand& rs) { + GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); +} + + +void Assembler::sw(Register rd, const MemOperand& rs) { + GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); +} + + +void Assembler::lui(Register rd, int32_t j) { + GenInstrImmediate(LUI, zero_reg, rd, j); +} + + +//-------------Misc-instructions-------------- + +// Break / Trap instructions. +void Assembler::break_(uint32_t code) { + ASSERT((code & ~0xfffff) == 0); + Instr break_instr = SPECIAL | BREAK | (code << 6); + emit(break_instr); +} + + +void Assembler::tge(Register rs, Register rt, uint16_t code) { + ASSERT(is_uint10(code)); + Instr instr = SPECIAL | TGE | rs.code() << kRsShift + | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tgeu(Register rs, Register rt, uint16_t code) { + ASSERT(is_uint10(code)); + Instr instr = SPECIAL | TGEU | rs.code() << kRsShift + | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tlt(Register rs, Register rt, uint16_t code) { + ASSERT(is_uint10(code)); + Instr instr = + SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tltu(Register rs, Register rt, uint16_t code) { + ASSERT(is_uint10(code)); + Instr instr = SPECIAL | TLTU | rs.code() << kRsShift + | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::teq(Register rs, Register rt, uint16_t code) { + ASSERT(is_uint10(code)); + Instr instr = + SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tne(Register rs, Register rt, uint16_t code) { + ASSERT(is_uint10(code)); + Instr instr = + SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +// Move from HI/LO register. + +void Assembler::mfhi(Register rd) { + GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI); +} + + +void Assembler::mflo(Register rd) { + GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO); +} + + +// Set on less than instructions. 
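MIPS has no single blt/bge instructions; the set-on-less-than family that follows is combined with bne/beq to synthesize them. A hedged sketch written against this patch's own Assembler interface (the register names, Label, and the emitters are all declared in assembler-mips.h; filling the branch delay slot is left to the caller here):

#include "v8.h"
#include "mips/assembler-mips.h"

namespace v8 { namespace internal {

// Emit "branch to target if a0 < a1 (signed)" using slt + bne.
void EmitBranchIfLess(Assembler* masm, Label* target) {
  masm->slt(at, a0, a1);            // at = (a0 < a1) ? 1 : 0
  masm->bne(at, zero_reg, target);  // taken when the comparison was true
  masm->nop();                      // branch delay slot
}

} }  // namespace v8::internal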
+void Assembler::slt(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT); +} + + +void Assembler::sltu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU); +} + + +void Assembler::slti(Register rt, Register rs, int32_t j) { + GenInstrImmediate(SLTI, rs, rt, j); +} + + +void Assembler::sltiu(Register rt, Register rs, int32_t j) { + GenInstrImmediate(SLTIU, rs, rt, j); +} + + +//--------Coprocessor-instructions---------------- + +// Load, store, move. +void Assembler::lwc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); +} + + +void Assembler::ldc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(LDC1, src.rm(), fd, src.offset_); +} + + +void Assembler::swc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); +} + + +void Assembler::sdc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(SDC1, src.rm(), fd, src.offset_); +} + + +void Assembler::mtc1(FPURegister fs, Register rt) { + GenInstrRegister(COP1, MTC1, rt, fs, f0); +} + + +void Assembler::mthc1(FPURegister fs, Register rt) { + GenInstrRegister(COP1, MTHC1, rt, fs, f0); +} + + +void Assembler::mfc1(FPURegister fs, Register rt) { + GenInstrRegister(COP1, MFC1, rt, fs, f0); +} + + +void Assembler::mfhc1(FPURegister fs, Register rt) { + GenInstrRegister(COP1, MFHC1, rt, fs, f0); +} + + +// Conversions. + +void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S); +} + + +void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D); +} + + +void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); +} + + +void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); +} + + +void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); +} + + +void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); +} + + +void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); +} + + +void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); +} + + +void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); +} + + +void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); +} + + +// Conditions. +void Assembler::c(FPUCondition cond, SecondaryField fmt, + FPURegister ft, FPURegister fs, uint16_t cc) { + ASSERT(is_uint3(cc)); + ASSERT((fmt & ~(31 << kRsShift)) == 0); + Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift + | cc << 8 | 3 << 4 | cond; + emit(instr); +} + + +void Assembler::bc1f(int16_t offset, uint16_t cc) { + ASSERT(is_uint3(cc)); + Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); + emit(instr); +} + + +void Assembler::bc1t(int16_t offset, uint16_t cc) { + ASSERT(is_uint3(cc)); + Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); + emit(instr); +} + + +// Debugging. 
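The position-recording functions below buffer source positions and only emit reloc info when a position actually changed. The same write-only-if-changed scheme in isolation (plain C++; printf stands in for the reloc-info write):

#include <cstdio>

struct PositionTracker {
  int current = -1, written = -1;
  void Record(int pos) { current = pos; }
  void Flush() {
    if (current != written) {        // emit only when the position moved
      printf("emit POSITION %d\n", current);
      written = current;
    }
  }
};

int main() {
  PositionTracker t;
  t.Record(10); t.Flush();  // emits 10
  t.Record(10); t.Flush();  // emits nothing: unchanged
  t.Record(42); t.Flush();  // emits 42
  return 0;
}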
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // Compute new buffer size.
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else if (buffer_size_ < 1*MB) {
+    desc.buffer_size = 2*buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1*MB;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // no overflow
+
+  // Setup new buffer.
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // Copy the data.
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(reloc_info_writer.pos() + rc_delta,
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // Switch buffers.
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+
+  // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
+  // shift by pc_delta. But on MIPS the target address is directly loaded, so
+  // we do not need to relocate here.
+
+  ASSERT(!overflow());
+}
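GrowBuffer's sizing rule above, pulled out on its own: start at 4KB, double until 1MB, then grow by 1MB per step. A sketch that prints the progression:

#include <cstdio>

// Mirrors the size computation in Assembler::GrowBuffer().
static int NewBufferSize(int buffer_size) {
  const int KB = 1024, MB = KB * KB;
  if (buffer_size < 4 * KB) return 4 * KB;
  if (buffer_size < 1 * MB) return 2 * buffer_size;
  return buffer_size + 1 * MB;
}

int main() {
  for (int size = 4 * 1024; size < 4 * 1024 * 1024; size = NewBufferSize(size))
    printf("%d\n", size);  // 4KB, 8KB, ..., 1MB, 2MB, 3MB
  return 0;
}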
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+    // Adjust code for new modes.
+    ASSERT(RelocInfo::IsJSReturn(rmode)
+           || RelocInfo::IsComment(rmode)
+           || RelocInfo::IsPosition(rmode));
+    // These modes do not need an entry in the constant pool.
+  }
+  if (rinfo.rmode() != RelocInfo::NONE) {
+    // Don't record external references unless the heap will be serialized.
+    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+        !Serializer::enabled() &&
+        !FLAG_debug_code) {
+      return;
+    }
+    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    reloc_info_writer.Write(&rinfo);
+  }
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  Instr instr1 = instr_at(pc);
+  Instr instr2 = instr_at(pc + kInstrSize);
+  // Check we have 2 instructions generated by li.
+  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
+                                   (instr2 & kOpcodeMask) == ORI ||
+                                   (instr2 & kOpcodeMask) == LUI)));
+  // Interpret these 2 instructions.
+  if (instr1 == nopInstr) {
+    if ((instr2 & kOpcodeMask) == ADDI) {
+      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
+    } else if ((instr2 & kOpcodeMask) == ORI) {
+      return reinterpret_cast<Address>(instr2 & kImm16Mask);
+    } else if ((instr2 & kOpcodeMask) == LUI) {
+      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
+    }
+  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
+    // 32 bits value.
+    return reinterpret_cast<Address>(
+        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+  }
+
+  // We should never get here.
+  UNREACHABLE();
+  return (Address)0x0;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  // On MIPS we need to patch the generated code.
+
+  // First check we have a li.
+  Instr instr2 = instr_at(pc + kInstrSize);
+#ifdef DEBUG
+  Instr instr1 = instr_at(pc);
+
+  // Check we have indeed the result from a li with MustUseAt true.
+  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+        ((instr1 == 0) && ((instr2 & kOpcodeMask) == ADDIU ||
+                           (instr2 & kOpcodeMask) == ORI ||
+                           (instr2 & kOpcodeMask) == LUI)));
+#endif
+
+
+  uint32_t rt_code = (instr2 & kRtFieldMask);
+  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+  uint32_t itarget = reinterpret_cast<uint32_t>(target);
+
+  if (is_int16(itarget)) {
+    // nop
+    // addiu rt zero_reg j
+    *p = nopInstr;
+    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
+  } else if (!(itarget & HIMask)) {
+    // nop
+    // ori rt zero_reg j
+    *p = nopInstr;
+    *(p+1) = ORI | rt_code | (itarget & LOMask);
+  } else if (!(itarget & LOMask)) {
+    // nop
+    // lui rt (HIMask & itarget)>>16
+    *p = nopInstr;
+    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
+  } else {
+    // lui rt (HIMask & itarget)>>16
+    // ori rt rt, (LOMask & itarget)
+    *p = LUI | rt_code | ((itarget & HIMask)>>16);
+    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
+  }
+
+  CPU::FlushICache(pc, 2 * sizeof(int32_t));
+}
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
new file mode 100644
index 000000000..4f5ae3ebe
--- /dev/null
+++ b/src/mips/assembler-mips.h
@@ -0,0 +1,663 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
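set_target_address_at above always rewrites exactly two instruction slots, padding with a nop when one instruction would do, so that any future 32-bit target can be patched into the same space. A standalone sketch of the four pair shapes it chooses between, assuming HIMask/LOMask select the high and low halfwords as in the .cc file:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t HIMask = 0xffff0000, LOMask = 0x0000ffff;  // assumed values
  uint32_t itarget = 0x12345678;
  if ((int32_t)itarget == (int16_t)itarget) {               // is_int16
    printf("nop; addiu rt, zero_reg, 0x%x\n", itarget & LOMask);
  } else if (!(itarget & HIMask)) {
    printf("nop; ori rt, zero_reg, 0x%x\n", itarget & LOMask);
  } else if (!(itarget & LOMask)) {
    printf("nop; lui rt, 0x%x\n", (itarget & HIMask) >> 16);
  } else {
    printf("lui rt, 0x%x; ori rt, rt, 0x%x\n",
           (itarget & HIMask) >> 16, itarget & LOMask);     // general case
  }
  return 0;
}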
+ + +#ifndef V8_MIPS_ASSEMBLER_MIPS_H_ +#define V8_MIPS_ASSEMBLER_MIPS_H_ + +#include +#include "assembler.h" +#include "constants-mips.h" +#include "serialize.h" + +using namespace assembler::mips; + +namespace v8 { +namespace internal { + +// CPU Registers. +// +// 1) We would prefer to use an enum, but enum values are assignment- +// compatible with int, which has caused code-generation bugs. +// +// 2) We would prefer to use a class instead of a struct but we don't like +// the register initialization to depend on the particular initialization +// order (which appears to be different on OS X, Linux, and Windows for the +// installed versions of C++ we tried). Using a struct permits C-style +// "initialization". Also, the Register objects cannot be const as this +// forces initialization stubs in MSVC, making us dependent on initialization +// order. +// +// 3) By not using an enum, we are possibly preventing the compiler from +// doing certain constant folds, which may significantly reduce the +// code generated for some assembly instructions (because they boil down +// to a few constants). If this is a problem, we could change the code +// such that we use an enum in optimized mode, and the struct in debug +// mode. This way we get the compile-time error checking in debug mode +// and best performance in optimized code. + + +// ----------------------------------------------------------------------------- +// Implementation of Register and FPURegister + +// Core register. +struct Register { + bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } + bool is(Register reg) const { return code_ == reg.code_; } + int code() const { + ASSERT(is_valid()); + return code_; + } + int bit() const { + ASSERT(is_valid()); + return 1 << code_; + } + + // Unfortunately we can't make this private in a struct. + int code_; +}; + +extern const Register no_reg; + +extern const Register zero_reg; +extern const Register at; +extern const Register v0; +extern const Register v1; +extern const Register a0; +extern const Register a1; +extern const Register a2; +extern const Register a3; +extern const Register t0; +extern const Register t1; +extern const Register t2; +extern const Register t3; +extern const Register t4; +extern const Register t5; +extern const Register t6; +extern const Register t7; +extern const Register s0; +extern const Register s1; +extern const Register s2; +extern const Register s3; +extern const Register s4; +extern const Register s5; +extern const Register s6; +extern const Register s7; +extern const Register t8; +extern const Register t9; +extern const Register k0; +extern const Register k1; +extern const Register gp; +extern const Register sp; +extern const Register s8_fp; +extern const Register ra; + +int ToNumber(Register reg); + +Register ToRegister(int num); + +// Coprocessor register. +struct FPURegister { + bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; } + bool is(FPURegister creg) const { return code_ == creg.code_; } + int code() const { + ASSERT(is_valid()); + return code_; + } + int bit() const { + ASSERT(is_valid()); + return 1 << code_; + } + + // Unfortunately we can't make this private in a struct. 
+  int code_;
+};
+
+extern const FPURegister no_creg;
+
+extern const FPURegister f0;
+extern const FPURegister f1;
+extern const FPURegister f2;
+extern const FPURegister f3;
+extern const FPURegister f4;
+extern const FPURegister f5;
+extern const FPURegister f6;
+extern const FPURegister f7;
+extern const FPURegister f8;
+extern const FPURegister f9;
+extern const FPURegister f10;
+extern const FPURegister f11;
+extern const FPURegister f12;  // arg
+extern const FPURegister f13;
+extern const FPURegister f14;  // arg
+extern const FPURegister f15;
+extern const FPURegister f16;
+extern const FPURegister f17;
+extern const FPURegister f18;
+extern const FPURegister f19;
+extern const FPURegister f20;
+extern const FPURegister f21;
+extern const FPURegister f22;
+extern const FPURegister f23;
+extern const FPURegister f24;
+extern const FPURegister f25;
+extern const FPURegister f26;
+extern const FPURegister f27;
+extern const FPURegister f28;
+extern const FPURegister f29;
+extern const FPURegister f30;
+extern const FPURegister f31;
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case Uless:
+      return Ugreater;
+    case Ugreater:
+      return Uless;
+    case Ugreater_equal:
+      return Uless_equal;
+    case Uless_equal:
+      return Ugreater_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  };
+}
+
+
+enum Hint {
+  no_hint = 0
+};
+
+inline Hint NegateHint(Hint hint) {
+  return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand BASE_EMBEDDED {
+ public:
+  // Immediate.
+  INLINE(explicit Operand(int32_t immediate,
+                          RelocInfo::Mode rmode = RelocInfo::NONE));
+  INLINE(explicit Operand(const ExternalReference& f));
+  INLINE(explicit Operand(const char* s));
+  INLINE(explicit Operand(Object** opp));
+  INLINE(explicit Operand(Context** cpp));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // Register.
+  INLINE(explicit Operand(Register rm));
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  int32_t imm32_;  // Valid if rm_ == no_reg
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+  friend class MacroAssembler;
+};
+
+
+// On MIPS we have only one addressing mode with base_reg + offset.
+// Class MemOperand represents a memory operand in load and store instructions.
+class MemOperand : public Operand {
+ public:
+
+  explicit MemOperand(Register rn, int16_t offset = 0);
+
+ private:
+  int16_t offset_;
+
+  friend class Assembler;
+};
+
+
+class Assembler : public Malloced {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+ // + // If the provided buffer is NULL, the assembler allocates and grows its own + // buffer, and buffer_size determines the initial buffer size. The buffer is + // owned by the assembler and deallocated upon destruction of the assembler. + // + // If the provided buffer is not NULL, the assembler uses the provided buffer + // for code generation and assumes its size to be buffer_size. If the buffer + // is too small, a fatal error occurs. No deallocation of the buffer is done + // upon destruction of the assembler. + Assembler(void* buffer, int buffer_size); + ~Assembler(); + + // GetCode emits any pending (non-emitted) code and fills the descriptor + // desc. GetCode() is idempotent; it returns the same result if no other + // Assembler functions are invoked in between GetCode() calls. + void GetCode(CodeDesc* desc); + + // Label operations & relative jumps (PPUM Appendix D). + // + // Takes a branch opcode (cc) and a label (L) and generates + // either a backward branch or a forward branch and links it + // to the label fixup chain. Usage: + // + // Label L; // unbound label + // j(cc, &L); // forward branch to unbound label + // bind(&L); // bind label to the current pc + // j(cc, &L); // backward branch to bound label + // bind(&L); // illegal: a label may be bound only once + // + // Note: The same Label can be used for forward and backward branches + // but it may be bound only once. + void bind(Label* L); // binds an unbound label L to the current code position + + // Returns the branch offset to the given label from the current code position + // Links the label to the current position if it is still unbound + // Manages the jump elimination optimization if the second parameter is true. + int32_t branch_offset(Label* L, bool jump_elimination_allowed); + int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { + int32_t o = branch_offset(L, jump_elimination_allowed); + ASSERT((o & 3) == 0); // Assert the offset is aligned. + return o >> 2; + } + + // Puts a labels target address at the given position. + // The high 8 bits are set to zero. + void label_at_put(Label* L, int at_offset); + + // Size of an instruction. + static const int kInstrSize = sizeof(Instr); + + // Difference between address of current opcode and target address offset. + static const int kBranchPCOffset = 4; + + // Read/Modify the code target address in the branch/call instruction at pc. + static Address target_address_at(Address pc); + static void set_target_address_at(Address pc, Address target); + + // This sets the branch destination (which gets loaded at the call address). + // This is for calls and branches within generated code. + inline static void set_target_at(Address instruction_payload, + Address target) { + set_target_address_at(instruction_payload, target); + } + + // This sets the branch destination. + // This is for calls and branches to runtime code. + inline static void set_external_target_at(Address instruction_payload, + Address target) { + set_target_address_at(instruction_payload, target); + } + + static const int kCallTargetSize = 3 * kPointerSize; + static const int kExternalTargetSize = 3 * kPointerSize; + + // Distance between the instruction referring to the address of the call + // target and the return address. + static const int kCallTargetAddressOffset = 4 * kInstrSize; + + // Distance between start of patched return sequence and the emitted address + // to jump to. 
+
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation.
+
+  void nop() { sll(zero_reg, zero_reg, 0); }
+
+
+  //------- Branch and jump instructions --------
+  // We do not use the "likely" variants of these instructions.
+  void b(int16_t offset);
+  void b(Label* L) { b(branch_offset(L, false) >> 2); }
+  void bal(int16_t offset);
+  void bal(Label* L) { bal(branch_offset(L, false) >> 2); }
+
+  void beq(Register rs, Register rt, int16_t offset);
+  void beq(Register rs, Register rt, Label* L) {
+    beq(rs, rt, branch_offset(L, false) >> 2);
+  }
+  void bgez(Register rs, int16_t offset);
+  void bgezal(Register rs, int16_t offset);
+  void bgtz(Register rs, int16_t offset);
+  void blez(Register rs, int16_t offset);
+  void bltz(Register rs, int16_t offset);
+  void bltzal(Register rs, int16_t offset);
+  void bne(Register rs, Register rt, int16_t offset);
+  void bne(Register rs, Register rt, Label* L) {
+    bne(rs, rt, branch_offset(L, false) >> 2);
+  }
+
+  // Never use the int16_t b(l)cond version with a branch offset
+  // instead of using the Label* version. See the Twiki for details.
+
+  // Jump targets must be in the current 256 MB-aligned region, i.e. the
+  // target address is encoded in 28 bits (see the sketch after this block).
+  void j(int32_t target);
+  void jal(int32_t target);
+  void jalr(Register rs, Register rd = ra);
+  void jr(Register target);
+
+
+  //-------Data-processing-instructions---------
+
+  // Arithmetic.
+  void add(Register rd, Register rs, Register rt);
+  void addu(Register rd, Register rs, Register rt);
+  void sub(Register rd, Register rs, Register rt);
+  void subu(Register rd, Register rs, Register rt);
+  void mult(Register rs, Register rt);
+  void multu(Register rs, Register rt);
+  void div(Register rs, Register rt);
+  void divu(Register rs, Register rt);
+  void mul(Register rd, Register rs, Register rt);
+
+  void addi(Register rd, Register rs, int32_t j);
+  void addiu(Register rd, Register rs, int32_t j);
+
+  // Logical.
+  void and_(Register rd, Register rs, Register rt);
+  void or_(Register rd, Register rs, Register rt);
+  void xor_(Register rd, Register rs, Register rt);
+  void nor(Register rd, Register rs, Register rt);
+
+  void andi(Register rd, Register rs, int32_t j);
+  void ori(Register rd, Register rs, int32_t j);
+  void xori(Register rd, Register rs, int32_t j);
+  void lui(Register rd, int32_t j);
+
+  // Shifts.
+  void sll(Register rd, Register rt, uint16_t sa);
+  void sllv(Register rd, Register rt, Register rs);
+  void srl(Register rd, Register rt, uint16_t sa);
+  void srlv(Register rd, Register rt, Register rs);
+  void sra(Register rt, Register rd, uint16_t sa);
+  void srav(Register rt, Register rd, Register rs);
+
+
+  //------------Memory-instructions-------------
+
+  void lb(Register rd, const MemOperand& rs);
+  void lbu(Register rd, const MemOperand& rs);
+  void lw(Register rd, const MemOperand& rs);
+  void sb(Register rd, const MemOperand& rs);
+  void sw(Register rd, const MemOperand& rs);
+
+
+  //-------------Misc-instructions--------------
+
+  // Break / Trap instructions.
+  void break_(uint32_t code);
+  void tge(Register rs, Register rt, uint16_t code);
+  void tgeu(Register rs, Register rt, uint16_t code);
+  void tlt(Register rs, Register rt, uint16_t code);
+  void tltu(Register rs, Register rt, uint16_t code);
+  void teq(Register rs, Register rt, uint16_t code);
+  void tne(Register rs, Register rt, uint16_t code);
+
+  // Move from HI/LO register.
+  void mfhi(Register rd);
+  void mflo(Register rd);
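
[Editor's note: how the 28-bit limit above falls out of the J-type encoding. A
j/jal instruction carries a 26-bit index that the CPU shifts left by two and
combines with the top four bits of the delay-slot pc, so a jump can only land
inside the current 256 MB region. A worked sketch, not code from the patch:

    #include <stdint.h>

    // Compute the address a MIPS j/jal reaches from 'pc', given the 26-bit
    // 'index' stored in the instruction's immediate field.
    uint32_t JumpTarget(uint32_t pc, uint32_t index) {
      uint32_t region = (pc + 4) & 0xF0000000;  // top 4 bits of delay-slot pc
      return region | (index << 2);             // 26 + 2 = 28 addressable bits
    }
]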
+
+  // Set on less than.
+  void slt(Register rd, Register rs, Register rt);
+  void sltu(Register rd, Register rs, Register rt);
+  void slti(Register rd, Register rs, int32_t j);
+  void sltiu(Register rd, Register rs, int32_t j);
+
+
+  //--------Coprocessor-instructions----------------
+
+  // Load, store, and move.
+  void lwc1(FPURegister fd, const MemOperand& src);
+  void ldc1(FPURegister fd, const MemOperand& src);
+
+  void swc1(FPURegister fs, const MemOperand& dst);
+  void sdc1(FPURegister fs, const MemOperand& dst);
+
+  // When paired with MTHC1 to write a value to a 64-bit FPR, the MTC1 must be
+  // executed first, followed by the MTHC1 (see the sketch below).
+  void mtc1(FPURegister fs, Register rt);
+  void mthc1(FPURegister fs, Register rt);
+  void mfc1(FPURegister fs, Register rt);
+  void mfhc1(FPURegister fs, Register rt);
+
+  // Conversion.
+  void cvt_w_s(FPURegister fd, FPURegister fs);
+  void cvt_w_d(FPURegister fd, FPURegister fs);
+
+  void cvt_l_s(FPURegister fd, FPURegister fs);
+  void cvt_l_d(FPURegister fd, FPURegister fs);
+
+  void cvt_s_w(FPURegister fd, FPURegister fs);
+  void cvt_s_l(FPURegister fd, FPURegister fs);
+  void cvt_s_d(FPURegister fd, FPURegister fs);
+
+  void cvt_d_w(FPURegister fd, FPURegister fs);
+  void cvt_d_l(FPURegister fd, FPURegister fs);
+  void cvt_d_s(FPURegister fd, FPURegister fs);
+
+  // Conditions and branches.
+  void c(FPUCondition cond, SecondaryField fmt,
+         FPURegister ft, FPURegister fs, uint16_t cc = 0);
+
+  void bc1f(int16_t offset, uint16_t cc = 0);
+  void bc1f(Label* L, uint16_t cc = 0) {
+    bc1f(branch_offset(L, false) >> 2, cc);
+  }
+  void bc1t(int16_t offset, uint16_t cc = 0);
+  void bc1t(Label* L, uint16_t cc = 0) {
+    bc1t(branch_offset(L, false) >> 2, cc);
+  }
+
+
+  // Check the code size generated from label to here.
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
+  }
+
+  // Debugging.
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  int32_t pc_offset() const { return pc_ - buffer_; }
+  int32_t current_position() const { return current_position_; }
+  int32_t current_statement_position() const {
+    return current_statement_position_;
+  }
+
+  // Check if there is less than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ protected:
+  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Read/patch instructions.
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
+  // Check if an instruction is a branch of some kind.
+  bool is_branch(Instr instr);
+
+  // Decode branch instruction at pos and return branch target pos.
+  int target_at(int32_t pos);
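
[Editor's note: the MTC1-before-MTHC1 ordering noted above, in practice. A
hypothetical sketch, not part of the patch; the register names are the
conventional MIPS o32 ones assumed to be defined earlier in this header:

    // Assemble the two 32-bit halves held in a0 (low) and a1 (high)
    // into the 64-bit FPU register f12.
    masm.mtc1(f12, a0);   // write the low 32 bits first...
    masm.mthc1(f12, a1);  // ...then the high 32 bits
]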
+
+  // Patch branch instruction at pos to branch to given branch target pos.
+  void target_at_put(int32_t pos, int32_t target_pos);
+
+  // Says whether we need to relocate with this mode.
+  bool MustUseAt(RelocInfo::Mode rmode);
+
+  // Record reloc info for current pc_.
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ private:
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+
+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes.
+  static const int kBufferCheckInterval = 1*KB/2;
+
+  // Code generation.
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
+  byte* pc_;  // The program counter - moves forward.
+
+  // Relocation information generation.
+  // Each relocation is encoded as a variable size value.
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+
+  // The bound position, before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+  // Source position information.
+  int current_position_;
+  int current_statement_position_;
+  int written_position_;
+  int written_statement_position_;
+
+  // Code emission.
+  inline void CheckBuffer();
+  void GrowBuffer();
+  inline void emit(Instr x);
+
+  // Instruction generation.
+  // We have three different kinds of encoding layouts on MIPS (R-type, I-type
+  // and J-type; see the sketch after this header). However, because many
+  // different types of objects are encoded in the same fields, we have quite
+  // a few aliases for each mode. Using the same structure to refer to
+  // Register and FPURegister would spare a few aliases, but mixing both does
+  // not look clean to me. Anyway, we could surely implement this differently.
+
+  void GenInstrRegister(Opcode opcode,
+                        Register rs,
+                        Register rt,
+                        Register rd,
+                        uint16_t sa = 0,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        FPURegister ft,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        Register rt,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         Register rt,
+                         int32_t j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         SecondaryField SF,
+                         int32_t j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register r1,
+                         FPURegister r2,
+                         int32_t j);
+
+
+  void GenInstrJump(Opcode opcode,
+                    uint32_t address);
+
+
+  // Labels.
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+  void next(Label* L);
+
+  friend class RegExpMacroAssemblerMIPS;
+  friend class RelocInfo;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_ASSEMBLER_MIPS_H_
+
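[Editor's note: the three encoding layouts the GenInstr* helpers distinguish
are the standard MIPS32 formats. A worked illustration of the R-type case,
which is what GenInstrRegister assembles; this is an editor's sketch, not code
from the patch:

    #include <stdint.h>

    // Pack an R-type MIPS32 instruction word from its six fields.
    uint32_t EncodeRType(uint32_t opcode, uint32_t rs, uint32_t rt,
                         uint32_t rd, uint32_t sa, uint32_t funct) {
      return (opcode << 26) | (rs << 21) | (rt << 16) |
             (rd << 11) | (sa << 6) | funct;
    }

    // Example: addu a2, a0, a1 -> opcode SPECIAL (0), rs = a0 (4),
    // rt = a1 (5), rd = a2 (6), sa = 0, funct = 0x21.
    // EncodeRType(0, 4, 5, 6, 0, 0x21) == 0x00853021.
]
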
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
new file mode 100644
index 000000000..3bd42ed6c
--- /dev/null
+++ b/src/mips/builtins-mips.cc
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+                                CFunctionId id,
+                                BuiltinExtraArguments extra_args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
new file mode 100644
index 000000000..2a77715a3
--- /dev/null
+++ b/src/mips/codegen-mips-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
+#define V8_MIPS_CODEGEN_MIPS_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ b(&entry_label_); }
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_CODEGEN_MIPS_INL_H_
+
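[Editor's note: the `__` define above is the convention V8's code generators
use so that emission lines read like assembly. A rough sketch of the idea;
the actual macro lives in the shared macro-assembler headers, not in this
patch:

    // In essence, ACCESS_MASM(masm) expands to `masm->`, so that
    //   __ b(&entry_label_);
    // is shorthand for
    //   masm_->b(&entry_label_);
    #define ACCESS_MASM(masm) masm->
]
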
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
new file mode 100644
index 000000000..5a27c2864
--- /dev/null
+++ b/src/mips/codegen-mips.cc
@@ -0,0 +1,501 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+#include "compiler.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+
+void DeferredCode::SaveRegisters() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation.
+
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
+      masm_(masm),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      cc_reg_(cc_always),
+      state_(NULL),
+      function_return_is_shadowed_(false) {
+}
+
+
+// Calling conventions:
+// s8_fp: caller's frame pointer
+// sp:    stack pointer
+// a1:    called JS function
+// cp:    callee's context
+
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// This should generate code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It is not yet implemented on MIPS, so it always goes to the slow case.
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
+// positive or negative to indicate the result of the comparison (see the
+// caller-side sketch below).
+void CompareStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x765);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x790);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x808);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x815);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              bool do_gc,
+                              bool always_allocate) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x826);
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x831);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  UNIMPLEMENTED_MIPS();
+  // Load a result.
+  __ li(v0, Operand(0x1234));
+  // Return.
+  __ jr(ra);
+  __ nop();  // Branch delay slot.
+}
+
+
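[Editor's note: the CompareStub contract above (the sign of v0 encodes the
comparison result) implies a caller-side pattern along these lines once the
port is fleshed out. A speculative sketch, not from the patch; the (cc,
strict) constructor comes from the shared code-stub machinery, and CallStub
is the MacroAssembler helper assumed from the other ports:

    Label less, greater;
    CompareStub stub(eq, true);  // hypothetical condition/strictness
    __ CallStub(&stub);          // result sign left in v0
    __ bltz(v0, &less);          // v0 < 0: left operand smaller
    __ nop();                    // branch delay slot
    __ bgtz(v0, &greater);       // v0 > 0: left operand larger
    __ nop();                    // branch delay slot
    // Fall through: v0 == 0, the operands compare equal.
]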
+// This stub performs an instanceof, calling the builtin function if
+// necessary. Uses a1 for the object, a0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x845);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x851);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x857);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x863);
+}
+
+
+const char* CompareStub::GetName() {
+  UNIMPLEMENTED_MIPS();
+  return NULL;  // UNIMPLEMENTED RETURN
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16 bit value (see the sketch
+  // below).
+  ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
+  return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
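[Editor's note: the MinorKey packing above mirrors the ARM stub, where the
condition code occupies the top bits of the value; shifting right by 27 keeps
the condition bits clear of the strict flag in bit 0. A worked illustration
of the arithmetic, editor's sketch only:

    #include <stdint.h>

    // Same packing as CompareStub::MinorKey: condition bits above,
    // strict flag in bit 0.
    int MinorKey(uint32_t cc, bool strict) {
      return (cc >> 27) | (strict ? 1 : 0);
    }

    // For a condition encoded in the top four bits, e.g. cc = 0xE0000000:
    // MinorKey(0xE0000000, true) == 0x1C | 1 == 0x1D.
]
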
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
new file mode 100644
index 000000000..05138bc64
--- /dev/null
+++ b/src/mips/codegen-mips.h
@@ -0,0 +1,311 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_H_
+#define V8_MIPS_CODEGEN_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationInfo;
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair). It is threaded through the
+// call stack. Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state. Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state. The new state has its own typeof state and pair of branch
+  // labels.
+  CodeGenState(CodeGenerator* owner,
+               JumpTarget* true_target,
+               JumpTarget* false_target);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  TypeofState typeof_state() const { return typeof_state_; }
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+ private:
+  // The owning code generator.
+  CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
+  TypeofState typeof_state_;
+
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
+  CodeGenState* previous_;
+};
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Compilation mode. Either the compiler is used as the primary
+  // compiler and needs to set up everything, or the compiler is used as
+  // the secondary compiler for split compilation and has to handle
+  // bailouts.
+  enum Mode {
+    PRIMARY,
+    SECONDARY
+  };
+
+  // Takes a function literal and generates code for it. This function
+  // should only be called by compiler.cc.
+  static Handle<Code> MakeCode(CompilationInfo* info);
+
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(CompilationInfo* info);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       CompilationInfo* info);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              FunctionLiteral* lit,
+                              bool is_toplevel,
+                              Handle