# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel x87
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
host_arch = 'arm'
elif host_arch == 'aarch64':
host_arch = 'arm64'
+ elif host_arch == 'mips64':
+ host_arch = 'mips64el'
elif host_arch.startswith('mips'):
host_arch = 'mipsel'
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
+ (v8_target_arch=="mips64el" and host_arch!="mips64el") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
'want_separate_host_toolset': 1,
'v8_use_mips_abi_hardfloat%': 'true',
# Default arch variant for MIPS.
- 'mips_arch_variant%': 'mips32r2',
+ 'mips_arch_variant%': 'r2',
'v8_enable_backtrace%': 0,
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
- ['mips_arch_variant=="mips32r2"', {
+ ['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
- ['mips_arch_variant=="mips32r1"', {
+ ['mips_arch_variant=="r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
'__mips_soft_float=1'
],
}],
- ['mips_arch_variant=="mips32r2"', {
+ ['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
],
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
- ['mips_arch_variant=="mips32r2"', {
+ ['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
- ['mips_arch_variant=="mips32r1"', {
+ ['mips_arch_variant=="r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="loongson"', {
'__mips_soft_float=1'
],
}],
- ['mips_arch_variant=="mips32r2"', {
+ ['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
['mips_arch_variant=="loongson"', {
}],
],
}], # v8_target_arch=="mipsel"
+ ['v8_target_arch=="mips64el"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_MIPS64',
+ ],
+ 'variables': {
+ 'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
+ },
+ 'conditions': [
+ ['mipscompiler=="yes"', {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': ['-EL'],
+ 'ldflags': ['-EL'],
+ 'conditions': [
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'cflags': ['-mhard-float'],
+ 'ldflags': ['-mhard-float'],
+ }, {
+ 'cflags': ['-msoft-float'],
+ 'ldflags': ['-msoft-float'],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
+ 'ldflags': [
+ '-mips64r2', '-mabi=64',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'cflags': ['-mips3', '-Wa,-mips3'],
+ }],
+ ],
+ }],
+ ],
+ }],
+ [ 'v8_can_use_fpu_instructions=="true"', {
+ 'defines': [
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }],
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'defines': [
+ '__mips_hard_float=1',
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }, {
+ 'defines': [
+ '__mips_soft_float=1'
+ ],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R2',],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'defines': ['_MIPS_ARCH_LOONGSON',],
+ }],
+ ],
+ }], # v8_target_arch=="mips64el"
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
#include "src/arm/assembler-arm-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h" // NOLINT
#else
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_MIPS64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "src/base/atomicops_internals_mips_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
+#include "src/base/atomicops_internals_mips64_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
+
+namespace v8 {
+namespace base {
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %5\n" // prev = *ptr
+ "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
+ "move %2, %4\n" // tmp = new_value
+ "sc %2, %1\n" // *ptr = tmp (with atomic check)
+ "beqz %2, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ "2:\n"
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+ : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+ : "memory");
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 temp, old;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %1, %2\n" // old = *ptr
+ "move %0, %3\n" // temp = new_value
+ "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory");
+
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp, temp2;
+
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %2\n" // temp = *ptr
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory");
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ MemoryBarrier();
+ Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+
+// 64-bit versions of the atomic ops.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "lld %0, %5\n" // prev = *ptr
+ "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
+ "move %2, %4\n" // tmp = new_value
+ "scd %2, %1\n" // *ptr = tmp (with atomic check)
+ "beqz %2, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ "2:\n"
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+ : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+ : "memory");
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 temp, old;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "lld %1, %2\n" // old = *ptr
+ "move %0, %3\n" // temp = new_value
+ "scd %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory");
+
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp, temp2;
+
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "lld %0, %2\n" // temp = *ptr
+ "daddu %1, %0, %3\n" // temp2 = temp + increment
+ "scd %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "daddu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory");
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ MemoryBarrier();
+ Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
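
For readers unfamiliar with these primitives, a minimal usage sketch follows (not part of the patch): a hypothetical SpinLock built only on the Atomic32 type and the Acquire/Release operations declared in the header above, pulled in through src/base/atomicops.h.

// Illustrative only: a tiny spin lock on top of the acquire/release
// compare-and-swap primitives defined by the new MIPS64 atomicops header.
// "SpinLock" is a hypothetical name, not something this patch adds.
#include "src/base/atomicops.h"

namespace {

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Acquire_CompareAndSwap returns the previous value of state_; keep
    // retrying until we observe 0 (unlocked) and swap in 1 (locked).
    while (v8::base::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Busy-wait; acquire semantics keep later accesses after the lock.
    }
  }

  void Unlock() {
    // Release_Store issues the "sync" barrier before the store, so writes
    // made inside the critical section are visible before the unlock.
    v8::base::Release_Store(&state_, 0);
  }

 private:
  v8::base::Atomic32 state_;
};

}  // namespace

The acquire CAS keeps critical-section accesses from being reordered above the lock; the release store keeps them from sinking below the unlock, which is exactly the contract documented in the header.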
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__mips64)
+#define V8_HOST_ARCH_MIPS64 1
+#define V8_HOST_ARCH_64_BIT 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_ARM64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
+#elif defined(__mips64)
+#define V8_TARGET_ARCH_MIPS64 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#else
#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_ARCH_32_BIT 1
+#elif V8_TARGET_ARCH_MIPS64
+#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
+#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
+#error Target architecture mips64 is only supported on mips64 and x64 host
+#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
+#elif V8_TARGET_ARCH_MIPS64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
#else
#endif // !V8_LIBC_MSVCRT
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 \
+ || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#if V8_OS_LINUX
size_t datalen_;
};
-#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Checks that a space-separated list of items contains one given 'item'.
static bool HasListItem(const char* list, const char* item) {
return false;
}
-#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#endif // V8_OS_LINUX
#endif // V8_OS_LINUX
-#elif V8_HOST_ARCH_MIPS
+#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
+#elif V8_HOST_ARCH_MIPS64
+ asm("break");
#elif V8_HOST_ARCH_IA32
#if defined(__native_client__)
asm("hlt");
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
+#define CODE_STUB_LIST_MIPS(V) \
+ V(RegExpCEntry) \
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
+#elif V8_TARGET_ARCH_MIPS64
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
V(DirectCEntry) \
#include "src/arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/code-stubs-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/code-stubs-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/code-stubs-x87.h"
#else
#include "src/arm/codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/codegen-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/codegen-x87.h" // NOLINT
#else
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#ifdef V8_TARGET_ARCH_ARM64
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
#endif
DEFINE_INT(sim_stack_size, 2 * MB / KB,
- "Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
+ "Stack size of the ARM64 and MIPS64 simulator "
+ "in kBytes (default is 2 MB)")
DEFINE_BOOL(log_regs_modified, true,
"When logging register values, only print modified registers.")
DEFINE_BOOL(log_colour, true, "When logging, try to use coloured output.")
#include "src/arm/frames-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/frames-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/frames-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/frames-x87.h" // NOLINT
#else
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 149;
static const int kBootCodeSizeMultiplier = 120;
+#elif V8_TARGET_ARCH_MIPS64
+ static const int kCodeSizeMultiplier = 149;
+ static const int kBootCodeSizeMultiplier = 120;
#else
#error Unsupported target architecture.
#endif
Label* if_true,
Label* if_false,
Label* fall_through);
+#elif V8_TARGET_ARCH_MIPS64
+ void Split(Condition cc,
+ Register lhs,
+ const Operand& rhs,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
#else // All non-mips arch.
void Split(Condition cc,
Label* if_true,
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS
UNIMPLEMENTED();
+#elif V8_TARGET_ARCH_MIPS64
+ UNIMPLEMENTED();
#else
#error Unsupported target architecture.
#endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
+#define USE_SIMULATOR 1
+#endif
#endif
// Determine whether the architecture uses an out-of-line constant pool.
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#endif
+#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h"
+#include "src/mips64/regexp-macro-assembler-mips64.h"
+#endif
namespace v8 {
namespace internal {
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else
#include "src/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-codegen-x87.h" // NOLINT
#else
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
Simulator::Initialize(this);
#endif
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
class Redirection;
class Simulator;
#endif
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
+ V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
+ V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
V(bool, simulator_initialized, false) \
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
simulator_(NULL),
#endif
next_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
FIELD_ACCESSOR(Simulator*, simulator)
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
Simulator* simulator_;
#endif
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT
#else
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_MIPS64
+ RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_X87
RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#include "src/x87/lithium-codegen-x87.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/lithium-arm64.h" // NOLINT
#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#include "src/x87/lithium-codegen-x87.h" // NOLINT
#include "src/mips/assembler-mips-inl.h"
#include "src/code.h" // must be after assembler_*.h
#include "src/mips/macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/constants-mips64.h"
+#include "src/assembler.h"
+#include "src/mips64/assembler-mips64.h" // NOLINT
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/code.h" // must be after assembler_*.h
+#include "src/mips64/macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/assembler.h"
#include "src/x87/assembler-x87.h"
--- /dev/null
+plind44@gmail.com
+gergely@homejinni.com
+palfia@homejinni.com
+kilvadyb@homejinni.com
+Dusan.Milosavljevic@rt-rk.com
--- /dev/null
+
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
+#define V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
+
+#include "src/mips64/assembler-mips64.h"
+
+#include "src/assembler.h"
+#include "src/debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand.
+
+Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm64_ = immediate;
+ rmode_ = rmode;
+}
+
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm64_ = reinterpret_cast<int64_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm64_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE32;
+}
+
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+}
+
+
+bool Operand::is_reg() const {
+ return rm_.is_valid();
+}
+
+
+int Register::NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+}
+
+
+int DoubleRegister::NumRegisters() {
+ return FPURegister::kMaxNumRegisters;
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+ return FPURegister::kMaxNumAllocatableRegisters;
+}
+
+
+int FPURegister::ToAllocationIndex(FPURegister reg) {
+ ASSERT(reg.code() % 2 == 0);
+ ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
+ ASSERT(reg.is_valid());
+ ASSERT(!reg.is(kDoubleRegZero));
+ ASSERT(!reg.is(kLithiumScratchDouble));
+ return (reg.code() / 2);
+}
+
+
+// -----------------------------------------------------------------------------
+// RelocInfo.
+
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ byte* p = reinterpret_cast<byte*>(pc_);
+ int count = Assembler::RelocateInternalReference(p, delta);
+ CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
+ }
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT ||
+ rmode_ == EXTERNAL_REFERENCE);
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. After jump optimization,
+  // that is the address of the instruction that follows the J/JAL/JR/JALR
+ // instruction.
+ // return reinterpret_cast<Address>(
+ // pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+ return reinterpret_cast<Address>(
+ pc_ + Assembler::kInstructionsFor64BitConstant * Assembler::kInstrSize);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+int RelocInfo::target_address_size() {
+ return Assembler::kSpecialTargetSize;
+}
+
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_, host_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
+ Memory::Address_at(pc_) = address;
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because cell can never be on
+ // evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
+}
+
+
+static const int kNoCodeAgeSequenceLength = 9 * Assembler::kInstrSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  UNREACHABLE();  // This should never be reached on MIPS64.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+ host_,
+ stub->instruction_start());
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ Assembler::set_target_address_at(pc_, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Object* RelocInfo::call_object() {
+ return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ return reinterpret_cast<Object**>(pc_ + 6 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+}
+
+
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ Instr instr0 = Assembler::instr_at(pc_); // lui.
+ Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize); // ori.
+ Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize); // dsll.
+ Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize); // ori.
+ Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize); // jalr.
+
+ bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+ (instr1 & kOpcodeMask) == ORI &&
+ (instr2 & kFunctionFieldMask) == DSLL &&
+ (instr3 & kOpcodeMask) == ORI &&
+ (instr4 & kFunctionFieldMask) == JALR);
+ return patched_return;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instr current_instr = Assembler::instr_at(pc_);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+
+void Assembler::CheckTrampolinePoolQuick() {
+ if (pc_offset() >= next_buffer_check_) {
+ CheckTrampolinePool();
+ }
+}
+
+
+void Assembler::emit(Instr x) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
+}
+
+
+void Assembler::emit(uint64_t x) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ *reinterpret_cast<uint64_t*>(pc_) = x;
+ pc_ += kInstrSize * 2;
+ CheckTrampolinePoolQuick();
+}
+
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
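
The patched-return check and the target_at()/target_at_put() logic in this port assume that a 64-bit target address is materialized by a lui/ori/dsll/ori sequence holding a sign-extended 48-bit value. A standalone sketch of that split and recombination, for illustration only (the struct and helper names below are hypothetical, not part of the patch):

// Illustrative sketch: how a 48-bit target address maps onto the
// lui/ori/dsll/ori sequence checked by RelocInfo::IsPatchedReturnSequence(),
// and how Assembler::target_at() recombines the pieces.
#include <stdint.h>

struct Mips64AddressFields {
  uint16_t lui_imm;   // bits 47..32, loaded by lui
  uint16_t ori1_imm;  // bits 31..16, loaded by the first ori
  uint16_t ori2_imm;  // bits 15..0,  loaded by the second ori (after dsll 16)
};

static inline Mips64AddressFields SplitAddress(uint64_t addr) {
  // Mirrors the field extraction in Assembler::target_at_put().
  Mips64AddressFields f;
  f.lui_imm  = static_cast<uint16_t>((addr >> 32) & 0xFFFF);
  f.ori1_imm = static_cast<uint16_t>((addr >> 16) & 0xFFFF);
  f.ori2_imm = static_cast<uint16_t>(addr & 0xFFFF);
  return f;
}

static inline int64_t RecombineAddress(const Mips64AddressFields& f) {
  // Mirrors Assembler::target_at(): pack the three 16-bit fields into the
  // upper bits, then arithmetically shift right by 16 to sign-extend the
  // 48-bit address.
  uint64_t packed = (static_cast<uint64_t>(f.lui_imm) << 48) |
                    (static_cast<uint64_t>(f.ori1_imm) << 32) |
                    (static_cast<uint64_t>(f.ori2_imm) << 16);
  return static_cast<int64_t>(packed) >> 16;
}

Round-tripping a canonical sign-extended address through SplitAddress() and RecombineAddress() returns the original value, which is the property target_at() relies on when walking a label chain.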
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/base/cpu.h"
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Get the CPU features enabled by the build. For cross compilation the
+// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
+// FPU instructions when building the snapshot.
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
+#ifdef CAN_USE_FPU_INSTRUCTIONS
+ answer |= 1u << FPU;
+#endif // def CAN_USE_FPU_INSTRUCTIONS
+
+ // If the compiler is allowed to use FPU then we can use FPU too in our code
+ // generation even when generating snapshots. This won't work for cross
+ // compilation.
+#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
+ answer |= 1u << FPU;
+#endif
+
+ return answer;
+}
+
+
+const char* DoubleRegister::AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "f0",
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26"
+ };
+ return names[index];
+}
+
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+ // If the compiler is allowed to use fpu then we can use fpu too in our
+ // code generation.
+#ifndef __mips__
+ // For the simulator build, use FPU.
+ supported_ |= 1u << FPU;
+#else
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#endif
+}
+
+
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
+int ToNumber(Register reg) {
+ ASSERT(reg.is_valid());
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // at
+ 2, // v0
+ 3, // v1
+ 4, // a0
+ 5, // a1
+ 6, // a2
+ 7, // a3
+ 8, // a4
+ 9, // a5
+ 10, // a6
+ 11, // a7
+ 12, // t0
+ 13, // t1
+ 14, // t2
+ 15, // t3
+ 16, // s0
+ 17, // s1
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // t8
+ 25, // t9
+ 26, // k0
+ 27, // k1
+ 28, // gp
+ 29, // sp
+ 30, // fp
+ 31, // ra
+ };
+ return kNumbers[reg.code()];
+}
+
+
+Register ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg,
+ at,
+ v0, v1,
+ a0, a1, a2, a3, a4, a5, a6, a7,
+ t0, t1, t2, t3,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ fp,
+ ra
+ };
+ return kRegisters[num];
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
+ 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded. Being
+  // specially coded on MIPS64 means that it is in the lui/ori/dsll/ori
+  // sequence used to load the address, and that is always the case inside
+  // code objects.
+ return true;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
+// Patch the code at the current address with the supplied instructions.
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-mips64-inl.h for inlined constructors.
+
+Operand::Operand(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ imm64_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // No relocation needed.
+ imm64_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE64;
+ }
+}
+
+
+MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
+ offset_ = offset;
+}
+
+
+MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
+ OffsetAddend offset_addend) : Operand(rm) {
+ offset_ = unit * multiplier + offset_addend;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+static const int kNegOffset = 0x00008000;
+// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
+ | (kRegister_sp_Code << kRtShift)
+ | (kPointerSize & kImm16Mask); // NOLINT
+// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
+const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
+ | (kRegister_sp_Code << kRtShift)
+ | (-kPointerSize & kImm16Mask); // NOLINT
+// sd(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
+ | (0 & kImm16Mask); // NOLINT
+// ld(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
+ | (0 & kImm16Mask); // NOLINT
+
+const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
+ | (0 & kImm16Mask); // NOLINT
+
+const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
+ | (0 & kImm16Mask); // NOLINT
+
+const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
+ | (kNegOffset & kImm16Mask); // NOLINT
+
+const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
+ | (kNegOffset & kImm16Mask); // NOLINT
+// A mask for the Rt register for push, pop, lw, sw instructions.
+const Instr kRtMask = kRtFieldMask;
+const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+const Instr kLwSwOffsetMask = kImm16Mask;
+
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ positions_recorder_(this) {
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ // We leave space (16 * kTrampolineSlotsSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ trampoline_emitted_ = FLAG_force_long_branches;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+
+ ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
+ // Set up code descriptor.
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CodeTargetAlign() {
+ // No advantage to aligning branch/call targets to more than
+ // single instruction, that I am aware of.
+ Align(4);
+}
+
+
+Register Assembler::GetRtReg(Instr instr) {
+ Register rt;
+ rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ return rt;
+}
+
+
+Register Assembler::GetRsReg(Instr instr) {
+ Register rs;
+ rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+ Register rd;
+ rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+ return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+ return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+ return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+ return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+ return instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+ return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+ return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+ return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetFunction(Instr instr) {
+ return (instr & kFunctionFieldMask) >> kFunctionShift;
+}
+
+
+uint32_t Assembler::GetFunctionField(Instr instr) {
+ return instr & kFunctionFieldMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+ return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+ return instr & ~kImm16Mask;
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+ return (instr & ~kRtMask) == kPopRegPattern;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+ return (instr & ~kRtMask) == kPushRegPattern;
+}
+
+
+bool Assembler::IsSwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsSwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kSwRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kLwRegFpNegOffsetPattern);
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by a value in the instruction of -1,
+// which is an otherwise illegal value (branch -1 is inf loop).
+// The instruction 16-bit offset field addresses 32-bit words, but in code
+// it is converted to an 18-bit value addressing bytes, hence the -4 value.
+
+const int kEndOfChain = -4;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
+
+
+bool Assembler::IsBranch(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ // Checks if the instruction is a branch.
+ return opcode == BEQ ||
+ opcode == BNE ||
+ opcode == BLEZ ||
+ opcode == BGTZ ||
+ opcode == BEQL ||
+ opcode == BNEL ||
+ opcode == BLEZL ||
+ opcode == BGTZL ||
+ (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
+ rt_field == BLTZAL || rt_field == BGEZAL)) ||
+ (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+}
+
+
+bool Assembler::IsEmittedConstant(Instr instr) {
+ uint32_t label_constant = GetLabelConst(instr);
+ return label_constant == 0; // Emitted label const in reg-exp engine.
+}
+
+
+bool Assembler::IsBeq(Instr instr) {
+ return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+ return GetOpcodeField(instr) == BNE;
+}
+
+
+bool Assembler::IsJump(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rd_field = GetRdField(instr);
+ uint32_t function_field = GetFunctionField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J || opcode == JAL ||
+ (opcode == SPECIAL && rt_field == 0 &&
+ ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+}
+
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J;
+}
+
+
+bool Assembler::IsJal(Instr instr) {
+ return GetOpcodeField(instr) == JAL;
+}
+
+
+bool Assembler::IsJr(Instr instr) {
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+}
+
+
+bool Assembler::IsJalr(Instr instr) {
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+}
+
+
+bool Assembler::IsLui(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a load upper immediate.
+ return opcode == LUI;
+}
+
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+  // Checks if the instruction is an or-immediate (ori).
+ return opcode == ORI;
+}
+
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+ // See Assembler::nop(type).
+ ASSERT(type < 32);
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t function = GetFunctionField(instr);
+ uint32_t rt = GetRt(instr);
+ uint32_t rd = GetRd(instr);
+ uint32_t sa = GetSa(instr);
+
+ // Traditional mips nop == sll(zero_reg, zero_reg, 0)
+ // When marking non-zero type, use sll(zero_reg, at, type)
+ // to avoid use of mips ssnop and ehb special encodings
+ // of the sll instruction.
+
+ Register nop_rt_reg = (type == 0) ? zero_reg : at;
+ bool ret = (opcode == SPECIAL && function == SLL &&
+ rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
+ sa == type);
+
+ return ret;
+}
+
+
+int32_t Assembler::GetBranchOffset(Instr instr) {
+ ASSERT(IsBranch(instr));
+ return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
+}
+
+
+bool Assembler::IsLw(Instr instr) {
+ return ((instr & kOpcodeMask) == LW);
+}
+
+
+int16_t Assembler::GetLwOffset(Instr instr) {
+ ASSERT(IsLw(instr));
+ return ((instr & kImm16Mask));
+}
+
+
+Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsLw(instr));
+
+ // We actually create a new lw instruction based on the original one.
+ Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
+ | (offset & kImm16Mask);
+
+ return temp_instr;
+}
+
+
+bool Assembler::IsSw(Instr instr) {
+ return ((instr & kOpcodeMask) == SW);
+}
+
+
+Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsSw(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAddImmediate(Instr instr) {
+ return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
+}
+
+
+Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
+ ASSERT(IsAddImmediate(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAndImmediate(Instr instr) {
+ return GetOpcodeField(instr) == ANDI;
+}
+
+
+int64_t Assembler::target_at(int64_t pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~kImm16Mask) == 0) {
+ // Emitted label constant, not part of a branch.
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
+ }
+ // Check we have a branch or jump instruction.
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ if (IsBranch(instr)) {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ if (imm18 == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + kBranchPCOffset + imm18;
+ }
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ ASSERT(IsOri(instr_ori2));
+
+ // TODO(plind) create named constants for shift values.
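+    // The three 16-bit immediates carry bits [47:32], [31:16] and [15:0] of
+    // the 48-bit address; the shift left and arithmetic shift right by 16
+    // below sign-extend bit 47 into the upper word of the result.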
+ int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
+ imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
+ imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
+    // Sign extend address.
+ imm >>= 16;
+
+ if (imm == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
+ int64_t delta = instr_address - imm;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
+ } else {
+ int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (imm28 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
+ instr_address &= kImm28Mask;
+ int64_t delta = instr_address - imm28;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
+ }
+}
+
+
+void Assembler::target_at_put(int64_t pos, int64_t target_pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~kImm16Mask) == 0) {
+ ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code* of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+ if (IsBranch(instr)) {
+ int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+ ASSERT((imm18 & 3) == 0);
+
+ instr &= ~kImm16Mask;
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ ASSERT(IsOri(instr_ori2));
+
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+ instr_ori2 &= ~kImm16Mask;
+
+ instr_at_put(pos + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> 32) & kImm16Mask));
+ instr_at_put(pos + 1 * Assembler::kInstrSize,
+ instr_ori | ((imm >> 16) & kImm16Mask));
+ instr_at_put(pos + 3 * Assembler::kInstrSize,
+ instr_ori2 | (imm & kImm16Mask));
+ } else {
+ uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
+
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
+
+ instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ }
+}
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~kImm16Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ PrintF("%d\n", instr);
+ }
+ next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int32_t trampoline_pos = kInvalidSlotPos;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+
+ while (L->is_linked()) {
+ int32_t fixup_pos = L->pos();
+ int32_t dist = pos - fixup_pos;
+ next(L); // Call next before overwriting link with target at fixup_pos.
+ Instr instr = instr_at(fixup_pos);
+ if (IsBranch(instr)) {
+ if (dist > kMaxBranchOffset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ }
+ ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+ target_at_put(fixup_pos, trampoline_pos);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
+ }
+ target_at_put(fixup_pos, pos);
+ } else {
+ ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
+ target_at_put(fixup_pos, pos);
+ }
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+void Assembler::bind(Label* L) {
+ ASSERT(!L->is_bound()); // Label can only be bound once.
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+ ASSERT(L->is_linked());
+ int link = target_at(L->pos());
+ if (link == kEndOfChain) {
+ L->Unuse();
+ } else {
+ ASSERT(link >= 0);
+ L->link_to(link);
+ }
+}
+
+
+bool Assembler::is_near(Label* L) {
+ if (L->is_bound()) {
+ return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ }
+ return false;
+}
+
+
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in the MIPS 16-bit immediate-offset instruction
+// space. There is no guarantee that the relocated location can be similarly
+// encoded.
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsNone(rmode);
+}
+
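+// Instructions with register operands use the MIPS R-type encoding:
+// [ opcode(6) | rs(5) | rt(5) | rd(5) | sa(5) | funct(6) ].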
+void Assembler::GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ Register rd,
+ uint16_t sa,
+ SecondaryField func) {
+ ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func) {
+ ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (msb << kRdShift) | (lsb << kSaShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
+ Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
+ | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
+ Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+ Instr instr = opcode | fmt | (rt.code() << kRtShift)
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func) {
+ ASSERT(fs.is_valid() && rt.is_valid());
+ Instr instr =
+ opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
+ emit(instr);
+}
+
+
+// Instructions with immediate value.
+// Registers are in the order of the instruction encoding, from left to right.
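+// They follow the MIPS I-type layout: [ opcode(6) | rs(5) | rt(5) | imm(16) ].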
+void Assembler::GenInstrImmediate(Opcode opcode,
+ Register rs,
+ Register rt,
+ int32_t j) {
+ ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (j & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+ Register rs,
+ SecondaryField SF,
+ int32_t j) {
+ ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+ Register rs,
+ FPURegister ft,
+ int32_t j) {
+ ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
+ | (j & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::GenInstrJump(Opcode opcode,
+ uint32_t address) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ ASSERT(is_uint26(address));
+ Instr instr = opcode | address;
+ emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
+
+ if (!internal_trampoline_exception_) {
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
+ }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
+
+uint64_t Assembler::jump_address(Label* L) {
+ int64_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ return imm;
+}
+
+
+int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
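+  // The offset is relative to the delay-slot pc (pc + kBranchPCOffset) and is
+  // encoded in instruction units, hence the alignment and is_int16 checks on
+  // offset >> 2 below.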
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ ASSERT((offset & 3) == 0);
+ ASSERT(is_int16(offset >> 2));
+
+ return offset;
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ ASSERT((imm18 & 3) == 0);
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
+ } else {
+ target_pos = kEndOfChain;
+ instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ }
+ L->link_to(at_offset);
+ }
+}
+
+
+//------- Branch and jump instructions --------
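+//
+// The branch emitters below block the trampoline pool for one extra
+// instruction so that a pool entry is never emitted into the branch delay
+// slot.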
+
+void Assembler::b(int16_t offset) {
+ beq(zero_reg, zero_reg, offset);
+}
+
+
+void Assembler::bal(int16_t offset) {
+ positions_recorder()->WriteRecordedPositions();
+ bgezal(zero_reg, offset);
+}
+
+
+void Assembler::beq(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ GenInstrImmediate(BEQ, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::bgez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::bgezal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::bgtz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::blez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::bltz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::bltzal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::bne(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ GenInstrImmediate(BNE, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::j(int64_t target) {
+#ifdef DEBUG
+ // Get pc of delay slot.
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
+ ASSERT(in_range && ((target & 3) == 0));
+#endif
+ GenInstrJump(J, target >> 2);
+}
+
+
+void Assembler::jr(Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::jal(int64_t target) {
+#ifdef DEBUG
+ // Get pc of delay slot.
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
+ ASSERT(in_range && ((target & 3) == 0));
+#endif
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrJump(JAL, target >> 2);
+}
+
+
+void Assembler::jalr(Register rs, Register rd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+void Assembler::j_or_jr(int64_t target, Register rs) {
+ // Get pc of delay slot.
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
+ if (in_range) {
+ j(target);
+ } else {
+ jr(t9);
+ }
+}
+
+
+void Assembler::jal_or_jalr(int64_t target, Register rs) {
+ // Get pc of delay slot.
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
+ (kImm26Bits+kImmFieldShift)) == 0;
+ if (in_range) {
+ jal(target);
+ } else {
+ jalr(t9);
+ }
+}
+
+
+// -------Data-processing-instructions---------
+
+// Arithmetic.
+
+void Assembler::addu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
+}
+
+
+void Assembler::addiu(Register rd, Register rs, int32_t j) {
+ GenInstrImmediate(ADDIU, rs, rd, j);
+}
+
+
+void Assembler::subu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
+}
+
+
+void Assembler::mul(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+}
+
+
+void Assembler::mult(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
+}
+
+
+void Assembler::multu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
+}
+
+
+void Assembler::daddiu(Register rd, Register rs, int32_t j) {
+ GenInstrImmediate(DADDIU, rs, rd, j);
+}
+
+
+void Assembler::div(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
+}
+
+
+void Assembler::divu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
+}
+
+
+void Assembler::daddu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
+}
+
+
+void Assembler::dsubu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
+}
+
+
+void Assembler::dmult(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
+}
+
+
+void Assembler::dmultu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
+}
+
+
+void Assembler::ddiv(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
+}
+
+
+void Assembler::ddivu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
+}
+
+
+// Logical.
+
+void Assembler::and_(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
+}
+
+
+void Assembler::andi(Register rt, Register rs, int32_t j) {
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(ANDI, rs, rt, j);
+}
+
+
+void Assembler::or_(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
+}
+
+
+void Assembler::ori(Register rt, Register rs, int32_t j) {
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(ORI, rs, rt, j);
+}
+
+
+void Assembler::xor_(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
+}
+
+
+void Assembler::xori(Register rt, Register rs, int32_t j) {
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(XORI, rs, rt, j);
+}
+
+
+void Assembler::nor(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
+}
+
+
+// Shifts.
+void Assembler::sll(Register rd,
+ Register rt,
+ uint16_t sa,
+ bool coming_from_nop) {
+ // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
+ // generated using the sll instruction. They must be generated using
+ // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
+ // instructions.
+ ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+}
+
+
+void Assembler::sllv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
+}
+
+
+void Assembler::srl(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+}
+
+
+void Assembler::srlv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
+}
+
+
+void Assembler::sra(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+}
+
+
+void Assembler::srav(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
+}
+
+
+void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+ ASSERT(kArchVariant == kMips64r2);
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+ emit(instr);
+}
+
+
+void Assembler::rotrv(Register rd, Register rt, Register rs) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ ASSERT(kArchVariant == kMips64r2);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+ emit(instr);
+}
+
+
+void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
+}
+
+
+void Assembler::dsllv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
+}
+
+
+void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
+}
+
+
+void Assembler::dsrlv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
+}
+
+
+void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
+ ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
+ emit(instr);
+}
+
+
+void Assembler::drotrv(Register rd, Register rt, Register rs) {
+ ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
+ emit(instr);
+}
+
+
+void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
+}
+
+
+void Assembler::dsrav(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
+}
+
+
+void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
+}
+
+
+void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
+}
+
+
+void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
+}
+
+
+// ------------Memory-instructions-------------
+
+// Helper for base-reg + offset, when offset is larger than int16.
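+// For example (assuming kLuiShift == 16), an offset of 0x12340 expands to:
+//   daddiu(at, zero_reg, 0x0001); dsll(at, at, 16); ori(at, at, 0x2340);
+//   daddu(at, at, base_register).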
+void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+ ASSERT(!src.rm().is(at));
+ ASSERT(is_int32(src.offset_));
+ daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
+ dsll(at, at, kLuiShift);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm()); // Add base register.
+}
+
+
+void Assembler::lb(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lbu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lhu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lw(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lwu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LWU, at, rd, 0); // Equiv to lwu(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lwl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lwr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sb(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::sh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::sw(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::swl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::swr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lui(Register rd, int32_t j) {
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(LUI, zero_reg, rd, j);
+}
+
+
+void Assembler::ldl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::ldr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sdl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sdr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::ld(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LD, at, rd, 0); // Equiv to ld(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::sd(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SD, at, rd, 0); // Equiv to sd(rd, MemOperand(at, 0));
+ }
+}
+
+
+// -------------Misc-instructions--------------
+
+// Break / Trap instructions.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+ ASSERT((code & ~0xfffff) == 0);
+ // We need to invalidate breaks that could be stops as well because the
+ // simulator expects a char pointer after the stop instruction.
+  // See constants-mips64.h for explanation.
+ ASSERT((break_as_stop &&
+ code <= kMaxStopCode &&
+ code > kMaxWatchpointCode) ||
+ (!break_as_stop &&
+ (code > kMaxStopCode ||
+ code <= kMaxWatchpointCode)));
+ Instr break_instr = SPECIAL | BREAK | (code << 6);
+ emit(break_instr);
+}
+
+
+void Assembler::stop(const char* msg, uint32_t code) {
+ ASSERT(code > kMaxWatchpointCode);
+ ASSERT(code <= kMaxStopCode);
+#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
+ break_(0x54321);
+#else // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+ BlockTrampolinePoolFor(3);
+ // The Simulator will handle the stop instruction and get the message address.
+ // On MIPS stop() is just a special kind of break_().
+ break_(code, true);
+ emit(reinterpret_cast<uint64_t>(msg));
+#endif
+}
+
+
+void Assembler::tge(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr = SPECIAL | TGE | rs.code() << kRsShift
+ | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
+ | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tlt(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr =
+ SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tltu(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr =
+ SPECIAL | TLTU | rs.code() << kRsShift
+ | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::teq(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr =
+ SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tne(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr =
+ SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+// Move from HI/LO register.
+
+void Assembler::mfhi(Register rd) {
+ GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
+}
+
+
+void Assembler::mflo(Register rd) {
+ GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
+}
+
+
+// Set on less than instructions.
+void Assembler::slt(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
+}
+
+
+void Assembler::sltu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
+}
+
+
+void Assembler::slti(Register rt, Register rs, int32_t j) {
+ GenInstrImmediate(SLTI, rs, rt, j);
+}
+
+
+void Assembler::sltiu(Register rt, Register rs, int32_t j) {
+ GenInstrImmediate(SLTIU, rs, rt, j);
+}
+
+
+// Conditional move.
+void Assembler::movz(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
+}
+
+
+void Assembler::movn(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
+}
+
+
+void Assembler::movt(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+void Assembler::movf(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+// Bit twiddling.
+void Assembler::clz(Register rd, Register rs) {
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+}
+
+
+void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ins.
+ // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
+}
+
+
+void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ext.
+ // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
+}
+
+
+void Assembler::pref(int32_t hint, const MemOperand& rs) {
+ ASSERT(kArchVariant != kLoongson);
+ ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
+ Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
+ | (rs.offset_);
+ emit(instr);
+}
+
+
+// --------Coprocessor-instructions----------------
+
+// Load, store, move.
+void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::swc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::mtc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MTC1, rt, fs, f0);
+}
+
+
+void Assembler::mthc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
+void Assembler::dmtc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, DMTC1, rt, fs, f0);
+}
+
+
+void Assembler::mfc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFC1, rt, fs, f0);
+}
+
+
+void Assembler::mfhc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
+void Assembler::dmfc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, DMFC1, rt, fs, f0);
+}
+
+
+void Assembler::ctc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CTC1, rt, fs);
+}
+
+
+void Assembler::cfc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CFC1, rt, fs);
+}
+
+
+void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
+ uint64_t i;
+ memcpy(&i, &d, 8);
+
+ *lo = i & 0xffffffff;
+ *hi = i >> 32;
+}
+
+
+// Arithmetic.
+
+void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
+}
+
+
+void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
+}
+
+
+void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
+}
+
+
+void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ ASSERT(kArchVariant != kLoongson);
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
+}
+
+
+void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
+}
+
+
+void Assembler::abs_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
+}
+
+
+void Assembler::mov_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+}
+
+
+void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
+}
+
+
+void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
+}
+
+
+// Conversions.
+
+void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
+}
+
+
+void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
+}
+
+
+void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
+}
+
+
+void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
+}
+
+
+void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
+}
+
+
+void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
+}
+
+
+void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
+}
+
+
+void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
+}
+
+
+void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
+}
+
+
+void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
+}
+
+
+void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
+}
+
+
+void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
+}
+
+
+void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
+}
+
+
+void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
+}
+
+
+void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
+}
+
+
+void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
+}
+
+
+void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
+}
+
+
+void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
+}
+
+
+void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
+}
+
+
+void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
+}
+
+
+void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
+}
+
+
+void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
+}
+
+
+void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
+}
+
+
+void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
+}
+
+
+void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
+}
+
+
+void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
+}
+
+
+// Conditions.
+void Assembler::c(FPUCondition cond, SecondaryField fmt,
+ FPURegister fs, FPURegister ft, uint16_t cc) {
+ ASSERT(is_uint3(cc));
+ ASSERT((fmt & ~(31 << kRsShift)) == 0);
+ Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
+ | cc << 8 | 3 << 4 | cond;
+ emit(instr);
+}
+
+
+void Assembler::fcmp(FPURegister src1, const double src2,
+ FPUCondition cond) {
+ ASSERT(src2 == 0.0);
+ mtc1(zero_reg, f14);
+ cvt_d_w(f14, f14);
+ c(cond, D, src1, f14, 0);
+}
+
+
+void Assembler::bc1f(int16_t offset, uint16_t cc) {
+ ASSERT(is_uint3(cc));
+ Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::bc1t(int16_t offset, uint16_t cc) {
+ ASSERT(is_uint3(cc));
+ Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+// Debugging.
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
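+// Relocates an internal reference (a j/jal target or a lui/ori/dsll/ori
+// address load) after the code buffer has moved by pc_delta bytes. Returns
+// the number of instructions patched.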
+int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ ASSERT(IsJ(instr) || IsLui(instr));
+ if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ ASSERT(IsOri(instr_ori2));
+ // TODO(plind): symbolic names for the shifts.
+ int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
+ imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
+ imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
+ // Sign extend address.
+ imm >>= 16;
+
+ if (imm == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm += pc_delta;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+ instr_ori2 &= ~kImm16Mask;
+
+ instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> 32) & kImm16Mask));
+ instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | (imm >> 16 & kImm16Mask));
+ instr_at_put(pc + 3 * Assembler::kInstrSize,
+ instr_ori2 | (imm & kImm16Mask));
+ return 4; // Number of instructions patched.
+ } else {
+ uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+
+ imm28 += pc_delta;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
+
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
+
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // The new buffer.
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else if (buffer_size_ < 1*MB) {
+ desc.buffer_size = 2*buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1*MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer_;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer_ + buffer_size_);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // Relocate runtime entries.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
+ RelocateInternalReference(p, pc_delta);
+ }
+ }
+
+ ASSERT(!overflow());
+}
+
+
+void Assembler::db(uint8_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit_code_stub_address(Code* stub) {
+ CheckBuffer();
+ *reinterpret_cast<uint64_t*>(pc_) =
+ reinterpret_cast<uint64_t>(stub->instruction_start());
+ pc_ += sizeof(uint64_t);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, rmode, data, NULL);
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
+ // Adjust code for new modes.
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode));
+ // These modes do not need an entry in the constant pool.
+ }
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(pc_,
+ rmode,
+ RecordedAstId().ToInt(),
+ NULL);
+ ClearRecordedAstId();
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
+ }
+}
+
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ ASSERT(!trampoline_emitted_);
+ ASSERT(unbound_labels_count_ >= 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump (2 instructions), then we emit trampoline pool.
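+    // Each unbound label gets one pool slot: lui/ori/dsll/ori to build the
+    // 64-bit destination in 'at', followed by jr(at) and a delay-slot nop.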
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+ nop();
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ uint64_t imm64;
+ imm64 = jump_address(&after_pool);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and available
+ // to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ // TODO(plind): Verify this, presume I cannot use macro-assembler
+ // here.
+ lui(at, (imm64 >> 32) & kImm16Mask);
+ ori(at, at, (imm64 >> 16) & kImm16Mask);
+ dsll(at, at, 16);
+ ori(at, at, imm64 & kImm16Mask);
+ }
+ jr(at);
+ nop();
+ }
+ bind(&after_pool);
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit trampoline once, we need to prevent any
+ // further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // Number of branches to unbound label at this point is zero, so we can
+ // move next buffer check to maximum.
+ next_buffer_check_ = pc_offset() +
+ kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ }
+ return;
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ Instr instr0 = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ Instr instr3 = instr_at(pc + 3 * kInstrSize);
+
+  // Interpret the 4-instruction sequence generated by li for an address; see
+  // the listing in Assembler::set_target_address_at() just below.
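+  // For example, lui 0x7f00 / ori 0x1234 / ori 0x5678 reassembles to
+  // 0x00007f0012345678 after the 48-to-64-bit sign extension below.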
+ if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
+ (GetOpcodeField(instr3) == ORI)) {
+ // Assemble the 48 bit value.
+ int64_t addr = static_cast<int64_t>(
+ ((uint64_t)(GetImmediate16(instr0)) << 32) |
+ ((uint64_t)(GetImmediate16(instr1)) << 16) |
+ ((uint64_t)(GetImmediate16(instr3))));
+
+ // Sign extend to get canonical address.
+ addr = (addr << 16) >> 16;
+ return reinterpret_cast<Address>(addr);
+ }
+ // We should never get here, force a bad address if we do.
+ UNREACHABLE();
+ return (Address)0x0;
+}
+
+
+// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
+// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
+// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
+// base::OS::nan_value() returns a qNaN.
+void Assembler::QuietNaN(HeapObject* object) {
+ HeapNumber::cast(object)->set_value(base::OS::nan_value());
+}
+
+
+// On Mips64, a target address is stored in a 4-instruction sequence:
+// 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+// 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+// 2: dsll(rd, rd, 16);
+// 3: ori(rd, rd, j.imm64_ & kImm16Mask);
+//
+// Patching the address must replace all the lui & ori instructions,
+// and flush the i-cache.
+//
+// Note that the code below always rewrites the full lui/ori/dsll/ori
+// sequence; emitting a shorter sequence when the address fits in fewer bits
+// is a possible optimization, but it should be benchmarked first.
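+//
+// As a concrete example, patching to target 0x00007f0012345678 rewrites the
+// sequence as:
+//   lui(rt, 0x7f00); ori(rt, rt, 0x1234); dsll(rt, rt, 16);
+//   ori(rt, rt, 0x5678).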
+void Assembler::set_target_address_at(Address pc,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+// Only 4 instructions are used to load an address on MIPS64 because only the
+// low 48 bits of the address are effectively used. This relies on the fact
+// that the upper bits [63:48] are not used for virtual address translation
+// and only have to be set according to the value of bit 47 in order to get a
+// canonical address.
+ Instr instr1 = instr_at(pc + kInstrSize);
+ uint32_t rt_code = GetRt(instr1);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ uint64_t itarget = reinterpret_cast<uint64_t>(target);
+
+#ifdef DEBUG
+ // Check we have the result from a li macro-instruction.
+ Instr instr0 = instr_at(pc);
+ Instr instr3 = instr_at(pc + kInstrSize * 3);
+ CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
+ GetOpcodeField(instr3) == ORI));
+#endif
+
+  // Must use 4 instructions to ensure patchable code.
+  // lui rt, upper-16.
+  // ori rt, rt, middle-16.
+  // dsll rt, rt, 16.
+  // ori rt, rt, lower-16.
+ *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
+ *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
+ | ((itarget >> 16) & kImm16Mask);
+ *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
+ | (itarget & kImm16Mask);
+
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
+ }
+}
+
+
+void Assembler::JumpLabelToJumpRegister(Address pc) {
+ // Address pc points to lui/ori instructions.
+  // A jump to a label may follow at pc + 6 * kInstrSize.
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+#ifdef DEBUG
+ Instr instr1 = instr_at(pc);
+#endif
+ Instr instr2 = instr_at(pc + 1 * kInstrSize);
+ Instr instr3 = instr_at(pc + 6 * kInstrSize);
+ bool patched = false;
+
+ if (IsJal(instr3)) {
+ ASSERT(GetOpcodeField(instr1) == LUI);
+ ASSERT(GetOpcodeField(instr2) == ORI);
+
+ uint32_t rs_field = GetRt(instr2) << kRsShift;
+ uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
+ *(p+6) = SPECIAL | rs_field | rd_field | JALR;
+ patched = true;
+ } else if (IsJ(instr3)) {
+ ASSERT(GetOpcodeField(instr1) == LUI);
+ ASSERT(GetOpcodeField(instr2) == ORI);
+
+ uint32_t rs_field = GetRt(instr2) << kRsShift;
+ *(p+6) = SPECIAL | rs_field | JR;
+ patched = true;
+ }
+
+ if (patched) {
+    CpuFeatures::FlushICache(pc + 6 * kInstrSize, sizeof(int32_t));
+ }
+}
+
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
+#define V8_MIPS_ASSEMBLER_MIPS_H_
+
+#include <stdio.h>
+#include "src/assembler.h"
+#include "src/mips64/constants-mips64.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+// Core register.
+struct Register {
+ static const int kNumRegisters = v8::internal::kNumRegisters;
+  static const int kMaxNumAllocatableRegisters = 14; // v0 through t2 and cp.
+ static const int kSizeInBytes = 8;
+ static const int kCpRegister = 23; // cp (s7) is the 23rd register.
+
+ inline static int NumAllocatableRegisters();
+
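+  // The allocation index skips zero_reg and 'at': a0 (code 4), for example,
+  // maps to index 2, while cp (s7) maps to the last index.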
+ static int ToAllocationIndex(Register reg) {
+ ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
+ reg.is(from_code(kCpRegister)));
+ return reg.is(from_code(kCpRegister)) ?
+ kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
+ reg.code() - 2; // zero_reg and 'at' are skipped.
+ }
+
+ static Register FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return index == kMaxNumAllocatableRegisters - 1 ?
+ from_code(kCpRegister) : // Last index is always the 'cp' register.
+ from_code(index + 2); // zero_reg and 'at' are skipped.
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "v0",
+ "v1",
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "a4",
+ "a5",
+ "a6",
+ "a7",
+ "t0",
+ "t1",
+ "t2",
+ "s7",
+ };
+ return names[index];
+ }
+
+ static Register from_code(int code) {
+ Register r = { code };
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+#define REGISTER(N, C) \
+ const int kRegister_ ## N ## _Code = C; \
+ const Register N = { C }
+
+REGISTER(no_reg, -1);
+// Always zero.
+REGISTER(zero_reg, 0);
+// at: Reserved for synthetic instructions.
+REGISTER(at, 1);
+// v0, v1: Used when returning multiple values from subroutines.
+REGISTER(v0, 2);
+REGISTER(v1, 3);
+// a0 - a4: Used to pass non-FP parameters.
+REGISTER(a0, 4);
+REGISTER(a1, 5);
+REGISTER(a2, 6);
+REGISTER(a3, 7);
+// a4 - a7, t0 - t3: Can be used without reservation, act as temporary
+// registers and are allowed to be destroyed by subroutines.
+REGISTER(a4, 8);
+REGISTER(a5, 9);
+REGISTER(a6, 10);
+REGISTER(a7, 11);
+REGISTER(t0, 12);
+REGISTER(t1, 13);
+REGISTER(t2, 14);
+REGISTER(t3, 15);
+// s0 - s7: Subroutine register variables. Subroutines that write to these
+// registers must restore their values before exiting so that the caller can
+// expect the values to be preserved.
+REGISTER(s0, 16);
+REGISTER(s1, 17);
+REGISTER(s2, 18);
+REGISTER(s3, 19);
+REGISTER(s4, 20);
+REGISTER(s5, 21);
+REGISTER(s6, 22);
+REGISTER(s7, 23);
+REGISTER(t8, 24);
+REGISTER(t9, 25);
+// k0, k1: Reserved for system calls and interrupt handlers.
+REGISTER(k0, 26);
+REGISTER(k1, 27);
+// gp: Reserved.
+REGISTER(gp, 28);
+// sp: Stack pointer.
+REGISTER(sp, 29);
+// fp: Frame pointer.
+REGISTER(fp, 30);
+// ra: Return address pointer.
+REGISTER(ra, 31);
+
+#undef REGISTER
+
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Coprocessor register.
+struct FPURegister {
+ static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
+
+ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+ // number of Double regs (64-bit regs, or FPU-reg-pairs).
+
+ // A few double registers are reserved: one as a scratch register and one to
+ // hold 0.0.
+ // f28: 0.0
+ // f30: scratch register.
+ static const int kNumReservedRegisters = 2;
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
+ kNumReservedRegisters;
+
+ inline static int NumRegisters();
+ inline static int NumAllocatableRegisters();
+ inline static int ToAllocationIndex(FPURegister reg);
+ static const char* AllocationIndexToString(int index);
+
+ static FPURegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return from_code(index * 2);
+ }
+
+ static FPURegister from_code(int code) {
+ FPURegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
+ bool is(FPURegister creg) const { return code_ == creg.code_; }
+ FPURegister low() const {
+ // TODO(plind): Create ASSERT for FR=0 mode. This usage suspect for FR=1.
+ // Find low reg of a Double-reg pair, which is the reg itself.
+ ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
+ FPURegister reg;
+ reg.code_ = code_;
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+ FPURegister high() const {
+ // TODO(plind): Create ASSERT for FR=0 mode. This usage illegal in FR=1.
+ // Find high reg of a Double-reg pair, which is reg + 1.
+ ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
+ FPURegister reg;
+ reg.code_ = code_ + 1;
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
+// 32-bit registers, f0 through f31. When used as 'double' they are used
+// in pairs, starting with the even numbered register. So a double operation
+// on f0 really uses f0 and f1.
+// (Modern mips hardware also supports 32 64-bit registers, via setting
+// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
+// but it is not in common use. Someday we will want to support this in v8.)
+
+// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
+typedef FPURegister DoubleRegister;
+typedef FPURegister FloatRegister;
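+// For example, under this pairing scheme f0.low() is f0 itself, f0.high() is
+// f1, and DoubleRegister::FromAllocationIndex(1) yields f2.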
+
+const FPURegister no_freg = { -1 };
+
+const FPURegister f0 = { 0 }; // Return value in hard float mode.
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+// Defined using #define instead of "static const Register&" because Clang
+// complains otherwise when a compilation unit that includes this header
+// doesn't use the variables.
+#define kRootRegister s6
+#define cp s7
+#define kLithiumScratchReg s3
+#define kLithiumScratchReg2 s4
+#define kLithiumScratchDouble f30
+#define kDoubleRegZero f28
+
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+ bool is_valid() const { return code_ == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
+const FPUControlRegister FCSR = { kFCSRRegister };
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
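+// For illustration: assuming the usual 64-bit smi layout (kSmiTagSize == 1,
+// kSmiShiftSize == 31), kSmiShift is 32 and kSmiShiftMask is 0xFFFFFFFF,
+// i.e. a smi keeps its 32-bit payload in the upper half of the word.
+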
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand BASE_EMBEDDED {
+ public:
+ // Immediate.
+ INLINE(explicit Operand(int64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE64));
+ INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const char* s));
+ INLINE(explicit Operand(Object** opp));
+ INLINE(explicit Operand(Context** cpp));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // Register.
+ INLINE(explicit Operand(Register rm));
+
+ // Return true if this is a register operand.
+ INLINE(bool is_reg() const);
+
+ inline int64_t immediate() const {
+ ASSERT(!is_reg());
+ return imm64_;
+ }
+
+ Register rm() const { return rm_; }
+
+ private:
+ Register rm_;
+ int64_t imm64_; // Valid if rm_ == no_reg.
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
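+
+// Typical uses, as seen in the generated code below: Operand(zero_reg) wraps
+// a register, while Operand(Smi::FromInt(1)) or Operand(MAP_TYPE) wrap
+// immediates; the immediate form can also carry relocation information.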
+
+
+// On MIPS we have only one addressing mode with base_reg + offset.
+// Class MemOperand represents a memory operand in load and store instructions.
+class MemOperand : public Operand {
+ public:
+ // Immediate value attached to offset.
+ enum OffsetAddend {
+ offset_minus_one = -1,
+ offset_zero = 0
+ };
+
+ explicit MemOperand(Register rn, int64_t offset = 0);
+ explicit MemOperand(Register rn, int64_t unit, int64_t multiplier,
+ OffsetAddend offset_addend = offset_zero);
+ int32_t offset() const { return offset_; }
+
+ bool OffsetIsInt16Encodable() const {
+ return is_int16(offset_);
+ }
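+ // The 16-bit limit comes from the MIPS load/store encoding: offsets in
+ // [-32768, 32767] fit directly into the instruction, while larger offsets
+ // first have to be materialized into a register
+ // (cf. Assembler::LoadRegPlusOffsetToAt).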
+
+ private:
+ int32_t offset_;
+
+ friend class Assembler;
+};
+
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
+ virtual ~Assembler() { }
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
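+
+ // A minimal usage sketch (illustrative only):
+ //   Assembler assm(isolate, NULL, 256);  // Self-allocated, growable buffer.
+ //   assm.daddu(v0, a0, a1);              // Emit an instruction.
+ //   CodeDesc desc;
+ //   assm.GetCode(&desc);                 // Finalize and fill the descriptor.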
+
+ // Label operations & relative jumps (PPUM Appendix D).
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+ void bind(Label* L); // Binds an unbound label L to current code position.
+ // Determines whether the label is bound and near enough that a branch
+ // instruction can be used to reach it instead of a jump instruction.
+ bool is_near(Label* L);
+
+ // Returns the branch offset to the given label from the current code
+ // position. Links the label to the current position if it is still unbound.
+ // Manages the jump elimination optimization if the second parameter is true.
+ int32_t branch_offset(Label* L, bool jump_elimination_allowed);
+ int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
+ int32_t o = branch_offset(L, jump_elimination_allowed);
+ ASSERT((o & 3) == 0); // Assert the offset is aligned.
+ return o >> 2;
+ }
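+ // E.g. a byte offset of 16 returned by branch_offset() comes back from
+ // shifted_branch_offset() as 4, i.e. the distance in instruction slots.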
+ uint64_t jump_address(Label* L);
+
+ // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ static Address target_address_at(Address pc);
+ static void set_target_address_at(Address pc,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+ // On MIPS there is no Constant Pool so we skip that parameter.
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool)) {
+ return target_address_at(pc);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ set_target_address_at(pc, target, icache_flush_mode);
+ }
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
+
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ static void JumpLabelToJumpRegister(Address pc);
+
+ static void QuietNaN(HeapObject* nan);
+
+ // This sets the branch destination (which gets loaded at the call address).
+ // This is for calls and branches within generated code. The serializer
+ // has already deserialized the lui/ori instructions etc.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(
+ instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
+ code,
+ target);
+ }
+
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+ // Difference between address of current opcode and target address offset.
+ static const int kBranchPCOffset = 4;
+
+ // Here we are patching the address in the LUI/ORI instruction pair.
+ // These values are used in the serialization process and must be zero for
+ // the MIPS platform, as Code, Embedded Object or External-reference pointers
+ // are split across multiple consecutive instructions and don't exist
+ // separately in the code, so the serializer should not step forward in
+ // memory after a target is resolved and written.
+ static const int kSpecialTargetSize = 0;
+
+ // Number of consecutive instructions used to store a 32-bit/64-bit constant.
+ // Before the jump optimizations, this constant was used in
+ // RelocInfo::target_address_address() to tell the serializer the address of
+ // the instruction that follows the LUI/ORI instruction pair. Now, with the
+ // new jump optimization, where the jump-through-register instruction that
+ // usually follows the LUI/ORI pair is substituted with J/JAL, this constant
+ // equals 3 instructions (LUI+ORI+J/JAL/JR/JALR).
+ static const int kInstructionsFor32BitConstant = 3;
+ static const int kInstructionsFor64BitConstant = 5;
+
+ // Distance between the instruction referring to the address of the call
+ // target and the return address.
+ static const int kCallTargetAddressOffset = 6 * kInstrSize;
+
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 0;
+
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static const int kPcLoadDelta = 4;
+
+ static const int kPatchDebugBreakSlotReturnOffset = 6 * kInstrSize;
+
+ // Number of instructions used for the JS return sequence. The constant is
+ // used by the debugger to patch the JS return sequence.
+ static const int kJSReturnSequenceInstructions = 7;
+ static const int kDebugBreakSlotInstructions = 6;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstrSize;
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ // Code aging
+ CODE_AGE_MARKER_NOP = 6,
+ CODE_AGE_SEQUENCE_NOP
+ };
+
+ // Type == 0 is the default non-marking nop. For mips this is a
+ // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
+ // marking, to avoid conflict with ssnop and ehb instructions.
+ void nop(unsigned int type = 0) {
+ ASSERT(type < 32);
+ Register nop_rt_reg = (type == 0) ? zero_reg : at;
+ sll(zero_reg, nop_rt_reg, type, true);
+ }
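+ // For example, nop() emits sll(zero_reg, zero_reg, 0), while
+ // nop(DEBUG_BREAK_NOP) emits sll(zero_reg, at, 1), which IsNop(instr, 1)
+ // can later recognize as a debug-break marker.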
+
+
+ // --------Branch-and-jump-instructions----------
+ // We don't use likely variant of instructions.
+ void b(int16_t offset);
+ void b(Label* L) { b(branch_offset(L, false)>>2); }
+ void bal(int16_t offset);
+ void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+
+ void beq(Register rs, Register rt, int16_t offset);
+ void beq(Register rs, Register rt, Label* L) {
+ beq(rs, rt, branch_offset(L, false) >> 2);
+ }
+ void bgez(Register rs, int16_t offset);
+ void bgezal(Register rs, int16_t offset);
+ void bgtz(Register rs, int16_t offset);
+ void blez(Register rs, int16_t offset);
+ void bltz(Register rs, int16_t offset);
+ void bltzal(Register rs, int16_t offset);
+ void bne(Register rs, Register rt, int16_t offset);
+ void bne(Register rs, Register rt, Label* L) {
+ bne(rs, rt, branch_offset(L, false)>>2);
+ }
+
+ // Never use the int16_t b(l)cond version with a branch offset; use the
+ // Label* version instead.
+
+ // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
+ void j(int64_t target);
+ void jal(int64_t target);
+ void jalr(Register rs, Register rd = ra);
+ void jr(Register target);
+ void j_or_jr(int64_t target, Register rs);
+ void jal_or_jalr(int64_t target, Register rs);
+
+
+ // -------Data-processing-instructions---------
+
+ // Arithmetic.
+ void addu(Register rd, Register rs, Register rt);
+ void subu(Register rd, Register rs, Register rt);
+ void mult(Register rs, Register rt);
+ void multu(Register rs, Register rt);
+ void div(Register rs, Register rt);
+ void divu(Register rs, Register rt);
+ void mul(Register rd, Register rs, Register rt);
+ void daddu(Register rd, Register rs, Register rt);
+ void dsubu(Register rd, Register rs, Register rt);
+ void dmult(Register rs, Register rt);
+ void dmultu(Register rs, Register rt);
+ void ddiv(Register rs, Register rt);
+ void ddivu(Register rs, Register rt);
+
+ void addiu(Register rd, Register rs, int32_t j);
+ void daddiu(Register rd, Register rs, int32_t j);
+
+ // Logical.
+ void and_(Register rd, Register rs, Register rt);
+ void or_(Register rd, Register rs, Register rt);
+ void xor_(Register rd, Register rs, Register rt);
+ void nor(Register rd, Register rs, Register rt);
+
+ void andi(Register rd, Register rs, int32_t j);
+ void ori(Register rd, Register rs, int32_t j);
+ void xori(Register rd, Register rs, int32_t j);
+ void lui(Register rd, int32_t j);
+
+ // Shifts.
+ // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
+ // and may cause problems in normal code. coming_from_nop makes sure this
+ // doesn't happen.
+ void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
+ void sllv(Register rd, Register rt, Register rs);
+ void srl(Register rd, Register rt, uint16_t sa);
+ void srlv(Register rd, Register rt, Register rs);
+ void sra(Register rt, Register rd, uint16_t sa);
+ void srav(Register rt, Register rd, Register rs);
+ void rotr(Register rd, Register rt, uint16_t sa);
+ void rotrv(Register rd, Register rt, Register rs);
+ void dsll(Register rd, Register rt, uint16_t sa);
+ void dsllv(Register rd, Register rt, Register rs);
+ void dsrl(Register rd, Register rt, uint16_t sa);
+ void dsrlv(Register rd, Register rt, Register rs);
+ void drotr(Register rd, Register rt, uint16_t sa);
+ void drotrv(Register rd, Register rt, Register rs);
+ void dsra(Register rt, Register rd, uint16_t sa);
+ void dsrav(Register rd, Register rt, Register rs);
+ void dsll32(Register rt, Register rd, uint16_t sa);
+ void dsrl32(Register rt, Register rd, uint16_t sa);
+ void dsra32(Register rt, Register rd, uint16_t sa);
+
+
+ // ------------Memory-instructions-------------
+
+ void lb(Register rd, const MemOperand& rs);
+ void lbu(Register rd, const MemOperand& rs);
+ void lh(Register rd, const MemOperand& rs);
+ void lhu(Register rd, const MemOperand& rs);
+ void lw(Register rd, const MemOperand& rs);
+ void lwu(Register rd, const MemOperand& rs);
+ void lwl(Register rd, const MemOperand& rs);
+ void lwr(Register rd, const MemOperand& rs);
+ void sb(Register rd, const MemOperand& rs);
+ void sh(Register rd, const MemOperand& rs);
+ void sw(Register rd, const MemOperand& rs);
+ void swl(Register rd, const MemOperand& rs);
+ void swr(Register rd, const MemOperand& rs);
+ void ldl(Register rd, const MemOperand& rs);
+ void ldr(Register rd, const MemOperand& rs);
+ void sdl(Register rd, const MemOperand& rs);
+ void sdr(Register rd, const MemOperand& rs);
+ void ld(Register rd, const MemOperand& rs);
+ void sd(Register rd, const MemOperand& rs);
+
+
+ // ----------------Prefetch--------------------
+
+ void pref(int32_t hint, const MemOperand& rs);
+
+
+ // -------------Misc-instructions--------------
+
+ // Break / Trap instructions.
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(const char* msg, uint32_t code = kMaxStopCode);
+ void tge(Register rs, Register rt, uint16_t code);
+ void tgeu(Register rs, Register rt, uint16_t code);
+ void tlt(Register rs, Register rt, uint16_t code);
+ void tltu(Register rs, Register rt, uint16_t code);
+ void teq(Register rs, Register rt, uint16_t code);
+ void tne(Register rs, Register rt, uint16_t code);
+
+ // Move from HI/LO register.
+ void mfhi(Register rd);
+ void mflo(Register rd);
+
+ // Set on less than.
+ void slt(Register rd, Register rs, Register rt);
+ void sltu(Register rd, Register rs, Register rt);
+ void slti(Register rd, Register rs, int32_t j);
+ void sltiu(Register rd, Register rs, int32_t j);
+
+ // Conditional move.
+ void movz(Register rd, Register rs, Register rt);
+ void movn(Register rd, Register rs, Register rt);
+ void movt(Register rd, Register rs, uint16_t cc = 0);
+ void movf(Register rd, Register rs, uint16_t cc = 0);
+
+ // Bit twiddling.
+ void clz(Register rd, Register rs);
+ void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // --------Coprocessor-instructions----------------
+
+ // Load, store, and move.
+ void lwc1(FPURegister fd, const MemOperand& src);
+ void ldc1(FPURegister fd, const MemOperand& src);
+
+ void swc1(FPURegister fs, const MemOperand& dst);
+ void sdc1(FPURegister fs, const MemOperand& dst);
+
+ void mtc1(Register rt, FPURegister fs);
+ void mthc1(Register rt, FPURegister fs);
+ void dmtc1(Register rt, FPURegister fs);
+
+ void mfc1(Register rt, FPURegister fs);
+ void mfhc1(Register rt, FPURegister fs);
+ void dmfc1(Register rt, FPURegister fs);
+
+ void ctc1(Register rt, FPUControlRegister fs);
+ void cfc1(Register rt, FPUControlRegister fs);
+
+ // Arithmetic.
+ void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void abs_d(FPURegister fd, FPURegister fs);
+ void mov_d(FPURegister fd, FPURegister fs);
+ void neg_d(FPURegister fd, FPURegister fs);
+ void sqrt_d(FPURegister fd, FPURegister fs);
+
+ // Conversion.
+ void cvt_w_s(FPURegister fd, FPURegister fs);
+ void cvt_w_d(FPURegister fd, FPURegister fs);
+ void trunc_w_s(FPURegister fd, FPURegister fs);
+ void trunc_w_d(FPURegister fd, FPURegister fs);
+ void round_w_s(FPURegister fd, FPURegister fs);
+ void round_w_d(FPURegister fd, FPURegister fs);
+ void floor_w_s(FPURegister fd, FPURegister fs);
+ void floor_w_d(FPURegister fd, FPURegister fs);
+ void ceil_w_s(FPURegister fd, FPURegister fs);
+ void ceil_w_d(FPURegister fd, FPURegister fs);
+
+ void cvt_l_s(FPURegister fd, FPURegister fs);
+ void cvt_l_d(FPURegister fd, FPURegister fs);
+ void trunc_l_s(FPURegister fd, FPURegister fs);
+ void trunc_l_d(FPURegister fd, FPURegister fs);
+ void round_l_s(FPURegister fd, FPURegister fs);
+ void round_l_d(FPURegister fd, FPURegister fs);
+ void floor_l_s(FPURegister fd, FPURegister fs);
+ void floor_l_d(FPURegister fd, FPURegister fs);
+ void ceil_l_s(FPURegister fd, FPURegister fs);
+ void ceil_l_d(FPURegister fd, FPURegister fs);
+
+ void cvt_s_w(FPURegister fd, FPURegister fs);
+ void cvt_s_l(FPURegister fd, FPURegister fs);
+ void cvt_s_d(FPURegister fd, FPURegister fs);
+
+ void cvt_d_w(FPURegister fd, FPURegister fs);
+ void cvt_d_l(FPURegister fd, FPURegister fs);
+ void cvt_d_s(FPURegister fd, FPURegister fs);
+
+ // Conditions and branches.
+ void c(FPUCondition cond, SecondaryField fmt,
+ FPURegister ft, FPURegister fs, uint16_t cc = 0);
+
+ void bc1f(int16_t offset, uint16_t cc = 0);
+ void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
+ void bc1t(int16_t offset, uint16_t cc = 0);
+ void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
+ void fcmp(FPURegister src1, const double src2, FPUCondition cond);
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
+ }
+
+ // Class for scoping postponing the trampoline pool generation.
+ class BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() {
+ assem_->EndBlockTrampolinePool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() {
+ assem_->EndBlockGrowBuffer();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
+ // Debugging.
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
+ recorded_ast_id_ = ast_id;
+ }
+
+ TypeFeedbackId RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+ }
+
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --code-comments to enable.
+ void RecordComment(const char* msg);
+
+ static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
+
+ // Emits the address of the code stub's first instruction.
+ void emit_code_stub_address(Code* stub);
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Read/patch instructions.
+ static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ static void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsBeq(Instr instr);
+ static bool IsBne(Instr instr);
+
+ static bool IsJump(Instr instr);
+ static bool IsJ(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsOri(Instr instr);
+
+ static bool IsJal(Instr instr);
+ static bool IsJr(Instr instr);
+ static bool IsJalr(Instr instr);
+
+ static bool IsNop(Instr instr, unsigned int type);
+ static bool IsPop(Instr instr);
+ static bool IsPush(Instr instr);
+ static bool IsLwRegFpOffset(Instr instr);
+ static bool IsSwRegFpOffset(Instr instr);
+ static bool IsLwRegFpNegOffset(Instr instr);
+ static bool IsSwRegFpNegOffset(Instr instr);
+
+ static Register GetRtReg(Instr instr);
+ static Register GetRsReg(Instr instr);
+ static Register GetRdReg(Instr instr);
+
+ static uint32_t GetRt(Instr instr);
+ static uint32_t GetRtField(Instr instr);
+ static uint32_t GetRs(Instr instr);
+ static uint32_t GetRsField(Instr instr);
+ static uint32_t GetRd(Instr instr);
+ static uint32_t GetRdField(Instr instr);
+ static uint32_t GetSa(Instr instr);
+ static uint32_t GetSaField(Instr instr);
+ static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
+ static uint32_t GetImmediate16(Instr instr);
+ static uint32_t GetLabelConst(Instr instr);
+
+ static int32_t GetBranchOffset(Instr instr);
+ static bool IsLw(Instr instr);
+ static int16_t GetLwOffset(Instr instr);
+ static Instr SetLwOffset(Instr instr, int16_t offset);
+
+ static bool IsSw(Instr instr);
+ static Instr SetSwOffset(Instr instr, int16_t offset);
+ static bool IsAddImmediate(Instr instr);
+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+ static bool IsAndImmediate(Instr instr);
+ static bool IsEmittedConstant(Instr instr);
+
+ void CheckTrampolinePool();
+
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ protected:
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ TypeFeedbackId recorded_ast_id_;
+
+ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Decode branch instruction at pos and return branch target pos.
+ int64_t target_at(int64_t pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos.
+ void target_at_put(int64_t pos, int64_t target_pos);
+
+ // Say if we need to relocate with this mode.
+ bool MustUseReg(RelocInfo::Mode rmode);
+
+ // Record reloc info for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_++;
+ }
+
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const {
+ return internal_trampoline_exception_;
+ }
+
+ void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
+
+ bool is_trampoline_emitted() const {
+ return trampoline_emitted_;
+ }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ ASSERT(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ ASSERT(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const {
+ return block_buffer_growth_;
+ }
+
+ private:
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes.
+ static const int kBufferCheckInterval = 1*KB/2;
+
+ // Code generation.
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
+ // Relocation information generation.
+ // Each relocation is encoded as a variable size value.
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // The bound position, before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // Code emission.
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+ inline void emit(uint64_t x);
+ inline void CheckTrampolinePoolQuick();
+
+ // Instruction generation.
+ // We have three different kinds of encoding layouts on MIPS.
+ // However, due to the many different types of objects encoded in the same
+ // fields, we have quite a few aliases for each mode.
+ // Using the same structure to refer to Register and FPURegister would spare a
+ // few aliases, but mixing both does not look clean to me.
+ // Anyway we could surely implement this differently.
+
+ void GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ Register rd,
+ uint16_t sa = 0,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func);
+
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func = NULLSF);
+
+
+ void GenInstrImmediate(Opcode opcode,
+ Register rs,
+ Register rt,
+ int32_t j);
+ void GenInstrImmediate(Opcode opcode,
+ Register rs,
+ SecondaryField SF,
+ int32_t j);
+ void GenInstrImmediate(Opcode opcode,
+ Register r1,
+ FPURegister r2,
+ int32_t j);
+
+
+ void GenInstrJump(Opcode opcode,
+ uint32_t address);
+
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
+
+ // Labels.
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void next(Label* L);
+
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+ // Space for trampoline slots is equal to slot_count * kTrampolineSlotsSize.
+ // Space for trampoline slots precedes space for labels. Each label is of one
+ // instruction size, so the total amount for labels is equal to
+ // label_count * kInstrSize.
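+ // For example, with kTrampolineSlotsSize == 6 * kInstrSize (24 bytes of
+ // 4-byte instructions), a trampoline created with slot_count == 16 covers
+ // 16 * 24 == 384 bytes of slot space.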
+ class Trampoline {
+ public:
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ end_ = start + slot_count * kTrampolineSlotsSize;
+ }
+ int start() {
+ return start_;
+ }
+ int end() {
+ return end_;
+ }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ ASSERT(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ };
+
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+ // If the trampoline is emitted, the generated code has become large. As this
+ // is already a slow case which can possibly break our code generation for
+ // the extreme case, we use this information to trigger a different mode of
+ // branch instruction generation, where we use jump instructions rather
+ // than regular branch instructions.
+ bool trampoline_emitted_;
+ static const int kTrampolineSlotsSize = 6 * kInstrSize;
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kInvalidSlotPos = -1;
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
+ friend class RegExpMacroAssemblerMIPS;
+ friend class RelocInfo;
+ friend class CodePatcher;
+ friend class BlockTrampolinePoolScope;
+
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBuffer();
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_ASSEMBLER_MIPS64_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : called function (only guaranteed when
+ // -- extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[8 * (argc - 1)] : first argument
+ // -- sp[8 * argc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(a1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects s0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Daddu(s0, a0, num_extra_args + 1);
+ __ dsll(s1, s0, kPointerSizeLog2);
+ __ Dsubu(s1, s1, kPointerSize);
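+ // For example, with a0 == 2 and one extra argument, s0 becomes 4 and s1
+ // becomes 4 * kPointerSize - kPointerSize == 3 * kPointerSize.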
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the native context.
+
+ __ ld(result,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ld(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ ld(result,
+ MemOperand(result,
+ Context::SlotOffset(
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the native context.
+
+ __ ld(result,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ld(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the Array function from the native context.
+ __ ld(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, a1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ SmiTst(a2, a4);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
+ a4, Operand(zero_reg));
+ __ GetObjectType(a2, a3, a4);
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
+ a4, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, a1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array functions should be maps.
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ SmiTst(a2, a4);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
+ a4, Operand(zero_reg));
+ __ GetObjectType(a2, a3, a4);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
+ a4, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ // Tail call a stub.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+ Register function = a1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+ __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2));
+ }
+
+ // Load the first argument into a0 and get rid of the rest.
+ Label no_arguments;
+ __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+ // First args = sp[(argc - 1) * 8].
+ __ Dsubu(a0, a0, Operand(1));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(sp, a0, sp);
+ __ ld(a0, MemOperand(sp));
+ // sp now points to args[0]; drop args[0] + receiver.
+ __ Drop(2);
+
+ Register argument = a2;
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ a4, // Scratch.
+ a5, // Scratch.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, a4);
+ __ bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- a2 : argument converted to string
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -----------------------------------
+
+ Label gc_required;
+ __ Allocate(JSValue::kSize,
+ v0, // Result.
+ a3, // Scratch.
+ a4, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
+
+ // Initialising the String Object.
+ Register map = a3;
+ __ LoadGlobalFunctionInitialMap(function, map, a4);
+ if (FLAG_debug_code) {
+ __ lbu(a4, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
+ a4, Operand(JSValue::kSize >> kPointerSizeLog2));
+ __ lbu(a4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
+ a4, Operand(zero_reg));
+ }
+ __ sd(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ __ sd(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ __ JumpIfSmi(a0, &convert_argument);
+
+ // Is it a String?
+ __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ And(a4, a3, Operand(kIsNotStringMask));
+ __ Branch(&convert_argument, ne, a4, Operand(zero_reg));
+ __ mov(argument, a0);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
+ __ Branch(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into a2.
+ __ bind(&convert_argument);
+ __ push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
+ __ pop(function);
+ __ mov(argument, v0);
+ __ Branch(&argument_is_string);
+
+ // Load the empty string into a2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ Branch(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, a4);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ Ret();
+}
+
+
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack, and the function again as a
+ // parameter to the runtime call.
+ __ Push(a1, a1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore the function.
+ __ Pop(a1);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(a4, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(a4));
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool create_memento) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a2 : allocation site or undefined
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ Isolate* isolate = masm->isolate();
+
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(a2, a3);
+ __ push(a2);
+ }
+
+ // Preserve the two incoming parameters on the stack.
+ // Tag arguments count.
+ __ dsll32(a0, a0, 0);
+ __ MultiPushReversed(a0.bit() | a1.bit());
+
+ Label rt_call, allocated;
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ li(a2, Operand(debug_step_in_fp));
+ __ ld(a2, MemOperand(a2));
+ __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &rt_call);
+ __ GetObjectType(a2, a3, t0);
+ __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+ if (!is_api_function) {
+ Label allocate;
+ MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lwu(a4, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(a6, a4);
+ __ Branch(&allocate,
+ eq,
+ a6,
+ Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
+ // Decrease generous allocation count.
+ __ Dsubu(a4, a4, Operand(1 << Map::ConstructionCount::kShift));
+ __ Branch(USE_DELAY_SLOT,
+ &allocate, ne, a6, Operand(JSFunction::kFinishSlackTracking));
+ __ sw(a4, bit_field3); // In delay slot.
+
+ __ Push(a1, a2, a1); // a1 = Constructor.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ Pop(a1, a2);
+ // Slack tracking counter is kNoSlackTracking after runtime call.
+ ASSERT(JSFunction::kNoSlackTracking == 0);
+ __ mov(a6, zero_reg);
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Daddu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
+ __ Allocate(a3, t0, t1, t2, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (not including memento if create_memento)
+ // t0: JSObject (not tagged)
+ __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t1, t0);
+ __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
+ __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
+ __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
+ __ Daddu(t1, t1, Operand(3*kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Fill all the in-object properties with appropriate filler.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words, including memento if create_memento)
+ // t0: JSObject (not tagged)
+ // t1: First in-object property of JSObject (not tagged)
+ // a6: slack tracking counter (non-API function case)
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+
+ // Use t3 to hold undefined, which is used in several places below.
+ __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ Branch(&no_inobject_slack_tracking,
+ eq,
+ a6,
+ Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
+
+ // Allocate object with a slack.
+ __ lwu(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(a0, t1, at);
+ // a0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ dsll(at, a3, kPointerSizeLog2);
+ __ Daddu(t2, t0, Operand(at)); // End of object.
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
+ a0, Operand(t2));
+ }
+ __ InitializeFieldsWithFiller(t1, a0, t3);
+ // To allow for truncation.
+ __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ Dsubu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(a0, t0, Operand(a0)); // End of object.
+ __ InitializeFieldsWithFiller(t1, a0, t3);
+
+ // Fill in memento fields.
+ // t1: points to the allocated but uninitialized memento.
+ __ LoadRoot(t3, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ sd(t3, MemOperand(t1));
+ __ Daddu(t1, t1, kPointerSize);
+ // Load the AllocationSite.
+ __ ld(t3, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ sd(t3, MemOperand(t1));
+ __ Daddu(t1, t1, kPointerSize);
+ } else {
+ __ dsll(at, a3, kPointerSizeLog2);
+ __ Daddu(a0, t0, Operand(at)); // End of object.
+ __ InitializeFieldsWithFiller(t1, a0, t3);
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Daddu(t0, t0, Operand(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed. Continue with the
+ // allocated object if not; fall through to the runtime call if it is.
+ // a1: constructor function
+ // t0: JSObject
+ // t1: start of next object (not tagged)
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ // The instance sizes field contains both the pre-allocated property fields
+ // and the in-object properties.
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ Ext(t2, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Daddu(a3, a3, Operand(t2));
+ __ Ext(t2, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+ kBitsPerByte);
+ __ dsubu(a3, a3, t2);
+
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, kPropertyAllocationCountFailed,
+ a3, Operand(zero_reg));
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // a1: constructor
+ // a3: number of elements in properties array
+ // t0: JSObject
+ // t1: start of next object
+ __ Daddu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ Allocate(
+ a0,
+ t1,
+ t2,
+ a2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // a1: constructor
+ // a3: number of elements in properties array (untagged)
+ // t0: JSObject
+ // t1: start of next object
+ __ LoadRoot(t2, Heap::kFixedArrayMapRootIndex);
+ __ mov(a2, t1);
+ __ sd(t2, MemOperand(a2, JSObject::kMapOffset));
+ // Tag number of elements.
+ __ dsll32(a0, a3, 0);
+ __ sd(a0, MemOperand(a2, FixedArray::kLengthOffset));
+ __ Daddu(a2, a2, Operand(2 * kPointerSize));
+
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+ // Initialize the fields to undefined.
+ // a1: constructor
+ // a2: First element of FixedArray (not tagged)
+ // a3: number of elements in properties array
+ // t0: JSObject
+ // t1: FixedArray (not tagged)
+ __ dsll(a7, a3, kPointerSizeLog2);
+ __ daddu(t2, a2, a7); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (!is_api_function || create_memento) {
+ __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6));
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sd(t3, MemOperand(a2));
+ __ daddiu(a2, a2, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, less, a2, Operand(t2));
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject.
+ // a1: constructor function
+ // t0: JSObject
+ // t1: FixedArray (not tagged)
+ __ Daddu(t1, t1, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ sd(t1, FieldMemOperand(t0, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // t0: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated object's unused properties.
+ // t0: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(t0, t1);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ __ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ld(a2, MemOperand(sp, 2 * kPointerSize));
+ __ push(a2);
+ }
+
+ __ push(a1); // Argument for Runtime_NewObject.
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 1);
+ }
+ __ mov(t0, v0);
+
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do the create-count
+ // increment here.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
+ // Receiver for constructor call allocated.
+ // t0: JSObject
+ __ bind(&allocated);
+
+ if (create_memento) {
+ __ ld(a2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ Branch(&count_incremented, eq, a2, Operand(t1));
+ // a2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ ld(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Daddu(a3, a3, Operand(Smi::FromInt(1)));
+ __ sd(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
+ __ Push(t0, t0);
+
+ // Reload the number of arguments from the stack.
+ // sp[0]: receiver
+ // sp[1]: receiver
+ // sp[2]: constructor function
+ // sp[3]: number of arguments (smi-tagged)
+ __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+ __ ld(a3, MemOperand(sp, 3 * kPointerSize));
+
+ // Set up pointer to last argument.
+ __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Set up number of arguments for function call below.
+ __ SmiUntag(a0, a3);
+
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: receiver
+ // sp[2]: constructor function
+ // sp[3]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ SmiUntag(a3);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ dsll(a4, a3, kPointerSizeLog2);
+ __ Daddu(a4, a2, Operand(a4));
+ __ ld(a5, MemOperand(a4));
+ __ push(a5);
+ __ bind(&entry);
+ __ Daddu(a3, a3, Operand(-1));
+ __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ if (is_api_function) {
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context from the frame.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(v0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a1, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ld(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+
+ // Leave construct frame.
+ }
+
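+ // a1 holds the Smi-tagged argument count reloaded above; drop the caller
+ // arguments and the receiver from the stack before returning.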
+ __ SmiScale(a4, a1, kPointerSizeLog2);
+ __ Daddu(sp, sp, a4);
+ __ Daddu(sp, sp, kPointerSize);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from JSEntryStub::GenerateBody
+
+ // ----------- S t a t e -------------
+ // -- a0: code entry
+ // -- a1: function
+ // -- a2: receiver_pointer
+ // -- a3: argc
+ // -- s0: argv
+ // -----------------------------------
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ // Clear the context before we push it when entering the JS frame.
+ __ mov(cp, zero_reg);
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Set up the context from the function argument.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ Push(a1, a2);
+
+ // Copy arguments to the stack in a loop.
+ // a3: argc
+ // s0: argv, i.e. points to first arg
+ Label loop, entry;
+ // TODO(plind): At least on the simulator, argc in a3 is an int32_t with
+ // junk in the upper bits. We should fix the root cause rather than rely
+ // on the workaround below to clear the upper bits.
+ __ dsll32(a3, a3, 0); // int32_t -> int64_t.
+ __ dsrl32(a3, a3, 0);
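+ // Together, the dsll32/dsrl32 pair zero-extends the low 32 bits of a3.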
+ __ dsll(a4, a3, kPointerSizeLog2);
+ __ daddu(a6, s0, a4);
+ __ b(&entry);
+ __ nop(); // Branch delay slot nop.
+ // a6 points past last arg.
+ __ bind(&loop);
+ __ ld(a4, MemOperand(s0)); // Read next parameter.
+ __ daddiu(s0, s0, kPointerSize);
+ __ ld(a4, MemOperand(a4)); // Dereference handle.
+ __ push(a4); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, s0, Operand(a6));
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, a4);
+ __ mov(s2, a4);
+ __ mov(s3, a4);
+ __ mov(s4, a4);
+ __ mov(s5, a4);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code and pass argc as a0.
+ __ mov(a0, a3);
+ if (is_construct) {
+ // No type feedback cell is available
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Leave internal frame.
+ }
+ __ Jump(ra);
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push function as parameter to the runtime call.
+ __ Push(a1, a1);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+ // Restore receiver.
+ __ Pop(a1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Set a0 to point to the head of the PlatformCodeAge sequence.
+ __ Dsubu(a0, a0,
+ Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - isolate
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(2, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ MultiPop(saved_regs);
+ __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // Set a0 to point to the head of the PlatformCodeAge sequence.
+ __ Dsubu(a0, a0,
+ Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - isolate
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(2, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ __ MultiPop(saved_regs);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ Push(ra, fp, cp, a1);
+ __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Jump to point after the code-age stub.
+ __ Daddu(a0, a0, Operand(kNoCodeAgeSequenceLength));
+ __ Jump(a0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across the notification; this is important for
+ // compiled stubs that tail-call the runtime on deopts, passing their
+ // parameters in registers.
+ __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ Daddu(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ Jump(ra); // Jump to miss handler
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the function and deoptimization type to the runtime system.
+ __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it -> a6.
+ __ ld(a6, MemOperand(sp, 0 * kPointerSize));
+ __ SmiUntag(a6);
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ Branch(&with_tos_register,
+ ne, a6, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Ret(USE_DELAY_SLOT);
+ // Safe to fill the delay slot: Daddu will emit one instruction.
+ __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+
+ __ bind(&with_tos_register);
+ __ ld(v0, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(&unknown_state, ne, a6, Operand(FullCodeGenerator::TOS_REG));
+
+ __ Ret(USE_DELAY_SLOT);
+ // Safe to fill the delay slot: Daddu will emit one instruction.
+ __ Daddu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
+
+ __ bind(&unknown_state);
+ __ stop("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ push(a0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the code object is null, just return to the unoptimized code.
+ __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Uld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(a1);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ daddu(v0, v0, a1);
+ __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ // 1. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ { Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+ __ push(a6);
+ __ Daddu(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ // a0: actual number of arguments
+ Label slow, non_function;
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ ld(a1, MemOperand(at));
+ __ JumpIfSmi(a1, &non_function);
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ // a0: actual number of arguments
+ // a1: function
+ Label shift_arguments;
+ __ li(a4, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION.
+ { Label convert_to_object, use_global_proxy, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
+ __ And(a7, a3, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
+
+ // Do not transform the receiver for natives (compiler hints already in a3).
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+ __ And(a7, a3, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
+
+ // Compute the receiver in sloppy mode.
+ // Load the first argument into a2. It is located at
+ // sp + n_args * kPointerSize - kPointerSize.
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(a2, sp, at);
+ __ ld(a2, MemOperand(a2, -kPointerSize));
+ // a0: actual number of arguments
+ // a1: function
+ // a2: first argument
+ __ JumpIfSmi(a2, &convert_to_object, a6);
+
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_proxy, eq, a2, Operand(a3));
+ __ LoadRoot(a3, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_proxy, eq, a2, Operand(a3));
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ __ bind(&convert_to_object);
+ // Enter an internal frame in order to preserve argument count.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a2, v0);
+
+ __ pop(a0);
+ __ SmiUntag(a0);
+ // Leave internal frame.
+ }
+ // Restore the function to a1, and the flag to a4.
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ ld(a1, MemOperand(at));
+ __ Branch(USE_DELAY_SLOT, &patch_receiver);
+ __ li(a4, Operand(0, RelocInfo::NONE32));
+
+ __ bind(&use_global_proxy);
+ __ ld(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+
+ __ bind(&patch_receiver);
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(a3, sp, at);
+ __ sd(a2, MemOperand(a3, -kPointerSize));
+
+ __ Branch(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ bind(&slow);
+ __ li(a4, Operand(1, RelocInfo::NONE32)); // Indicate function proxy.
+ __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ __ bind(&non_function);
+ __ li(a4, Operand(2, RelocInfo::NONE32)); // Indicate non-function.
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // a0: actual number of arguments
+ // a1: function
+ // a4: call type (0: JS function, 1: function proxy, 2: non-function)
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(a2, sp, at);
+ __ sd(a1, MemOperand(a2, -kPointerSize));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // a0: actual number of arguments
+ // a1: function
+ // a4: call type (0: JS function, 1: function proxy, 2: non-function)
+ __ bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is sp.
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(a2, sp, at);
+
+ __ bind(&loop);
+ __ ld(at, MemOperand(a2, -kPointerSize));
+ __ sd(at, MemOperand(a2));
+ __ Dsubu(a2, a2, Operand(kPointerSize));
+ __ Branch(&loop, ne, a2, Operand(sp));
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Dsubu(a0, a0, Operand(1));
+ __ Pop();
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ // a0: actual number of arguments
+ // a1: function
+ // a4: call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label function, non_proxy;
+ __ Branch(&function, eq, a4, Operand(zero_reg));
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ mov(a2, zero_reg);
+ __ Branch(&non_proxy, ne, a4, Operand(1));
+
+ __ push(a1); // Re-add proxy object as additional argument.
+ __ Daddu(a0, a0, Operand(1));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&non_proxy);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register a3 without checking arguments.
+ // a0: actual number of arguments
+ // a1: function
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ // The argument count is stored as int32_t on 64-bit platforms.
+ // TODO(plind): Smi on 32-bit platforms.
+ __ lw(a2,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ // Check formal and actual parameter counts.
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(0);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
+ __ push(a0);
+ __ ld(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ push(a0);
+ // Returns (in v0) number of arguments to copy to stack as Smi.
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ dsubu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ SmiScale(a7, v0, kPointerSizeLog2);
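+ // a7 = number of bytes of stack space the arguments will occupy.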
+ __ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
+
+ // Out of stack space.
+ __ ld(a1, MemOperand(fp, kFunctionOffset));
+ __ Push(a1, v0);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ mov(a1, zero_reg);
+ __ Push(v0, a1); // Limit and initial index.
+
+ // Get the receiver.
+ __ ld(a0, MemOperand(fp, kRecvOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ ld(a1, MemOperand(fp, kFunctionOffset));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Change context eagerly to get the right global object if necessary.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in a1.
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_proxy;
+ __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
+ __ And(a7, a7, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
+
+ // Do not transform the receiver for natives (compiler hints already in a2).
+ __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+ __ And(a7, a7, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
+
+ // Compute the receiver in sloppy mode.
+ __ JumpIfSmi(a0, &call_to_object);
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_proxy, eq, a0, Operand(a1));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_proxy, eq, a0, Operand(a2));
+
+ // Check if the receiver is already a JavaScript object.
+ // a0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Convert the receiver to a regular object.
+ // a0: receiver
+ __ bind(&call_to_object);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
+ __ Branch(&push_receiver);
+
+ __ bind(&use_global_proxy);
+ __ ld(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ld(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset));
+
+ // Push the receiver.
+ // a0: receiver
+ __ bind(&push_receiver);
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ ld(a0, MemOperand(fp, kIndexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ ld(a1, MemOperand(fp, kArgsOffset));
+ __ Push(a1, a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+ // Use inline caching to access the arguments.
+ __ ld(a0, MemOperand(fp, kIndexOffset));
+ __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
+ __ sd(a0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ld(a1, MemOperand(fp, kLimitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+
+ // Call the function.
+ Label call_proxy;
+ ParameterCount actual(a0);
+ __ SmiUntag(a0);
+ __ ld(a1, MemOperand(fp, kFunctionOffset));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+
+ frame_scope.GenerateLeaveFrame();
+ __ Ret(USE_DELAY_SLOT);
+ __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+
+ // Call the function proxy.
+ __ bind(&call_proxy);
+ __ push(a1); // Add function proxy as last argument.
+ __ Daddu(a0, a0, Operand(1));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ // Tear down the internal frame and remove function, receiver and args.
+ }
+
+ __ Ret(USE_DELAY_SLOT);
+ __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+}
+
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual number of arguments
+ // -- a1 : function (passed through to callee)
+ // -- a2 : expected number of arguments
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
+ // Make a5 the space we have left. The stack might already be overflowed
+ // here which will cause a5 to become negative.
+ __ dsubu(a5, sp, a5);
+ // Check if the arguments will overflow the stack.
+ __ dsll(at, a2, kPointerSizeLog2);
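+ // at = expected number of arguments scaled to bytes.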
+ // Signed comparison.
+ __ Branch(stack_overflow, le, a5, Operand(at));
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // Smi-tag the argument count; on MIPS64 a Smi keeps its 32-bit value in
+ // the upper word, so tagging is a 32-bit left shift.
+ __ dsll32(a0, a0, 0);
+ __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
+ __ Daddu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- v0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then tear down the parameters.
+ __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ __ mov(sp, fp);
+ __ MultiPop(fp.bit() | ra.bit());
+ __ SmiScale(a4, a1, kPointerSizeLog2);
+ __ Daddu(sp, sp, a4);
+ // Adjust for the receiver.
+ __ Daddu(sp, sp, Operand(kPointerSize));
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // State setup as expected by MacroAssembler::InvokePrologue.
+ // ----------- S t a t e -------------
+ // -- a0: actual arguments count
+ // -- a1: function (passed through to callee)
+ // -- a2: expected arguments count
+ // -----------------------------------
+
+ Label stack_overflow;
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Branch(&dont_adapt_arguments, eq,
+ a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ // We use Uless as the number of arguments is always greater than or equal to 0.
+ __ Branch(&too_few, Uless, a0, Operand(a2));
+
+ { // Enough parameters: actual >= expected.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into a0 and copy end address into a2.
+ __ SmiScale(a0, a0, kPointerSizeLog2);
+ __ Daddu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Daddu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address.
+ __ dsll(a2, a2, kPointerSizeLog2);
+ __ dsubu(a2, a0, a2);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: copy end address
+ // a3: code entry to call
+
+ Label copy;
+ __ bind(&copy);
+ __ ld(a4, MemOperand(a0));
+ __ push(a4);
+ __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+ __ daddiu(a0, a0, -kPointerSize); // In delay slot.
+
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into a0 and copy end address is fp.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ SmiScale(a0, a0, kPointerSizeLog2);
+ __ Daddu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Daddu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address. Also adjust for return address.
+ __ Daddu(a7, fp, kPointerSize);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ // a7: copy end address
+ Label copy;
+ __ bind(&copy);
+ __ ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver.
+ __ Dsubu(sp, sp, kPointerSize);
+ __ Dsubu(a0, a0, kPointerSize);
+ __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
+ __ sd(a4, MemOperand(sp)); // In the delay slot.
+
+ // Fill the remaining expected arguments with undefined.
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+ __ dsll(a6, a2, kPointerSizeLog2);
+ __ Dsubu(a2, fp, Operand(a6));
+ // Adjust for frame.
+ __ Dsubu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
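+ // a2 is now the lowest stack address the fill loop writes undefined to.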
+
+ Label fill;
+ __ bind(&fill);
+ __ Dsubu(sp, sp, kPointerSize);
+ __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
+ __ sd(a4, MemOperand(sp));
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+
+ __ Call(a3);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+
+ // -------------------------------------------
+ // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ Jump(a3);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ break_(0xCC);
+ }
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a2 };
+ descriptor->Initialize(
+ ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a1 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers);
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a0 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers);
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a0 };
+ descriptor->Initialize(
+ ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a3, a2, a1 };
+ Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->Initialize(
+ ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
+ representations);
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a3, a2, a1, a0 };
+ descriptor->Initialize(
+ ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a2, a3 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a2, a1, a0 };
+ descriptor->Initialize(
+ ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a2, a1, a0 };
+ descriptor->Initialize(
+ ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a0, a1 };
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(entry));
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a0 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(CompareNilIC_Miss));
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // a0 -- number of arguments
+ // a1 -- function
+ // a2 -- allocation site with elements kind
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ Register registers[] = { a1, a2 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ deopt_handler,
+ NULL,
+ constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ // The stack parameter count covers the constructor pointer and a single argument.
+ Register registers[] = { a1, a2, a0 };
+ Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ a0,
+ deopt_handler,
+ representations,
+ constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE,
+ PASS_ARGUMENTS);
+ }
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // a0 -- number of arguments
+ // a1 -- constructor function
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kInternalArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ Register registers[] = { a1 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ deopt_handler,
+ NULL,
+ constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ // The stack parameter count covers the constructor pointer and a single argument.
+ Register registers[] = { a1, a0 };
+ Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ a0,
+ deopt_handler,
+ representations,
+ constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE,
+ PASS_ARGUMENTS);
+ }
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a0 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(ToBooleanIC_Miss));
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a1, a2, a0 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure));
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a0, a3, a1, a2 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss));
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a1, a0 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(BinaryOpIC_Miss));
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a2, a1, a0 };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ Register registers[] = { a1, a0 };
+ descriptor->Initialize(
+ ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ Register registers[] = { a1, // JSFunction
+ cp, // context
+ a0, // actual number of arguments
+ a2, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ Register registers[] = { cp, // context
+ a2, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ Register registers[] = { cp, // context
+ a2, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ Register registers[] = { cp, // context
+ a0, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ Register registers[] = { a0, // callee
+ a4, // call_data
+ a2, // holder
+ a1, // api_function_address
+ cp, // context
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
+ int param_count = descriptor->register_param_count();
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count() == 0 ||
+ a0.is(descriptor->GetParameterRegister(param_count - 1)));
+ // Push arguments, adjust sp.
+ __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
+ for (int i = 0; i < param_count; ++i) {
+ // Store argument to stack.
+ __ sd(descriptor->GetParameterRegister(i),
+ MemOperand(sp, (param_count-1-i) * kPointerSize));
+ }
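+ // The parameters are stored in reverse order: the last register parameter
+ // (a0, checked in the ASSERT above) ends up at MemOperand(sp, 0).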
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count());
+ }
+
+ __ Ret();
+}
+
+
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
+// scratch register. Destroys the source register. No GC occurs during this
+// stub so you don't have to set up the frame.
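+ // For example, the Smi 5 produces exponent word 0x40140000 and mantissa
+ // word 0x00000000, i.e. the IEEE 754 encoding of 5.0 (0x4014000000000000).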
+class ConvertToDoubleStub : public PlatformCodeStub {
+ public:
+ ConvertToDoubleStub(Isolate* isolate,
+ Register result_reg_1,
+ Register result_reg_2,
+ Register source_reg,
+ Register scratch_reg)
+ : PlatformCodeStub(isolate),
+ result1_(result_reg_1),
+ result2_(result_reg_2),
+ source_(source_reg),
+ zeros_(scratch_reg) { }
+
+ private:
+ Register result1_;
+ Register result2_;
+ Register source_;
+ Register zeros_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() const { return ConvertToDouble; }
+ int MinorKey() const {
+ // Encode the parameters in a unique 16 bit value.
+ return result1_.code() +
+ (result2_.code() << 4) +
+ (source_.code() << 8) +
+ (zeros_.code() << 12);
+ }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+#else
+ Register exponent = result2_;
+ Register mantissa = result1_;
+#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ SmiUntag(source_);
+ // Move sign bit from source to destination. This works because the sign bit
+ // in the exponent word of the double has the same position and polarity as
+ // the 2's complement sign bit in a Smi.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ And(exponent, source_, Operand(HeapNumber::kSignMask));
+ // Subtract from 0 if source was negative.
+ __ subu(at, zero_reg, source_);
+ __ Movn(source_, at, exponent);
+
+ // We have -1, 0 or 1, which we treat specially. Register source_ contains
+ // the absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ Branch(&not_special, gt, source_, Operand(1));
+
+ // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+ const uint32_t exponent_word_for_1 =
+ HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+ // Safe to use 'at' as dest reg here.
+ __ Or(at, exponent, Operand(exponent_word_for_1));
+ __ Movn(exponent, at, source_); // Write exp when source not 0.
+ // 1, 0 and -1 all have 0 for the second word.
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(mantissa, zero_reg);
+
+ __ bind(&not_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ __ Clz(zeros_, source_);
+ // Compute exponent and or it into the exponent register.
+ // We use mantissa as a scratch register here.
+ __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
+ __ subu(mantissa, mantissa, zeros_);
+ __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
+ __ Or(exponent, exponent, mantissa);
+
+ // Shift up the source chopping the top bit off.
+ __ Addu(zeros_, zeros_, Operand(1));
+ // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+ __ sllv(source_, source_, zeros_);
+ // Compute lower part of fraction (last 12 bits).
+ __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
+ // And the top (top 20 bits).
+ __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
+
+ __ Ret(USE_DELAY_SLOT);
+ __ or_(exponent, exponent, source_);
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
+
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
+
+ Register scratch =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg);
+ Register scratch2 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch3 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kLithiumScratchDouble;
+
+ __ Push(scratch, scratch2, scratch3);
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+
+ // Clear cumulative exception flags and save the FCSR.
+ __ cfc1(scratch2, FCSR);
+ __ ctc1(zero_reg, FCSR);
+
+ // Try a conversion to a signed integer.
+ __ Trunc_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
+ __ mfc1(scratch3, double_scratch);
+
+ // Retrieve and restore the FCSR.
+ __ cfc1(scratch, FCSR);
+ __ ctc1(scratch2, FCSR);
+
+ // Check for overflow and NaNs.
+ __ And(
+ scratch, scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
+ | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
+ }
+
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
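+ // With a little-endian target (the -EL build) the low mantissa word is at
+ // double_offset and the sign/exponent/high-mantissa word follows it.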
+
+ __ lw(input_low, MemOperand(input_reg, double_offset));
+ __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+
+ Label normal_exponent, restore_sign;
+ // Extract the biased exponent in result.
+ __ Ext(result_reg,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
+ __ Movz(result_reg, zero_reg, scratch);
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Subu(result_reg,
+ result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ mov(result_reg, zero_reg);
+ __ Branch(&done);
+
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+ // to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ mov(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32bit range.
+ __ sllv(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ __ li(at, 32);
+ __ subu(scratch, at, scratch);
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ __ Subu(scratch, zero_reg, scratch);
+ __ sllv(input_low, input_low, scratch);
+ __ Branch(&shift_done);
+
+ __ bind(&pos_shift);
+ __ srlv(input_low, input_low, scratch);
+
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ mov(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Subu(result_reg, zero_reg, input_high);
+ __ Movz(result_reg, input_high, scratch);
+
+ __ bind(&done);
+
+ __ Pop(scratch, scratch2, scratch3);
+ __ Ret();
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
+ WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
+ stub1.GetCode();
+ stub2.GetCode();
+}
+
+
+ // See the class comment; this does NOT work for int32s that are in Smi range.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+ Label max_negative_int;
+ // the_int_ has the answer which is a signed int32 but not a Smi.
+ // We test for the special value that has a different exponent.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ // Test sign, and save for later conditionals.
+ __ And(sign_, the_int_, Operand(0x80000000u));
+ __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+
+ // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(non_smi_exponent));
+ // Set the sign bit in scratch_ if the value was negative.
+ __ or_(scratch_, scratch_, sign_);
+ // Subtract from 0 if the value was negative.
+ __ subu(at, zero_reg, the_int_);
+ __ Movn(the_int_, at, sign_);
+ // We should be masking the implicit first digit of the mantissa away here,
+ // but it just ends up combining harmlessly with the last digit of the
+ // exponent that happens to be 1. The sign bit is 0, so we shift by 10 to
+ // get the most significant 1 to hit the last bit of the 12-bit sign and
+ // exponent field.
+ ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ srl(at, the_int_, shift_distance);
+ __ or_(scratch_, scratch_, at);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kExponentOffset));
+ __ sll(scratch_, the_int_, 32 - shift_distance);
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kMantissaOffset));
+
+ __ bind(&max_negative_int);
+ // The max negative int32 is stored as a positive number in the mantissa of
+ // a double because it uses a sign bit instead of using two's complement.
+ // The actual mantissa bits stored are all 0 because the implicit most
+ // significant 1 bit is not stored.
+ non_smi_exponent += 1 << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ mov(scratch_, zero_reg);
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ Register exp_mask_reg = t1;
+
+ __ Branch(&not_identical, ne, a0, Operand(a1));
+
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t0, t0);
+ __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t0, t0);
+ __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
+ __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(a6));
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
+ }
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ if (cc == less) {
+ __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == greater) {
+ __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
+ }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
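+ // For illustration: +Infinity has only the exponent bits set (high word
+ // 0x7FF00000, mantissa all zero), while a NaN such as the canonical quiet
+ // NaN 0x7FF8000000000000 additionally has at least one mantissa bit set.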
+ // Read top bits of double representation (second word of value).
+ __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(a7, a6, Operand(exp_mask_reg));
+ // If not all exponent bits are set (ne condition), this is not a NaN, so
+ // the objects are equal.
+ __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, a7, Operand(a6));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ }
+ // No fall through here.
+
+ __ bind(¬_identical);
+}
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* slow,
+ bool strict) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ Label lhs_is_smi;
+ __ JumpIfSmi(lhs, &lhs_is_smi);
+ // Rhs is a Smi.
+ // Check whether the non-smi is a heap number.
+ __ GetObjectType(lhs, t0, t0);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal (lhs is already not zero).
+ __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
+ __ mov(v0, lhs);
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
+ }
+ // Rhs is a smi, lhs is a number.
+ // Convert smi rhs to double.
+ __ SmiUntag(at, rhs);
+ __ mtc1(at, f14);
+ __ cvt_d_w(f14, f14);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ // We now have both loaded as doubles.
+ __ jmp(both_loaded_as_doubles);
+
+ __ bind(&lhs_is_smi);
+ // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ GetObjectType(rhs, t0, t0);
+ if (strict) {
+ // If rhs was not a number and lhs was a Smi then strict equality cannot
+ // succeed. Return non-equal.
+ __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
+ __ li(v0, Operand(1));
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // Convert smi lhs to double.
+ __ SmiUntag(at, lhs);
+ __ mtc1(at, f12);
+ __ cvt_d_w(f12, f12);
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ // Fall through to both_loaded_as_doubles.
+}
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ // If either operand is a JS object or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into a2 and compare it with
+ // FIRST_SPEC_OBJECT_TYPE.
+ __ GetObjectType(lhs, a2, a2);
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Return non-zero.
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(1));
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ Or(a2, a2, Operand(a3));
+ __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ __ GetObjectType(lhs, a3, a2);
+ __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ // If first was a heap number & second wasn't, go to slow case.
+ __ Branch(slow, ne, a3, Operand(a2));
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ // a2 is object type of lhs.
+ Label object_test;
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ And(at, a2, Operand(kIsNotStringMask));
+ __ Branch(&object_test, ne, at, Operand(zero_reg));
+ __ And(at, a2, Operand(kIsNotInternalizedMask));
+ __ Branch(possible_strings, ne, at, Operand(zero_reg));
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ And(at, a3, Operand(kIsNotInternalizedMask));
+ __ Branch(possible_strings, ne, at, Operand(zero_reg));
+
+ // Both are internalized strings. We already checked they weren't the same
+ // pointer so they are not equal.
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(1)); // Non-zero indicates not equal.
+
+ __ bind(&object_test);
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ GetObjectType(rhs, a2, a3);
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
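+ // For illustration: if both bit fields have Map::kIsUndetectable set, the
+ // and_/And sequence below leaves exactly that bit in a0 and the final xori
+ // clears it, so v0 == 0 (equal); otherwise a0 ends up 0 and v0 becomes
+ // 1 << Map::kIsUndetectable (not equal).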
+ __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ and_(a0, a2, a3);
+ __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ Ret(USE_DELAY_SLOT);
+ __ xori(v0, a0, 1 << Map::kIsUndetectable);
+}
+
+
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+ // On entry a1 and a0 are the values to be compared.
+ // On exit v0 is 0, positive or negative to indicate the result of
+// the comparison.
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = a1;
+ Register rhs = a0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, ¬_two_smis);
+ __ SmiUntag(a1);
+ __ SmiUntag(a0);
+
+ __ Ret(USE_DELAY_SLOT);
+ __ dsubu(v0, a1, a0);
+ __ bind(¬_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ And(a6, lhs, Operand(rhs));
+ __ JumpIfNotSmi(a6, ¬_smis, a4);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to rhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison and the numbers have been loaded into f12 and f14 as doubles.
+ EmitSmiNonsmiComparison(masm, lhs, rhs,
+ &both_loaded_as_doubles, &slow, strict());
+
+ __ bind(&both_loaded_as_doubles);
+ // f12 and f14 hold the double representations of the left hand side and
+ // the right hand side.
+
+ Label nan;
+ __ li(a4, Operand(LESS));
+ __ li(a5, Operand(GREATER));
+ __ li(a6, Operand(EQUAL));
+
+ // Check if either rhs or lhs is NaN.
+ __ BranchF(NULL, &nan, eq, f12, f14);
+
+ // Check if LESS condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(OLT, D, f12, f14);
+ __ Movt(v0, a4);
+ // Use the previous check to conditionally store the opposite result
+ // (GREATER) in v0. If rhs is equal to lhs, this will be corrected in the
+ // next check.
+ __ Movf(v0, a5);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ Movt(v0, a6);
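+ // For illustration: with f12 == 1.0 and f14 == 2.0 the OLT compare sets the
+ // FPU condition flag, so Movt writes LESS (a4) into v0 and Movf is skipped;
+ // the EQ compare then clears the flag and the final Movt leaves v0 == LESS.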
+
+ __ Ret();
+
+ __ bind(&nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ if (cc == lt || cc == le) {
+ __ li(v0, Operand(GREATER));
+ } else {
+ __ li(v0, Operand(LESS));
+ }
+
+
+ __ bind(¬_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+ if (strict()) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
+ // In this case a2 will contain the type of lhs_.
+ EmitCheckForTwoHeapNumbers(masm,
+ lhs,
+ rhs,
+ &both_loaded_as_doubles,
+ &check_for_internalized_strings,
+ &flat_string_check);
+
+ __ bind(&check_for_internalized_strings);
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two
+ // detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that a2 is the type of lhs_ on entry.
+ EmitCheckForInternalizedStringsOrObjects(
+ masm, lhs, rhs, &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
+
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+ a3);
+ if (cc == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+ lhs,
+ rhs,
+ a2,
+ a3,
+ a4);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ lhs,
+ rhs,
+ a2,
+ a3,
+ a4,
+ a5);
+ }
+ // Never falls through to here.
+
+ __ bind(&slow);
+ // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
+ // a0 (rhs) second.
+ __ Push(lhs, rhs);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result.
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc == gt || cc == ge); // Remaining cases.
+ ncr = LESS;
+ }
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushSafepointRegistersAndDoubles();
+ } else {
+ __ PushSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopSafepointRegistersAndDoubles();
+ } else {
+ __ PopSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ MultiPush(kJSCallerSaved | ra.bit());
+ if (save_doubles_ == kSaveFPRegs) {
+ __ MultiPushFPU(kCallerSavedFPU);
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = a1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ MultiPopFPU(kCallerSavedFPU);
+ }
+
+ __ MultiPop(kJSCallerSaved | ra.bit());
+ __ Ret();
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ const Register base = a1;
+ const Register exponent = a2;
+ const Register heapnumbermap = a5;
+ const Register heapnumber = v0;
+ const DoubleRegister double_base = f2;
+ const DoubleRegister double_exponent = f4;
+ const DoubleRegister double_result = f0;
+ const DoubleRegister double_scratch = f6;
+ const FPURegister single_scratch = f8;
+ const Register scratch = t1;
+ const Register scratch2 = a7;
+
+ Label call_runtime, done, int_exponent;
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
+ __ ld(base, MemOperand(sp, 1 * kPointerSize));
+ __ ld(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+
+ __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+ __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+
+ __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent);
+
+ __ bind(&base_is_smi);
+ __ mtc1(scratch, single_scratch);
+ __ cvt_d_w(double_base, single_scratch);
+ __ bind(&unpack_exponent);
+
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+ __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+ __ ldc1(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ // Base is already in double_base.
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+ __ ldc1(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label int_exponent_convert;
+ // Detect integer exponents stored as double.
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ scratch,
+ double_exponent,
+ at,
+ double_scratch,
+ scratch2,
+ kCheckForInexactConversion);
+ // scratch2 == 0 means there was no conversion error.
+ __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half;
+
+ // Test for 0.5.
+ __ Move(double_scratch, 0.5);
+ __ BranchF(USE_DELAY_SLOT,
+ ¬_plus_half,
+ NULL,
+ ne,
+ double_exponent,
+ double_scratch);
+ // double_scratch can be overwritten in the delay slot.
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ Move(double_scratch, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+ __ neg_d(double_result, double_scratch);
+
+ // Add +0 to convert -0 to +0.
+ __ add_d(double_scratch, double_base, kDoubleRegZero);
+ __ sqrt_d(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(¬_plus_half);
+ __ Move(double_scratch, -0.5);
+ __ BranchF(USE_DELAY_SLOT,
+ &call_runtime,
+ NULL,
+ ne,
+ double_exponent,
+ double_scratch);
+ // double_scratch can be overwritten in the delay slot.
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ Move(double_scratch, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+ __ Move(double_result, kDoubleRegZero);
+
+ // Add +0 to convert -0 to +0.
+ __ add_d(double_scratch, double_base, kDoubleRegZero);
+ __ Move(double_result, 1);
+ __ sqrt_d(double_scratch, double_scratch);
+ __ div_d(double_result, double_result, double_scratch);
+ __ jmp(&done);
+ }
+
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch2);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
+ }
+ __ pop(ra);
+ __ MovFromFloatResult(double_result);
+ __ jmp(&done);
+
+ __ bind(&int_exponent_convert);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ // Get two copies of exponent in the registers scratch and exponent.
+ if (exponent_type_ == INTEGER) {
+ __ mov(scratch, exponent);
+ } else {
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mov(exponent, scratch);
+ }
+
+ __ mov_d(double_scratch, double_base); // Back up base.
+ __ Move(double_result, 1.0);
+
+ // Get absolute value of exponent.
+ Label positive_exponent;
+ __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
+ __ Dsubu(scratch, zero_reg, scratch);
+ __ bind(&positive_exponent);
+
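+ // The loop below is binary exponentiation (square-and-multiply). For
+ // illustration, |exponent| == 5 (binary 101) takes three iterations:
+ // multiply by base for bit 0, square base and skip bit 1, square again and
+ // multiply for bit 2, leaving base^5 in double_result.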
+ Label while_true, no_carry, loop_end;
+ __ bind(&while_true);
+
+ __ And(scratch2, scratch, 1);
+
+ __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
+ __ mul_d(double_result, double_result, double_scratch);
+ __ bind(&no_carry);
+
+ __ dsra(scratch, scratch, 1);
+
+ __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
+ __ mul_d(double_scratch, double_scratch, double_scratch);
+
+ __ Branch(&while_true);
+
+ __ bind(&loop_end);
+
+ __ Branch(&done, ge, exponent, Operand(zero_reg));
+ __ Move(double_scratch, 1.0);
+ __ div_d(double_result, double_scratch, double_result);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
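+ // A concrete case (illustrative): base == 2, exponent == -1074 computes
+ // 2^1074 == +Infinity above, so the reciprocal is +0 even though the true
+ // result is the smallest subnormal (~4.9e-324); the zero check therefore
+ // falls through to the slow path below, which computes it correctly.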
+ __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+
+ // double_exponent may not contain the exponent value if the input was a
+ // smi. We set it with exponent value before bailing out.
+ __ mtc1(exponent, single_scratch);
+ __ cvt_d_w(double_exponent, single_scratch);
+
+ // Returning or bailing out.
+ Counters* counters = isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(
+ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ ASSERT(heapnumber.is(v0));
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ DropAndRet(2);
+ } else {
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
+ }
+ __ pop(ra);
+ __ MovFromFloatResult(double_result);
+
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret();
+ }
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return true;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ SaveFPRegsMode mode = kSaveFPRegs;
+ CEntryStub save_doubles(isolate, 1, mode);
+ StoreBufferOverflowStub stub(isolate, mode);
+ // These stubs might already be in the snapshot, detect that and don't
+ // regenerate, which would lead to code stub initialization state being messed
+ // up.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+ save_doubles_code = *save_doubles.GetCode();
+ }
+ Code* store_buffer_overflow_code;
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
+ store_buffer_overflow_code = *stub.GetCode();
+ }
+ isolate->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // s0: number of arguments including receiver
+ // s1: size of arguments excluding receiver
+ // s2: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+ // The reason for this is that these arguments would need to be saved anyway
+ // so it's faster to set them up directly.
+ // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
+
+ // Compute the argv pointer in a callee-saved register.
+ __ Daddu(s1, sp, s1);
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_);
+
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ // Prepare arguments for C routine.
+ // a0 = argc
+ __ mov(a0, s0);
+ // a1 = argv (set in the delay slot after find_ra below).
+
+ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+ // also need to reserve the 4 argument slots on the stack.
+
+ __ AssertStackIsAligned();
+
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ // This branch-and-link sequence is needed to find the current PC on mips,
+ // saved to the ra register.
+ // Use masm-> here instead of the double-underscore macro since extra
+ // coverage code can interfere with the proper calculation of ra.
+ Label find_ra;
+ masm->bal(&find_ra); // bal exposes branch delay slot.
+ masm->mov(a1, s1);
+ masm->bind(&find_ra);
+
+ // Adjust the value in ra to point to the correct return location, 2nd
+ // instruction past the real call into C code (the jalr(t9)), and push it.
+ // This is the return address of the exit frame.
+ const int kNumInstructionsToJump = 5;
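+ // For reference, the five instructions counted here correspond to the
+ // Daddu, sd, mov, jalr and the daddiu in its delay slot below (each
+ // expanding to a single instruction here); the ASSERT_EQ after the call
+ // sequence verifies the count, so the stored ra points just past the
+ // delay slot of the jalr.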
+ masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
+ masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ // Stack space reservation moved to the branch delay slot below.
+ // Stack is still aligned.
+
+ // Call the C routine.
+ masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ masm->jalr(t9);
+ // Set up sp in the delay slot.
+ masm->daddiu(sp, sp, -kCArgsSlotsSize);
+ // Make sure the stored 'ra' points to this position.
+ ASSERT_EQ(kNumInstructionsToJump,
+ masm->InstructionsGeneratedSince(&find_ra));
+ }
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ Branch(&okay, ne, v0, Operand(a4));
+ __ stop("The hole escaped");
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(a4, Heap::kExceptionRootIndex);
+ __ Branch(&exception_returned, eq, a4, Operand(v0));
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ li(a2, Operand(pending_exception_address));
+ __ ld(a2, MemOperand(a2));
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ // Cannot use check here as it attempts to generate call into runtime.
+ __ Branch(&okay, eq, a4, Operand(a2));
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
+
+ // Exit C frame and return.
+ // v0:v1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // s0: still holds argc (callee-saved).
+ __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ // Retrieve the pending exception.
+ __ li(a2, Operand(pending_exception_address));
+ __ ld(v0, MemOperand(a2));
+
+ // Clear the pending exception.
+ __ li(a3, Operand(isolate()->factory()->the_hole_value()));
+ __ sd(a3, MemOperand(a2));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ Label throw_termination_exception;
+ __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex);
+ __ Branch(&throw_termination_exception, eq, v0, Operand(a4));
+
+ // Handle normal exception.
+ __ Throw(v0);
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(v0);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, handler_entry, exit;
+ Isolate* isolate = masm->isolate();
+
+ // TODO(plind): unify the ABI description here.
+ // Registers:
+ // a0: entry address
+ // a1: function
+ // a2: receiver
+ // a3: argc
+ // a4: argv (on mips64)
+
+ // Stack:
+ // 0 arg slots on mips64 (4 args slots on mips)
+ // args -- in a4 on mips64, on stack on mips
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
+ // Load argv in s0 register.
+ if (kMipsAbi == kN64) {
+ __ mov(s0, a4); // 5th parameter (argv) in mips64 a4 register.
+ } else { // Abi O32.
+ // 5th parameter on stack for O32 abi.
+ int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
+ offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
+ __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
+ }
+
+ __ InitializeRootRegister();
+
+ // We build an EntryFrame.
+ __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ li(a6, Operand(Smi::FromInt(marker)));
+ __ li(a5, Operand(Smi::FromInt(marker)));
+ ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
+ __ li(a4, Operand(c_entry_fp));
+ __ ld(a4, MemOperand(a4));
+ __ Push(a7, a6, a5, a4);
+ // Set up frame pointer for the frame to be pushed.
+ __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xff...f) |
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ __ li(a5, Operand(ExternalReference(js_entry_sp)));
+ __ ld(a6, MemOperand(a5));
+ __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
+ __ sd(fp, MemOperand(a5));
+ __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
+ __ push(a4);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
+ __ LoadRoot(v0, Heap::kExceptionRootIndex);
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ bind(&invoke);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the jmp(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
+ __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ sd(a5, MemOperand(a4));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate);
+ __ li(a4, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+ __ li(a4, Operand(entry));
+ }
+ __ ld(t9, MemOperand(a4)); // Deref address.
+ // Call JSEntryTrampoline.
+ __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(t9);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+ __ bind(&exit); // v0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(a5);
+ __ Branch(&non_outermost_js_2,
+ ne,
+ a5,
+ Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ li(a5, Operand(ExternalReference(js_entry_sp)));
+ __ sd(zero_reg, MemOperand(a5));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(a5);
+ __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate)));
+ __ sd(a5, MemOperand(a4));
+
+ // Reset the stack to the callee saved registers.
+ __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+
+// Uses registers a0 to a4.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: a0 or at sp + 1 * kPointerSize.
+// * function: a1 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register a4.
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ // ReturnTrueFalse is only implemented for inlined call sites.
+ ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+ // Fixed register usage throughout the stub:
+ const Register object = a0; // Object (lhs).
+ Register map = a3; // Map of the object.
+ const Register function = a1; // Function (rhs).
+ const Register prototype = a4; // Prototype of the function.
+ const Register inline_site = t1;
+ const Register scratch = a2;
+
+ const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
+
+ Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+ if (!HasArgsInRegisters()) {
+ __ ld(object, MemOperand(sp, 1 * kPointerSize));
+ __ ld(function, MemOperand(sp, 0));
+ }
+
+ // Check that the left hand is a JS object and load map.
+ __ JumpIfSmi(object, ¬_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
+
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&miss, ne, function, Operand(at));
+ __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&miss, ne, map, Operand(at));
+ __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ ASSERT(HasArgsInRegisters());
+ // Patch the (relocated) inlined map check.
+
+ // The offset was stored in a4 safepoint slot.
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ __ LoadFromSafepointRegisterSlot(scratch, a4);
+ __ Dsubu(inline_site, ra, scratch);
+ // Get the map location in scratch and patch it.
+ __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
+ __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
+ }
+
+ // Register mapping: a3 is object map and a4 is function prototype.
+ // Get prototype of object into a2.
+ __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+ // We don't need map any more. Use it as a scratch register.
+ Register scratch2 = map;
+ map = no_reg;
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ Branch(&is_instance, eq, scratch, Operand(prototype));
+ __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
+ __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+ __ Branch(&loop);
+
+ __ bind(&is_instance);
+ ASSERT(Smi::FromInt(0) == 0);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(v0, zero_reg);
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return true.
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, v0);
+
+ if (!ReturnTrueFalseObject()) {
+ ASSERT_EQ(Smi::FromInt(0), 0);
+ __ mov(v0, zero_reg);
+ }
+ }
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&is_not_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return false.
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, v0);
+
+ if (!ReturnTrueFalseObject()) {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
+ }
+
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(¬_js_object);
+ // Before null, smi and string value checks, check that the rhs is a function
+ // as for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ GetObjectType(function, scratch2, scratch);
+ __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Null is not instance of anything.
+ __ Branch(&object_not_null,
+ ne,
+ object,
+ Operand(isolate()->factory()->null_value()));
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch, &slow);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ if (!ReturnTrueFalseObject()) {
+ if (HasArgsInRegisters()) {
+ __ Push(a0, a1);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ __ mov(a0, v0);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+ }
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+
+ ASSERT(kind() == Code::LOAD_IC ||
+ kind() == Code::KEYED_LOAD_IC);
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ Branch(&miss, ne, name,
+ Operand(isolate()->factory()->prototype_string()));
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, a4, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+Register InstanceofStub::left() { return a0; }
+
+
+Register InstanceofStub::right() { return a1; }
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
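+ // For illustration: with argc in a0 and the key in a1 (both smis), the code
+ // below loads from fp + (argc - key) * kPointerSize + kDisplacement, i.e.
+ // fp + kCallerSPOffset + (argc - 1 - key) * kPointerSize, so key == argc - 1
+ // reads the slot at fp + kCallerSPOffset.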
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(a1, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check index (a1) against formal parameters count limit passed in
+ // through register a0. Use unsigned comparison to get negative
+ // check for free.
+ __ Branch(&slow, hs, a1, Operand(a0));
+
+ // Read the argument from the stack and return it.
+ __ dsubu(a3, a0, a1);
+ __ SmiScale(a7, a3, kPointerSizeLog2);
+ __ Daddu(a3, fp, Operand(a7));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, MemOperand(a3, kDisplacement));
+
+ // Arguments adaptor case: Check index (a1) against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+ // Read the argument from the adaptor frame and return it.
+ __ dsubu(a3, a0, a1);
+ __ SmiScale(a7, a3, kPointerSizeLog2);
+ __ Daddu(a3, a2, Operand(a7));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, MemOperand(a3, kDisplacement));
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(a1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[8] : receiver displacement
+ // sp[16] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime,
+ ne,
+ a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sd(a2, MemOperand(sp, 0 * kPointerSize));
+ __ SmiScale(a7, a2, kPointerSizeLog2);
+ __ Daddu(a3, a3, Operand(a7));
+ __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
+ __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // a6 : allocated object (tagged)
+ // t1 : mapped parameter count (tagged)
+
+ __ ld(a1, MemOperand(sp, 0 * kPointerSize));
+ // a1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame,
+ eq,
+ a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(a2, a1);
+ __ Branch(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(t2, a2, kPointerSizeLog2);
+ __ Daddu(a3, a3, Operand(t2));
+ __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // a1 = parameter count (tagged)
+ // a2 = argument count (tagged)
+ // Compute the mapped parameter count = min(a1, a2) in a1.
+ Label skip_min;
+ __ Branch(&skip_min, lt, a1, Operand(a2));
+ __ mov(a1, a2);
+ __ bind(&skip_min);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
+ __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
+ __ SmiScale(t1, a1, kPointerSizeLog2);
+ __ daddiu(t1, t1, kParameterMapHeaderSize);
+ __ bind(¶m_map_size);
+
+ // 2. Backing store.
+ __ SmiScale(t2, a2, kPointerSizeLog2);
+ __ Daddu(t1, t1, Operand(t2));
+ __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into a4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ ld(a4, MemOperand(a4, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ ld(a4, MemOperand(a4, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (smi-tagged)
+ // a4 = address of arguments map (tagged)
+ __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ ld(a3, MemOperand(sp, 2 * kPointerSize));
+ __ AssertNotSmi(a3);
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ sd(a2, FieldMemOperand(v0, kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, a4 will point there, otherwise
+ // it will point to the backing store.
+ __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // a4 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
+ // Move backing store address to a3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a3, a4);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
+ __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ SmiScale(t2, a1, kPointerSizeLog2);
+ __ Daddu(a6, a4, Operand(t2));
+ __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
+ __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
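+ // For illustration: with parameter_count == 3 and mapped_parameter_count
+ // == 2, the loop below fills context indices MIN_CONTEXT_SLOTS + 2 and
+ // MIN_CONTEXT_SLOTS + 1, matching the range described above.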
+ Label parameters_loop, parameters_test;
+ __ mov(a6, a1);
+ __ ld(t1, MemOperand(sp, 0 * kPointerSize));
+ __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Dsubu(t1, t1, Operand(a1));
+ __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(a3, a4, Operand(t2));
+ __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
+
+ // a6 = loop variable (tagged)
+ // a1 = mapping index (tagged)
+ // a3 = address of backing store (tagged)
+ // a4 = address of parameter map (tagged)
+ // a5 = temporary scratch (a.o., for address calculation)
+ // a7 = the hole value
+ __ jmp(¶meters_test);
+
+ __ bind(¶meters_loop);
+
+ __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
+ __ SmiScale(a5, a6, kPointerSizeLog2);
+ __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Daddu(t2, a4, a5);
+ __ sd(t1, MemOperand(t2));
+ __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Daddu(t2, a3, a5);
+ __ sd(a7, MemOperand(t2));
+ __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+ __ bind(¶meters_test);
+ __ Branch(¶meters_loop, ne, a6, Operand(Smi::FromInt(0)));
+
+ __ bind(&skip_parameter_map);
+ // a2 = argument count (tagged)
+ // a3 = address of backing store (tagged)
+ // a5 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
+ __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
+ __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(t1, a1);
+ __ ld(a4, MemOperand(sp, 1 * kPointerSize));
+ __ SmiScale(t2, t1, kPointerSizeLog2);
+ __ Dsubu(a4, a4, Operand(t2));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Dsubu(a4, a4, Operand(kPointerSize));
+ __ ld(a6, MemOperand(a4, 0));
+ __ SmiScale(t2, t1, kPointerSizeLog2);
+ __ Daddu(a5, a3, Operand(t2));
+ __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, t1, Operand(a2));
+
+ // Return and remove the on-stack parameters.
+ __ DropAndRet(3);
+
+ // Do the runtime call to allocate the arguments object.
+ // a2 = argument count (tagged)
+ __ bind(&runtime);
+ __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[8] : receiver displacement
+ // sp[16] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Get the length from the frame.
+ __ ld(a1, MemOperand(sp, 0));
+ __ Branch(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sd(a1, MemOperand(sp, 0));
+ __ SmiScale(at, a1, kPointerSizeLog2);
+
+ __ Daddu(a3, a2, Operand(at));
+
+ __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+ __ SmiUntag(a1);
+
+ __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ Allocate(a1, v0, a2, a3, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current native context.
+ __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
+ __ ld(a4, MemOperand(a4, Context::SlotOffset(
+ Context::STRICT_ARGUMENTS_MAP_INDEX)));
+
+ __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ ld(a1, MemOperand(sp, 0 * kPointerSize));
+ __ AssertSmi(a1);
+ __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+
+ Label done;
+ __ Branch(&done, eq, a1, Operand(zero_reg));
+
+ // Get the parameters pointer from the stack.
+ __ ld(a2, MemOperand(sp, 1 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+ __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ // Untag the length for the loop.
+ __ SmiUntag(a1);
+
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Set up a4 to point to the first array slot.
+ __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Daddu(a2, a2, Operand(-kPointerSize));
+ __ ld(a3, MemOperand(a2));
+ // Post-increment a4 with kPointerSize on each iteration.
+ __ sd(a3, MemOperand(a4));
+ __ Daddu(a4, a4, Operand(kPointerSize));
+ __ Dsubu(a1, a1, Operand(1));
+ __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ DropAndRet(3);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if native RegExp is not selected at compile
+ // time, or if the regexp entry in generated code has been turned off (by a
+ // runtime switch or at compilation).
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[8]: previous index
+ // sp[16]: subject string
+ // sp[24]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime;
+ // Allocation of registers for this function. These are in callee save
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+ // therefore the contents of these registers are safe to use after the call.
+ // MIPS - using s0..s2, since we are not using CEntry Stub.
+ Register subject = s0;
+ Register regexp_data = s1;
+ Register last_match_info_elements = s2;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(
+ isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ li(a0, Operand(address_of_regexp_stack_memory_size));
+ __ ld(a0, MemOperand(a0, 0));
+ __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+ // Check that the first argument is a JSRegExp object.
+ __ ld(a0, MemOperand(sp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ SmiTst(regexp_data, a4);
+ __ Check(nz,
+ kUnexpectedTypeForRegExpDataFixedArrayExpected,
+ a4,
+ Operand(zero_reg));
+ __ GetObjectType(regexp_data, a0, a0);
+ __ Check(eq,
+ kUnexpectedTypeForRegExpDataFixedArrayExpected,
+ a0,
+ Operand(FIXED_ARRAY_TYPE));
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ ld(a2,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Or number_of_captures <= offsets vector size / 2 - 1
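+ // E.g. with a 2-entry vector only the whole match fits (zero captures):
+ // (0 + 1) * 2 == 2.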
+ // Multiplying by 2 comes for free since a2 is smi-tagged.
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
+ __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
+
+ // Reset offset for possibly sliced string.
+ __ mov(t0, zero_reg);
+ __ ld(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ mov(a3, subject); // Make a copy of the original subject string.
+ __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ // subject: subject string
+ // a3: subject string
+ // a0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label check_underlying; // (4)
+ Label seq_string; // (5)
+ Label not_seq_nor_cons; // (6)
+ Label external_string; // (7)
+ Label not_long_external; // (8)
+
+ // (1) Sequential string? If yes, go to (5).
+ __ And(a1,
+ a0,
+ Operand(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
+
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ // Go to (6).
+ __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
+
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
+ __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ LoadRoot(a1, Heap::kempty_stringRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(a1));
+ __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
+ __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(at, a0, Operand(kStringRepresentationMask));
+ // The underlying external string is never a short external string.
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
+
+ // (5) Sequential string. Load regexp code according to encoding.
+ __ bind(&seq_string);
+ // subject: sequential subject string (or look-alike, external string)
+ // a3: original subject string
+ // Load previous index and check range before a3 is overwritten. We have to
+ // use a3 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(a1, &runtime);
+ __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
+ __ Branch(&runtime, ls, a3, Operand(a1));
+ __ SmiUntag(a1);
+
+ STATIC_ASSERT(kStringEncodingMask == 4);
+ STATIC_ASSERT(kOneByteStringTag == 4);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
+ __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+ __ dsra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
+ __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+ __ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
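+ // a0 still holds the raw encoding bit (4 == kOneByteStringTag for one-byte,
+ // 0 for two-byte), so Movz swaps in the two-byte code only when a0 is zero.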
+
+ // (E) Carry on. String handling is done.
+ // t9: irregexp code
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object otherwise it contains
+ // a smi (code flushing support).
+ __ JumpIfSmi(t9, &runtime);
+
+ // a1: previous index
+ // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // t9: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
+ 1, a0, a2);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ const int kRegExpExecuteArguments = 9;
+ const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers, meaning we
+ // treat the return address as argument 5. Thus every argument after that
+ // needs to be shifted back by 1. Since DirectCEntryStub will handle
+ // allocating space for the c argument slots, we don't need to calculate
+ // that into the argument positions on the stack. This is how the stack will
+ // look (sp meaning the value of sp at this moment):
+ // Abi n64:
+ // [sp + 1] - Argument 9
+ // [sp + 0] - saved ra
+ // Abi O32:
+ // [sp + 5] - Argument 9
+ // [sp + 4] - Argument 8
+ // [sp + 3] - Argument 7
+ // [sp + 2] - Argument 6
+ // [sp + 1] - Argument 5
+ // [sp + 0] - saved ra
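+ // (The n64 ABI passes the first eight arguments in a0..a7, so only argument
+ // 9 spills to the stack; O32 passes four in registers, so arguments 5..9
+ // spill.)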
+
+ if (kMipsAbi == kN64) {
+ // Argument 9: Pass current isolate address.
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(a0, MemOperand(sp, 1 * kPointerSize));
+
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ li(a7, Operand(1));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ li(a0, Operand(address_of_regexp_stack_memory_address));
+ __ ld(a0, MemOperand(a0, 0));
+ __ li(a2, Operand(address_of_regexp_stack_memory_size));
+ __ ld(a2, MemOperand(a2, 0));
+ __ daddu(a6, a0, a2);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(a5, zero_reg);
+
+ // Argument 5: static offsets vector buffer.
+ __ li(a4, Operand(
+ ExternalReference::address_of_static_offsets_vector(isolate())));
+ } else { // O32.
+ ASSERT(kMipsAbi == kO32);
+
+ // Argument 9: Pass current isolate address.
+ // CFunctionArgumentOperand handles MIPS stack argument slots.
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(a0, MemOperand(sp, 5 * kPointerSize));
+
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ li(a0, Operand(1));
+ __ sd(a0, MemOperand(sp, 4 * kPointerSize));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ li(a0, Operand(address_of_regexp_stack_memory_address));
+ __ ld(a0, MemOperand(a0, 0));
+ __ li(a2, Operand(address_of_regexp_stack_memory_size));
+ __ ld(a2, MemOperand(a2, 0));
+ __ daddu(a0, a0, a2);
+ __ sd(a0, MemOperand(sp, 3 * kPointerSize));
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(a0, zero_reg);
+ __ sd(a0, MemOperand(sp, 2 * kPointerSize));
+
+ // Argument 5: static offsets vector buffer.
+ __ li(a0, Operand(
+ ExternalReference::address_of_static_offsets_vector(isolate())));
+ __ sd(a0, MemOperand(sp, 1 * kPointerSize));
+ }
+
+ // For arguments 4 and 3 get string length, calculate start of string data
+ // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+ __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
+ // Load the length from the original subject string from the previous stack
+ // frame. Therefore we have to use fp, which points exactly to two pointer
+ // sizes below the previous sp. (Because creating a new stack frame pushes
+ // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+ __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+ // If slice offset is not 0, load the length from the original sliced string.
+ // Argument 4, a3: End of string data
+ // Argument 3, a2: Start of string data
+ // Prepare start and end index of the input.
+ __ dsllv(t1, t0, a3);
+ __ daddu(t0, t2, t1);
+ __ dsllv(t1, a1, a3);
+ __ daddu(a2, t0, t1);
+
+ __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
+
+ __ SmiUntag(t2);
+ __ dsllv(t1, t2, a3);
+ __ daddu(a3, t0, t1);
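+ // a2 now holds the start of the input: data start + (slice offset +
+ // previous index) scaled by character size; a3 holds the end: data start +
+ // (slice offset + length) scaled the same way.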
+ // Argument 2 (a1): Previous index.
+ // Already there
+
+ // Argument 1 (a0): Subject string.
+ __ mov(a0, subject);
+
+ // Locate the code entry and call it.
+ __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm, t9);
+
+ __ LeaveExitFrame(false, no_reg, true);
+
+ // v0: result
+ // subject: subject string (callee saved)
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+ // Check the result.
+ Label success;
+ __ Branch(&success, eq, v0, Operand(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
+ Label failure;
+ __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+ // If not exception it can only be retry. Handle that in the runtime system.
+ __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code but the
+ // exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ __ li(a1, Operand(isolate()->factory()->the_hole_value()));
+ __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ __ ld(v0, MemOperand(a2, 0));
+ __ Branch(&runtime, eq, v0, Operand(a1));
+
+ __ sd(a1, MemOperand(a2, 0)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+ Label termination_exception;
+ __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+ __ Throw(v0);
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(v0);
+
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ li(v0, Operand(isolate()->factory()->null_value()));
+ __ DropAndRet(4);
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+
+ __ lw(a1, UntagSmiFieldMemOperand(
+ regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ Daddu(a1, a1, Operand(1));
+ __ dsll(a1, a1, 1); // Multiply by 2.
+
+ __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
+ // Check that the JSArray is in fast case.
+ __ ld(last_match_info_elements,
+ FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(at));
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ ld(a0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+
+ __ SmiUntag(at, a0);
+ __ Branch(&runtime, gt, a2, Operand(at));
+
+ // a1: number of capture registers
+ // subject: subject string
+ // Store the capture count.
+ __ SmiTag(a2, a1); // To smi.
+ __ sd(a2, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ sd(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ __ mov(a2, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ subject,
+ a7,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ mov(subject, a2);
+ __ sd(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ subject,
+ a7,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate());
+ __ li(a2, Operand(address_of_static_offsets_vector));
+
+ // a1: number of capture registers
+ // a2: offsets vector
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ Daddu(a0,
+ last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ bind(&next_capture);
+ __ Dsubu(a1, a1, Operand(1));
+ __ Branch(&done, lt, a1, Operand(zero_reg));
+ // Read the value from the static offsets vector buffer.
+ __ lw(a3, MemOperand(a2, 0));
+ __ daddiu(a2, a2, kIntSize);
+ // Store the smi value in the last match info.
+ __ SmiTag(a3);
+ __ sd(a3, MemOperand(a0, 0));
+ __ Branch(&next_capture, USE_DELAY_SLOT);
+ __ daddiu(a0, a0, kPointerSize); // In branch delay slot.
+
+ __ bind(&done);
+
+ // Return last match info.
+ __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
+ __ DropAndRet(4);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Go to (8).
+ __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ __ bind(&external_string);
+ __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ And(at, a0, Operand(kIsIndirectStringMask));
+ __ Assert(eq,
+ kExternalStringExpectedButNotFound,
+ at,
+ Operand(zero_reg));
+ }
+ __ ld(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Dsubu(subject,
+ subject,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ jmp(&seq_string); // Go to (5).
+
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into t0 and replace subject string with parent.
+ __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ SmiUntag(t0);
+ __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a feedback vector slot. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // a0 : number of arguments to the construct function
+ // a1 : the function to call
+ // a2 : Feedback vector
+ // a3 : slot in feedback vector (Smi)
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
+
+ // Load the cache state into a4.
+ __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a4, a2, Operand(a4));
+ __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
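+ // a3 is a smi-tagged slot index; on MIPS64 the 32-bit payload sits in the
+ // upper word, so shifting right by (32 - kPointerSizeLog2) untags it and
+ // scales it to a byte offset in one step.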
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Branch(&done, eq, a4, Operand(a1));
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in a3.
+ __ ld(a5, FieldMemOperand(a4, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, a5, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+ __ Branch(&megamorphic, ne, a1, Operand(a4));
+ __ jmp(&done);
+ }
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&initialize, eq, a4, Operand(at));
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a4, a2, Operand(a4));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
+ __ jmp(&done);
+
+ // An uninitialized cache is patched with the function.
+ __ bind(&initialize);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+ __ Branch(&not_array_function, ne, a1, Operand(a4));
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(a0);
+ __ MultiPush(kSavedRegs);
+
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ __ CallStub(&create_stub);
+
+ __ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
+ }
+ __ Branch(&done);
+
+ __ bind(&not_array_function);
+ }
+
+ __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a4, a2, Operand(a4));
+ __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sd(a1, MemOperand(a4, 0));
+
+ __ Push(a4, a2, a1);
+ __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(a4, a2, a1);
+
+ __ bind(&done);
+}
+
+
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ int32_t strict_mode_function_mask =
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
+ // Do not transform the receiver for native (Compilerhints already in a3).
+ int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
+
+ __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
+ __ And(at, a4, Operand(strict_mode_function_mask));
+ __ Branch(cont, ne, at, Operand(zero_reg));
+ __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
+ __ And(at, a4, Operand(native_mask));
+ __ Branch(cont, ne, at, Operand(zero_reg));
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
+ // Check for function proxy.
+ __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ push(a1); // put proxy as additional argument
+ __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
+ __ mov(a2, zero_reg);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(non_function);
+ __ sd(a1, MemOperand(sp, argc * kPointerSize));
+ __ li(a0, Operand(argc)); // Set up the number of arguments.
+ __ mov(a2, zero_reg);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(a1);
+ }
+ __ Branch(USE_DELAY_SLOT, cont);
+ __ sd(v0, MemOperand(sp, argc * kPointerSize));
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // a1 : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ GetObjectType(a1, a4, a4);
+ __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
+ }
+
+ // Fast-case: Invoke the function now.
+ // a1: pushed function
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ ld(a3, MemOperand(sp, argc * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // a0 : number of arguments
+ // a1 : the function to call
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
+ Label slow, non_function_call;
+ // Check that the function is not a smi.
+ __ JumpIfSmi(a1, &non_function_call);
+ // Check that the function is a JSFunction.
+ __ GetObjectType(a1, a4, a4);
+ __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+
+ __ dsrl(at, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a5, a2, at);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into a2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by a3 + 1.
+ __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(a2, a5);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jmp_reg = a4;
+ __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+
+ // a0: number of arguments
+ // a1: called object
+ // a4: object type
+ Label do_call;
+ __ bind(&slow);
+ __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing a0).
+ __ li(a2, Operand(0, RelocInfo::NONE32));
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+// StringCharCodeAtGenerator.
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+ Label sliced_string;
+
+ ASSERT(!a4.is(index_));
+ ASSERT(!a4.is(result_));
+ ASSERT(!a4.is(object_));
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ And(a4, result_, Operand(kIsNotStringMask));
+ __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
+ __ Branch(index_out_of_range_, ls, a4, Operand(index_));
+
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
+
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ ld(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // a1 - function
+ // a3 - slot id
+ Label miss;
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ Branch(&miss, ne, a1, Operand(at));
+
+ __ li(a0, Operand(arg_count()));
+ __ dsrl(at, a3, 32 - kPointerSizeLog2);
+ __ Daddu(at, a2, Operand(at));
+ __ ld(a2, FieldMemOperand(at, FixedArray::kHeaderSize));
+ // Verify that a2 contains an AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, at);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // a1 - function
+ // a3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+ // The checks. First, does a1 match the recorded monomorphic target?
+ __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a4, a2, Operand(a4));
+ __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
+ __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4));
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ // Compute the receiver in sloppy mode.
+ __ ld(a3, MemOperand(sp, argc * kPointerSize));
+
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ Branch(&slow_start, eq, a4, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&miss, eq, a4, Operand(at));
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a4, a2, Operand(a4));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
+ __ Branch(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+ // the slow case
+ __ bind(&slow_start);
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ GetObjectType(a1, a4, a4);
+ __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ ld(a4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(a4, a1, a2, a3);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to a1 and exit the internal frame.
+ __ mov(a1, v0);
+ }
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ result_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ // Consumed by runtime conversion function:
+ __ Push(object_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+
+ __ Move(index_, v0);
+ __ pop(object_);
+ // Reload the instance type.
+ __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ Branch(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+ ASSERT(!a4.is(result_));
+ ASSERT(!a4.is(code_));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ __ And(a4,
+ code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ __ Branch(&slow_case_, ne, a4, Operand(zero_reg));
+
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged ASCII char code.
+ STATIC_ASSERT(kSmiTag == 0);
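+ // Index into the cache: SmiScale converts the smi-tagged char code into a
+ // byte offset.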
+ __ SmiScale(a4, code_, kPointerSizeLog2);
+ __ Daddu(result_, result_, a4);
+ __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case_, eq, result_, Operand(a4));
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ Branch(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+enum CopyCharactersFlags {
+ COPY_ASCII = 1,
+ DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
+ __ And(scratch, dest, Operand(kPointerAlignmentMask));
+ __ Check(eq,
+ kDestinationOfCopyNotAligned,
+ scratch,
+ Operand(zero_reg));
+ }
+
+ // Assumes word reads and writes are little endian.
+ // Nothing to do for zero characters.
+ Label done;
+
+ if (encoding == String::TWO_BYTE_ENCODING) {
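+ // Two-byte strings use two bytes per character, so double the count.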
+ __ Daddu(count, count, count);
+ }
+
+ Register limit = count; // Read until dest equals this.
+ __ Daddu(limit, dest, Operand(count));
+
+ Label loop_entry, loop;
+ // Copy bytes from src to dest until dest hits limit.
+ __ Branch(&loop_entry);
+ __ bind(&loop);
+ __ lbu(scratch, MemOperand(src));
+ __ daddiu(src, src, 1);
+ __ sb(scratch, MemOperand(dest));
+ __ daddiu(dest, dest, 1);
+ __ bind(&loop_entry);
+ __ Branch(&loop, lt, dest, Operand(limit));
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash = seed + character + ((seed + character) << 10);
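+ // These steps are assumed to mirror the C++ StringHasher so that hashes
+ // computed in generated code match those computed at runtime.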
+ __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+ // Untag smi seed and add the character.
+ __ SmiUntag(hash);
+ __ addu(hash, hash, character);
+ __ sll(at, hash, 10);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 6;
+ __ srl(at, hash, 6);
+ __ xor_(hash, hash, at);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash += character;
+ __ addu(hash, hash, character);
+ // hash += hash << 10;
+ __ sll(at, hash, 10);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 6;
+ __ srl(at, hash, 6);
+ __ xor_(hash, hash, at);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
+ // hash += hash << 3;
+ __ sll(at, hash, 3);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 11;
+ __ srl(at, hash, 11);
+ __ xor_(hash, hash, at);
+ // hash += hash << 15;
+ __ sll(at, hash, 15);
+ __ addu(hash, hash, at);
+
+ __ li(at, Operand(String::kHashBitMask));
+ __ and_(hash, hash, at);
+
+ // if (hash == 0) hash = 27;
+ __ ori(at, zero_reg, StringHasher::kZeroHash);
+ __ Movz(hash, at, hash);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+ // Stack frame on entry.
+ // ra: return address
+ // sp[0]: to
+ // sp[8]: from
+ // sp[16]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ const int kToOffset = 0 * kPointerSize;
+ const int kFromOffset = 1 * kPointerSize;
+ const int kStringOffset = 2 * kPointerSize;
+
+ __ ld(a2, MemOperand(sp, kToOffset));
+ __ ld(a3, MemOperand(sp, kFromOffset));
+// Not needed?
+// STATIC_ASSERT(kFromOffset == kToOffset + 4);
+ STATIC_ASSERT(kSmiTag == 0);
+// Not needed?
+// STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+ // Utilize delay slots. SmiUntag doesn't emit a jump, so everything else is
+ // safe in this case.
+ __ JumpIfNotSmi(a2, &runtime);
+ __ JumpIfNotSmi(a3, &runtime);
+ // Both a2 and a3 are untagged integers.
+
+ __ SmiUntag(a2, a2);
+ __ SmiUntag(a3, a3);
+ __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
+
+ __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
+ __ Dsubu(a2, a2, a3);
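+ // a2 now holds the substring length (to - from).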
+
+ // Make sure first argument is a string.
+ __ ld(v0, MemOperand(sp, kStringOffset));
+ __ JumpIfSmi(v0, &runtime);
+ __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ And(a4, a1, Operand(kIsNotStringMask));
+
+ __ Branch(&runtime, ne, a4, Operand(zero_reg));
+
+ Label single_char;
+ __ Branch(&single_char, eq, a2, Operand(1));
+
+ // Short-cut for the case of trivial substring.
+ Label return_v0;
+ // v0: original string
+ // a2: result string length
+ __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
+ __ SmiUntag(a4);
+ // Return original string.
+ __ Branch(&return_v0, eq, a2, Operand(a4));
+ // Longer than original string's length or negative: unsafe arguments.
+ __ Branch(&runtime, hi, a2, Operand(a4));
+ // Shorter than original string's length: an actual substring.
+
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into a5.
+ // v0: original string
+ // a1: instance type
+ // a2: length
+ // a3: from index (untagged)
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ And(a4, a1, Operand(kIsIndirectStringMask));
+ __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
+ // a4 is used as a scratch register and can be overwritten in either case.
+ __ And(a4, a1, Operand(kSlicedNotConsMask));
+ __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
+ __ LoadRoot(a4, Heap::kempty_stringRootIndex);
+ __ Branch(&runtime, ne, a5, Operand(a4));
+ __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
+ // Update instance type.
+ __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
+ __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+ __ SmiUntag(a4); // Add offset to index.
+ __ Daddu(a3, a3, a4);
+ // Update instance type.
+ __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(a5, v0);
+
+ __ bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ // a5: underlying subject string
+ // a1: instance type of underlying subject string
+ // a2: length
+ // a3: adjusted start index (untagged)
+ // Short slice. Copy instead of slicing.
+ __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyway due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ And(a4, a1, Operand(kStringEncodingMask));
+ __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
+ __ AllocateAsciiSlicedString(v0, a2, a6, a7, &runtime);
+ __ jmp(&set_slice_header);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
+ __ bind(&set_slice_header);
+ __ SmiTag(a3);
+ __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
+ __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+ __ jmp(&return_v0);
+
+ __ bind(&copy_routine);
+ }
+
+ // a5: underlying subject string
+ // a1: instance type of underlying subject string
+ // a2: length
+ // a3: adjusted start index (untagged)
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(a4, a1, Operand(kExternalStringTag));
+ __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ And(a4, a1, Operand(kShortExternalStringTag));
+ __ Branch(&runtime, ne, a4, Operand(zero_reg));
+ __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
+ // a5 already points to the first character of underlying string.
+ __ jmp(&allocate_result);
+
+ __ bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&allocate_result);
+ // Sequential ASCII string. Allocate the result.
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ And(a4, a1, Operand(kStringEncodingMask));
+ __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
+
+ // Allocate and copy the resulting ASCII string.
+ __ AllocateAsciiString(v0, a2, a4, a6, a7, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Daddu(a5, a5, a3);
+
+ // Locate first character of result.
+ __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ // v0: result string
+ // a1: first character of result string
+ // a2: result string length
+ // a5: first character of substring to copy
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
+ __ jmp(&return_v0);
+
+ // Allocate and copy the resulting two-byte string.
+ __ bind(&two_byte_sequential);
+ __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
+
+ // Locate first character of substring to copy.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
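+ // Two-byte characters occupy two bytes each, so scale the start index by 2.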
+ __ dsll(a4, a3, 1);
+ __ Daddu(a5, a5, a4);
+ // Locate first character of result.
+ __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // v0: result string.
+ // a1: first character of result.
+ // a2: result length.
+ // a5: first character of substring to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
+
+ __ bind(&return_v0);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
+ __ DropAndRet(3);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // v0: original string
+ // a1: instance type
+ // a2: length
+ // a3: from index (untagged)
+ StringCharAtGenerator generator(
+ v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ DropAndRet(3);
+ generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ ld(length, FieldMemOperand(left, String::kLengthOffset));
+ __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+ __ bind(&strings_not_equal);
+ // Cannot put li in the delay slot; it may expand to multiple instructions.
+ __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+ __ Ret();
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+ ASSERT(is_int16((intptr_t)Smi::FromInt(EQUAL)));
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+
+ // Compare characters.
+ __ bind(&compare_chars);
+
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, length, scratch2, scratch3, v0,
+ &strings_not_equal);
+
+ // Characters are equal.
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label result_not_equal, compare_lengths;
+ // Find minimum length and length difference.
+ __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Dsubu(scratch3, scratch1, Operand(scratch2));
+ Register length_delta = scratch3;
+ __ slt(scratch4, scratch2, scratch1);
+ __ Movn(scratch1, scratch2, scratch4);
+ Register min_length = scratch1;
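+ // Movn replaced scratch1 with the right-hand length when it was smaller, so
+ // min_length holds min(left length, right length).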
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4, v0,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use length_delta as result if it's zero.
+ __ mov(scratch2, length_delta);
+ __ mov(scratch4, zero_reg);
+ __ mov(v0, zero_reg);
+
+ __ bind(&result_not_equal);
+ // Conditionally update the result based on either length_delta or
+ // the last comparison performed in the loop above.
+ Label ret;
+ __ Branch(&ret, eq, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(GREATER)));
+ __ Branch(&ret, gt, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(LESS)));
+ __ bind(&ret);
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ Daddu(scratch1, length,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ Daddu(left, left, Operand(scratch1));
+ __ Daddu(right, right, Operand(scratch1));
+ __ Dsubu(length, zero_reg, length);
+ Register index = length; // index = -length;
+
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ Daddu(scratch3, left, index);
+ __ lbu(scratch1, MemOperand(scratch3));
+ __ Daddu(scratch3, right, index);
+ __ lbu(scratch2, MemOperand(scratch3));
+ __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+ __ Daddu(index, index, 1);
+ __ Branch(&loop, ne, index, Operand(zero_reg));
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[8]: left string
+ __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
+ __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
+
+ Label not_same;
+ __ Branch(&not_same, ne, a0, Operand(a1));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+ __ DropAndRet(2);
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first.
+ __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+ __ Daddu(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, a4, a5);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
+
+ // Load a2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ li(a2, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ And(at, a2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
+ __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SMI);
+ Label miss;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Ret(USE_DELAY_SLOT);
+ __ Dsubu(v0, a0, a1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(a1);
+ __ SmiUntag(a0);
+ __ Ret(USE_DELAY_SLOT);
+ __ Dsubu(v0, a1, a0);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
+
+ Label generic_stub;
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a0, &miss);
+ }
+
+ // Inlining the double comparison and falling back to the general compare
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(a0, &right_smi);
+ __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Dsubu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(a2, a0); // Can't clobber a0 yet.
+ FPURegister single_scratch = f6;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f2, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(a1, &left_smi);
+ __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Dsubu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(a2, a1); // Can't clobber a1 yet.
+ single_scratch = f8;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f0, single_scratch);
+
+ __ bind(&done);
+
+ // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+ Label fpu_eq, fpu_lt;
+ // Test if equal, and also handle the unordered/NaN case.
+ __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+
+ // Test if less (unordered case is already handled).
+ __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+
+ // Otherwise it's greater, so just fall thru, and return.
+ ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(GREATER));
+
+ __ bind(&fpu_eq);
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(EQUAL));
+
+ __ bind(&fpu_lt);
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(LESS));
+
+ __ bind(&unordered);
+ __ bind(&generic_stub);
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&miss, ne, a0, Operand(at));
+ __ JumpIfSmi(a1, &unordered);
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ jmp(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&unordered, eq, a1, Operand(at));
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are internalized strings.
+ __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ Or(tmp1, tmp1, Operand(tmp2));
+ __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ Branch(&miss, ne, at, Operand(zero_reg));
+
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(v0, right);
+ // Internalized strings are compared by identity.
+ __ Ret(ne, left, Operand(right));
+ ASSERT(is_int16(EQUAL));
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueName(tmp1, &miss);
+ __ JumpIfNotUniqueName(tmp2, &miss);
+
+ // Use a0 as result
+ __ mov(v0, a0);
+
+ // Unique names are compared by identity.
+ Label done;
+ __ Branch(&done, ne, left, Operand(right));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRING);
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op_);
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+ Register tmp3 = a4;
+ Register tmp4 = a5;
+ Register tmp5 = a6;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Or(tmp3, tmp1, tmp2);
+ __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+ __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+ // Fast check for identical strings.
+ Label left_ne_right;
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&left_ne_right, ne, left, Operand(right));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, zero_reg); // In the delay slot.
+ __ bind(&left_ne_right);
+
+ // Handle not identical strings.
+
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
+ if (equality) {
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ Or(tmp3, tmp1, Operand(tmp2));
+ __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
+ Label is_symbol;
+ __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // In the delay slot.
+ __ bind(&is_symbol);
+ }
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+ tmp1, tmp2, tmp3, tmp4, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ }
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ Push(left, right);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECT);
+ Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &miss);
+
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+ ASSERT(GetCondition() == eq);
+ __ Ret(USE_DELAY_SLOT);
+ __ dsubu(v0, a0, a1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ And(a2, a1, a0);
+ __ JumpIfSmi(a2, &miss);
+ __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a2, Operand(known_map_));
+ __ Branch(&miss, ne, a3, Operand(known_map_));
+
+ __ Ret(USE_DELAY_SLOT);
+ __ dsubu(v0, a0, a1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ {
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a0);
+ __ Push(ra, a1, a0);
+ __ li(a4, Operand(Smi::FromInt(op_)));
+ __ daddiu(sp, sp, -kPointerSize);
+ __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
+ __ sd(a4, MemOperand(sp)); // In the delay slot.
+ // Compute the entry point of the rewritten stub.
+ __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ Pop(a1, a0, ra);
+ }
+ __ Jump(a2);
+}
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // Make place for arguments to fit C calling convention. Most of the callers
+ // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
+ // so they handle stack restoring and we don't have to do that here.
+ // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
+ // kCArgsSlotsSize stack space after the call.
+ __ daddiu(sp, sp, -kCArgsSlotsSize);
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Call(t9); // Call the C++ function.
+ __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
+
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC.
+ // Dereference the address and check for this.
+ __ Uld(a4, MemOperand(t9));
+ __ Assert(ne, kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ }
+ __ Jump(t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ intptr_t loc =
+ reinterpret_cast<intptr_t>(GetCode().location());
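+  // Pass the C function in t9 and route the call through this stub's own
+  // code: Generate() above saves the return address on the stack so it
+  // stays GC safe while the C++ function runs.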
+ __ Move(t9, target);
+ __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+ __ Call(ra);
+}
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(name->IsUniqueName());
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
+ __ Dsubu(index, index, Operand(1));
+ __ And(index, index,
+ Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ dsll(at, index, 1);
+ __ Daddu(index, index, at); // index *= 3.
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ Register tmp = properties;
+
+ __ dsll(scratch0, index, kPointerSizeLog2);
+ __ Daddu(tmp, properties, scratch0);
+ __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ ASSERT(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ Branch(done, eq, entity_name, Operand(tmp));
+
+ // Load the hole ready for use below:
+ __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
+ // Stop if found the property.
+ __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
+
+ Label good;
+ __ Branch(&good, eq, entity_name, Operand(tmp));
+
+ // Check if the entry name is not a unique name.
+ __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ lbu(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ bind(&good);
+
+ // Restore the properties.
+ __ ld(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ }
+
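+  // The inline probes were inconclusive; fall back to the full lookup stub,
+  // saving every register it may clobber.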
+ const int spill_mask =
+ (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
+ a2.bit() | a1.bit() | a0.bit() | v0.bit());
+
+ __ MultiPush(spill_mask);
+ __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ li(a1, Operand(Handle<Name>(name)));
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ mov(at, v0);
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, eq, at, Operand(zero_reg));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+}
+
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If the lookup was successful |scratch2| will be equal to elements + 8 * index.
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!elements.is(scratch1));
+ ASSERT(!elements.is(scratch2));
+ ASSERT(!name.is(scratch1));
+ ASSERT(!name.is(scratch2));
+
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ SmiUntag(scratch1);
+ __ Dsubu(scratch1, scratch1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Daddu(scratch2, scratch2, Operand(
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ dsrl(scratch2, scratch2, Name::kHashShift);
+ __ And(scratch2, scratch1, scratch2);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ // scratch2 = scratch2 * 3.
+
+ __ dsll(at, scratch2, 1);
+ __ Daddu(scratch2, scratch2, at);
+
+ // Check if the key is identical to the name.
+ __ dsll(at, scratch2, kPointerSizeLog2);
+ __ Daddu(scratch2, elements, at);
+ __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Branch(done, eq, name, Operand(at));
+ }
+
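+  // None of the inline probes matched; call the generated lookup stub,
+  // saving all potentially clobbered registers except the scratches.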
+ const int spill_mask =
+ (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
+ a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
+ ~(scratch1.bit() | scratch2.bit());
+
+ __ MultiPush(spill_mask);
+ if (name.is(a0)) {
+ ASSERT(!elements.is(a1));
+ __ Move(a1, name);
+ __ Move(a0, elements);
+ } else {
+ __ Move(a0, elements);
+ __ Move(a1, name);
+ }
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ mov(scratch2, a2);
+ __ mov(at, v0);
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, ne, at, Operand(zero_reg));
+ __ Branch(miss, eq, at, Operand(zero_reg));
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ // Registers:
+  //  result (v0): holds the result of the lookup.
+  //  dictionary (a0): NameDictionary to probe.
+  //  key (a1): the name being looked up.
+  //  index (a2): will hold the index of the entry if the lookup succeeds;
+  //              may alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.
+
+ Register result = v0;
+ Register dictionary = a0;
+ Register key = a1;
+ Register index = a2;
+ Register mask = a3;
+ Register hash = a4;
+ Register undefined = a5;
+ Register entry_key = a6;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ SmiUntag(mask);
+ __ Dsubu(mask, mask, Operand(1));
+
+ __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
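+  // The fast-path callers have already performed the first kInlinedProbes
+  // probes inline, so continue from probe kInlinedProbes up to kTotalProbes.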
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Daddu(index, hash, Operand(
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ } else {
+ __ mov(index, hash);
+ }
+ __ dsrl(index, index, Name::kHashShift);
+ __ And(index, mask, index);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ // index *= 3.
+ __ mov(at, index);
+ __ dsll(index, index, 1);
+ __ Daddu(index, index, at);
+
+
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ dsll(index, index, kPointerSizeLog2);
+ __ Daddu(index, index, dictionary);
+ __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
+
+ // Stop if found the property.
+ __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ lbu(entry_key,
+ FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(result, zero_reg);
+ }
+
+ __ bind(&in_dictionary);
+ __ Ret(USE_DELAY_SLOT);
+ __ li(result, 1);
+
+  __ bind(&not_in_dictionary);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(result, zero_reg);
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two branch+nop instructions are generated with labels so as to
+ // get the offset fixed up correctly by the bind(Label*) call. We patch it
+ // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
+ // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
+ // incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
+ __ nop();
+ __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
+ __ nop();
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+
+ PatchBranchIntoNop(masm, 0);
+ PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(a0));
+ __ Move(address, regs_.address());
+ __ Move(a0, regs_.object());
+ __ Move(a1, address);
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
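+  // Mask the object address down to its page (MemoryChunk) header and
+  // decrement the page's write barrier counter; once it drops below zero we
+  // fall back to informing the incremental marker.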
+ __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ ld(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
+ __ sd(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : element value to store
+ // -- a3 : element index as smi
+ // -- sp[0] : array literal index in function as smi
+  // -- sp[8] : array literal
+ // clobbers a1, a2, a4
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ // Get array literal index, array literal and its map.
+ __ ld(a4, MemOperand(sp, 0 * kPointerSize));
+ __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+ __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+
+ __ CheckFastElements(a2, a5, &double_elements);
+ // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+ __ JumpIfSmi(a0, &smi_element);
+ __ CheckFastSmiElements(a2, a5, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call into
+  // the runtime.
+  __ bind(&slow_elements);
+ __ Push(a1, a3, a0);
+ __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
+ __ Push(a5, a4);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
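+  // Compute the address of elements[index] and store the value, then emit
+  // the write barrier for the new heap object reference.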
+ __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ SmiScale(a6, a3, kPointerSizeLog2);
+ __ Daddu(a6, a5, a6);
+ __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sd(a0, MemOperand(a6, 0));
+ // Update the write barrier for the array store.
+ __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ bind(&smi_element);
+ __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ SmiScale(a6, a3, kPointerSizeLog2);
+ __ Daddu(a6, a5, a6);
+ __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+
+ // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ ld(a1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ __ Daddu(a1, a1, Operand(1));
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ dsll(a1, a1, kPointerSizeLog2);
+ __ Ret(USE_DELAY_SLOT);
+ __ Daddu(sp, sp, a1);
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ ProfileEntryHookStub stub(masm->isolate());
+ __ push(ra);
+ __ CallStub(&stub);
+ __ pop(ra);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push ra" instruction, followed by a call.
+  // Note: on MIPS a "push" takes two instructions.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+
+ // This should contain all kJSCallerSaved registers.
+ const RegList kSavedRegs =
+ kJSCallerSaved | // Caller saved registers.
+ s5.bit(); // Saved stack pointer.
+
+ // We also save ra, so the count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ MultiPush(kSavedRegs | ra.bit());
+
+ // Compute the function's address for the first argument.
+ __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(s5, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ And(sp, sp, Operand(-frame_alignment));
+ }
+
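+  // Reserve the C argument slots required by the calling convention for the
+  // call through t9 below.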
+ __ Dsubu(sp, sp, kCArgsSlotsSize);
+#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
+ int64_t entry_hook =
+ reinterpret_cast<int64_t>(isolate()->function_entry_hook());
+ __ li(t9, Operand(entry_hook));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ // It additionally takes an isolate as a third parameter.
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ li(t9, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ isolate())));
+#endif
+ // Call C function through t9 to conform ABI for PIC.
+ __ Call(t9);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, s5);
+ } else {
+ __ Daddu(sp, sp, kCArgsSlotsSize);
+ }
+
+ // Also pop ra to get Ret(0).
+ __ MultiPop(kSavedRegs | ra.bit());
+ __ Ret();
+}
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // a0 - number of arguments
+ // a1 - constructor?
+ // sp[0] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ And(at, a3, Operand(1));
+ __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+ }
+ // look at the first argument
+ __ ld(a5, MemOperand(sp, 0));
+ __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ Daddu(a3, a3, Operand(1));
+
+ if (FLAG_debug_code) {
+ __ ld(a5, FieldMemOperand(a2, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
+ }
+
+ // Save the resulting elements kind in type info. We can't just store a3
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field...upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things.
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+    __ bind(&not_zero_case);
+    __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm, mode);
+
+    __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc (only if argument_count_ == ANY)
+ // -- a1 : constructor
+ // -- a2 : AllocationSite or undefined
+ // -- sp[0] : return address
+  // -- sp[8] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ SmiTst(a4, at);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+ at, Operand(zero_reg));
+ __ GetObjectType(a4, a4, a5);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+ a5, Operand(MAP_TYPE));
+
+ // We should either have undefined in a2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, a4);
+ }
+
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&no_info, eq, a2, Operand(at));
+
+ __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(a3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+
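+  // Dispatch on argc in a0: zero arguments, more than one argument, and
+  // finally the single-argument case (which may need a holey elements kind).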
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lo, a0, Operand(1));
+
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, hi, a0, Operand(1));
+
+ if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array;
+    // look at the first argument.
+ __ ld(at, MemOperand(sp, 0));
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
+ }
+
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- a1 : constructor
+ // -- sp[0] : return address
+  // -- sp[8] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ SmiTst(a3, at);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+ at, Operand(zero_reg));
+ __ GetObjectType(a3, a3, a4);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+ a4, Operand(MAP_TYPE));
+ }
+
+ // Figure out the right elements kind.
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(a3);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
+ __ Assert(
+ eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(FAST_HOLEY_ELEMENTS));
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : callee
+ // -- a4 : call_data
+ // -- a2 : holder
+ // -- a1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+  // -- sp[(argc - 1) * 8] : first argument
+  // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ Register callee = a0;
+ Register call_data = a4;
+ Register holder = a2;
+ Register api_function_address = a1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
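+  // The seven implicit FCA values are pushed below from highest index to
+  // lowest, so sp ends up pointing at the holder (index 0).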
+ // Save context, callee and call data.
+ __ Push(context, callee, call_data);
+ // Load context from callee.
+ __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // Push return value and default return value.
+ __ Push(scratch, scratch);
+ __ li(scratch,
+ Operand(ExternalReference::isolate_address(isolate())));
+ // Push isolate and holder.
+ __ Push(scratch, holder);
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
+ // a0 = FunctionCallbackInfo&
+  // The FunctionCallbackInfo is located just after the return address.
+ __ Daddu(a0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ sd(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc));
+ __ sd(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
+
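+  // On return, drop the JS arguments, the implicit FunctionCallbackArguments
+  // slots and the receiver from the stack.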
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+  // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- a2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = a2;
+
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // a1 (internal::Object** args_) as the data.
+ __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_CODE_STUBS_MIPS64_H_
+#define V8_MIPS64_CODE_STUBS_MIPS64_H_
+
+#include "src/ic-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() const { return StoreBufferOverflow; }
+ int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ String::Encoding encoding);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class SubStringStub: public PlatformCodeStub {
+ public:
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ private:
+ Major MajorKey() const { return SubString; }
+ int MinorKey() const { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() const { return StoreRegistersState; }
+ int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+class RestoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() const { return RestoreRegistersState; }
+ int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
+
+  // Compares two flat ASCII strings and returns the result in v0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Compares two flat ASCII strings for equality and returns result
+ // in v0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() const { return StringCompare; }
+ virtual int MinorKey() const { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal);
+};
+
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
+ public:
+ WriteInt32ToHeapNumberStub(Isolate* isolate,
+ Register the_int,
+ Register the_heap_number,
+ Register scratch,
+ Register scratch2)
+ : PlatformCodeStub(isolate),
+ the_int_(the_int),
+ the_heap_number_(the_heap_number),
+ scratch_(scratch),
+ sign_(scratch2) {
+ ASSERT(IntRegisterBits::is_valid(the_int_.code()));
+ ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
+ ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
+ ASSERT(SignRegisterBits::is_valid(sign_.code()));
+ }
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+
+ private:
+ Register the_int_;
+ Register the_heap_number_;
+ Register scratch_;
+ Register sign_;
+
+ // Minor key encoding in 16 bits.
+ class IntRegisterBits: public BitField<int, 0, 4> {};
+ class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+ class ScratchRegisterBits: public BitField<int, 8, 4> {};
+ class SignRegisterBits: public BitField<int, 12, 4> {};
+
+ Major MajorKey() const { return WriteInt32ToHeapNumber; }
+ int MinorKey() const {
+ // Encode the parameters in a unique 16 bit value.
+ return IntRegisterBits::encode(the_int_.code())
+ | HeapNumberRegisterBits::encode(the_heap_number_.code())
+ | ScratchRegisterBits::encode(scratch_.code())
+ | SignRegisterBits::encode(sign_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ RecordWriteStub(Isolate* isolate,
+ Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : PlatformCodeStub(isolate),
+ object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
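+  // The two patchable branches at the start of the stub are toggled between
+  // "bne zero_reg, zero_reg" (never taken, effectively a nop) and
+  // "beq zero_reg, zero_reg" (always taken) to switch the stub between
+  // STORE_BUFFER_ONLY and the incremental marking modes; see GetMode().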
+ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+ const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+ masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
+ (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+ ASSERT(Assembler::IsBne(masm->instr_at(pos)));
+ }
+
+ static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+ const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+ masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
+ (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+ ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
+ }
+
+ static Mode GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ 2 * Assembler::kInstrSize);
+
+ if (Assembler::IsBeq(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(Assembler::IsBne(first_instruction));
+
+ if (Assembler::IsBeq(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(Assembler::IsBne(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(NULL,
+ stub->instruction_start(),
+ stub->instruction_size());
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CpuFeatures::FlushICache(stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
+ }
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->pop(scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ masm->MultiPushFPU(kCallerSavedFPU);
+ }
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ masm->MultiPopFPU(kCallerSavedFPU);
+ }
+ masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ Major MajorKey() const { return RecordWrite; }
+
+ int MinorKey() const {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects) we need to
+// keep the code which called into native pinned in the memory. Currently the
+// simplest approach is to generate such a stub early enough so it can never be
+// moved by the GC.
+class DirectCEntryStub: public PlatformCodeStub {
+ public:
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() const { return DirectCEntry; }
+ int MinorKey() const { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate), mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() const { return NameDictionaryLookup; }
+
+ int MinorKey() const { return LookupModeBits::encode(mode_); }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_CODE_STUBS_ARM_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/simulator-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_mips_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())->CallFP(
+ fast_exp_mips_machine_code, x, 0);
+}
+#endif
+
+
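+// Generates a native stub for exp() using MathExpGenerator. Falls back to
+// std::exp if --fast-math is disabled or the code buffer cannot be allocated;
+// under the simulator the generated code is invoked through Simulator::CallFP.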
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &std::exp;
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ DoubleRegister input = f12;
+ DoubleRegister result = f0;
+ DoubleRegister double_scratch1 = f4;
+ DoubleRegister double_scratch2 = f6;
+ Register temp1 = a4;
+ Register temp2 = a5;
+ Register temp3 = a6;
+
+ if (!IsMipsSoftFloatABI) {
+ // Input value is in f12 anyway, nothing to do.
+ } else {
+ __ Move(input, a0, a1);
+ }
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(
+ &masm, input, result, double_scratch1, double_scratch2,
+ temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ if (!IsMipsSoftFloatABI) {
+ // Result is already in f0, nothing to do.
+ } else {
+ __ Move(v0, v1, result);
+ }
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_mips_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+
+#if defined(V8_HOST_ARCH_MIPS)
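+// Generates an optimized byte-copy routine that uses word-sized loads/stores
+// and cache prefetching. Returns the passed-in |stub| unchanged under the
+// simulator or when the code buffer cannot be allocated.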
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ // This code assumes that cache lines are 32 bytes; it will not work
+ // correctly if the cache line is larger.
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ Label lastb, unaligned, aligned, chkw,
+ loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
+ leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
+ ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
+
+ // The size of each prefetch.
+ uint32_t pref_chunk = 32;
+ // The maximum size of a prefetch; it must not be less than pref_chunk.
+ // If the real size of a prefetch is greater than max_pref_size and
+ // the kPrefHintPrepareForStore hint is used, the code will not work
+ // correctly.
+ uint32_t max_pref_size = 128;
+ ASSERT(pref_chunk < max_pref_size);
+
+ // pref_limit is set based on the fact that we never use an offset
+ // greater than 5 on a store pref and that a single pref can
+ // never be larger than max_pref_size.
+ uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
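+ // With the values above (pref_chunk == 32, max_pref_size == 128) this
+ // gives a pref_limit of 5 * 32 + 128 == 288 bytes.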
+ int32_t pref_hint_load = kPrefHintLoadStreamed;
+ int32_t pref_hint_store = kPrefHintPrepareForStore;
+ uint32_t loadstore_chunk = 4;
+
+ // The initial prefetches may fetch bytes that are before the buffer being
+ // copied. Start copies with an offset of 4 to avoid this situation when
+ // using kPrefHintPrepareForStore.
+ ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
+ pref_chunk * 4 >= max_pref_size);
+ // If the size is less than 8, go to lastb. Regardless of size,
+ // copy the dst pointer to v0 for the return value.
+ __ slti(a6, a2, 2 * loadstore_chunk);
+ __ bne(a6, zero_reg, &lastb);
+ __ mov(v0, a0); // In delay slot.
+
+ // If src and dst have different alignments, go to unaligned, if they
+ // have the same alignment (but are not actually aligned) do a partial
+ // load/store to make them aligned. If they are both already aligned
+ // we can start copying at aligned.
+ __ xor_(t8, a1, a0);
+ __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
+ __ bne(t8, zero_reg, &unaligned);
+ __ subu(a3, zero_reg, a0); // In delay slot.
+
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &aligned); // Already aligned.
+ __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
+
+ __ lwr(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swr(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+
+ // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
+ // count how many bytes we have to copy after all the 64 byte chunks are
+ // copied and a3 to the dst pointer after all the 64 byte chunks have been
+ // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&aligned);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &chkw); // Less than 64?
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
+
+ // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
+ // in this case the a0+x should be past the "a4-32" address. This means:
+ // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
+ // x=64 the last "safe" a0 address is "a4-96". In the current version we
+ // will use "pref hint, 128(a0)", so "a4-160" is the limit.
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(a4, a0, a2); // a4 is the "past the end" address.
+ __ Subu(t9, a4, pref_limit); // t9 is the "last safe pref" address.
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+ __ bind(&loop16w);
+ __ lw(a4, MemOperand(a1));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
+ __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&skip_pref);
+ __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+
+ __ sw(a4, MemOperand(a0));
+ __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+
+ __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
+ __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
+ __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
+ __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
+ __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
+ __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+
+ __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+ // Here we have src and dest word-aligned but less than 64 bytes to go.
+ // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
+ // down to chk1w to handle the tail end of the copy.
+ __ bind(&chkw);
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ andi(t8, a2, 0x1f);
+ __ beq(a2, t8, &chk1w); // Less than 32?
+ __ nop(); // In delay slot.
+ __ lw(a4, MemOperand(a1));
+ __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(a4, MemOperand(a0));
+ __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Here we have less than 32 bytes to copy. Set up for a loop to copy
+ // one word at a time. Set a2 to count how many bytes we have to copy
+ // after all the word chunks are copied and a3 to the dst pointer after
+ // all the word chunks have been copied. We will loop, incrementing a0
+ // and a1 until a0 equals a3.
+ __ bind(&chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &lastb);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&wordCopy_loop);
+ __ lw(a7, MemOperand(a1));
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &wordCopy_loop);
+ __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ __ bind(&lastb);
+ __ Branch(&leave, le, a2, Operand(zero_reg));
+ __ addu(a3, a0, a2);
+
+ __ bind(&lastbloop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &lastbloop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ bind(&leave);
+ __ jr(ra);
+ __ nop();
+
+ // Unaligned case. Only the dst gets aligned so we need to do partial
+ // loads of the source followed by normal stores to the dst (once we
+ // have aligned the destination).
+ __ bind(&unaligned);
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &ua_chk16w);
+ __ subu(a2, a2, a3); // In delay slot.
+
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swr(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+
+ // Now the dst (but not the source) is aligned. Set a2 to count how many
+ // bytes we have to copy after all the 64 byte chunks are copied and a3 to
+ // the dst pointer after all the 64 byte chunks have been copied. We will
+ // loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&ua_chk16w);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &ua_chkw);
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(a4, a0, a2);
+ __ Subu(t9, a4, pref_limit);
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+
+ __ bind(&ua_loop16w);
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+ __ lwr(a4, MemOperand(a1));
+ __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(a4,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a5,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a6,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a7,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t0,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+ __ sw(a4, MemOperand(a0));
+ __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
+ __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
+ __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
+ __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
+ __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
+ __ lwl(a4,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a5,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a6,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a7,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t0,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+ __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &ua_loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+ // Here we have less than 64 bytes to go. Check for a 32-byte chunk and
+ // copy it if there is one. Otherwise jump down to ua_chk1w to handle the
+ // tail end of the copy.
+ __ bind(&ua_chkw);
+ __ Pref(pref_hint_load, MemOperand(a1));
+ __ andi(t8, a2, 0x1f);
+
+ __ beq(a2, t8, &ua_chk1w);
+ __ nop(); // In delay slot.
+ __ lwr(a4, MemOperand(a1));
+ __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(a4,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a5,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a6,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a7,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t0,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(a4, MemOperand(a0));
+ __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Less than 32 bytes to copy. Set up for a loop to
+ // copy one word at a time.
+ __ bind(&ua_chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &ua_smallCopy);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&ua_wordCopy_loop);
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &ua_wordCopy_loop);
+ __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ // Copy the remaining bytes, one byte at a time.
+ __ bind(&ua_smallCopy);
+ __ beq(a2, zero_reg, &leave);
+ __ addu(a3, a0, a2); // In delay slot.
+
+ __ bind(&ua_smallCopy_loop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &ua_smallCopy_loop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ jr(ra);
+ __ nop();
+ }
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<MemCopyUint8Function>(buffer);
+#endif
+}
+#endif
+
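+// Generates a stub that computes sqrt with the FPU's sqrt.d instruction.
+// Under the simulator, or if the code buffer cannot be allocated, std::sqrt
+// is returned instead.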
+UnaryMathFunction CreateSqrtFunction() {
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ __ MovFromFloatParameter(f12);
+ __ sqrt_d(f0, f12);
+ __ MovToFloatResult(f0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
+}
+
+#undef __
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : target map, scratch for subsequent call
+ // -- a4 : scratch (elements)
+ // -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(a2, a4, allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ RecordWriteField(a2,
+ HeapObject::kMapOffset,
+ a3,
+ t1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : target map, scratch for subsequent call
+ // -- a4 : scratch (elements)
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required, only_change_map, done;
+
+ Register scratch = t2;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(a2, a4, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ ld(a4, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ Branch(&only_change_map, eq, at, Operand(a4));
+
+ __ push(ra);
+ __ ld(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ // a4: source FixedArray
+ // a5: number of elements (smi-tagged)
+
+ // Allocate new FixedDoubleArray.
+ __ SmiScale(scratch, a5, kDoubleSizeLog2);
+ __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
+ __ Allocate(scratch, a6, a7, t1, &gc_required, DOUBLE_ALIGNMENT);
+ // a6: destination FixedDoubleArray, not tagged as heap object
+
+ // Set destination FixedDoubleArray's length and map.
+ __ LoadRoot(t1, Heap::kFixedDoubleArrayMapRootIndex);
+ __ sd(a5, MemOperand(a6, FixedDoubleArray::kLengthOffset));
+ __ sd(t1, MemOperand(a6, HeapObject::kMapOffset));
+ // Update receiver's map.
+
+ __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ RecordWriteField(a2,
+ HeapObject::kMapOffset,
+ a3,
+ t1,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ Daddu(a3, a6, Operand(kHeapObjectTag));
+ __ sd(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ RecordWriteField(a2,
+ JSObject::kElementsOffset,
+ a3,
+ t1,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+
+ // Prepare for conversion loop.
+ __ Daddu(a3, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Daddu(a7, a6, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiScale(a6, a5, kDoubleSizeLog2);
+ __ Daddu(a6, a6, a7);
+ __ li(a4, Operand(kHoleNanLower32));
+ __ li(a5, Operand(kHoleNanUpper32));
+ // a4: kHoleNanLower32
+ // a5: kHoleNanUpper32
+ // a6: end of destination FixedDoubleArray, not tagged
+ // a7: begin of FixedDoubleArray element fields, not tagged
+
+ __ Branch(&entry);
+
+ __ bind(&only_change_map);
+ __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ RecordWriteField(a2,
+ HeapObject::kMapOffset,
+ a3,
+ t1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Branch(&done);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(ra);
+ __ Branch(fail);
+
+ // Convert and copy elements.
+ __ bind(&loop);
+ __ ld(t1, MemOperand(a3));
+ __ Daddu(a3, a3, kIntSize);
+ // t1: current element
+ __ JumpIfNotSmi(t1, &convert_hole);
+ __ SmiUntag(t1);
+
+ // Normal smi, convert to double and store.
+ __ mtc1(t1, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, MemOperand(a7));
+ __ Daddu(a7, a7, kDoubleSize);
+
+ __ Branch(&entry);
+
+ // Hole found, store the-hole NaN.
+ __ bind(&convert_hole);
+ if (FLAG_debug_code) {
+ // Restore a "smi-untagged" heap object.
+ __ Or(t1, t1, Operand(1));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t1));
+ }
+ __ sw(a4, MemOperand(a7)); // mantissa
+ __ sw(a5, MemOperand(a7, kIntSize)); // exponent
+ __ Daddu(a7, a7, kDoubleSize);
+
+ __ bind(&entry);
+ __ Branch(&loop, lt, a7, Operand(a6));
+
+ __ pop(ra);
+ __ bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : target map, scratch for subsequent call
+ // -- a4 : scratch (elements)
+ // -----------------------------------
+ Label entry, loop, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(a2, a4, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ ld(a4, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ Branch(&only_change_map, eq, at, Operand(a4));
+
+ __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+ __ ld(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ // a4: source FixedArray
+ // a5: number of elements (smi-tagged)
+
+ // Allocate new FixedArray.
+ __ SmiScale(a0, a5, kPointerSizeLog2);
+ __ Daddu(a0, a0, FixedDoubleArray::kHeaderSize);
+ __ Allocate(a0, a6, a7, t1, &gc_required, NO_ALLOCATION_FLAGS);
+ // a6: destination FixedArray, not tagged as heap object
+ // Set destination FixedDoubleArray's length and map.
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sd(a5, MemOperand(a6, FixedDoubleArray::kLengthOffset));
+ __ sd(t1, MemOperand(a6, HeapObject::kMapOffset));
+
+ // Prepare for conversion loop.
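+ // The extra +4 below makes a4 point at the upper (exponent) word of each
+ // little-endian double, which is what the hole check in the loop reads.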
+ __ Daddu(a4, a4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ __ Daddu(a3, a6, Operand(FixedArray::kHeaderSize));
+ __ Daddu(a6, a6, Operand(kHeapObjectTag));
+ __ SmiScale(a5, a5, kPointerSizeLog2);
+ __ Daddu(a5, a3, a5);
+ __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ // Using pre-offset addresses.
+ // a3: begin of destination FixedArray element fields, not tagged
+ // a4: begin of source FixedDoubleArray element fields, not tagged, +4
+ // a5: end of destination FixedArray, not tagged
+ // a6: destination FixedArray
+ // a7: the-hole pointer
+ // t1: heap number map
+ __ Branch(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+ __ Branch(fail);
+
+ __ bind(&loop);
+ __ lw(a1, MemOperand(a4));
+ __ Daddu(a4, a4, kDoubleSize);
+ // a1: current element's upper 32 bit
+ // a4: address of next element's upper 32 bit
+ __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(a2, a0, t2, t1, &gc_required);
+ // a2: new heap number
+ __ lw(a0, MemOperand(a4, -12));
+ __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
+ __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
+ __ mov(a0, a3);
+ __ sd(a2, MemOperand(a3));
+ __ Daddu(a3, a3, kPointerSize);
+ __ RecordWrite(a6,
+ a0,
+ a2,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Branch(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ sd(a7, MemOperand(a3));
+ __ Daddu(a3, a3, kPointerSize);
+
+ __ bind(&entry);
+ __ Branch(&loop, lt, a3, Operand(a5));
+
+ __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ sd(a6, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ RecordWriteField(a2,
+ JSObject::kElementsOffset,
+ a6,
+ t1,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(ra);
+
+ __ bind(&only_change_map);
+ // Update receiver's map.
+ __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ RecordWriteField(a2,
+ HeapObject::kMapOffset,
+ a3,
+ t1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ And(at, result, Operand(kIsIndirectStringMask));
+ __ Branch(&check_sequential, eq, at, Operand(zero_reg));
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ And(at, result, Operand(kSlicedNotConsMask));
+ __ Branch(&cons_string, eq, at, Operand(zero_reg));
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ dsra32(at, result, 0);
+ __ Daddu(index, index, at);
+ __ jmp(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(at, Heap::kempty_stringRootIndex);
+ __ Branch(call_runtime, ne, result, Operand(at));
+ // Get the first of the two strings and load its instance type.
+ __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(at, result, Operand(kStringRepresentationMask));
+ __ Branch(&external_string, ne, at, Operand(zero_reg));
+
+ // Prepare sequential strings
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Daddu(string,
+ string,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ jmp(&check_encoding);
+
+ // Handle external strings.
+ __ bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ And(at, result, Operand(kIsIndirectStringMask));
+ __ Assert(eq, kExternalStringExpectedButNotFound,
+ at, Operand(zero_reg));
+ }
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ And(at, result, Operand(kShortExternalStringMask));
+ __ Branch(call_runtime, ne, at, Operand(zero_reg));
+ __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ And(at, result, Operand(kStringEncodingMask));
+ __ Branch(&ascii, ne, at, Operand(zero_reg));
+ // Two-byte string.
+ __ dsll(at, index, 1);
+ __ Daddu(at, string, at);
+ __ lhu(result, MemOperand(at));
+ __ jmp(&done);
+ __ bind(&ascii);
+ // Ascii string.
+ __ Daddu(at, string, index);
+ __ lbu(result, MemOperand(at));
+ __ bind(&done);
+}
+
+
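+// Returns the address of entry |index| in the math_exp constants table,
+// assuming |base| already holds the table's base address.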
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch1));
+ ASSERT(!input.is(double_scratch2));
+ ASSERT(!result.is(double_scratch1));
+ ASSERT(!result.is(double_scratch2));
+ ASSERT(!double_scratch1.is(double_scratch2));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(!temp1.is(temp3));
+ ASSERT(!temp2.is(temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label zero, infinity, done;
+ __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ ldc1(double_scratch1, ExpConstant(0, temp3));
+ __ BranchF(&zero, NULL, ge, double_scratch1, input);
+
+ __ ldc1(double_scratch2, ExpConstant(1, temp3));
+ __ BranchF(&infinity, NULL, ge, input, double_scratch2);
+
+ __ ldc1(double_scratch1, ExpConstant(3, temp3));
+ __ ldc1(result, ExpConstant(4, temp3));
+ __ mul_d(double_scratch1, double_scratch1, input);
+ __ add_d(double_scratch1, double_scratch1, result);
+ __ FmoveLow(temp2, double_scratch1);
+ __ sub_d(double_scratch1, double_scratch1, result);
+ __ ldc1(result, ExpConstant(6, temp3));
+ __ ldc1(double_scratch2, ExpConstant(5, temp3));
+ __ mul_d(double_scratch1, double_scratch1, double_scratch2);
+ __ sub_d(double_scratch1, double_scratch1, input);
+ __ sub_d(result, result, double_scratch1);
+ __ mul_d(double_scratch2, double_scratch1, double_scratch1);
+ __ mul_d(result, result, double_scratch2);
+ __ ldc1(double_scratch2, ExpConstant(7, temp3));
+ __ mul_d(result, result, double_scratch2);
+ __ sub_d(result, result, double_scratch1);
+ // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ Move(double_scratch2, 1);
+ __ add_d(result, result, double_scratch2);
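+ // Split the fixed-point value in temp2: the upper bits, biased by 0x3ff,
+ // form the exponent in temp1, while the low 11 bits index the log table
+ // loaded below.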
+ __ dsrl(temp1, temp2, 11);
+ __ Ext(temp2, temp2, 0, 11);
+ __ Daddu(temp1, temp1, Operand(0x3ff));
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ dsll(at, temp2, 3);
+ __ Daddu(temp3, temp3, Operand(at));
+ __ lwu(temp2, MemOperand(temp3, 0));
+ __ lwu(temp3, MemOperand(temp3, kIntSize));
+ // The first word loaded is in the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ dsll(at, temp1, 20);
+ __ Or(temp1, temp3, at);
+ __ Move(double_scratch1, temp2, temp1);
+ } else {
+ __ dsll(at, temp1, 20);
+ __ Or(temp1, temp2, at);
+ __ Move(double_scratch1, temp3, temp1);
+ }
+ __ mul_d(result, result, double_scratch1);
+ __ BranchShort(&done);
+
+ __ bind(&zero);
+ __ Move(result, kDoubleRegZero);
+ __ BranchShort(&done);
+
+ __ bind(&infinity);
+ __ ldc1(result, ExpConstant(2, temp3));
+
+ __ bind(&done);
+}
+
+#ifdef DEBUG
+// nop(CODE_AGE_MARKER_NOP)
+static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+#endif
+
+
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // Since patcher is a large object, allocate it dynamically when needed,
+ // to avoid overloading the stack in stress conditions.
+ // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+ // the process, before the MIPS simulator ICache is set up.
+ SmartPointer<CodePatcher> patcher(
+ new CodePatcher(young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
+ PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+ patcher->masm()->Push(ra, fp, cp, a1);
+ patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ patcher->masm()->Daddu(
+ fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(isolate, sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Assembler::target_address_at(
+ sequence + Assembler::kInstrSize);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+ if (age == kNoAgeCodeAge) {
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+ CpuFeatures::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ // Mark this code sequence for FindPlatformCodeAgeSequence().
+ patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Load the stub address to t9 and call it,
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ patcher.masm()->li(
+ t9,
+ Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
+ ADDRESS_LOAD);
+ patcher.masm()->nop(); // Prevent jalr to jal optimization.
+ patcher.masm()->jalr(t9, a0);
+ patcher.masm()->nop(); // Branch delay slot nop.
+ patcher.masm()->nop(); // Pad the empty space.
+ }
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef V8_MIPS64_CODEGEN_MIPS64_H_
+#define V8_MIPS64_CODEGEN_MIPS64_H_
+
+
+#include "src/ast.h"
+#include "src/ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+
+class MathExpGenerator : public AllStatic {
+ public:
+ // Register input isn't modified. All other registers are clobbered.
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS64_CODEGEN_MIPS64_H_
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/mips64/constants-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Registers.
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+ "zero_reg",
+ "at",
+ "v0", "v1",
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+ "t0", "t1", "t2", "t3",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9",
+ "k0", "k1",
+ "gp",
+ "sp",
+ "fp",
+ "ra",
+ "LO", "HI",
+ "pc"
+};
+
+
+// List of alias names which can be used when referring to MIPS registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {0, "zero"},
+ {23, "cp"},
+ {30, "s8"},
+ {30, "s8_fp"},
+ {kInvalidRegister, NULL}
+};
+
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+ // No register with the requested name found.
+ return kInvalidRegister;
+}
+
+
+const char* FPURegisters::names_[kNumFPURegisters] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
+ "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+};
+
+
+// List of alias names which can be used when referring to MIPS FPU registers.
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
+ {kInvalidRegister, NULL}
+};
+
+
+const char* FPURegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+
+int FPURegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+ // No coprocessor register with the requested name found.
+ return kInvalidFPURegister;
+}
+
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+bool Instruction::IsForbiddenInBranchDelay() const {
+ const int op = OpcodeFieldRaw();
+ switch (op) {
+ case J:
+ case JAL:
+ case BEQ:
+ case BNE:
+ case BLEZ:
+ case BGTZ:
+ case BEQL:
+ case BNEL:
+ case BLEZL:
+ case BGTZL:
+ return true;
+ case REGIMM:
+ switch (RtFieldRaw()) {
+ case BLTZ:
+ case BGEZ:
+ case BLTZAL:
+ case BGEZAL:
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case SPECIAL:
+ switch (FunctionFieldRaw()) {
+ case JR:
+ case JALR:
+ return true;
+ default:
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+}
+
+
+bool Instruction::IsLinkingInstruction() const {
+ const int op = OpcodeFieldRaw();
+ switch (op) {
+ case JAL:
+ return true;
+ case REGIMM:
+ switch (RtFieldRaw()) {
+ case BGEZAL:
+ case BLTZAL:
+ return true;
+ default:
+ return false;
+ }
+ case SPECIAL:
+ switch (FunctionFieldRaw()) {
+ case JALR:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+
+bool Instruction::IsTrap() const {
+ if (OpcodeFieldRaw() != SPECIAL) {
+ return false;
+ } else {
+ switch (FunctionFieldRaw()) {
+ case BREAK:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+
+Instruction::Type Instruction::InstructionType() const {
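+ // Decode the major opcode (and, where needed, the function/rt/rs fields) and
+ // classify the instruction as register-, immediate- or jump-type; anything
+ // unrecognized is reported as kUnsupported.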
+ switch (OpcodeFieldRaw()) {
+ case SPECIAL:
+ switch (FunctionFieldRaw()) {
+ case JR:
+ case JALR:
+ case BREAK:
+ case SLL:
+ case DSLL:
+ case DSLL32:
+ case SRL:
+ case DSRL:
+ case DSRL32:
+ case SRA:
+ case DSRA:
+ case DSRA32:
+ case SLLV:
+ case DSLLV:
+ case SRLV:
+ case DSRLV:
+ case SRAV:
+ case DSRAV:
+ case MFHI:
+ case MFLO:
+ case MULT:
+ case DMULT:
+ case MULTU:
+ case DMULTU:
+ case DIV:
+ case DDIV:
+ case DIVU:
+ case DDIVU:
+ case ADD:
+ case DADD:
+ case ADDU:
+ case DADDU:
+ case SUB:
+ case DSUB:
+ case SUBU:
+ case DSUBU:
+ case AND:
+ case OR:
+ case XOR:
+ case NOR:
+ case SLT:
+ case SLTU:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ case MOVZ:
+ case MOVN:
+ case MOVCI:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ break;
+ case SPECIAL2:
+ switch (FunctionFieldRaw()) {
+ case MUL:
+ case CLZ:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ break;
+ case SPECIAL3:
+ switch (FunctionFieldRaw()) {
+ case INS:
+ case EXT:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ break;
+ case COP1: // Coprocessor instructions.
+ switch (RsFieldRawNoAssert()) {
+ case BC1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ }
+ break;
+ case COP1X:
+ return kRegisterType;
+ // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
+ case REGIMM:
+ case BEQ:
+ case BNE:
+ case BLEZ:
+ case BGTZ:
+ case ADDI:
+ case DADDI:
+ case ADDIU:
+ case DADDIU:
+ case SLTI:
+ case SLTIU:
+ case ANDI:
+ case ORI:
+ case XORI:
+ case LUI:
+ case BEQL:
+ case BNEL:
+ case BLEZL:
+ case BGTZL:
+ case LB:
+ case LH:
+ case LWL:
+ case LW:
+ case LWU:
+ case LD:
+ case LBU:
+ case LHU:
+ case LWR:
+ case SB:
+ case SH:
+ case SWL:
+ case SW:
+ case SD:
+ case SWR:
+ case LWC1:
+ case LDC1:
+ case SWC1:
+ case SDC1:
+ return kImmediateType;
+ // 26 bits immediate type instructions. e.g.: j imm26.
+ case J:
+ case JAL:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ }
+ return kUnsupported;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS_CONSTANTS_H_
+#define V8_MIPS_CONSTANTS_H_
+
+// UNIMPLEMENTED_ macro for MIPS.
+#ifdef DEBUG
+#define UNIMPLEMENTED_MIPS() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_MIPS()
+#endif
+
+#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+
+enum ArchVariants {
+ kMips32r2,
+ kMips32r1,
+ kLoongson,
+ kMips64r2
+};
+
+
+#ifdef _MIPS_ARCH_MIPS64R2
+ static const ArchVariants kArchVariant = kMips64r2;
+#elif _MIPS_ARCH_LOONGSON
+// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
+// which predates (and is a subset of) the mips32r2 and r1 architectures.
+ static const ArchVariants kArchVariant = kLoongson;
+#else
+ static const ArchVariants kArchVariant = kMips64r2;
+#endif
+
+
+// TODO(plind): consider deriving ABI from compiler flags or build system.
+
+// ABI-dependent definitions are made with #define in simulator-mips64.h,
+// so the ABI choice must be available to the pre-processor. However, in all
+// other cases, we should use the enum AbiVariants with normal if statements.
+
+#define MIPS_ABI_N64 1
+// #define MIPS_ABI_O32 1
+
+// The only supported ABIs are O32 and N64.
+enum AbiVariants {
+ kO32,
+ kN64 // Use upper case N for 'n64' ABI to conform to style standard.
+};
+
+#ifdef MIPS_ABI_N64
+static const AbiVariants kMipsAbi = kN64;
+#else
+static const AbiVariants kMipsAbi = kO32;
+#endif
+
+
+// TODO(plind): consider renaming these ...
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// This flag is raised when -msoft-float is passed to the compiler.
+// Although FPU is a base requirement for v8, soft-float ABI is used
+// on soft-float systems with FPU kernel emulation.
+const bool IsMipsSoftFloatABI = true;
+#else
+const bool IsMipsSoftFloatABI = true;
+#endif
+
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate MIPS32 instructions.
+//
+// See: MIPS32 Architecture For Programmers
+// Volume II: The MIPS32 Instruction Set
+// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegisters.
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
+
+// Number of registers with HI, LO, and pc.
+const int kNumSimuRegisters = 35;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// 'pref' instruction hints
+const int32_t kPrefHintLoad = 0;
+const int32_t kPrefHintStore = 1;
+const int32_t kPrefHintLoadStreamed = 4;
+const int32_t kPrefHintStoreStreamed = 5;
+const int32_t kPrefHintLoadRetained = 6;
+const int32_t kPrefHintStoreRetained = 7;
+const int32_t kPrefHintWritebackInvalidate = 25;
+const int32_t kPrefHintPrepareForStore = 30;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ static const int64_t kMaxValue = 0x7fffffffffffffffl;
+ static const int64_t kMinValue = 0x8000000000000000l;
+
+ private:
+ static const char* names_[kNumSimuRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumFPURegisters];
+ static const RegisterAlias aliases_[];
+};
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On MIPS all instructions are 32 bits.
+typedef int32_t Instr;
+
+// Special Software Interrupt codes when used in the presence of the MIPS
+// simulator.
+enum SoftwareInterruptCodes {
+ // Transition to C code.
+ call_rt_redirected = 0xfffff
+};
+
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
+
+
+// ----- Fields offset and length.
+const int kOpcodeShift = 26;
+const int kOpcodeBits = 6;
+const int kRsShift = 21;
+const int kRsBits = 5;
+const int kRtShift = 16;
+const int kRtBits = 5;
+const int kRdShift = 11;
+const int kRdBits = 5;
+const int kSaShift = 6;
+const int kSaBits = 5;
+const int kFunctionShift = 0;
+const int kFunctionBits = 6;
+const int kLuiShift = 16;
+
+const int kImm16Shift = 0;
+const int kImm16Bits = 16;
+const int kImm26Shift = 0;
+const int kImm26Bits = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits = 32;
+
+// In branches and jumps immediate fields point to words, not bytes,
+// and are therefore shifted by 2.
+const int kImmFieldShift = 2;
+
+const int kFrBits = 5;
+const int kFrShift = 21;
+const int kFsShift = 11;
+const int kFsBits = 5;
+const int kFtShift = 16;
+const int kFtBits = 5;
+const int kFdShift = 6;
+const int kFdBits = 5;
+const int kFCccShift = 8;
+const int kFCccBits = 3;
+const int kFBccShift = 18;
+const int kFBccBits = 3;
+const int kFBtrueShift = 16;
+const int kFBtrueBits = 1;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
+const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
+const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
+// Misc masks.
+const int kHiMask = 0xffff << 16;
+const int kLoMask = 0xffff;
+const int kSignMask = 0x80000000;
+const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int64_t kHi16MaskOf64 = (int64_t)0xffff << 48;
+const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32;
+const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
+
+// ----- MIPS Opcodes and Function Fields.
+// We use this presentation to stay close to the table representation in
+// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
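+// Each value places the 6-bit major opcode in bits 31..26 and is written as
+// (row << 3) + column of the opcode table; e.g. J is opcode 2, i.e. 2 << 26.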
+enum Opcode {
+ SPECIAL = 0 << kOpcodeShift,
+ REGIMM = 1 << kOpcodeShift,
+
+ J = ((0 << 3) + 2) << kOpcodeShift,
+ JAL = ((0 << 3) + 3) << kOpcodeShift,
+ BEQ = ((0 << 3) + 4) << kOpcodeShift,
+ BNE = ((0 << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0 << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0 << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1 << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1 << 3) + 1) << kOpcodeShift,
+ SLTI = ((1 << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1 << 3) + 3) << kOpcodeShift,
+ ANDI = ((1 << 3) + 4) << kOpcodeShift,
+ ORI = ((1 << 3) + 5) << kOpcodeShift,
+ XORI = ((1 << 3) + 6) << kOpcodeShift,
+ LUI = ((1 << 3) + 7) << kOpcodeShift,
+
+ COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
+ BEQL = ((2 << 3) + 4) << kOpcodeShift,
+ BNEL = ((2 << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2 << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2 << 3) + 7) << kOpcodeShift,
+
+ DADDI = ((3 << 3) + 0) << kOpcodeShift,
+ DADDIU = ((3 << 3) + 1) << kOpcodeShift,
+ LDL = ((3 << 3) + 2) << kOpcodeShift,
+ LDR = ((3 << 3) + 3) << kOpcodeShift,
+ SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
+
+ LB = ((4 << 3) + 0) << kOpcodeShift,
+ LH = ((4 << 3) + 1) << kOpcodeShift,
+ LWL = ((4 << 3) + 2) << kOpcodeShift,
+ LW = ((4 << 3) + 3) << kOpcodeShift,
+ LBU = ((4 << 3) + 4) << kOpcodeShift,
+ LHU = ((4 << 3) + 5) << kOpcodeShift,
+ LWR = ((4 << 3) + 6) << kOpcodeShift,
+ LWU = ((4 << 3) + 7) << kOpcodeShift,
+
+ SB = ((5 << 3) + 0) << kOpcodeShift,
+ SH = ((5 << 3) + 1) << kOpcodeShift,
+ SWL = ((5 << 3) + 2) << kOpcodeShift,
+ SW = ((5 << 3) + 3) << kOpcodeShift,
+ SDL = ((5 << 3) + 4) << kOpcodeShift,
+ SDR = ((5 << 3) + 5) << kOpcodeShift,
+ SWR = ((5 << 3) + 6) << kOpcodeShift,
+
+ LWC1 = ((6 << 3) + 1) << kOpcodeShift,
+ LLD = ((6 << 3) + 4) << kOpcodeShift,
+ LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ LD = ((6 << 3) + 7) << kOpcodeShift,
+
+ PREF = ((6 << 3) + 3) << kOpcodeShift,
+
+ SWC1 = ((7 << 3) + 1) << kOpcodeShift,
+ SCD = ((7 << 3) + 4) << kOpcodeShift,
+ SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+ SD = ((7 << 3) + 7) << kOpcodeShift,
+
+ COP1X = ((1 << 4) + 3) << kOpcodeShift
+};
+
+enum SecondaryField {
+ // SPECIAL Encoding of Function Field.
+ SLL = ((0 << 3) + 0),
+ MOVCI = ((0 << 3) + 1),
+ SRL = ((0 << 3) + 2),
+ SRA = ((0 << 3) + 3),
+ SLLV = ((0 << 3) + 4),
+ SRLV = ((0 << 3) + 6),
+ SRAV = ((0 << 3) + 7),
+
+ JR = ((1 << 3) + 0),
+ JALR = ((1 << 3) + 1),
+ MOVZ = ((1 << 3) + 2),
+ MOVN = ((1 << 3) + 3),
+ BREAK = ((1 << 3) + 5),
+
+ MFHI = ((2 << 3) + 0),
+ MFLO = ((2 << 3) + 2),
+ DSLLV = ((2 << 3) + 4),
+ DSRLV = ((2 << 3) + 6),
+ DSRAV = ((2 << 3) + 7),
+
+ MULT = ((3 << 3) + 0),
+ MULTU = ((3 << 3) + 1),
+ DIV = ((3 << 3) + 2),
+ DIVU = ((3 << 3) + 3),
+ DMULT = ((3 << 3) + 4),
+ DMULTU = ((3 << 3) + 5),
+ DDIV = ((3 << 3) + 6),
+ DDIVU = ((3 << 3) + 7),
+
+ ADD = ((4 << 3) + 0),
+ ADDU = ((4 << 3) + 1),
+ SUB = ((4 << 3) + 2),
+ SUBU = ((4 << 3) + 3),
+ AND = ((4 << 3) + 4),
+ OR = ((4 << 3) + 5),
+ XOR = ((4 << 3) + 6),
+ NOR = ((4 << 3) + 7),
+
+ SLT = ((5 << 3) + 2),
+ SLTU = ((5 << 3) + 3),
+ DADD = ((5 << 3) + 4),
+ DADDU = ((5 << 3) + 5),
+ DSUB = ((5 << 3) + 6),
+ DSUBU = ((5 << 3) + 7),
+
+ TGE = ((6 << 3) + 0),
+ TGEU = ((6 << 3) + 1),
+ TLT = ((6 << 3) + 2),
+ TLTU = ((6 << 3) + 3),
+ TEQ = ((6 << 3) + 4),
+ TNE = ((6 << 3) + 6),
+
+ DSLL = ((7 << 3) + 0),
+ DSRL = ((7 << 3) + 2),
+ DSRA = ((7 << 3) + 3),
+ DSLL32 = ((7 << 3) + 4),
+ DSRL32 = ((7 << 3) + 6),
+ DSRA32 = ((7 << 3) + 7),
+ // drotr in special4?
+
+ // SPECIAL2 Encoding of Function Field.
+ MUL = ((0 << 3) + 2),
+ CLZ = ((4 << 3) + 0),
+ CLO = ((4 << 3) + 1),
+
+ // SPECIAL3 Encoding of Function Field.
+ EXT = ((0 << 3) + 0),
+ DEXTM = ((0 << 3) + 1),
+ DEXTU = ((0 << 3) + 2),
+ DEXT = ((0 << 3) + 3),
+ INS = ((0 << 3) + 4),
+ DINSM = ((0 << 3) + 5),
+ DINSU = ((0 << 3) + 6),
+ DINS = ((0 << 3) + 7),
+
+ DSBH = ((4 << 3) + 4),
+
+ // REGIMM encoding of rt Field.
+ BLTZ = ((0 << 3) + 0) << 16,
+ BGEZ = ((0 << 3) + 1) << 16,
+ BLTZAL = ((2 << 3) + 0) << 16,
+ BGEZAL = ((2 << 3) + 1) << 16,
+
+ // COP1 Encoding of rs Field.
+ MFC1 = ((0 << 3) + 0) << 21,
+ DMFC1 = ((0 << 3) + 1) << 21,
+ CFC1 = ((0 << 3) + 2) << 21,
+ MFHC1 = ((0 << 3) + 3) << 21,
+ MTC1 = ((0 << 3) + 4) << 21,
+ DMTC1 = ((0 << 3) + 5) << 21,
+ CTC1 = ((0 << 3) + 6) << 21,
+ MTHC1 = ((0 << 3) + 7) << 21,
+ BC1 = ((1 << 3) + 0) << 21,
+ S = ((2 << 3) + 0) << 21,
+ D = ((2 << 3) + 1) << 21,
+ W = ((2 << 3) + 4) << 21,
+ L = ((2 << 3) + 5) << 21,
+ PS = ((2 << 3) + 6) << 21,
+ // COP1 Encoding of Function Field When rs=S.
+ ROUND_L_S = ((1 << 3) + 0),
+ TRUNC_L_S = ((1 << 3) + 1),
+ CEIL_L_S = ((1 << 3) + 2),
+ FLOOR_L_S = ((1 << 3) + 3),
+ ROUND_W_S = ((1 << 3) + 4),
+ TRUNC_W_S = ((1 << 3) + 5),
+ CEIL_W_S = ((1 << 3) + 6),
+ FLOOR_W_S = ((1 << 3) + 7),
+ CVT_D_S = ((4 << 3) + 1),
+ CVT_W_S = ((4 << 3) + 4),
+ CVT_L_S = ((4 << 3) + 5),
+ CVT_PS_S = ((4 << 3) + 6),
+ // COP1 Encoding of Function Field When rs=D.
+ ADD_D = ((0 << 3) + 0),
+ SUB_D = ((0 << 3) + 1),
+ MUL_D = ((0 << 3) + 2),
+ DIV_D = ((0 << 3) + 3),
+ SQRT_D = ((0 << 3) + 4),
+ ABS_D = ((0 << 3) + 5),
+ MOV_D = ((0 << 3) + 6),
+ NEG_D = ((0 << 3) + 7),
+ ROUND_L_D = ((1 << 3) + 0),
+ TRUNC_L_D = ((1 << 3) + 1),
+ CEIL_L_D = ((1 << 3) + 2),
+ FLOOR_L_D = ((1 << 3) + 3),
+ ROUND_W_D = ((1 << 3) + 4),
+ TRUNC_W_D = ((1 << 3) + 5),
+ CEIL_W_D = ((1 << 3) + 6),
+ FLOOR_W_D = ((1 << 3) + 7),
+ CVT_S_D = ((4 << 3) + 0),
+ CVT_W_D = ((4 << 3) + 4),
+ CVT_L_D = ((4 << 3) + 5),
+ C_F_D = ((6 << 3) + 0),
+ C_UN_D = ((6 << 3) + 1),
+ C_EQ_D = ((6 << 3) + 2),
+ C_UEQ_D = ((6 << 3) + 3),
+ C_OLT_D = ((6 << 3) + 4),
+ C_ULT_D = ((6 << 3) + 5),
+ C_OLE_D = ((6 << 3) + 6),
+ C_ULE_D = ((6 << 3) + 7),
+ // COP1 Encoding of Function Field When rs=W or L.
+ CVT_S_W = ((4 << 3) + 0),
+ CVT_D_W = ((4 << 3) + 1),
+ CVT_S_L = ((4 << 3) + 0),
+ CVT_D_L = ((4 << 3) + 1),
+ // COP1 Encoding of Function Field When rs=PS.
+ // COP1X Encoding of Function Field.
+ MADD_D = ((4 << 3) + 1),
+
+ NULLSF = 0
+};
+
+
+// ----- Emulated conditions.
+// On MIPS we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+// Opposite conditions must be paired as odd/even numbers
+// because the 'NegateCondition' function flips the LSB to negate the condition.
+enum Condition {
+ // Any value < 0 is considered no_condition.
+ kNoCondition = -1,
+
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal= 3,
+ equal = 4,
+ not_equal = 5,
+ Uless_equal = 6,
+ Ugreater = 7,
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+ ueq = 16, // Unordered or Equal.
+ nue = 17, // Not (Unordered or Equal).
+
+ cc_always = 18,
+
+ // Aliases.
+ carry = Uless,
+ not_carry = Ugreater_equal,
+ zero = equal,
+ eq = equal,
+ not_zero = not_equal,
+ ne = not_equal,
+ nz = not_equal,
+ sign = negative,
+ not_sign = positive,
+ mi = negative,
+ pl = positive,
+ hi = Ugreater,
+ ls = Uless_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ hs = Ugreater_equal,
+ lo = Uless,
+ al = cc_always,
+
+ cc_default = kNoCondition
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
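+// For example, NegateCondition(less) yields greater_equal (12 ^ 1 == 13) and
+// NegateCondition(Uless) yields Ugreater_equal (2 ^ 1 == 3).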
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
+
+
+// Commute a condition such that {a cond b == b cond' a}.
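+// For example, CommuteCondition(less) returns greater, since (a < b) holds
+// exactly when (b > a) does.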
+inline Condition CommuteCondition(Condition cc) {
+ switch (cc) {
+ case Uless:
+ return Ugreater;
+ case Ugreater:
+ return Uless;
+ case Ugreater_equal:
+ return Uless_equal;
+ case Uless_equal:
+ return Ugreater_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ }
+}
+
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ kNoFPUCondition = -1,
+
+ F = 0, // False.
+ UN = 1, // Unordered.
+ EQ = 2, // Equal.
+ UEQ = 3, // Unordered or Equal.
+  OLT = 4, // Ordered Less Than.
+  ULT = 5, // Unordered or Less Than.
+  OLE = 6, // Ordered Less Than or Equal.
+ ULE = 7 // Unordered or Less Than or Equal.
+};
+
+
+// FPU rounding modes.
+enum FPURoundingMode {
+ RN = 0 << 0, // Round to Nearest.
+ RZ = 1 << 0, // Round towards zero.
+ RP = 2 << 0, // Round towards Plus Infinity.
+ RM = 3 << 0, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM
+};
+
+const uint32_t kFPURoundingModeMask = 3 << 0;
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the MIPS. They are defined so that they can
+// appear in shared function signatures, but will be ignored in MIPS
+// implementations.
+enum Hint {
+ no_hint = 0
+};
+
+
+inline Hint NegateHint(Hint hint) {
+ return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-mips.cc, as they use named
+// registers and other constants.
+
+// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+extern const Instr kPopInstruction;
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+extern const Instr kPushInstruction;
+// sw(r, MemOperand(sp, 0))
+extern const Instr kPushRegPattern;
+// lw(r, MemOperand(sp, 0))
+extern const Instr kPopRegPattern;
+extern const Instr kLwRegFpOffsetPattern;
+extern const Instr kSwRegFpOffsetPattern;
+extern const Instr kLwRegFpNegOffsetPattern;
+extern const Instr kSwRegFpNegOffsetPattern;
+// A mask for the Rt register for push, pop, lw, sw instructions.
+extern const Instr kRtMask;
+extern const Instr kLwSwInstrTypeMask;
+extern const Instr kLwSwInstrArgumentMask;
+extern const Instr kLwSwOffsetMask;
+
+// Break 0xfffff, reserved for redirected real time call.
+const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
+// A nop instruction. (Encoding of sll 0 0 0).
+const Instr nopInstr = 0;
+
+class Instruction {
+ public:
+ enum {
+ kInstrSize = 4,
+ kInstrSizeLog2 = 2,
+ // On MIPS PC cannot actually be directly accessed. We behave as if PC was
+ // always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const {
+ return (InstructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field out of the instruction bits.
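+  // For example, Bits(kRsShift + kRsBits - 1, kRsShift) extracts the 5-bit
+  // rs field (bits 25..21 in the standard MIPS layout).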
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type {
+ kRegisterType,
+ kImmediateType,
+ kJumpType,
+ kUnsupported = -1
+ };
+
+ // Get the encoding type of the instruction.
+ Type InstructionType() const;
+
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline Opcode OpcodeValue() const {
+ return static_cast<Opcode>(
+ Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
+ }
+
+ inline int RsValue() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return Bits(kRsShift + kRsBits - 1, kRsShift);
+ }
+
+ inline int RtValue() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return Bits(kRtShift + kRtBits - 1, kRtShift);
+ }
+
+ inline int RdValue() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int SaValue() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return Bits(kSaShift + kSaBits - 1, kSaShift);
+ }
+
+ inline int FunctionValue() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+ }
+
+ inline int FdValue() const {
+ return Bits(kFdShift + kFdBits - 1, kFdShift);
+ }
+
+ inline int FsValue() const {
+ return Bits(kFsShift + kFsBits - 1, kFsShift);
+ }
+
+ inline int FtValue() const {
+ return Bits(kFtShift + kFtBits - 1, kFtShift);
+ }
+
+ inline int FrValue() const {
+ return Bits(kFrShift + kFrBits -1, kFrShift);
+ }
+
+ // Float Compare condition code instruction bits.
+ inline int FCccValue() const {
+ return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int FBccValue() const {
+ return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int FBtrueValue() const {
+ return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode OpcodeFieldRaw() const {
+ return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+ }
+
+ inline int RsFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return InstructionBits() & kRsFieldMask;
+ }
+
+ // Same as above function, but safe to call within InstructionType().
+ inline int RsFieldRawNoAssert() const {
+ return InstructionBits() & kRsFieldMask;
+ }
+
+ inline int RtFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return InstructionBits() & kRtFieldMask;
+ }
+
+ inline int RdFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return InstructionBits() & kRdFieldMask;
+ }
+
+ inline int SaFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return InstructionBits() & kSaFieldMask;
+ }
+
+ inline int FunctionFieldRaw() const {
+ return InstructionBits() & kFunctionFieldMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int SecondaryValue() const {
+ Opcode op = OpcodeFieldRaw();
+ switch (op) {
+ case SPECIAL:
+ case SPECIAL2:
+ return FunctionValue();
+ case COP1:
+ return RsValue();
+ case REGIMM:
+ return RtValue();
+ default:
+ return NULLSF;
+ }
+ }
+
+ inline int32_t Imm16Value() const {
+ ASSERT(InstructionType() == kImmediateType);
+ return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+ }
+
+ inline int32_t Imm26Value() const {
+ ASSERT(InstructionType() == kJumpType);
+ return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+ }
+
+ // Say if the instruction should not be used in a branch delay slot.
+ bool IsForbiddenInBranchDelay() const;
+ // Say if the instruction 'links'. e.g. jal, bal.
+ bool IsLinkingInstruction() const;
+ // Say if the instruction is a break or a trap.
+ bool IsTrap() const;
+
+  // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+// C/C++ argument slots size.
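+// The O32 ABI requires the caller to reserve stack "home" slots for the four
+// register arguments; n64 passes up to eight arguments in registers and
+// reserves no such slots, hence the count of 0.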
+const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4;
+
+// TODO(plind): below should be based on kPointerSize
+// TODO(plind): find all usages and remove the needless instructions for n64.
+const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
+
+const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+
+} } // namespace v8::internal
+
+#endif // #ifndef V8_MIPS_CONSTANTS_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for mips independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifdef __mips
+#include <asm/cachectl.h>
+#endif // #ifdef __mips
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/simulator.h" // For cache flushing.
+
+namespace v8 {
+namespace internal {
+
+
+void CpuFeatures::FlushICache(void* start, size_t size) {
+ // Nothing to do, flushing no instructions.
+ if (size == 0) {
+ return;
+ }
+
+#if !defined (USE_SIMULATOR)
+#if defined(ANDROID) && !defined(__LP64__)
+ // Bionic cacheflush can typically run in userland, avoiding kernel call.
+ char *end = reinterpret_cast<char *>(start) + size;
+ cacheflush(
+ reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
+#else // ANDROID
+ int res;
+ // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
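+  // ICACHE is provided by <asm/cachectl.h>, included above when __mips is
+  // defined.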
+ res = syscall(__NR_cacheflush, start, size, ICACHE);
+ if (res) {
+ V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
+ }
+#endif // ANDROID
+#else // USE_SIMULATOR.
+ // Not generating mips instructions for C-code. This means that we are
+ // building a mips emulator based target. We should notify the simulator
+ // that the Icache was flushed.
+ // None of this code ends up in the snapshot so there are no issues
+ // around whether or not to generate the code when building snapshots.
+ Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#endif // USE_SIMULATOR.
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ // Mips return sequence:
+ // mov sp, fp
+ // lw fp, sp(0)
+ // lw ra, sp(4)
+ // addiu sp, sp, 8
+ // addiu sp, sp, N
+ // jr ra
+ // nop (in branch delay slot)
+
+  // Make sure this constant matches the number of instructions we emit.
+ ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+ CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ // li and Call pseudo-instructions emit 6 + 2 instructions.
+ patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int64_t>(
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())),
+ ADDRESS_LOAD);
+ patcher.masm()->Call(v8::internal::t9);
+ // Place nop to match return sequence size.
+ patcher.masm()->nop();
+ // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+ // patcher.masm()->bkpt(0);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSReturnSequenceInstructions);
+}
+
+
+// A debug break in the exit code is identified by the JS frame exit code
+// having been patched with a li/call pseudo-instruction sequence
+// (lui/ori/jalr).
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Patch the code changing the debug break slot code from:
+ // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // to a call to the debug break slot code.
+ // li t9, address (4-instruction sequence on mips64)
+ // call t9 (jalr t9 / nop instruction pair)
+ CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ patcher.masm()->li(v8::internal::t9,
+ Operand(reinterpret_cast<int64_t>(
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())),
+ ADDRESS_LOAD);
+ patcher.masm()->Call(v8::internal::t9);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ __ Dsubu(sp, sp,
+ Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
+ for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
+ __ sd(at, MemOperand(sp, kPointerSize * i));
+ }
+ __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+ __ push(at);
+
+
+    // TODO(plind): This needs to be revised to store pairs of smis, as on
+    // the other 64-bit architectures.
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+ // are stored as a smi causing it to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ PushRegisterAsTwoSmis(reg);
+ }
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ PrepareCEntryArgs(0); // No arguments.
+ __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ PopRegisterAsTwoSmis(reg, at);
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ li(reg, kDebugZapValue);
+ }
+ }
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ li(t9, Operand(after_break_target));
+ __ ld(t9, MemOperand(t9));
+ __ Jump(t9);
+}
+
+
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- a3 : slot in feedback array (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ // Registers a0, a1, and a2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for keyed IC load (from ic-mips.cc).
+ GenerateLoadICDebugBreak(masm);
+}
+
+
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ // In places other than IC call sites it is expected that v0 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments (not smi)
+ // -- a1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit() , a0.bit());
+}
+
+
+
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments (not smi)
+ // -- a1 : constructor function
+ // -- a2 : feedback array
+ // -- a3 : feedback slot (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // In the places where a debug break slot is inserted no registers can contain
+ // object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ li(at, Operand(restarter_frame_function_slot));
+ __ sw(zero_reg, MemOperand(at, 0));
+
+ // We do not know our frame height, but set sp based on fp.
+ __ Dsubu(sp, fp, Operand(kPointerSize));
+
+ __ Pop(ra, fp, a1); // Return address, Frame, Function.
+
+ // Load context from the function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, a1 is function, cp is context.
+ __ Jump(t9);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::patch_size() {
+ const int kCallInstructionSizeInWords = 6;
+ return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address code_start_address = code->instruction_start();
+ // Invalidate the relocation information, as it will become invalid by the
+ // code patching below, and is not needed any more.
+ code->InvalidateRelocation();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
+ }
+ }
+
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+ RelocInfo::NONE32);
+ int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+ ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+ ASSERT(call_size_in_bytes <= patch_size());
+ CodePatcher patcher(call_address, call_size_in_words);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ }
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler());
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(s0.code(), params);
+ output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
+ output_frame->SetRegister(s2.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on MIPS in the input frame.
+ return false;
+}
+
+
+#define __ masm()->
+
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize =
+ kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+
+ // Save all FPU registers before messing with them.
+ __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ // Get the bailout id from the stack.
+ __ ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register a4.
+ __ mov(a3, ra);
+ // Correct one word for bailout id.
+ __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ __ Dsubu(a4, fp, a4);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, a5);
+ // Pass six arguments, according to O32 or n64 ABI. a0..a3 are same for both.
+ __ li(a1, Operand(type())); // bailout type,
+ __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ if (kMipsAbi == kN64) {
+ // a4: already has fp-to-sp delta.
+ __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+ } else { // O32 abi.
+ // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
+ __ sd(a4, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
+ __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(a5, CFunctionArgumentOperand(6)); // Isolate.
+ }
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve "deoptimizer" object in register v0 and get the input
+ // frame descriptor pointer to a1 (deoptimizer->input_);
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ mov(a0, v0);
+ __ ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ ld(a2, MemOperand(sp, i * kPointerSize));
+ __ sd(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ sd(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the bailout id and the saved registers from the stack.
+ __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Daddu(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ BranchShort(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(a4);
+ __ sd(a4, MemOperand(a3, 0));
+ __ daddiu(a3, a3, sizeof(uint64_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: a4 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ dsll(a1, a1, kPointerSizeLog2); // Count to offset.
+ __ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**.
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ ld(a2, MemOperand(a4, 0)); // output_[ix]
+ __ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
+ __ Daddu(a6, a2, Operand(a3));
+ __ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ push(a7);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Daddu(a4, a4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
+
+ __ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push state, pc, and continuation from the last output frame.
+ __ ld(a6, MemOperand(a2, FrameDescription::state_offset()));
+ __ push(a6);
+
+ __ ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(a6);
+ __ ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(a6);
+
+
+ // Technically restoring 'at' should work unless zero_reg is also restored
+ // but it's safer to check for this.
+ ASSERT(!(at.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(at, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ ld(ToRegister(i), MemOperand(at, offset));
+ }
+ }
+
+ __ InitializeRootRegister();
+
+ __ pop(at); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(at);
+ __ stop("Unreachable.");
+}
+
+
+// Maximum size of a table entry generated below.
+const int Deoptimizer::table_entry_size_ = 11 * Assembler::kInstrSize;
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
+ Label table_start;
+ __ bind(&table_start);
+ for (int i = 0; i < count(); i++) {
+ Label start;
+ __ bind(&start);
+ __ daddiu(sp, sp, -1 * kPointerSize);
+ // Jump over the remaining deopt entries (including this one).
+ // This code is always reached by calling Jump, which puts the target (label
+ // start) into t9.
+ const int remaining_entries = (count() - i) * table_entry_size_;
+ __ Daddu(t9, t9, remaining_entries);
+ // 'at' was clobbered so we can only load the current entry value here.
+ __ li(t8, i);
+ __ jr(t9); // Expose delay slot.
+ __ sd(t8, MemOperand(sp, 0 * kPointerSize)); // In the delay slot.
+
+ // Pad the rest of the code.
+ while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
+ __ nop();
+ }
+
+ ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+ }
+
+ ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+ count() * table_entry_size_);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// NameConverter converter;
+// Disassembler d(converter);
+// for (byte* pc = begin; pc < end;) {
+// v8::internal::EmbeddedVector<char, 256> buffer;
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/constants-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ v8::internal::Vector<char> out_buffer)
+ : converter_(converter),
+ out_buffer_(out_buffer),
+ out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintFPURegister(int freg);
+ void PrintRs(Instruction* instr);
+ void PrintRt(Instruction* instr);
+ void PrintRd(Instruction* instr);
+ void PrintFs(Instruction* instr);
+ void PrintFt(Instruction* instr);
+ void PrintFd(Instruction* instr);
+ void PrintSa(Instruction* instr);
+ void PrintSd(Instruction* instr);
+ void PrintSs1(Instruction* instr);
+ void PrintSs2(Instruction* instr);
+ void PrintBc(Instruction* instr);
+ void PrintCc(Instruction* instr);
+ void PrintFunction(Instruction* instr);
+ void PrintSecondaryField(Instruction* instr);
+ void PrintUImm16(Instruction* instr);
+ void PrintSImm16(Instruction* instr);
+ void PrintXImm16(Instruction* instr);
+ void PrintXImm26(Instruction* instr);
+ void PrintCode(Instruction* instr); // For break and trap instructions.
+ // Printing of instruction name.
+ void PrintInstructionName(Instruction* instr);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatFPURegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+ int DecodeBreakInstr(Instruction* instr);
+
+ // Each of these functions decodes one particular instruction type.
+ int DecodeTypeRegister(Instruction* instr);
+ void DecodeTypeImmediate(Instruction* instr);
+ void DecodeTypeJump(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ v8::internal::Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+ out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+
+void Decoder::PrintRs(Instruction* instr) {
+ int reg = instr->RsValue();
+ PrintRegister(reg);
+}
+
+
+void Decoder::PrintRt(Instruction* instr) {
+ int reg = instr->RtValue();
+ PrintRegister(reg);
+}
+
+
+void Decoder::PrintRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+}
+
+
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
+}
+
+
+void Decoder::PrintFs(Instruction* instr) {
+ int freg = instr->RsValue();
+ PrintFPURegister(freg);
+}
+
+
+void Decoder::PrintFt(Instruction* instr) {
+ int freg = instr->RtValue();
+ PrintFPURegister(freg);
+}
+
+
+void Decoder::PrintFd(Instruction* instr) {
+ int freg = instr->RdValue();
+ PrintFPURegister(freg);
+}
+
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa(Instruction* instr) {
+ int sa = instr->SaValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
+// Print the integer value of the rd field, when it is not used as reg.
+void Decoder::PrintSd(Instruction* instr) {
+ int sd = instr->RdValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+}
+
+
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+ int ss = instr->RdValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+ int ss = instr->RdValue();
+ int pos = instr->SaValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
+
+
+// Print the integer value of the cc field for the bc1t/f instructions.
+void Decoder::PrintBc(Instruction* instr) {
+ int cc = instr->FBccValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+}
+
+
+// Print the integer value of the cc field for the FP compare instructions.
+void Decoder::PrintCc(Instruction* instr) {
+ int cc = instr->FCccValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
+}
+
+
+// Print 16-bit unsigned immediate value.
+void Decoder::PrintUImm16(Instruction* instr) {
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
+}
+
+
+// Print 16-bit signed immediate value.
+void Decoder::PrintSImm16(Instruction* instr) {
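+  // The left shift followed by the arithmetic right shift sign-extends the
+  // 16-bit immediate field to 32 bits.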
+ int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+
+// Print 16-bit hexadecimal immediate value.
+void Decoder::PrintXImm16(Instruction* instr) {
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print 26-bit immediate value.
+void Decoder::PrintXImm26(Instruction* instr) {
+ uint32_t imm = instr->Imm26Value() << kImmFieldShift;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print the code field of a break or trap instruction.
+void Decoder::PrintCode(Instruction* instr) {
+ if (instr->OpcodeFieldRaw() != SPECIAL)
+ return; // Not a break or trap instruction.
+ switch (instr->FunctionFieldRaw()) {
+ case BREAK: {
+ int32_t code = instr->Bits(25, 6);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "0x%05x (%d)", code, code);
+ break;
+ }
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE: {
+ int32_t code = instr->Bits(15, 6);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+ break;
+ }
+ default: // Not a break or trap instruction.
+ break;
+ }
+}
+
+
+// Printing of instruction name.
+void Decoder::PrintInstructionName(Instruction* instr) {
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'r');
+ if (format[1] == 's') { // 'rs: Rs register.
+ int reg = instr->RsValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'rt: rt register.
+ int reg = instr->RtValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Handle all FPUregister based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'f');
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'c': { // 'code for break or trap instructions.
+ ASSERT(STRING_STARTS_WITH(format, "code"));
+ PrintCode(instr);
+ return 4;
+ }
+ case 'i': { // 'imm16u or 'imm26.
+ if (format[3] == '1') {
+ ASSERT(STRING_STARTS_WITH(format, "imm16"));
+ if (format[5] == 's') {
+ ASSERT(STRING_STARTS_WITH(format, "imm16s"));
+ PrintSImm16(instr);
+ } else if (format[5] == 'u') {
+ ASSERT(STRING_STARTS_WITH(format, "imm16u"));
+        PrintUImm16(instr);
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "imm16x"));
+ PrintXImm16(instr);
+ }
+ return 6;
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "imm26x"));
+ PrintXImm26(instr);
+ return 6;
+ }
+ }
+ case 'r': { // 'r: registers.
+ return FormatRegister(instr, format);
+ }
+ case 'f': { // 'f: FPUregisters.
+ return FormatFPURegister(instr, format);
+ }
+ case 's': { // 'sa.
+ switch (format[1]) {
+ case 'a': {
+ ASSERT(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ case 'd': {
+ ASSERT(STRING_STARTS_WITH(format, "sd"));
+ PrintSd(instr);
+ return 2;
+ }
+ case 's': {
+ if (format[2] == '1') {
+ ASSERT(STRING_STARTS_WITH(format, "ss1")); /* ext size */
+ PrintSs1(instr);
+ return 3;
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "ss2")); /* ins size */
+ PrintSs2(instr);
+ return 3;
+ }
+ }
+ }
+ }
+ case 'b': { // 'bc - Special for bc1 cc field.
+ ASSERT(STRING_STARTS_WITH(format, "bc"));
+ PrintBc(instr);
+ return 2;
+ }
+ case 'C': { // 'Cc - Special for c.xx.d cc field.
+ ASSERT(STRING_STARTS_WITH(format, "Cc"));
+ PrintCc(instr);
+ return 2;
+ }
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
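+// For example, Format(instr, "daddu   'rd, 'rs, 'rt") copies the mnemonic and
+// spacing verbatim and lets FormatOption substitute the rd, rs and rt
+// register names.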
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" instead of the instruction bits.
+void Decoder::Unknown(Instruction* instr) {
+ Format(instr, "unknown");
+}
+
+
+int Decoder::DecodeBreakInstr(Instruction* instr) {
+ // This is already known to be BREAK instr, just extract the code.
+ if (instr->Bits(25, 6) == static_cast<int>(kMaxStopCode)) {
+ // This is stop(msg).
+ Format(instr, "break, code: 'code");
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "\n%p %08lx stop msg: %s",
+ static_cast<void*>
+ (reinterpret_cast<int32_t*>(instr
+ + Instruction::kInstrSize)),
+ reinterpret_cast<uint64_t>
+ (*reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize)),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize));
+ // Size 3: the break_ instr, plus embedded 64-bit char pointer.
+ return 3 * Instruction::kInstrSize;
+ } else {
+ Format(instr, "break, code: 'code");
+ return Instruction::kInstrSize;
+ }
+}
+
+
+int Decoder::DecodeTypeRegister(Instruction* instr) {
+ switch (instr->OpcodeFieldRaw()) {
+ case COP1: // Coprocessor instructions.
+ switch (instr->RsFieldRaw()) {
+ case BC1: // bc1 handled in DecodeTypeImmediate.
+ UNREACHABLE();
+ break;
+ case MFC1:
+ Format(instr, "mfc1 'rt, 'fs");
+ break;
+ case DMFC1:
+ Format(instr, "dmfc1 'rt, 'fs");
+ break;
+ case MFHC1:
+ Format(instr, "mfhc1 'rt, 'fs");
+ break;
+ case MTC1:
+ Format(instr, "mtc1 'rt, 'fs");
+ break;
+ case DMTC1:
+ Format(instr, "dmtc1 'rt, 'fs");
+ break;
+ // These are called "fs" too, although they are not FPU registers.
+ case CTC1:
+ Format(instr, "ctc1 'rt, 'fs");
+ break;
+ case CFC1:
+ Format(instr, "cfc1 'rt, 'fs");
+ break;
+ case MTHC1:
+ Format(instr, "mthc1 'rt, 'fs");
+ break;
+ case D:
+ switch (instr->FunctionFieldRaw()) {
+ case ADD_D:
+ Format(instr, "add.d 'fd, 'fs, 'ft");
+ break;
+ case SUB_D:
+ Format(instr, "sub.d 'fd, 'fs, 'ft");
+ break;
+ case MUL_D:
+ Format(instr, "mul.d 'fd, 'fs, 'ft");
+ break;
+ case DIV_D:
+ Format(instr, "div.d 'fd, 'fs, 'ft");
+ break;
+ case ABS_D:
+ Format(instr, "abs.d 'fd, 'fs");
+ break;
+ case MOV_D:
+ Format(instr, "mov.d 'fd, 'fs");
+ break;
+ case NEG_D:
+ Format(instr, "neg.d 'fd, 'fs");
+ break;
+ case SQRT_D:
+ Format(instr, "sqrt.d 'fd, 'fs");
+ break;
+ case CVT_W_D:
+ Format(instr, "cvt.w.d 'fd, 'fs");
+ break;
+ case CVT_L_D:
+ Format(instr, "cvt.l.d 'fd, 'fs");
+ break;
+ case TRUNC_W_D:
+ Format(instr, "trunc.w.d 'fd, 'fs");
+ break;
+ case TRUNC_L_D:
+ Format(instr, "trunc.l.d 'fd, 'fs");
+ break;
+ case ROUND_W_D:
+ Format(instr, "round.w.d 'fd, 'fs");
+ break;
+ case ROUND_L_D:
+ Format(instr, "round.l.d 'fd, 'fs");
+ break;
+ case FLOOR_W_D:
+ Format(instr, "floor.w.d 'fd, 'fs");
+ break;
+ case FLOOR_L_D:
+ Format(instr, "floor.l.d 'fd, 'fs");
+ break;
+ case CEIL_W_D:
+ Format(instr, "ceil.w.d 'fd, 'fs");
+ break;
+ case CEIL_L_D:
+ Format(instr, "ceil.l.d 'fd, 'fs");
+ break;
+ case CVT_S_D:
+ Format(instr, "cvt.s.d 'fd, 'fs");
+ break;
+ case C_F_D:
+ Format(instr, "c.f.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UN_D:
+ Format(instr, "c.un.d 'fs, 'ft, 'Cc");
+ break;
+ case C_EQ_D:
+ Format(instr, "c.eq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UEQ_D:
+ Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLT_D:
+ Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULT_D:
+ Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLE_D:
+ Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULE_D:
+ Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
+ break;
+ default:
+ Format(instr, "unknown.cop1.d");
+ break;
+ }
+ break;
+ case S:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case W:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_S_W: // Convert word to float (single).
+ Format(instr, "cvt.s.w 'fd, 'fs");
+ break;
+ case CVT_D_W: // Convert word to double.
+ Format(instr, "cvt.d.w 'fd, 'fs");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case L:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L:
+ Format(instr, "cvt.d.l 'fd, 'fs");
+ break;
+ case CVT_S_L:
+ Format(instr, "cvt.s.l 'fd, 'fs");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PS:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case SPECIAL:
+ switch (instr->FunctionFieldRaw()) {
+ case JR:
+ Format(instr, "jr 'rs");
+ break;
+ case JALR:
+ Format(instr, "jalr 'rs");
+ break;
+ case SLL:
+ if ( 0x0 == static_cast<int>(instr->InstructionBits()))
+ Format(instr, "nop");
+ else
+ Format(instr, "sll 'rd, 'rt, 'sa");
+ break;
+ case DSLL:
+ Format(instr, "dsll 'rd, 'rt, 'sa");
+ break;
+ case DSLL32:
+ Format(instr, "dsll32 'rd, 'rt, 'sa");
+ break;
+ case SRL:
+ if (instr->RsValue() == 0) {
+ Format(instr, "srl 'rd, 'rt, 'sa");
+ } else {
+ if (kArchVariant == kMips64r2) {
+ Format(instr, "rotr 'rd, 'rt, 'sa");
+ } else {
+ Unknown(instr);
+ }
+ }
+ break;
+ case DSRL:
+ if (instr->RsValue() == 0) {
+ Format(instr, "dsrl 'rd, 'rt, 'sa");
+ } else {
+ if (kArchVariant == kMips64r2) {
+ Format(instr, "drotr 'rd, 'rt, 'sa");
+ } else {
+ Unknown(instr);
+ }
+ }
+ break;
+ case DSRL32:
+ Format(instr, "dsrl32 'rd, 'rt, 'sa");
+ break;
+ case SRA:
+ Format(instr, "sra 'rd, 'rt, 'sa");
+ break;
+ case DSRA:
+ Format(instr, "dsra 'rd, 'rt, 'sa");
+ break;
+ case DSRA32:
+ Format(instr, "dsra32 'rd, 'rt, 'sa");
+ break;
+ case SLLV:
+ Format(instr, "sllv 'rd, 'rt, 'rs");
+ break;
+ case DSLLV:
+ Format(instr, "dsllv 'rd, 'rt, 'rs");
+ break;
+ case SRLV:
+ if (instr->SaValue() == 0) {
+ Format(instr, "srlv 'rd, 'rt, 'rs");
+ } else {
+ if (kArchVariant == kMips64r2) {
+ Format(instr, "rotrv 'rd, 'rt, 'rs");
+ } else {
+ Unknown(instr);
+ }
+ }
+ break;
+ case DSRLV:
+ if (instr->SaValue() == 0) {
+ Format(instr, "dsrlv 'rd, 'rt, 'rs");
+ } else {
+ if (kArchVariant == kMips64r2) {
+ Format(instr, "drotrv 'rd, 'rt, 'rs");
+ } else {
+ Unknown(instr);
+ }
+ }
+ break;
+ case SRAV:
+ Format(instr, "srav 'rd, 'rt, 'rs");
+ break;
+ case DSRAV:
+ Format(instr, "dsrav 'rd, 'rt, 'rs");
+ break;
+ case MFHI:
+ Format(instr, "mfhi 'rd");
+ break;
+ case MFLO:
+ Format(instr, "mflo 'rd");
+ break;
+ case MULT:
+ Format(instr, "mult 'rs, 'rt");
+ break;
+ case DMULT:
+ Format(instr, "dmult 'rs, 'rt");
+ break;
+ case MULTU:
+ Format(instr, "multu 'rs, 'rt");
+ break;
+ case DMULTU:
+ Format(instr, "dmultu 'rs, 'rt");
+ break;
+ case DIV:
+ Format(instr, "div 'rs, 'rt");
+ break;
+ case DDIV:
+ Format(instr, "ddiv 'rs, 'rt");
+ break;
+ case DIVU:
+ Format(instr, "divu 'rs, 'rt");
+ break;
+ case DDIVU:
+ Format(instr, "ddivu 'rs, 'rt");
+ break;
+ case ADD:
+ Format(instr, "add 'rd, 'rs, 'rt");
+ break;
+ case DADD:
+ Format(instr, "dadd 'rd, 'rs, 'rt");
+ break;
+ case ADDU:
+ Format(instr, "addu 'rd, 'rs, 'rt");
+ break;
+ case DADDU:
+ Format(instr, "daddu 'rd, 'rs, 'rt");
+ break;
+ case SUB:
+ Format(instr, "sub 'rd, 'rs, 'rt");
+ break;
+ case DSUB:
+ Format(instr, "dsub 'rd, 'rs, 'rt");
+ break;
+ case SUBU:
+ Format(instr, "subu 'rd, 'rs, 'rt");
+ break;
+ case DSUBU:
+ Format(instr, "dsubu 'rd, 'rs, 'rt");
+ break;
+ case AND:
+ Format(instr, "and 'rd, 'rs, 'rt");
+ break;
+ case OR:
+ if (0 == instr->RsValue()) {
+ Format(instr, "mov 'rd, 'rt");
+ } else if (0 == instr->RtValue()) {
+ Format(instr, "mov 'rd, 'rs");
+ } else {
+ Format(instr, "or 'rd, 'rs, 'rt");
+ }
+ break;
+ case XOR:
+ Format(instr, "xor 'rd, 'rs, 'rt");
+ break;
+ case NOR:
+ Format(instr, "nor 'rd, 'rs, 'rt");
+ break;
+ case SLT:
+ Format(instr, "slt 'rd, 'rs, 'rt");
+ break;
+ case SLTU:
+ Format(instr, "sltu 'rd, 'rs, 'rt");
+ break;
+ case BREAK:
+ return DecodeBreakInstr(instr);
+ case TGE:
+ Format(instr, "tge 'rs, 'rt, code: 'code");
+ break;
+ case TGEU:
+ Format(instr, "tgeu 'rs, 'rt, code: 'code");
+ break;
+ case TLT:
+ Format(instr, "tlt 'rs, 'rt, code: 'code");
+ break;
+ case TLTU:
+ Format(instr, "tltu 'rs, 'rt, code: 'code");
+ break;
+ case TEQ:
+ Format(instr, "teq 'rs, 'rt, code: 'code");
+ break;
+ case TNE:
+ Format(instr, "tne 'rs, 'rt, code: 'code");
+ break;
+ case MOVZ:
+ Format(instr, "movz 'rd, 'rs, 'rt");
+ break;
+ case MOVN:
+ Format(instr, "movn 'rd, 'rs, 'rt");
+ break;
+ case MOVCI:
+ if (instr->Bit(16)) {
+ Format(instr, "movt 'rd, 'rs, 'bc");
+ } else {
+ Format(instr, "movf 'rd, 'rs, 'bc");
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case SPECIAL2:
+ switch (instr->FunctionFieldRaw()) {
+ case MUL:
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ break;
+ case CLZ:
+ Format(instr, "clz 'rd, 'rs");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: {
+ if (kArchVariant == kMips64r2) {
+ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case EXT: {
+ if (kArchVariant == kMips64r2) {
+ Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return Instruction::kInstrSize;
+}
+
+
+void Decoder::DecodeTypeImmediate(Instruction* instr) {
+ switch (instr->OpcodeFieldRaw()) {
+ // ------------- REGIMM class.
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1:
+ if (instr->FBtrueValue()) {
+ Format(instr, "bc1t 'bc, 'imm16u");
+ } else {
+ Format(instr, "bc1f 'bc, 'imm16u");
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break; // Case COP1.
+ case REGIMM:
+ switch (instr->RtFieldRaw()) {
+ case BLTZ:
+ Format(instr, "bltz 'rs, 'imm16u");
+ break;
+ case BLTZAL:
+ Format(instr, "bltzal 'rs, 'imm16u");
+ break;
+ case BGEZ:
+ Format(instr, "bgez 'rs, 'imm16u");
+ break;
+ case BGEZAL:
+ Format(instr, "bgezal 'rs, 'imm16u");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break; // Case REGIMM.
+ // ------------- Branch instructions.
+ case BEQ:
+ Format(instr, "beq 'rs, 'rt, 'imm16u");
+ break;
+ case BNE:
+ Format(instr, "bne 'rs, 'rt, 'imm16u");
+ break;
+ case BLEZ:
+ Format(instr, "blez 'rs, 'imm16u");
+ break;
+ case BGTZ:
+ Format(instr, "bgtz 'rs, 'imm16u");
+ break;
+ // ------------- Arithmetic instructions.
+ case ADDI:
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
+ break;
+ case DADDI:
+ Format(instr, "daddi 'rt, 'rs, 'imm16s");
+ break;
+ case ADDIU:
+ Format(instr, "addiu 'rt, 'rs, 'imm16s");
+ break;
+ case DADDIU:
+ Format(instr, "daddiu 'rt, 'rs, 'imm16s");
+ break;
+ case SLTI:
+ Format(instr, "slti 'rt, 'rs, 'imm16s");
+ break;
+ case SLTIU:
+ Format(instr, "sltiu 'rt, 'rs, 'imm16u");
+ break;
+ case ANDI:
+ Format(instr, "andi 'rt, 'rs, 'imm16x");
+ break;
+ case ORI:
+ Format(instr, "ori 'rt, 'rs, 'imm16x");
+ break;
+ case XORI:
+ Format(instr, "xori 'rt, 'rs, 'imm16x");
+ break;
+ case LUI:
+ Format(instr, "lui 'rt, 'imm16x");
+ break;
+ // ------------- Memory instructions.
+ case LB:
+ Format(instr, "lb 'rt, 'imm16s('rs)");
+ break;
+ case LH:
+ Format(instr, "lh 'rt, 'imm16s('rs)");
+ break;
+ case LWL:
+ Format(instr, "lwl 'rt, 'imm16s('rs)");
+ break;
+ case LDL:
+ Format(instr, "ldl 'rt, 'imm16s('rs)");
+ break;
+ case LW:
+ Format(instr, "lw 'rt, 'imm16s('rs)");
+ break;
+ case LWU:
+ Format(instr, "lwu 'rt, 'imm16s('rs)");
+ break;
+ case LD:
+ Format(instr, "ld 'rt, 'imm16s('rs)");
+ break;
+ case LBU:
+ Format(instr, "lbu 'rt, 'imm16s('rs)");
+ break;
+ case LHU:
+ Format(instr, "lhu 'rt, 'imm16s('rs)");
+ break;
+ case LWR:
+ Format(instr, "lwr 'rt, 'imm16s('rs)");
+ break;
+ case LDR:
+ Format(instr, "ldr 'rt, 'imm16s('rs)");
+ break;
+ case PREF:
+ Format(instr, "pref 'rt, 'imm16s('rs)");
+ break;
+ case SB:
+ Format(instr, "sb 'rt, 'imm16s('rs)");
+ break;
+ case SH:
+ Format(instr, "sh 'rt, 'imm16s('rs)");
+ break;
+ case SWL:
+ Format(instr, "swl 'rt, 'imm16s('rs)");
+ break;
+ case SW:
+ Format(instr, "sw 'rt, 'imm16s('rs)");
+ break;
+ case SD:
+ Format(instr, "sd 'rt, 'imm16s('rs)");
+ break;
+ case SWR:
+ Format(instr, "swr 'rt, 'imm16s('rs)");
+ break;
+ case LWC1:
+ Format(instr, "lwc1 'ft, 'imm16s('rs)");
+ break;
+ case LDC1:
+ Format(instr, "ldc1 'ft, 'imm16s('rs)");
+ break;
+ case SWC1:
+ Format(instr, "swc1 'ft, 'imm16s('rs)");
+ break;
+ case SDC1:
+ Format(instr, "sdc1 'ft, 'imm16s('rs)");
+ break;
+ default:
+ printf("a 0x%x \n", instr->OpcodeFieldRaw());
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void Decoder::DecodeTypeJump(Instruction* instr) {
+ switch (instr->OpcodeFieldRaw()) {
+ case J:
+ Format(instr, "j 'imm26x");
+ break;
+ case JAL:
+ Format(instr, "jal 'imm26x");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+// All instructions are one word long, except for the simulator
+// pseudo-instruction stop(msg). For that one special case, we return a
+// size larger than one kInstrSize.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
+ switch (instr->InstructionType()) {
+ case Instruction::kRegisterType: {
+ return DecodeTypeRegister(instr);
+ }
+ case Instruction::kImmediateType: {
+ DecodeTypeImmediate(instr);
+ break;
+ }
+ case Instruction::kJumpType: {
+ DecodeTypeJump(instr);
+ break;
+ }
+ default: {
+ Format(instr, "UNSUPPORTED");
+ UNSUPPORTED_MIPS();
+ }
+ }
+ return Instruction::kInstrSize;
+}
+
+
+} } // namespace v8::internal
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return v8::internal::Registers::Name(reg);
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ return v8::internal::FPURegisters::Name(reg);
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // MIPS does not have the concept of a byte register.
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code, so we will not try
+  // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+
+// The MIPS assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+ return -1;
+}
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ v8::internal::PrintF(f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
+
+
+#undef UNSUPPORTED
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/mips64/assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#ifndef V8_MIPS64_FRAMES_MIPS64_H_
+#define V8_MIPS64_FRAMES_MIPS64_H_
+
+namespace v8 {
+namespace internal {
+
+// Register lists.
+// Note that the bit values must match those used in actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved =
+ 1 << 2 | // v0
+ 1 << 3 | // v1
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ 1 << 8 | // a4
+ 1 << 9 | // a5
+ 1 << 10 | // a6
+ 1 << 11 | // a7
+ 1 << 12 | // t0
+ 1 << 13 | // t1
+ 1 << 14 | // t2
+ 1 << 15; // t3
+
+const int kNumJSCallerSaved = 14;
+
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns v0.code() == 2.
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved =
+ 1 << 16 | // s0
+ 1 << 17 | // s1
+ 1 << 18 | // s2
+ 1 << 19 | // s3
+ 1 << 20 | // s4
+ 1 << 21 | // s5
+  1 << 22 | // s6 (roots in JavaScript code)
+  1 << 23 | // s7 (cp in JavaScript code)
+ 1 << 30; // fp/s8
+
+const int kNumCalleeSaved = 9;
+
+const RegList kCalleeSavedFPU =
+ 1 << 20 | // f20
+ 1 << 22 | // f22
+ 1 << 24 | // f24
+ 1 << 26 | // f26
+ 1 << 28 | // f28
+ 1 << 30; // f30
+
+const int kNumCalleeSavedFPU = 6;
+
+const RegList kCallerSavedFPU =
+ 1 << 0 | // f0
+ 1 << 2 | // f2
+ 1 << 4 | // f4
+ 1 << 6 | // f6
+ 1 << 8 | // f8
+ 1 << 10 | // f10
+ 1 << 12 | // f12
+ 1 << 14 | // f14
+ 1 << 16 | // f16
+ 1 << 18; // f18
+
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 24;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters =
+ kNumJSCallerSaved + kNumCalleeSaved;
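+// With the register lists above this is 14 + 9 == 23 registers, which fits
+// within the 24 reserved safepoint slots.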
+
+const int kUndefIndex = -1;
+// Maps a register code to the index on the stack at which that register is
+// saved, or kUndefIndex if the register is not saved at safepoints.
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+ kUndefIndex, // zero_reg
+ kUndefIndex, // at
+ 0, // v0
+ 1, // v1
+ 2, // a0
+ 3, // a1
+ 4, // a2
+ 5, // a3
+ 6, // a4
+ 7, // a5
+ 8, // a6
+ 9, // a7
+ 10, // t0
+ 11, // t1
+ 12, // t2
+ 13, // t3
+ 14, // s0
+ 15, // s1
+ 16, // s2
+ 17, // s3
+ 18, // s4
+ 19, // s5
+ 20, // s6
+ 21, // s7
+ kUndefIndex, // t8
+ kUndefIndex, // t9
+ kUndefIndex, // k0
+ kUndefIndex, // k1
+ kUndefIndex, // gp
+ kUndefIndex, // sp
+ 22, // fp
+  kUndefIndex // ra
+};
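+// For example, reading the map above: register code 2 (v0) is saved at stack
+// index 0 and code 4 (a0) at index 2, while t8, t9, k0, k1, gp and sp are not
+// saved at safepoints (kUndefIndex).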
+
+
+// ----------------------------------------------------
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kFrameSize = 2 * kPointerSize;
+
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
+
+ // The caller fields are below the frame pointer on the stack.
+ static const int kCallerFPOffset = +0 * kPointerSize;
+ // The calling JS function is between FP and PC.
+ static const int kCallerPCOffset = +1 * kPointerSize;
+
+ // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+ static const int kCallerSPOffset = +2 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP.
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used.
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+ // Caller SP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS64_FRAMES_MIPS64_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+// Note on Mips implementation:
+//
+// The result_register() for mips is the 'v0' register, which is defined
+// by the ABI to contain function return values. However, the first
+// parameter to a function is defined to be 'a0'. So there are many
+// places where we have to move a previous result in v0 to a0 for the
+// next call: mov(a0, v0). This is not needed on the other architectures.
+
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
+
+#include "src/mips64/code-stubs-mips64.h"
+#include "src/mips64/macro-assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+// A patch site is a location in the code that can be patched. This class has
+// a number of methods to emit the patchable code and the method EmitPatchInfo
+// to record a marker back to the patchable code. The marker is an
+// andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy (the raw
+// 16 bit immediate value) is the delta from the pc to the first instruction of
+// the patchable code.
+// The marker instruction is effectively a NOP (dest is zero_reg) and will
+// never be emitted by normal code.
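+//
+// A worked illustration (hypothetical values): if EmitPatchInfo() below is
+// reached 10 instructions after the patch site was bound, then
+// delta_to_patch_site == 10, so rx == Register::from_code(10 / kImm16Mask)
+// == zero_reg and yyyy == 10 % kImm16Mask == 10. The emitted marker is
+//   andi zero_reg, zero_reg, #10
+// and the patching code can recover the delta as 0 * 0x0000ffff + 10 == 10.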
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+  // When initially emitting this, ensure that a jump is always generated to
+  // skip the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(&patch_site_);
+ __ andi(at, reg, 0);
+ // Always taken before patched.
+ __ BranchShort(target, eq, at, Operand(zero_reg));
+ }
+
+  // When initially emitting this, ensure that a jump is never generated to
+  // skip the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ andi(at, reg, 0);
+ // Never taken before patched.
+ __ BranchShort(target, ne, at, Operand(zero_reg));
+ }
+
+ void EmitPatchInfo() {
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+ __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o a1: the JS function object being called (i.e. ourselves)
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips64.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop-at");
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ ld(at, MemOperand(sp, receiver_offset));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ ld(a2, GlobalObjectOperand());
+ __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+
+ __ sd(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(info->IsCodePreAgingActive());
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ Dsubu(t1, sp, Operand(locals_count * kPointerSize));
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ Branch(&ok, hs, t1, Operand(a2));
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ li(a2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ __ Dsubu(sp, sp, Operand(kMaxPushes * kPointerSize));
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ sd(t1, MemOperand(sp, i * kPointerSize));
+ }
+ // Continue loop if not done.
+ __ Dsubu(a2, a2, Operand(1));
+ __ Branch(&loop_header, ne, a2, Operand(zero_reg));
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ __ Dsubu(sp, sp, Operand(remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ sd(t1, MemOperand(sp, i * kPointerSize));
+ }
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate context");
+ // Argument to NewContext is the function, which is still in a1.
+ bool need_write_barrier = true;
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(a1);
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(a1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in v0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ld(a0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextOperand(cp, var->index());
+ __ sd(a0, target);
+
+ // Update the write barrier.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, a0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ }
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(a3, a1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Daddu(a2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(a1, Operand(Smi::FromInt(num_parameters)));
+ __ Push(a3, a2, a1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+    // The stub will rewrite the receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (strict_mode() == STRICT) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+ }
+ ArgumentsAccessStub stub(isolate(), type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, v0, a1, a2);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ ASSERT(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm_,
+ masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+
+ VisitStatements(function()->body());
+
+ ASSERT(loop_depth() == 0);
+ }
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ li(a2, Operand(profiling_counter_));
+ __ ld(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Dsubu(a3, a3, Operand(Smi::FromInt(delta)));
+ __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->is_debug()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = FLAG_interrupt_budget >> 4;
+ }
+ __ li(a2, Operand(profiling_counter_));
+ __ li(a3, Operand(Smi::FromInt(reset_value)));
+ __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
+ // to make sure it is constant. Branch may emit a skip-or-jump sequence
+ // instead of the normal Branch. It seems that the "skip" part of that
+ // sequence is about as long as this Branch would be so it is safe to ignore
+ // that.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Label ok;
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // Call will emit a li t9 first, so it is safe to use the delay slot.
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
+
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in v0.
+ __ push(v0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ Branch(&ok, ge, a3, Operand(zero_reg));
+ __ push(v0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(v0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence.
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting, as we rely on the code size here.
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ masm_->mov(sp, fp);
+ int no_frame_start = masm_->pc_offset();
+ masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+ masm_->Daddu(sp, sp, Operand(sp_delta));
+ masm_->Jump(ra);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+
+#ifdef DEBUG
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceInstructions <=
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ li(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ li(result_register(), Operand(lit));
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ li(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ sd(reg, MemOperand(sp, 0));
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ Branch(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ // Push the value as the following branch can clobber at in long branch mode.
+ __ push(at);
+ __ Branch(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ push(at);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(at, value_root_index);
+ __ push(at);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ __ mov(a0, result_register());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ mov(at, zero_reg);
+ Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+}
+
+
+void FullCodeGenerator::Split(Condition cc,
+ Register lhs,
+ const Operand& rhs,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ Branch(if_true, cc, lhs, rhs);
+ } else if (if_true == fall_through) {
+ __ Branch(if_false, NegateCondition(cc), lhs, rhs);
+ } else {
+ __ Branch(if_true, cc, lhs, rhs);
+ __ Branch(if_false);
+ }
+}
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ ASSERT(var->IsStackAllocated());
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ ld(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+ Register src,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ ASSERT(!scratch0.is(src));
+ ASSERT(!scratch0.is(scratch1));
+ ASSERT(!scratch1.is(src));
+ MemOperand location = VarOperand(var, scratch0);
+ __ sd(src, location);
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ Label skip;
+ if (should_normalize) __ Branch(&skip);
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ LoadRoot(a4, Heap::kTrueValueRootIndex);
+ Split(eq, a0, Operand(a4), if_true, if_false, NULL);
+ __ bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ LoadRoot(a4, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext,
+ a1, Operand(a4));
+ __ LoadRoot(a4, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext,
+ a1, Operand(a4));
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ sd(a4, StackOperand(variable));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ sd(at, ContextOperand(cp, variable->index()));
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ li(a2, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+ __ li(a1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, a2, a1, a0);
+ } else {
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
+ __ Push(cp, a2, a1, a0);
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script());
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ sd(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ sd(result_register(), ContextOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ a2,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ __ li(a2, Operand(variable->name()));
+ __ li(a1, Operand(Smi::FromInt(NONE)));
+ __ Push(cp, a2, a1);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ ld(a1, ContextOperand(a1, variable->interface()->Index()));
+ __ ld(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ sd(a1, ContextOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ a1,
+ a3,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ // The context is the first argument.
+ __ li(a1, Operand(pairs));
+ __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
+ __ Push(cp, a1, a0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+ __ mov(a0, result_register()); // CompareStub requires args in a0, a1.
+
+ // Perform the comparison as if via '==='.
+ __ ld(a1, MemOperand(sp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ or_(a2, a1, a0);
+ patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+
+ __ Branch(&next_test, ne, a1, Operand(a0));
+ __ Drop(1); // Switch value is no longer needed.
+ __ Branch(clause->body_target());
+
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ Branch(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&next_test, ne, v0, Operand(at));
+ __ Drop(1);
+ __ Branch(clause->body_target());
+ __ bind(&skip);
+
+ __ Branch(&next_test, ne, v0, Operand(zero_reg));
+ __ Drop(1); // Switch value is no longer needed.
+ __ Branch(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ Branch(nested_statement.break_label());
+ } else {
+ __ Branch(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&exit, eq, a0, Operand(at));
+ Register null_value = a5;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Branch(&exit, eq, a0, Operand(null_value));
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+ __ mov(a0, v0);
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(a0, &convert);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ bind(&convert);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0);
+ __ bind(&done_convert);
+ __ push(a0);
+
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ __ CheckEnumCache(null_value, &call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ ld(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ Branch(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(a0); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kMetaMapRootIndex);
+ __ Branch(&fixed_array, ne, a2, Operand(at));
+
+ // We got a map in register v0. Get the enumeration cache from it.
+ Label no_descriptors;
+ __ bind(&use_cache);
+
+ __ EnumLength(a1, v0);
+ __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadInstanceDescriptors(v0, a2);
+ __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
+ __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ li(a0, Operand(Smi::FromInt(0)));
+ // Push map, enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(v0, a2, a1, a0);
+ __ jmp(&loop);
+
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ jmp(&exit);
+
+ // We got a fixed array in register v0. Iterate through that.
+ Label non_proxy;
+ __ bind(&fixed_array);
+
+ __ li(a1, FeedbackVector());
+ __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
+
+ __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
+ __ ld(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
+ __ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ Push(a1, v0); // Smi and array
+ __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ li(a0, Operand(Smi::FromInt(0)));
+ __ Push(a1, a0); // Fixed array length (as smi) and initial index.
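+ // Stack layout during the loop (removed with Drop(5) at the break label):
+ // sp[0]: current index (smi) sp[1]: length (smi)
+ // sp[2]: enum cache / fixed array sp[3]: map, or smi 1 (slow) / 0 (proxy)
+ // sp[4]: the enumerable object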
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ __ bind(&loop);
+ // Load the current count to a0, load the length to a1.
+ __ ld(a0, MemOperand(sp, 0 * kPointerSize));
+ __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(loop_statement.break_label(), hs, a0, Operand(a1));
+
+ // Get the current entry of the array into register a3.
+ __ ld(a2, MemOperand(sp, 2 * kPointerSize));
+ __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiScale(a4, a0, kPointerSizeLog2);
+ __ daddu(a4, a2, a4); // Array base + scaled (smi) index.
+ __ ld(a3, MemOperand(a4)); // Current entry.
+
+ // Get the expected map from the stack, or a smi marker in the
+ // permanent slow case, into register a2.
+ __ ld(a2, MemOperand(sp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ ld(a1, MemOperand(sp, 4 * kPointerSize));
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&update_each, eq, a4, Operand(a2));
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ ASSERT_EQ(Smi::FromInt(0), 0);
+ __ Branch(&update_each, eq, a2, Operand(zero_reg));
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ Push(a1, a3); // Enumerable and current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ mov(a3, result_register());
+ __ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register a3.
+ __ bind(&update_each);
+ __ mov(result_register(), a3);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_label());
+ __ pop(a0);
+ __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
+ __ push(a0);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ Branch(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_label());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
+ __ mov(a0, v0);
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
+
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
+ __ li(a2, Operand(info));
+ __ CallStub(&stub);
+ } else {
+ __ li(a0, Operand(info));
+ __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, a0, a1);
+ __ CallRuntime(Runtime::kNewClosure, 3);
+ }
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = a1;
+ Register temp = a2;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ ld(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ }
+ // Load next context in chain.
+ __ ld(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ if (!current.is(next)) {
+ __ Move(next, current);
+ }
+ __ bind(&loop);
+ // Terminate at native context.
+ __ ld(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ LoadRoot(a4, Heap::kNativeContextMapRootIndex);
+ __ Branch(&fast, eq, temp, Operand(a4));
+ // Check that extension is NULL.
+ __ ld(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Load next context in chain.
+ __ ld(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ Branch(&loop);
+ __ bind(&fast);
+ }
+
+ __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadIC::NameRegister(), Operand(var->name()));
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = cp;
+ Register next = a3;
+ Register temp = a4;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ }
+ __ ld(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ Branch(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ ld(v0, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq.
+ if (local->mode() == CONST_LEGACY) {
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
+ } else { // LET || CONST
+ __ Branch(done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+ }
+ __ Branch(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ Global variable");
+ // Use inline caching. The variable name is passed in the load IC's
+ // name register and the global object (receiver) in its receiver register.
+ __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadIC::NameRegister(), Operand(var->name()));
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(v0);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(v0, var);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq.
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST_LEGACY);
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
+ }
+ context()->Plug(v0);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ bind(&slow);
+ __ li(a1, Operand(var->name()));
+ __ Push(cp, a1); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ bind(&done);
+ context()->Plug(v0);
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // a5 = materialized value (RegExp literal)
+ // a4 = JS function, literals array
+ // a3 = literal index
+ // a2 = RegExp pattern
+ // a1 = RegExp flags
+ // a0 = RegExp literal clone
+ __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(a4, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ ld(a5, FieldMemOperand(a4, literal_offset));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&materialized, ne, a5, Operand(at));
+
+ // Create regexp literal using runtime function.
+ // Result will be in v0.
+ __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a2, Operand(expr->pattern()));
+ __ li(a1, Operand(expr->flags()));
+ __ Push(a4, a3, a2, a1);
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(a5, v0);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ Push(a5, a0);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ pop(a5);
+
+ __ bind(&allocated);
+
+ // After this, registers are used as follows:
+ // v0: Newly allocated regexp.
+ // a5: Materialized regexp.
+ // a2: temp.
+ __ CopyFields(v0, a5, a2.bit(), size / kPointerSize);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ push(a1);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(constant_properties));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ li(a0, Operand(Smi::FromInt(flags)));
+ int properties_count = constant_properties->length() / 2;
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(a3, a2, a1, a0);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ } else {
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
+ __ CallStub(&stub);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in v0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(v0); // Save result on stack.
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ mov(a0, result_register());
+ __ li(a2, Operand(key->value()));
+ __ ld(a1, MemOperand(sp));
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Duplicate receiver on stack.
+ __ ld(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
+ __ push(a0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ ld(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ ld(a0, MemOperand(sp)); // Duplicate receiver.
+ __ push(a0);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ ld(a0, MemOperand(sp));
+ __ push(a0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(v0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements =
+ IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ mov(a0, result_register());
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(constant_elements));
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+ __ li(a0, Operand(Smi::FromInt(flags)));
+ __ Push(a3, a2, a1, a0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ } else {
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ push(v0); // array literal
+ __ Push(Smi::FromInt(expr->literal_index()));
+ result_saved = true;
+ }
+
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ ld(a6, MemOperand(sp, kPointerSize)); // Copy of array literal.
+ __ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset));
+ __ sd(result_register(), FieldMemOperand(a1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(a1, offset, result_register(), a2,
+ kRAHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ li(a3, Operand(Smi::FromInt(i)));
+ __ mov(a0, result_register());
+ StoreArrayLiteralElementStub stub(isolate());
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+ if (result_saved) {
+ __ Pop(); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(v0);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ Assignment");
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the register.
+ VisitForStackValue(property->obj());
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ // We need the key and receiver both on the stack and in the load IC's
+ // name and receiver registers.
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
+ __ ld(LoadIC::NameRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ push(v0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+ __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+ __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ Branch(&post_runtime, eq, sp, Operand(a1));
+ __ push(v0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ __ sd(a1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call;
+ Register load_receiver = LoadIC::ReceiverRegister();
+ Register load_name = LoadIC::NameRegister();
+ // Initial send value is undefined.
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ __ mov(a0, v0);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
+ __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(a2, a3, a0); // "throw", iter, except
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(a0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(a0); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ mov(a0, v0);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ ld(a0, MemOperand(sp, generator_object_depth));
+ __ push(a0); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
+ __ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
+ __ sd(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(v0); // result
+ EmitReturnSequence();
+ __ mov(a0, v0);
+ __ bind(&l_resume); // received in a0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ bind(&l_next);
+ __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
+ __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(load_name, a3, a0); // "next", iter, received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ __ ld(load_receiver, MemOperand(sp, kPointerSize));
+ __ ld(load_name, MemOperand(sp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(a0, v0);
+ __ mov(a1, a0);
+ __ sd(a1, MemOperand(sp, 2 * kPointerSize));
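+ // The method returned by the IC overwrites the name slot on the stack; that
+ // copy is dropped again right after the call (see Drop(1) below).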
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ Move(load_receiver, v0);
+
+ __ push(load_receiver); // save result
+ __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
+ CallLoadIC(NOT_CONTEXTUAL); // v0=result.done
+ __ mov(a0, v0);
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ Branch(&l_try, eq, v0, Operand(zero_reg));
+
+ // result.value
+ __ pop(load_receiver); // result
+ __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
+ CallLoadIC(NOT_CONTEXTUAL); // v0=result.value
+ context()->DropAndPlug(2, v0); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in a0, and is ultimately read by the resumed generator, as
+ // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // a1 will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(a1);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ Branch(&closed_state, eq, a3, Operand(zero_reg));
+ __ Branch(&wrong_state, lt, a3, Operand(zero_reg));
+
+ // Load suspended function and context.
+ __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+ __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ push(a2);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ // The argument count is stored as int32_t on 64-bit platforms.
+ // TODO(plind): Smi on 32-bit platforms.
+ __ lw(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+ Label push_argument_holes, push_frame;
+ __ bind(&push_argument_holes);
+ __ Dsubu(a3, a3, Operand(1));
+ __ Branch(&push_frame, lt, a3, Operand(zero_reg));
+ __ push(a2);
+ __ jmp(&push_argument_holes);
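+ // The loop above pushes one hole per formal parameter, then branches to
+ // push_frame once a3 goes negative.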
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ Call(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ // ra = return address.
+ // fp = caller's frame pointer.
+ // cp = callee's context,
+ // a4 = callee's JS function.
+ __ Push(ra, fp, cp, a4);
+ // Adjust FP to point to saved FP.
+ __ Daddu(fp, sp, 2 * kPointerSize);
+
+ // Load the operand stack size.
+ __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+ __ ld(a3, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ __ SmiUntag(a3);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ Branch(&slow_resume, ne, a3, Operand(zero_reg));
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kCodeEntryOffset));
+ __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(a2);
+ __ Daddu(a3, a3, Operand(a2));
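+ // a3 now holds the resume address: the code entry plus the saved
+ // (untagged) continuation offset.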
+ __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(a3);
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ Dsubu(a3, a3, Operand(1));
+ __ Branch(&call_resume, lt, a3, Operand(zero_reg));
+ __ push(a2);
+ __ Branch(&push_operand_holes);
+ __ bind(&call_resume);
+ ASSERT(!result_register().is(a1));
+ __ Push(a1, result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ stop("not-reached");
+
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ jmp(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(a1);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+
+ __ Allocate(map->instance_size(), v0, a2, a3, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ ld(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ li(a1, Operand(map));
+ __ pop(a2);
+ __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ li(a4, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sd(a2,
+ FieldMemOperand(v0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ sd(a3,
+ FieldMemOperand(v0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(v0, JSGeneratorObject::kResultValuePropertyOffset,
+ a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ li(LoadIC::NameRegister(), Operand(key->value()));
+ // Call load IC. It has register arguments receiver and property.
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ // Call keyed load IC. It has register arguments receiver and key.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+
+ // Get the arguments.
+ Register left = a1;
+ Register right = a0;
+ __ pop(left);
+ __ mov(a0, result_register());
+
+ // Perform combined smi check on both operands.
+ __ Or(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ // Smi case. This code works the same way as the smi-smi case in the
+ // type-recording binary operation stub (BinaryOpICStub above).
+ switch (op) {
+ case Token::SAR:
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ dsrav(right, left, scratch1);
+ __ And(v0, right, Operand(0xffffffff00000000L));
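+ // On MIPS64 the 32-bit smi payload occupies the upper word, so masking
+ // off the low 32 bits leaves the shifted value re-tagged as a smi in v0.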
+ break;
+ case Token::SHL: {
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ dsllv(scratch1, scratch1, scratch2);
+ __ SmiTag(v0, scratch1);
+ break;
+ }
+ case Token::SHR: {
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ dsrlv(scratch1, scratch1, scratch2);
+ __ And(scratch2, scratch1, 0x80000000);
+ __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ break;
+ }
+ case Token::ADD:
+ __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ BranchOnOverflow(&stub_call, scratch1);
+ break;
+ case Token::SUB:
+ __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ BranchOnOverflow(&stub_call, scratch1);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(scratch1, right);
+ __ Dmult(left, scratch1);
+ __ mflo(scratch1);
+ __ mfhi(scratch2);
+ __ dsra32(scratch1, scratch1, 31);
+ __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
+ __ mflo(v0);
+ __ Branch(&done, ne, v0, Operand(zero_reg));
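+ // The product is zero: if either operand is negative the result would be
+ // -0, which is not representable as a smi, so defer to the stub below;
+ // otherwise fall through and return smi zero.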
+ __ Daddu(scratch2, right, left);
+ __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Or(v0, left, Operand(right));
+ break;
+ case Token::BIT_AND:
+ __ And(v0, left, Operand(right));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(v0, left, Operand(right));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ mov(a0, result_register());
+ __ pop(a1);
+ BinaryOpICStub stub(isolate(), op, mode);
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ ASSERT(expr->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ push(result_register()); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ mov(a1, result_register());
+ __ pop(a0); // Restore value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ push(result_register()); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(a1, result_register());
+ __ Pop(a0, a2); // a0 = restored value.
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ sd(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
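+ // The callers compute `location` via VarOperand(var, a1), so for context
+ // slots a1 is expected to hold the context object used below.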
+ __ Move(a3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ li(a1, Operand(name));
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ mov(a0, result_register());
+ __ li(a2, Operand(var->name()));
+ __ ld(a1, GlobalObjectOperand());
+ CallStoreIC();
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ li(a0, Operand(var->name()));
+ __ Push(v0, cp, a0); // Value, context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, a1);
+ __ ld(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, a1);
+ __ ld(a3, location);
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ Branch(&assign, ne, a3, Operand(a4));
+ __ li(a3, Operand(var->name()));
+ __ push(a3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ // Perform the assignment.
+ __ bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
+ MemOperand location = VarOperand(var, a1);
+ if (generate_debug_code_ && op == Token::INIT_LET) {
+ // Check for an uninitialized let binding.
+ __ ld(a2, location);
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization, a2, Operand(a4));
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->IsLiteral());
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(a0, result_register()); // Load the value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->value()));
+ __ pop(a1);
+
+ CallStoreIC(expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // Call keyed store IC.
+ // The arguments are:
+ // - a0 is the value,
+ // - a1 is the key,
+ // - a2 is the receiver.
+ __ mov(a0, result_register());
+ __ Pop(a2, a1); // a1 = key.
+
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadIC::ReceiverRegister(), v0);
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(v0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadIC::NameRegister(), v0);
+ __ pop(LoadIC::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(v0);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId id) {
+ ic_total_count_++;
+ __ Call(code, RelocInfo::CODE_TARGET, id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ ld(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sd(v0, MemOperand(sp, kPointerSize));
+ }
+
+ EmitCall(expr, call_type);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadIC::NameRegister(), v0);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ ld(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sd(v0, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, CallIC::METHOD);
+}
+
+
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ // a6: copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ ld(a6, MemOperand(sp, arg_count * kPointerSize));
+ } else {
+ __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+ }
+
+ // a5: the receiver of the enclosing function.
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ ld(a5, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // a4: the strict mode.
+ __ li(a4, Operand(Smi::FromInt(strict_mode())));
+
+ // a1: the start position of the scope the call resides in.
+ __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
+
+ // Do the runtime call.
+ __ Push(a6, a5, a4, a1);
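+ // Together with the copy of the function pushed by the caller, this makes
+ // up the five arguments the runtime function expects.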
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ { PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in v0 (function) and
+ // v1 (receiver). Touch up the stack with the right values.
+ __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ sd(v1, MemOperand(sp, arg_count * kPointerSize));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in v0)
+ // and the object holding it (returned in v1).
+ ASSERT(!context_register().is(a2));
+ __ li(a2, Operand(proxy->name()));
+ __ Push(context_register(), a2);
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ Push(v0, v1); // Function, receiver.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ Branch(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(v0);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing undefined to the call function stub.
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ push(a1);
+ __ bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (property->key()->IsPropertyName()) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
+ } else {
+ ASSERT(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ push(a1);
+ // Emit function call.
+ EmitCall(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into a1 and a0.
+ __ li(a0, Operand(arg_count));
+ __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ SmiTst(v0, a4);
+ Split(eq, a4, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ NonNegativeSmiTst(v0, at);
+ Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(if_false, ne, at, Operand(zero_reg));
+ __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ AssertNotSmi(v0);
+
+ __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ And(a4, a4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ Branch(&skip_lookup, ne, a4, Operand(zero_reg));
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ ld(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(a4, Heap::kHashTableMapRootIndex);
+ __ Branch(if_false, eq, a2, Operand(a4));
+
+ // Look for valueOf name in the descriptor array, and indicate false if
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(a3, a1);
+ __ Branch(&done, eq, a3, Operand(zero_reg));
+
+ __ LoadInstanceDescriptors(a1, a4);
+ // a4: descriptor array.
+ // a3: valid entries in the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+  // The MIPS32 port asserts kPointerSize == 4 here; pointers are 8 bytes on
+  // MIPS64, so that assertion is intentionally omitted.
+ __ li(at, Operand(DescriptorArray::kDescriptorSize));
+ __ Dmul(a3, a3, at);
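+  // a3: number of descriptor-array slots covered by the valid descriptors
+  //     (each descriptor occupies kDescriptorSize slots).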
+ // Calculate location of the first key name.
+ __ Daddu(a4, a4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+ // Calculate the end of the descriptor array.
+ __ mov(a2, a4);
+ __ dsll(a5, a3, kPointerSizeLog2);
+ __ Daddu(a2, a2, a5);
+
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // string "valueOf" the result is false.
+ // The use of a6 to store the valueOf string assumes that it is not otherwise
+ // used in the loop below.
+ __ li(a6, Operand(isolate()->factory()->value_of_string()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ld(a3, MemOperand(a4, 0));
+ __ Branch(if_false, eq, a3, Operand(a6));
+ __ Daddu(a4, a4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ bind(&entry);
+ __ Branch(&loop, ne, a4, Operand(a2));
+
+ __ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If it is not, the result
+  // is false.
+ __ ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ __ JumpIfSmi(a2, if_false);
+ __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ ld(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ld(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
+ __ ld(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(a3), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a2);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(if_false);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
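+  // -0.0 is a heap number whose upper (sign/exponent) word is 0x80000000 and
+  // whose lower (mantissa) word is zero; compare both 32-bit halves.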
+ __ lwu(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ lwu(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ li(a4, 0x80000000);
+ Label not_nan;
+  __ Branch(&not_nan, ne, a2, Operand(a4));
+ __ mov(a4, zero_reg);
+ __ mov(a2, a1);
+  __ bind(&not_nan);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(a4), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_ARRAY_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne,
+ a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ pop(a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in a1 and the formal
+ // parameter count in a0.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a1, v0);
+ __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ Label exit;
+ // Get the number of formal parameters.
+ __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&exit, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ ld(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(v0, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ GetObjectType(v0, v0, a1); // Map is now in v0.
+ __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ ld(v0, FieldMemOperand(v0, Map::kConstructorOffset));
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+ // v0 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ ld(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ Branch(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
+ __ jmp(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ LoadRoot(v0, Heap::kObject_stringRootIndex);
+ __ jmp(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(v0, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(v0, &done);
+ // If the object is not a value type, return the object.
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
+
+ __ ld(v0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = v0;
+ Register result = v0;
+ Register scratch0 = t1;
+ Register scratch1 = a1;
+
+  __ JumpIfSmi(object, &not_date_object);
+ __ GetObjectType(object, scratch1, scratch1);
+  __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE));
+
+ if (index->value() == 0) {
+ __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
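+      // Cached date fields can be read directly from the object, but only
+      // while the global date cache stamp matches the stamp stored on it.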
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ li(scratch1, Operand(stamp));
+ __ ld(scratch1, MemOperand(scratch1));
+ __ ld(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Branch(&runtime, ne, scratch1, Operand(scratch0));
+ __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch1);
+ __ li(a1, Operand(index));
+ __ Move(a0, object);
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ jmp(&done);
+ }
+
+  __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = v0;
+ Register index = a1;
+ Register value = a2;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
+
+ if (FLAG_debug_code) {
+ __ SmiTst(value, at);
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+ __ SmiTst(index, at);
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+ __ SmiUntag(index, index);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ Register scratch = t1;
+ __ EmitSeqStringSetCharCheck(
+ string, index, value, scratch, one_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value, value);
+ __ Daddu(at,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiUntag(index);
+ __ Daddu(at, at, index);
+ __ sb(value, MemOperand(at));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = v0;
+ Register index = a1;
+ Register value = a2;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
+
+ if (FLAG_debug_code) {
+ __ SmiTst(value, at);
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+ __ SmiTst(index, at);
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+ __ SmiUntag(index, index);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ Register scratch = t1;
+ __ EmitSeqStringSetCharCheck(
+ string, index, value, scratch, two_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value, value);
+ __ Daddu(at,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
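+  // On MIPS64 smis are stored in the upper 32 bits, so shifting right by 31
+  // untags the index and scales it by 2 for two-byte characters in one step.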
+ __ dsra(index, index, 32 - 1);
+ __ Daddu(at, at, index);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ sh(value, MemOperand(at));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ pop(a1); // v0 = value. a1 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(a1, &done);
+
+ // If the object is not a value type, return the value.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
+
+ // Store the value.
+ __ sd(v0, FieldMemOperand(a1, JSValue::kValueOffset));
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ mov(a2, v0);
+ __ RecordWriteField(
+ a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument into a0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
+
+ NumberToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(v0, a1);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(a1);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+ __ mov(a0, result_register());
+
+ Register object = a1;
+ Register index = a0;
+ Register result = v0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+ __ mov(a0, result_register());
+
+ Register object = a1;
+ Register index = a0;
+ Register scratch = a3;
+ Register result = v0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ li(result, Operand(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ pop(a1);
+ __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(v0, &runtime);
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+ // InvokeFunction requires the function in a1. Move it in there.
+ __ mov(a1, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&runtime);
+ __ push(v0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ RegExpConstructResultStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ mov(a0, result_register());
+ __ pop(a1);
+ __ pop(a2);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(v0);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = v0;
+ Register cache = a1;
+ __ ld(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ld(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+ __ ld(cache,
+ ContextOperand(
+ cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ ld(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+
+ Label done, not_found;
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
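+  // Fast path: check only the cache entry at the finger; if the key does not
+  // match, fall back to the runtime for a full lookup.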
+ __ ld(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // a2 now holds finger offset as a smi.
+ __ Daddu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // a3 now points to the start of fixed array elements.
+ __ SmiScale(at, a2, kPointerSizeLog2);
+ __ daddu(a3, a3, at);
+ // a3 now points to key of indexed element of cache.
+ __ ld(a2, MemOperand(a3));
+  __ Branch(&not_found, ne, key, Operand(a2));
+
+ __ ld(v0, MemOperand(a3, kPointerSize));
+ __ Branch(&done);
+
+  __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
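+  // The hash field caches an array index iff the bits selected by
+  // kContainsCachedArrayIndexMask are all zero.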
+ __ lwu(a0, FieldMemOperand(v0, String::kHashFieldOffset));
+ __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(v0);
+
+ __ lwu(v0, FieldMemOperand(v0, String::kHashFieldOffset));
+ __ IndexFromHash(v0, v0);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = v0;
+ Register elements = no_reg; // Will be v0.
+ Register result = no_reg; // Will be v0.
+ Register separator = a1;
+ Register array_length = a2;
+ Register result_pos = no_reg; // Will be a2.
+ Register string_length = a3;
+ Register string = a4;
+ Register element = a5;
+ Register elements_end = a6;
+ Register scratch1 = a7;
+ Register scratch2 = t1;
+ Register scratch3 = t0;
+
+ // Separator operand is on the stack.
+ __ pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ GetObjectType(array, scratch1, scratch2);
+ __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ ld(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length);
+ __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
+ __ Branch(&done);
+
+ __ bind(&non_trivial_array);
+
+ // Get the FixedArray containing array's elements.
+ elements = array;
+ __ ld(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ array = no_reg; // End of array's live range.
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ mov(string_length, zero_reg);
+ __ Daddu(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ dsll(elements_end, array_length, kPointerSizeLog2);
+ __ Daddu(elements_end, element, elements_end);
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (generate_debug_code_) {
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin,
+ array_length, Operand(zero_reg));
+ }
+ __ bind(&loop);
+ __ ld(string, MemOperand(element));
+ __ Daddu(element, element, kPointerSize);
+ __ JumpIfSmi(string, &bailout);
+ __ ld(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+ __ BranchOnOverflow(&bailout, scratch3);
+ __ Branch(&loop, lt, element, Operand(elements_end));
+
+ // If array_length is 1, return elements[0], a string.
+  __ Branch(&not_size_one_array, ne, array_length, Operand(1));
+ __ ld(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ Branch(&done);
+
+  __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ ld(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string. array_length is not
+ // smi but the other values are, so the result is a smi.
+ __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ Dsubu(string_length, string_length, Operand(scratch1));
+ __ SmiUntag(scratch1);
+ __ Dmult(array_length, scratch1);
+  // Check for overflow: the 128-bit product must fit in 64 bits, i.e. the
+  // high doubleword (read via mfhi) must be zero.
+ __ mfhi(scratch2);
+ __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
+ __ mflo(scratch2);
+ __ SmiUntag(string_length);
+ __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
+ __ BranchOnOverflow(&bailout, scratch3);
+
+ // Get first element in the array to free up the elements register to be used
+ // for the result.
+ __ Daddu(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateAsciiString(result,
+ string_length,
+ scratch1,
+ scratch2,
+ elements_end,
+ &bailout);
+  // Prepare for looping. Set up elements_end to point at the end of the
+  // array, and result_pos to the position in the result string where the
+  // first character will be written.
+ __ dsll(elements_end, array_length, kPointerSizeLog2);
+ __ Daddu(elements_end, element, elements_end);
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ Daddu(result_pos,
+ result,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ li(at, Operand(Smi::FromInt(1)));
+ __ Branch(&one_char_separator, eq, scratch1, Operand(at));
+ __ Branch(&long_separator, gt, scratch1, Operand(at));
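+  // Otherwise the separator is the empty string; fall through.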
+
+ // Empty separator case.
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ ld(string, MemOperand(element));
+ __ Daddu(element, element, kPointerSize);
+ __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ // One-character separator case.
+ __ bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&one_char_separator_loop_entry);
+
+ __ bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ASCII char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ sb(separator, MemOperand(result_pos));
+ __ Daddu(result_pos, result_pos, 1);
+
+ // Copy next array element to the result.
+ __ bind(&one_char_separator_loop_entry);
+ __ ld(string, MemOperand(element));
+ __ Daddu(element, element, kPointerSize);
+ __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ __ ld(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Daddu(string,
+ separator,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+
+ __ bind(&long_separator);
+ __ ld(string, MemOperand(element));
+ __ Daddu(element, element, kPointerSize);
+ __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ __ bind(&bailout);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ __ li(at, Operand(debug_is_active));
+ __ lbu(v0, MemOperand(at));
+ __ SmiTag(v0);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as the receiver.
+ Register receiver = LoadIC::ReceiverRegister();
+ __ ld(receiver, GlobalObjectOperand());
+ __ ld(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+ __ push(receiver);
+
+ // Load the function from the receiver.
+ __ li(LoadIC::NameRegister(), Operand(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ ld(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sd(v0, MemOperand(sp, kPointerSize));
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, v0);
+ } else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(v0);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ li(a1, Operand(Smi::FromInt(strict_mode())));
+ __ push(a1);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(v0);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
+ if (var->IsUnallocated()) {
+ __ ld(a2, GlobalObjectOperand());
+ __ li(a1, Operand(var->name()));
+ __ li(a0, Operand(Smi::FromInt(SLOPPY)));
+ __ Push(a2, a1, a0);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(v0);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ ASSERT(!context_register().is(a2));
+ __ li(a2, Operand(var->name()));
+ __ Push(context_register(), a2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(v0);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(v0);
+ __ jmp(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(v0);
+ __ bind(&done);
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(v0);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ li(at, Operand(Smi::FromInt(0)));
+ __ push(at);
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the register.
+ VisitForStackValue(prop->obj());
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
+ __ ld(LoadIC::NameRegister(), MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load has a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ __ mov(a0, v0);
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(v0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(v0);
+ break;
+ case NAMED_PROPERTY:
+ __ sd(v0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ sd(v0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ Register scratch1 = a1;
+ Register scratch2 = a4;
+ __ li(scratch1, Operand(Smi::FromInt(count_value)));
+ __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2);
+ __ BranchOnNoOverflow(&done, scratch2);
+ // Call stub. Undo operation first.
+ __ Move(v0, a0);
+ __ jmp(&stub_call);
+ __ bind(&slow);
+ }
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(v0);
+ break;
+ case NAMED_PROPERTY:
+ __ sd(v0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ sd(v0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ __ bind(&stub_call);
+ __ mov(a1, v0);
+ __ li(a0, Operand(Smi::FromInt(count_value)));
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ bind(&done);
+
+ // Store the value returned in v0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(v0);
+ }
+        // For all contexts except the effect context we have the result on
+        // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(a0, result_register()); // Value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
+ __ pop(a1); // Receiver.
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(v0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ mov(a0, result_register()); // Value.
+ __ Pop(a2, a1); // a1 = key, a2 = receiver.
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(v0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "[ Global variable");
+ __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadIC::NameRegister(), Operand(proxy->name()));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(v0);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ __ li(a0, Operand(proxy->name()));
+ __ Push(cp, a0);
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ bind(&done);
+
+ context()->Plug(v0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
+ __ JumpIfSmi(v0, if_true);
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->string_string())) {
+ __ JumpIfSmi(v0, if_false);
+ // Check for undetectable objects => false.
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, a1, Operand(zero_reg),
+ if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->symbol_string())) {
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, v0, a1);
+ Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(check, factory->null_string())) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->undefined_string())) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ JumpIfSmi(v0, if_false);
+ // Check for undetectable objects => true.
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->function_string())) {
+ __ JumpIfSmi(v0, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
+ Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
+ if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->object_string())) {
+ __ JumpIfSmi(v0, if_false);
+ if (!FLAG_harmony_typeof) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ }
+ // Check for JS objects => true.
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ // Check for undetectable objects => false.
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ LoadRoot(a4, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ // The stub returns 0 for true.
+ Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cc = CompareIC::ComputeCondition(op);
+ __ mov(a0, result_register());
+ __ pop(a1);
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ Or(a2, a0, Operand(a1));
+ patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+ Split(cc, a1, Operand(a0), if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ mov(a0, result_register());
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(a1, nil_value);
+ Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ ld(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(v0);
+}
+
+
+Register FullCodeGenerator::result_register() {
+ return v0;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  ASSERT(IsAligned(frame_offset, kPointerSize));
+ __ sd(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ ld(dst, ContextOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ li(at, Operand(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ ld(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(at);
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASSERT(!result_register().is(a1));
+ // Store result register while executing finally block.
+ __ push(result_register());
+  // Cook the return address in ra to the stack (smi-encoded Code* delta).
+ __ Dsubu(a1, ra, Operand(masm_->CodeObject()));
+ __ SmiTag(a1);
+
+  // Store the cooked return address while executing the finally block.
+ __ push(a1);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ li(at, Operand(pending_message_obj));
+ __ ld(a1, MemOperand(at));
+ __ push(a1);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ li(at, Operand(has_pending_message));
+ __ ld(a1, MemOperand(at));
+ __ SmiTag(a1);
+ __ push(a1);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ li(at, Operand(pending_message_script));
+ __ ld(a1, MemOperand(at));
+ __ push(a1);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(a1));
+ // Restore pending message from stack.
+ __ pop(a1);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ li(at, Operand(pending_message_script));
+ __ sd(a1, MemOperand(at));
+
+ __ pop(a1);
+ __ SmiUntag(a1);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ li(at, Operand(has_pending_message));
+ __ sd(a1, MemOperand(at));
+
+ __ pop(a1);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ li(at, Operand(pending_message_obj));
+ __ sd(a1, MemOperand(at));
+
+ // Restore result register from stack.
+ __ pop(a1);
+
+ // Uncook return address and return.
+ __ pop(result_register());
+
+ __ SmiUntag(a1);
+ __ Daddu(at, a1, Operand(masm_->CodeObject()));
+ __ Jump(at);
+}
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth,
+ int* context_length) {
+ // The macros used here must preserve the result register.
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ ld(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+ __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ PopTryHandler();
+ __ Call(finally_entry_);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 8 * kInstrSize;
+ CodePatcher patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // slt at, a3, zero_reg (in case of count based interrupts)
+ // beq at, zero_reg, ok
+ // lui t9, <interrupt stub address> upper
+ // ori t9, <interrupt stub address> u-middle
+ // dsll t9, t9, 16
+ // ori t9, <interrupt stub address> lower
+ // jalr t9
+ // nop
+ // ok-label ----- pc_after points here
+ patcher.masm()->slt(at, a3, zero_reg);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+      // daddiu at, zero_reg, 1
+ // beq at, zero_reg, ok ;; Not changed
+ // lui t9, <on-stack replacement address> upper
+ // ori t9, <on-stack replacement address> middle
+ // dsll t9, t9, 16
+ // ori t9, <on-stack replacement address> lower
+ // jalr t9 ;; Not changed
+ // nop ;; Not changed
+ // ok-label ----- pc_after points here
+ patcher.masm()->daddiu(at, zero_reg, 1);
+ break;
+ }
+ Address pc_immediate_load_address = pc - 6 * kInstrSize;
+ // Replace the stack check address in the load-immediate (6-instr sequence)
+ // with the entry address of the replacement code.
+ Assembler::set_target_address_at(pc_immediate_load_address,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 8 * kInstrSize;
+ Address pc_immediate_load_address = pc - 6 * kInstrSize;
+
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 7 * kInstrSize)));
+ if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
+ ASSERT(reinterpret_cast<uint64_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->InterruptCheck()->entry()));
+ return INTERRUPT;
+ }
+
+ ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+
+ if (reinterpret_cast<uint64_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT(reinterpret_cast<uint64_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry()));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // elements: holds the property dictionary on fall through.
+ // Scratch registers:
+  //   scratch0: used to hold the receiver map.
+  //   scratch1: used to hold the receiver instance type, receiver bit mask
+ // and elements map.
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ GetObjectType(receiver, scratch0, scratch1);
+ __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // If this assert fails, we have to check upper bound too.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
+
+  // Check that the receiver does not require access checks and does not
+  // have a named interceptor.
+ __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
+ __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor)));
+ __ Branch(miss, ne, scratch1, Operand(zero_reg));
+
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
+ __ Branch(miss, ne, scratch1, Operand(scratch0));
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name clobbering
+// one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at,
+ scratch1,
+ Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ __ ld(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask =
+ (PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY));
+ __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ sd(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mov(scratch1, value);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ Branch(slow, ne, at, Operand(zero_reg));
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode (not dictionary).
+ __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(not_fast_array, ne, scratch1, Operand(at));
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // Check that the key (index) is within bounds.
+ __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+ // Fast case: Do the load.
+ __ Daddu(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ SmiScale(at, key, kPointerSizeLog2);
+ __ daddu(at, at, scratch1);
+ __ ld(scratch2, MemOperand(at));
+
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ Branch(out_of_range, eq, scratch2, Operand(at));
+ __ mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
+ // The key is not a smi.
+ Label unique;
+ // Is it a name?
+ __ GetObjectType(key, map, hash);
+ __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+
+ // Is the string an array index, with cached numeric value?
+ __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
+ __ Branch(index_string, eq, at, Operand(zero_reg));
+
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
+ // map: key map
+ __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ And(at, hash, Operand(kIsNotInternalizedMask));
+ __ Branch(not_unique, ne, at, Operand(zero_reg));
+
+ __ bind(&unique);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // The return address is in ra.
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ ASSERT(receiver.is(a1));
+ ASSERT(name.is(a2));
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, receiver, name, a3, a4, a5, a6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  //  -- ra    : return address
+ // -- a1 : receiver
+ // -----------------------------------
+ ASSERT(a1.is(ReceiverRegister()));
+ ASSERT(a2.is(NameRegister()));
+
+ Label miss, slow;
+
+ GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, a4, &miss);
+
+ // a0: elements
+ GenerateDictionaryLoad(masm, &slow, a0, a2, v0, a3, a4);
+ __ Ret();
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+// A register that isn't one of the parameters to the load ic.
+static const Register LoadIC_TempRegister() { return a3; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in ra.
+ Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, a4);
+
+ __ mov(LoadIC_TempRegister(), ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), NameRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in ra.
+
+ __ mov(LoadIC_TempRegister(), ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), NameRegister());
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ GetObjectType(object, scratch1, scratch2);
+ __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Check that the key is a positive smi.
+ __ NonNegativeSmiTst(key, scratch1);
+ __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1,
+ scratch2,
+ arguments_map,
+ slow_case,
+ DONT_DO_SMI_CHECK);
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
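+  // The first two elements of the parameter map hold the context and the
+  // arguments backing store, so the number of mapped arguments is length - 2.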
+ __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ SmiUntag(scratch3, key);
+ __ dsll(scratch3, scratch3, kPointerSizeLog2);
+ __ Daddu(scratch3, scratch3, Operand(kOffset));
+
+ __ Daddu(scratch2, scratch1, scratch3);
+ __ ld(scratch2, MemOperand(scratch2));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ SmiUntag(scratch3, scratch2);
+ __ dsll(scratch3, scratch3, kPointerSizeLog2);
+ __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ __ Daddu(scratch2, scratch1, scratch3);
+ return MemOperand(scratch2);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ __ CheckMap(backing_store,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ slow_case,
+ DONT_DO_SMI_CHECK);
+ __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
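+  // Compute the address of element 'key' in the arguments backing store.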
+ __ SmiUntag(scratch, key);
+ __ dsll(scratch, scratch, kPointerSizeLog2);
+ __ Daddu(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Daddu(scratch, backing_store, scratch);
+ return MemOperand(scratch);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // The return address is in ra.
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ ASSERT(receiver.is(a1));
+ ASSERT(key.is(a2));
+
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(
+ masm, receiver, key, a0, a3, a4, ¬in, &slow);
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, mapped_location);
+ __ bind(¬in);
+  // The unmapped lookup expects that the parameter map is in a0.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
+ __ ld(a0, unmapped_location);
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ __ Branch(&slow, eq, a0, Operand(a3));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+  //  -- ra     : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Store address is returned in register (of MemOperand) mapped_location.
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a2, a1, a3, a4, a5, ¬in, &slow);
+ __ sd(a0, mapped_location);
+ __ mov(t1, a0);
+ ASSERT_EQ(mapped_location.offset(), 0);
+ __ RecordWrite(a3, mapped_location.rm(), t1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in a3.
+ // Store address is returned in register (of MemOperand) unmapped_location.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a1, a3, a4, &slow);
+ __ sd(a0, unmapped_location);
+ __ mov(t1, a0);
+ ASSERT_EQ(unmapped_location.offset(), 0);
+ __ RecordWrite(a3, unmapped_location.rm(), t1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in ra.
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+
+ __ Push(ReceiverRegister(), NameRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+// IC register specifications
+const Register LoadIC::ReceiverRegister() { return a1; }
+const Register LoadIC::NameRegister() { return a2; }
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in ra.
+
+ __ Push(ReceiverRegister(), NameRegister());
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // The return address is in ra.
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ ASSERT(key.is(a2));
+ ASSERT(receiver.is(a1));
+
+ Isolate* isolate = masm->isolate();
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_name);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(a0, a3, &check_number_dictionary);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
+ __ Ret();
+
+ __ bind(&check_number_dictionary);
+ __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));
+
+ // Check whether the elements is a number dictionary.
+ // a3: elements map
+ // a4: elements
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&slow, ne, a3, Operand(at));
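+  // The key is a smi; on MIPS64 the smi value lives in the upper 32 bits, so
+  // an arithmetic shift right by 32 yields the untagged integer index.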
+ __ dsra32(a0, key, 0);
+ __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
+ __ Ret();
+
+ // Slow case, key and receiver still in a2 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+ 1,
+ a4,
+ a3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow);
+
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&probe_dictionary, eq, a4, Operand(at));
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
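+  // Zero-extend the low 32 bits of the map pointer into a3 before hashing.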
+ __ dsll32(a3, a0, 0);
+ __ dsrl32(a3, a3, 0);
+ __ dsra(a3, a3, KeyedLookupCache::kMapHashShift);
+ __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ dsra(at, a4, Name::kHashShift);
+ __ xor_(a3, a3, at);
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(a3, a3, Operand(mask));
+
+ // Load the key (consisting of map and unique name) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+ __ li(a4, Operand(cache_keys));
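+  // Each cache entry is a (map, name) pair of pointers, hence the extra shift.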
+ __ dsll(at, a3, kPointerSizeLog2 + 1);
+ __ daddu(a4, a4, at);
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ ld(a5, MemOperand(a4, kPointerSize * i * 2));
+ __ Branch(&try_next_entry, ne, a0, Operand(a5));
+ __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1)));
+ __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5));
+ __ bind(&try_next_entry);
+ }
+
+ __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2));
+ __ Branch(&slow, ne, a0, Operand(a5));
+ __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
+ __ Branch(&slow, ne, key, Operand(a5));
+
+ // Get field offset.
+ // a0 : receiver's map
+ // a3 : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ __ li(a4, Operand(cache_field_offsets));
+
+ // TODO(yy) This data structure does NOT follow natural pointer size.
+ __ dsll(at, a3, kPointerSizeLog2 - 1);
+ __ daddu(at, a4, at);
+ __ lwu(a5, MemOperand(at, kPointerSize / 2 * i));
+
+ __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
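+    // Field offsets smaller than the in-object property count refer to
+    // in-object fields; larger offsets index into the property backing store.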
+ __ Dsubu(a5, a5, a6);
+ __ Branch(&property_array_property, ge, a5, Operand(zero_reg));
+ if (i != 0) {
+ __ Branch(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ bind(&load_in_object_property);
+ __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+ // Index from start of object.
+ __ daddu(a6, a6, a5);
+ // Remove the heap tag.
+ __ Dsubu(receiver, receiver, Operand(kHeapObjectTag));
+ __ dsll(at, a6, kPointerSizeLog2);
+ __ daddu(at, receiver, at);
+ __ ld(v0, MemOperand(at));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a4,
+ a3);
+ __ Ret();
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ dsll(v0, a5, kPointerSizeLog2);
+ __ Daddu(v0, v0, a1);
+ __ ld(v0, MemOperand(v0));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a4,
+ a3);
+ __ Ret();
+
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // a3: elements
+ __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
+ // Load the property to v0.
+ GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1,
+ a4,
+ a3);
+ __ Ret();
+
+ __ bind(&index_name);
+ __ IndexFromHash(a3, key);
+ // Now jump to the place where smi keys are handled.
+ __ Branch(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // Return address is in ra.
+ Label miss;
+
+ Register receiver = ReceiverRegister();
+ Register index = NameRegister();
+ Register scratch = a3;
+ Register result = v0;
+ ASSERT(!scratch.is(receiver) && !scratch.is(index));
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a2, a1, a0);
+ __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
+ __ Push(a0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ Register scratch_value = a4;
+ Register address = a5;
+ if (check_map == kCheckMap) {
+ __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(fast_double, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiScale(at, key, kPointerSizeLog2);
+ __ daddu(address, address, at);
+ __ ld(scratch_value, MemOperand(address));
+
+ __ Branch(&holecheck_passed1, ne, scratch_value,
+ Operand(masm->isolate()->factory()->the_hole_value()));
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ Daddu(address, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiScale(scratch_value, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch_value);
+ __ sd(value, MemOperand(address));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Daddu(address, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiScale(scratch_value, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch_value);
+ __ sd(value, MemOperand(address));
+ // Update write barrier for the elements array address.
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ scratch_value,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+ __ Branch(slow, ne, elements_map, Operand(at));
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
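+  // The hole is encoded as a NaN whose upper 32 bits equal kHoleNanUpper32;
+  // the offset below addresses that upper word of the double.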
+ __ Daddu(address, elements,
+ Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
+ - kHeapObjectTag));
+ __ SmiScale(at, key, kPointerSizeLog2);
+ __ daddu(address, address, at);
+ __ lw(scratch_value, MemOperand(address));
+ __ Branch(&fast_double_without_map_check, ne, scratch_value,
+ Operand(kHoleNanUpper32));
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ elements, // Overwritten.
+ a3, // Scratch regs...
+ a4,
+ a5,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&non_double_value, ne, a4, Operand(at));
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ a4,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ a4,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+ // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ a4,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
+
+ // Register usage.
+ Register value = a0;
+ Register key = a1;
+ Register receiver = a2;
+ Register receiver_map = a3;
+ Register elements_map = a6;
+ Register elements = a7; // Elements array of the receiver.
+ // a4 and a5 are used as general scratch registers.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+ // Get the map of the object.
+ __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded |
+ 1 << Map::kIsObserved));
+ __ Branch(&slow, ne, a4, Operand(zero_reg));
+ // Check if the object is a JS array or not.
+ __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
+ // Check that the object is some kind of JSObject.
+ __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));
+
+ // Object case: Check key against length in the elements array.
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&fast_object, lo, key, Operand(a4));
+
+ // Slow case, handle jump to runtime.
+ __ bind(&slow);
+ // Entry registers are intact.
+ // a0: value.
+ // a1: key.
+ // a2: receiver.
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // Condition code from comparing key and array length is still available.
+ // Only support writing to array[array.length].
+ __ Branch(&slow, ne, key, Operand(a4));
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&slow, hs, key, Operand(a4));
+ __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(
+ &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
+
+ __ jmp(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+ __ jmp(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+ __ bind(&array);
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&extra, hs, key, Operand(a4));
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // Return address is in ra.
+ Label slow;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ Register scratch1 = a3;
+ Register scratch2 = a4;
+ ASSERT(!scratch1.is(receiver) && !scratch1.is(key));
+ ASSERT(!scratch2.is(receiver) && !scratch2.is(key));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ And(a4, key, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, a4, Operand(zero_reg));
+
+ // Get the map of the receiver.
+ __ ld(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
+ __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor));
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a2, a1, a0);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a2 : key
+ // -- a1 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a1, a2, a0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ // We can't use MultiPush as the order of the registers is important.
+ __ Push(a2, a1, a0);
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, a4, a5, a6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameDictionaryReceiverCheck(masm, a1, a3, a4, a5, &miss);
+
+ GenerateDictionaryStore(masm, &miss, a3, a2, a0, a4, a5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(a0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ return Assembler::IsAndImmediate(instr) &&
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ if (!(Assembler::IsAndImmediate(instr) &&
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
+ return;
+ }
+
+ // The delta to the start of the map check instruction and the
+  // condition code used at the patched jump.
+ int delta = Assembler::GetImmediate16(instr);
+ delta += Assembler::GetRs(instr) * kImm16Mask;
+ // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+ // signals that nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
+ address, andi_instruction_address, delta);
+ }
+
+ Address patch_address =
+ andi_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ // This is patching a conditional "jump if not smi/jump if smi" site.
+ // Enabling by changing from
+ // andi at, rx, 0
+ // Branch <target>, eq, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, ne, at, Operand(zero_reg)
+ // and vice-versa to be disabled again.
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+ ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
+ } else {
+ ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+ ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+ patcher.masm()->andi(at, reg, 0);
+ }
+ ASSERT(Assembler::IsBranch(branch_instr));
+ if (Assembler::IsBeq(branch_instr)) {
+ patcher.ChangeBranchCondition(ne);
+ } else {
+ ASSERT(Assembler::IsBne(branch_instr));
+ patcher.ChangeBranchCondition(eq);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/code-stubs.h"
+#include "src/hydrogen-osr.h"
+#include "src/mips64/lithium-codegen-mips64.h"
+#include "src/mips64/lithium-gap-resolver-mips64.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) { }
+ virtual ~SafepointGenerator() {}
+
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+
+ virtual void AfterCall() const V8_OVERRIDE {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+ PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
+#endif
+
+ // a1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+    // ra: Caller's pc.
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ ld(a2, MemOperand(sp, receiver_offset));
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ ld(a2, GlobalObjectOperand());
+ __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+
+ __ sd(a2, MemOperand(sp, receiver_offset));
+
+ __ bind(&ok);
+ }
+ }
+
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ if (FLAG_debug_code) {
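+      // In debug builds, fill the reserved slots with kSlotsZapValue so that
+      // reads of uninitialized stack slots are easy to spot.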
+ __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+ __ Push(a0, a1);
+ __ Daddu(a0, sp, Operand(slots * kPointerSize));
+ __ li(a1, Operand(kSlotsZapValue));
+ Label loop;
+ __ bind(&loop);
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ sd(a1, MemOperand(a0, 2 * kPointerSize));
+ __ Branch(&loop, ne, a0, Operand(sp));
+ __ Pop(a0, a1);
+ } else {
+ __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+ }
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+
+ // Possibly allocate a local context.
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
+ // Argument to NewContext is the function, which is in a1.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(a1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in both v0 and cp. It replaces the context passed
+    // to us. It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ld(a0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextOperand(cp, var->index());
+ __ sd(a0, target);
+ // Update the write barrier. This clobbers a3 and a0.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, a0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Daddu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ Comment(";;; Deferred code");
+ }
+ code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ __ pop(at);
+ __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ frame_is_built_ = false;
+ }
+ __ jmp(code->exit());
+ }
+ }
+ // Deferred code is the last part of the instruction sequence. Mark
+ // the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+ if (deopt_jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label table_start;
+ __ bind(&table_start);
+ Label needs_frame;
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ bind(&deopt_jump_table_[i].label);
+ Address entry = deopt_jump_table_[i].address;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (deopt_jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
+ if (needs_frame.is_bound()) {
+ __ Branch(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Daddu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Call(t9);
+ }
+ } else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+ __ Call(t9);
+ }
+ }
+ __ RecordComment("]");
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+ return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ ASSERT(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+ if (op->IsRegister()) {
+ return ToRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle(isolate());
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
+ } else if (r.IsDouble()) {
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+ } else {
+ ASSERT(r.IsSmiOrTagged());
+ __ li(scratch, literal);
+ }
+ return scratch;
+ } else if (op->IsStackSlot()) {
+ __ ld(scratch, ToMemOperand(op));
+ return scratch;
+ }
+ UNREACHABLE();
+ return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ FloatRegister flt_scratch,
+ DoubleRegister dbl_scratch) {
+ if (op->IsDoubleRegister()) {
+ return ToDoubleRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle(isolate());
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+ __ mtc1(at, flt_scratch);
+ __ cvt_d_w(dbl_scratch, flt_scratch);
+ return dbl_scratch;
+ } else if (r.IsDouble()) {
+ Abort(kUnsupportedDoubleImmediate);
+ } else if (r.IsTagged()) {
+ Abort(kUnsupportedTaggedImmediate);
+ }
+ } else if (op->IsStackSlot()) {
+ MemOperand mem_op = ToMemOperand(op);
+ __ ldc1(dbl_scratch, mem_op);
+ return dbl_scratch;
+ }
+ UNREACHABLE();
+ return dbl_scratch;
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ // return ToRepresentation(op, Representation::Integer32());
+ HConstant* constant = chunk_->LookupConstant(op);
+ return constant->Integer32Value();
+}
+
+
+int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int64_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ ASSERT(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand((int64_t)0);
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand((int64_t)0);
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+    // There is no eager frame here, so retrieve the parameter relative to
+    // the stack pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ ASSERT(op->IsDoubleStackSlot());
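+  // A double stack slot is eight bytes wide; the "high" operand addresses
+  // its upper 32-bit word, i.e. the slot offset advanced by kIntSize on
+  // this little-endian target.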
+ if (NeedsEagerFrame()) {
+ // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
+ } else {
+    // There is no eager frame here, so retrieve the parameter relative to
+    // the stack pointer.
+ // return MemOperand(
+ // sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ return MemOperand(
+ sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
+ }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
+ }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
+ ASSERT(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ ld(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type,
+ Register src1,
+ const Operand& src2) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ return;
+ }
+
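+  // With --deopt-every-n-times, decrement a global counter and force a
+  // deoptimization whenever it reaches zero, resetting the counter to the
+  // flag value at that point.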
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ __ Push(a1, scratch);
+ __ li(scratch, Operand(count));
+ __ lw(a1, MemOperand(scratch));
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
+ __ li(a1, Operand(FLAG_deopt_every_n_times));
+ __ sw(a1, MemOperand(scratch));
+ __ Pop(a1, scratch);
+
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&no_deopt);
+ __ sw(a1, MemOperand(scratch));
+ __ Pop(a1, scratch);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label skip;
+ if (condition != al) {
+ __ Branch(&skip, NegateCondition(condition), src1, src2);
+ }
+ __ stop("trap_on_deopt");
+ __ bind(&skip);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ // Go through jump table if we need to handle condition, build frame, or
+ // restore caller doubles.
+ if (condition == al && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().bailout_type != bailout_type) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Register src1,
+ const Operand& src2) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(condition, environment, bailout_type, src1, src2);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length();
+ i < length;
+ i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+ kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+ resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->result()).is(v0));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
+ HMod* hmod = instr->hydrogen();
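+  // For a power-of-two divisor the remainder of a non-negative dividend is
+  // simply dividend & mask, where mask == Abs(divisor) - 1. The expression
+  // below computes that mask branch-free for divisors of either sign.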
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
+ // Note: The code below even works when right contains kMinInt.
+ __ dsubu(dividend, zero_reg, dividend);
+ __ And(dividend, dividend, Operand(mask));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ dsubu(dividend, zero_reg, dividend);
+ }
+
+  __ bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
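+  // n % d == n - (n / d) * d: TruncatingDiv yields the truncated quotient
+  // of dividend / Abs(divisor), and the multiply and subtract below
+  // reconstruct the remainder from it.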
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Dmul(result, result, Operand(Abs(divisor)));
+ __ Dsubu(result, dividend, Operand(result));
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ const Register left_reg = ToRegister(instr->left());
+ const Register right_reg = ToRegister(instr->right());
+ const Register result_reg = ToRegister(instr->result());
+
+ // div runs in the background while we check for special cases.
+ __ ddiv(left_reg, right_reg);
+
+ Label done;
+ // Check for x % 0, we have to deopt in this case because we can't return a
+ // NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ }
+
+ // Check for kMinInt % -1, div will return kMinInt, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ } else {
+ __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(result_reg, zero_reg);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
+ __ mfhi(result_reg);
+
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ And(at, dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Dsubu(result, zero_reg, dividend);
+ return;
+ }
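+  // A plain arithmetic shift would round toward negative infinity. For
+  // negative dividends the code below first adds (2^shift - 1), built from
+  // the sign bits via dsrl32/dsra32, so that the shift rounds toward zero.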
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ Move(result, dividend);
+ } else if (shift == 1) {
+ __ dsrl32(result, dividend, 31);
+ __ Daddu(result, dividend, Operand(result));
+ } else {
+ __ dsra32(result, dividend, 31);
+ __ dsrl32(result, result, 32 - shift);
+ __ Daddu(result, dividend, Operand(result));
+ }
+ if (shift > 0) __ dsra(result, result, shift);
+ if (divisor < 0) __ Dsubu(result, zero_reg, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ Dmul(scratch0(), result, Operand(divisor));
+ __ Dsubu(scratch0(), scratch0(), dividend);
+ DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
+ }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ const Register result = ToRegister(instr->result());
+
+ // On MIPS div is asynchronous - it will run in the background while we
+ // check for special cases.
+ __ ddiv(dividend, divisor);
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ __ mfhi(result);
+ DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ __ mflo(result);
+ } else {
+ __ mflo(result);
+ }
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
+
+ __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+ Register scratch = result.is(dividend) ? scratch0() : dividend;
+ ASSERT(!result.is(dividend) || !scratch.is(dividend));
+
+ // If the divisor is 1, return the dividend.
+ if (divisor == 1) {
+ __ Move(result, dividend);
+ return;
+ }
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ dsra(result, dividend, shift);
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ // Dividend can be the same register as result so save the value of it
+ // for checking overflow.
+ __ Move(scratch, dividend);
+
+ __ Dsubu(result, zero_reg, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ }
+
+ __ Xor(scratch, scratch, result);
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
+ }
+ return;
+ }
+
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ dsra(result, result, shift);
+ return;
+ }
+
+ Label no_overflow, done;
+ __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
+ __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
+ __ Branch(&done);
+ __ bind(&no_overflow);
+ __ dsra(result, result, shift);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Dsubu(result, zero_reg, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+ dividend, Operand(zero_reg));
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Dsubu(result, zero_reg, result);
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Dsubu(result, zero_reg, result);
+ __ Dsubu(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ const Register result = ToRegister(instr->result());
+
+ // On MIPS div is asynchronous - it will run in the background while we
+ // check for special cases.
+ __ ddiv(dividend, divisor);
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
+ // We performed a truncating division. Correct the result if necessary.
+ Label done;
+ Register remainder = scratch0();
+ __ mfhi(remainder);
+ __ mflo(result);
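+  // Truncating and flooring division differ only when the remainder is
+  // non-zero and its sign differs from the divisor's; the Xor below checks
+  // the sign bits and the quotient is decremented by one in that case.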
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(remainder, remainder, Operand(divisor));
+ __ Branch(&done, ge, remainder, Operand(zero_reg));
+ __ Dsubu(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
+
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (right_op->IsConstantOperand()) {
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+      // The zero-constant case is handled separately below. If the constant
+      // is negative and left is zero, the result should be -0.
+ DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ }
+
+ switch (constant) {
+ case -1:
+ if (overflow) {
+ __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+ DeoptimizeIf(gt, instr->environment(), scratch, Operand(kMaxInt));
+ } else {
+ __ Dsubu(result, zero_reg, left);
+ }
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+ // result is -0. Deoptimize if required, otherwise return 0.
+ DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+ }
+ __ mov(result, zero_reg);
+ break;
+ case 1:
+ // Nothing to do.
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ dsll(result, left, shift);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Dsubu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ dsll(scratch, left, shift);
+ __ Daddu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Dsubu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ dsll(scratch, left, shift);
+ __ Dsubu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Dsubu(result, zero_reg, result);
+ } else {
+ // Generate standard code.
+ __ li(at, constant);
+ __ Dmul(result, left, at);
+ }
+ }
+
+ } else {
+ ASSERT(right_op->IsRegister());
+ Register right = ToRegister(right_op);
+
+ if (overflow) {
+ // hi:lo = left * right.
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ dmult(result, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ } else {
+ __ dmult(left, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ }
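+      // For the multiplication to be in range, the upper 64 bits of the
+      // 128-bit product must equal the sign extension of the lower 64 bits
+      // (dsra32 by 31 produces 0 or -1); otherwise deoptimize.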
+ __ dsra32(at, result, 31);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ if (!instr->hydrogen()->representation().IsSmi()) {
+ DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
+ DeoptimizeIf(lt, instr->environment(), result, Operand(kMinInt));
+ }
+ } else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ Dmul(result, result, right);
+ } else {
+ __ Dmul(result, left, right);
+ }
+ }
+
+ if (bailout_on_minus_zero) {
+ Label done;
+ __ Xor(at, left, right);
+ __ Branch(&done, ge, at, Operand(zero_reg));
+ // Bail out if the result is minus zero.
+ DeoptimizeIf(eq,
+ instr->environment(),
+ result,
+ Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
+ ASSERT(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+ Operand right(no_reg);
+
+ if (right_op->IsStackSlot()) {
+ right = Operand(EmitLoadRegister(right_op, at));
+ } else {
+ ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ right = ToOperand(right_op);
+ }
+
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_OR:
+ __ Or(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+ __ Nor(result, zero_reg, left);
+ } else {
+ __ Xor(result, left, right);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+
+ if (right_op->IsRegister()) {
+ // No need to mask the right operand on MIPS, it is built into the variable
+ // shift instructions.
+ switch (instr->op()) {
+ case Token::ROR:
+ __ Ror(result, left, Operand(ToRegister(right_op)));
+ break;
+ case Token::SAR:
+ __ srav(result, left, ToRegister(right_op));
+ break;
+ case Token::SHR:
+ __ srlv(result, left, ToRegister(right_op));
+ if (instr->can_deopt()) {
+ // TODO(yy): (-1) >>> 0. anything else?
+ DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
+ }
+ break;
+ case Token::SHL:
+ __ sllv(result, left, ToRegister(right_op));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // Mask the right_op operand.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ Ror(result, left, Operand(shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sra(result, left, shift_count);
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ srl(result, left, shift_count);
+ } else {
+ if (instr->can_deopt()) {
+ __ And(at, left, Operand(0x80000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ dsll(result, left, shift_count);
+ } else {
+ __ sll(result, left, shift_count);
+ }
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ if (right->IsStackSlot()) {
+ Register right_reg = EmitLoadRegister(right, at);
+ __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg));
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
+ }
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ if (right->IsStackSlot() || right->IsConstantOperand()) {
+ Register right_reg = EmitLoadRegister(right, scratch);
+ __ SubuAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ right_reg,
+ overflow); // Reg at also used as scratch.
+ } else {
+ ASSERT(right->IsRegister());
+      // The overflow-check macros do not support constant operands, so the
+      // IsConstantOperand case is handled by the previous clause.
+ __ SubuAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ ToRegister(right),
+ overflow); // Reg at also used as scratch.
+ }
+ DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ if (!instr->hydrogen()->representation().IsSmi()) {
+ DeoptimizeIf(gt, instr->environment(),
+ ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr->environment(),
+ ToRegister(result), Operand(kMinInt));
+ }
+ }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ double v = instr->value();
+ __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ li(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Smi* index = instr->index();
+ Label runtime, done;
+ ASSERT(object.is(a0));
+ ASSERT(result.is(v0));
+ ASSERT(!scratch.is(scratch0()));
+ ASSERT(!scratch.is(object));
+
+ __ SmiTst(object, at);
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ __ GetObjectType(object, scratch, scratch);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
+
+ if (index->value() == 0) {
+ __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ li(scratch, Operand(stamp));
+ __ ld(scratch, MemOperand(scratch));
+ __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Branch(&runtime, ne, scratch, Operand(scratch0()));
+ __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ li(a1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ Register scratch = scratch0();
+ ASSERT(!scratch.is(string));
+ ASSERT(!scratch.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Daddu(scratch, string, ToRegister(index));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ dsll(scratch, ToRegister(index), 1);
+ __ Daddu(scratch, string, scratch);
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ Register scratch = scratch0();
+ __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ __ And(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ lbu(result, operand);
+ } else {
+ __ lhu(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+
+ if (FLAG_debug_code) {
+ Register scratch = scratch0();
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ sb(value, operand);
+ } else {
+ __ sh(value, operand);
+ }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ if (right->IsStackSlot()) {
+ Register right_reg = EmitLoadRegister(right, at);
+ __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg));
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
+ }
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ if (right->IsStackSlot() ||
+ right->IsConstantOperand()) {
+ Register right_reg = EmitLoadRegister(right, scratch);
+ __ AdduAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ right_reg,
+ overflow); // Reg at also used as scratch.
+ } else {
+ ASSERT(right->IsRegister());
+      // The overflow-check macros do not support constant operands, so the
+      // IsConstantOperand case is handled by the previous clause.
+ __ AdduAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ ToRegister(right),
+ overflow); // Reg at also used as scratch.
+ }
+ DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+    // If not a smi, the result must fit in an int32.
+ if (!instr->hydrogen()->representation().IsSmi()) {
+ DeoptimizeIf(gt, instr->environment(),
+ ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr->environment(),
+ ToRegister(result), Operand(kMinInt));
+ }
+ }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Register left_reg = ToRegister(left);
+ Register right_reg = EmitLoadRegister(right, scratch0());
+ Register result_reg = ToRegister(instr->result());
+ Label return_right, done;
+ Register scratch = scratch1();
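+    // Branch-free select: scratch is 1 iff left < right, so Movz/Movn pick
+    // whichever operand satisfies the requested min/max condition.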
+ __ Slt(scratch, left_reg, Operand(right_reg));
+ if (condition == ge) {
+ __ Movz(result_reg, left_reg, scratch);
+ __ Movn(result_reg, right_reg, scratch);
+ } else {
+ ASSERT(condition == le);
+ __ Movn(result_reg, left_reg, scratch);
+ __ Movz(result_reg, right_reg, scratch);
+ }
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ FPURegister left_reg = ToDoubleRegister(left);
+ FPURegister right_reg = ToDoubleRegister(right);
+ FPURegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
+ __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
+ __ Branch(&return_right);
+
+ __ bind(&check_zero);
+ // left == right != 0.
+ __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ __ neg_d(left_reg, left_reg);
+ __ sub_d(result_reg, left_reg, right_reg);
+ __ neg_d(result_reg, result_reg);
+ } else {
+ __ add_d(result_reg, left_reg, right_reg);
+ }
+ __ Branch(&done);
+
+ __ bind(&check_nan_left);
+ // left == NaN.
+ __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ mov_d(result_reg, right_reg);
+ }
+ __ Branch(&done);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ mov_d(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ switch (instr->op()) {
+ case Token::ADD:
+ __ add_d(result, left, right);
+ break;
+ case Token::SUB:
+ __ sub_d(result, left, right);
+ break;
+ case Token::MUL:
+ __ mul_d(result, left, right);
+ break;
+ case Token::DIV:
+ __ div_d(result, left, right);
+ break;
+ case Token::MOD: {
+ // Save a0-a3 on the stack.
+ RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+ __ MultiPush(saved_regs);
+
+ __ PrepareCallCFunction(0, 2, scratch0());
+ __ MovToFloatParameters(left, right);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result into the double result register.
+ __ MovFromFloatResult(result);
+
+ // Restore the saved registers.
+ __ MultiPop(saved_regs);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ // Other architectures use a nop here to signal that there is no inlined
+ // patchable code. MIPS does not need the nop, since our marker
+ // instruction (andi zero_reg) will never be used in normal code.
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+ if (right_block == left_block || condition == al) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ Branch(chunk_->GetAssemblyLabel(right_block),
+ NegateCondition(condition), src1, src2);
+ } else if (right_block == next_block) {
+ __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+ } else {
+ __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2) {
+ int right_block = instr->FalseDestination(chunk_);
+ int left_block = instr->TrueDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+ NegateCondition(condition), src1, src2);
+ } else if (right_block == next_block) {
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+ condition, src1, src2);
+ } else {
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+ condition, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
+ condition, src1, src2);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ stop("LDebugBreak");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsInteger32() || r.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ EmitBranch(instr, ne, reg, Operand(zero_reg));
+ } else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
+ DoubleRegister reg = ToDoubleRegister(instr->value());
+ // Test the double value. Zero and NaN are false.
+ EmitBranchF(instr, nue, reg, kDoubleRegZero);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq, reg, Operand(at));
+ } else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, ne, reg, Operand(zero_reg));
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ DoubleRegister dbl_scratch = double_scratch0();
+ __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+ EmitBranch(instr, ne, at, Operand(zero_reg));
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+ }
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+ }
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ SmiTst(reg, at);
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ }
+
+ const Register map = scratch0();
+ if (expected.NeedsMap()) {
+ __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, at, Operand(1 << Map::kIsUndetectable));
+ __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(instr->TrueLabel(chunk_),
+ ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+ __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+ __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
+ __ Branch(instr->FalseLabel(chunk_));
+ __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ const Register scratch = scratch1();
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ DoubleRegister dbl_scratch = double_scratch0();
+ Label not_heap_number;
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, map, Operand(at));
+ __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ ne, dbl_scratch, kDoubleRegZero);
+ // Falls through if dbl_scratch == 0.
+ __ Branch(instr->FalseLabel(chunk_));
+ __ bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ }
+ }
+ }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = kNoCondition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? lo : lt;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? hi : gt;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? ls : le;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? hs : ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op(), is_unsigned);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ FPURegister left_reg = ToDoubleRegister(left);
+ FPURegister right_reg = ToDoubleRegister(right);
+
+ // If a NaN is involved, i.e. the result is unordered,
+ // jump to false block label.
+ __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
+ left_reg, right_reg);
+
+ EmitBranchF(instr, cond, left_reg, right_reg);
+ } else {
+ Register cmp_left;
+ Operand cmp_right = Operand((int64_t)0);
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(Smi::FromInt(value));
+ } else {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(value);
+ }
+ } else if (left->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(Smi::FromInt(value));
+ } else {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(value);
+ }
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
+ } else {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(ToRegister(right));
+ }
+
+ EmitBranch(instr, cond, cmp_left, cmp_right);
+ }
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ EmitBranch(instr, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ li(at, Operand(factory()->the_hole_value()));
+ EmitBranch(instr, eq, input_reg, Operand(at));
+ return;
+ }
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->object());
+ EmitFalseBranchF(instr, eq, input_reg, input_reg);
+
+ Register scratch = scratch0();
+ __ FmoveHigh(scratch, input_reg);
+ EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
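+ // value compares equal to zero here, so it is +0 or -0; only -0 has
+ // 0x80000000 in the upper 32 bits of its bit pattern.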
+ __ FmoveHigh(scratch, value);
+ // Only use low 32-bits of value.
+ __ dsll32(scratch, scratch, 0);
+ __ dsrl32(scratch, scratch, 0);
+ __ li(at, 0x80000000);
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value,
+ scratch,
+ Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()),
+ DO_SMI_CHECK);
+ __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
+ __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ __ mov(at, zero_reg);
+ }
+ EmitBranch(instr, eq, scratch, Operand(at));
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object) {
+ __ JumpIfSmi(input, is_not_object);
+
+ __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+ __ Branch(is_object, eq, input, Operand(temp2));
+
+ // Load map.
+ __ ld(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+ __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
+ __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
+
+ // Load instance type and check that it is in object type range.
+ __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+ __ Branch(is_not_object,
+ lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+
+ return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = scratch0();
+
+ Condition true_cond =
+ EmitIsObject(reg, temp1, temp2,
+ instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
+
+ EmitBranch(instr, true_cond, temp2,
+ Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ GetObjectType(input, temp1, temp1);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond, temp1,
+ Operand(FIRST_NONSTRING_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register input_reg = EmitLoadRegister(instr->value(), at);
+ __ And(at, input_reg, kSmiTagMask);
+ EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+ EmitBranch(instr, ne, at, Operand(zero_reg));
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+
+ EmitBranch(instr, condition, v0, Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->value());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+
+ __ GetObjectType(input, scratch, scratch);
+ EmitBranch(instr,
+ BranchCondition(instr->hydrogen()),
+ scratch,
+ Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ lwu(scratch,
+ FieldMemOperand(input, String::kHashFieldOffset));
+ __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+ EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
+
+ // Branches to a label or falls through, leaving the instance class name in
+ // the temp register for the caller to compare. Trashes the temp registers,
+ // but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+ Label* is_false,
+ Handle<String> class_name,
+ Register input,
+ Register temp,
+ Register temp2) {
+ ASSERT(!input.is(temp));
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
+
+ __ JumpIfSmi(input, is_false);
+
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+ __ GetObjectType(input, temp, temp2);
+ __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ } else {
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ GetObjectType(input, temp, temp2);
+ __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ ld(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ GetObjectType(temp, temp2, temp2);
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ } else {
+ __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(temp, FieldMemOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+
+ // End with the address of this class_name instance in temp register.
+ // On MIPS, the caller must do the comparison with Handle<String>class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = scratch0();
+ Register temp2 = ToRegister(instr->temp());
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
+
+ EmitBranch(instr, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ EmitBranch(instr, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Label true_label, done;
+ ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
+ ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(v0));
+
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+ __ Branch(&true_label, eq, result, Operand(zero_reg));
+ __ li(result, Operand(factory()->false_value()));
+ __ Branch(&done);
+ __ bind(&true_label);
+ __ li(result, Operand(factory()->true_value()));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ Label* map_check() { return &map_check_; }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+
+ ASSERT(object.is(a0));
+ ASSERT(result.is(v0));
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ Register map = temp;
+ __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ li(at, Operand(Handle<Object>(cell)));
+ __ ld(at, FieldMemOperand(at, PropertyCell::kValueOffset));
+ __ BranchShort(&cache_miss, ne, map, Operand(at));
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false. The distance from the map check has to be constant.
+ __ li(result, Operand(factory()->the_hole_value()));
+ __ Branch(&done);
+
+ // The inlined call site cache did not match. Check null and string before
+ // calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not an instance of anything.
+ __ LoadRoot(temp, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, object, Operand(temp));
+
+ // A string value is not an instance of anything.
+ Condition cc = __ IsObjectStringType(object, temp, temp);
+ __ Branch(&false_result, cc, temp, Operand(zero_reg));
+
+ // Go to the deferred code.
+ __ Branch(deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result has either true or false. Deferred code also produces true or
+ // false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(v0));
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(isolate(), flags);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+
+ // Get the temp register reserved by the instruction. This needs to be a4, as
+ // its slot in the pushed safepoint register area is used to communicate the
+ // offset to the location of the map check.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(temp.is(a4));
+ __ li(InstanceofStub::right(), instr->function());
+ static const int kAdditionalDelta = 13;
+ int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE);
+ __ StoreToSafepointRegisterSlot(temp, temp);
+ }
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ // Put the result value into the result register slot and
+ // restore all registers.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+ Condition condition = ComputeCompareCondition(op);
+ // A minor optimization that relies on LoadRoot always emitting one
+ // instruction.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+ Label done, check;
+ __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
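+ // The LoadRoot of the true value lands in the branch delay slot, so it
+ // always executes; the false value below overwrites it only on fall-through.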
+ __ bind(&check);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in v0. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
+ __ push(v0);
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ __ mov(sp, fp);
+ no_frame_start = masm_->pc_offset();
+ __ Pop(ra, fp);
+ }
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
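+ // Drop the arguments plus the receiver (hence the + 1).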
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ Daddu(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a Smi.
+ __ SmiUntag(reg);
+ __ dsll(at, reg, kPointerSizeLog2);
+ __ Daddu(sp, sp, at);
+ }
+
+ __ Jump(ra);
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ ld(result, FieldMemOperand(at, Cell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ __ li(LoadIC::NameRegister(), Operand(instr->name()));
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Register cell = scratch0();
+
+ // Load the cell.
+ __ li(cell, Operand(instr->hydrogen()->cell().handle()));
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ // We use a temp to check the payload.
+ Register payload = ToRegister(instr->temp());
+ __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
+ }
+
+ // Store the value.
+ __ sd(value, FieldMemOperand(cell, Cell::kValueOffset));
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+
+ __ ld(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ } else {
+ Label is_not_hole;
+ __ Branch(&is_not_hole, ne, result, Operand(at));
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ bind(&is_not_hole);
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ MemOperand target = ContextOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ld(scratch, target);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+ } else {
+ __ Branch(&skip_assignment, ne, scratch, Operand(at));
+ }
+ }
+
+ __ sd(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch0(),
+ GetRAState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+
+ __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = MemOperand(object, offset);
+ __ Load(result, operand, access.representation());
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ ldc1(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+
+ Representation representation = access.representation();
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ if (FLAG_debug_code) {
+ // Verify this is really an Smi.
+ Register scratch = scratch0();
+ __ Load(scratch, FieldMemOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+ }
+
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+ __ Load(result, FieldMemOperand(object, offset), representation);
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ // Name is always in a2.
+ __ li(LoadIC::NameRegister(), Operand(instr->name()));
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register scratch = scratch0();
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function. Load map into the
+ // result register.
+ __ GetObjectType(function, result, scratch);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+ // Get the prototype or initial map from the function.
+ __ ld(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ GetObjectType(result, scratch, scratch);
+ __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+ // Get the prototype from the initial map.
+ __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ Branch(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ __ bind(&non_instance);
+ __ ld(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting the index from the length accounts for one of them; add one more.
+ if (instr->length()->IsConstantOperand()) {
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ ld(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ li(at, Operand(const_length + 1));
+ __ Dsubu(result, at, index);
+ __ dsll(at, result, kPointerSizeLog2);
+ __ Daddu(at, arguments, at);
+ __ ld(result, MemOperand(at));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ Dsubu(result, length, Operand(loc));
+ __ dsll(at, result, kPointerSizeLog2);
+ __ Daddu(at, arguments, at);
+ __ ld(result, MemOperand(at));
+ } else {
+ __ dsll(at, length, kPointerSizeLog2);
+ __ Daddu(at, arguments, at);
+ __ ld(result, MemOperand(at));
+ }
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ __ Dsubu(result, length, index);
+ __ Daddu(result, result, 1);
+ __ dsll(at, result, kPointerSizeLog2);
+ __ Daddu(at, arguments, at);
+ __ ld(result, MemOperand(at));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+ : element_size_shift;
+ int base_offset = instr->base_offset();
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ int base_offset = instr->base_offset();
+ FPURegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Daddu(scratch0(), external_pointer,
+ constant_key << element_size_shift);
+ } else {
+ if (shift_size < 0) {
+ if (shift_size == -32) {
+ __ dsra32(scratch0(), key, 0);
+ } else {
+ __ dsra(scratch0(), key, -shift_size);
+ }
+ } else {
+ __ dsll(scratch0(), key, shift_size);
+ }
+ __ Daddu(scratch0(), scratch0(), external_pointer);
+ }
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), base_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS.
+ __ ldc1(result, MemOperand(scratch0(), base_offset));
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size, base_offset);
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ lb(result, mem_operand);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ lbu(result, mem_operand);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ lh(result, mem_operand);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ lhu(result, mem_operand);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ lw(result, mem_operand);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ lw(result, mem_operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
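+ // The value is used as a signed int32 elsewhere, so deopt if it does not
+ // fit in a signed 32-bit integer.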
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+ int base_offset = instr->base_offset();
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ base_offset += constant_key * kDoubleSize;
+ }
+ __ Daddu(scratch, elements, Operand(base_offset));
+
+ if (!key_is_constant) {
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+ : element_size_shift;
+ if (shift_size > 0) {
+ __ dsll(at, key, shift_size);
+ } else if (shift_size == -32) {
+ __ dsra32(at, key, 0);
+ } else {
+ __ dsra(at, key, -shift_size);
+ }
+ __ Daddu(scratch, scratch, at);
+ }
+
+ __ ldc1(result, MemOperand(scratch));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
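+ // The hole is a NaN with a distinctive upper word: compare the upper
+ // 32 bits of the loaded double against kHoleNanUpper32.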
+ __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ HLoadKeyed* hinstr = instr->hydrogen();
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = instr->base_offset();
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ store_base = elements;
+ } else {
+ Register key = ToRegister(instr->key());
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
+ __ SmiScale(scratch, key, kPointerSizeLog2);
+ __ daddu(scratch, elements, scratch);
+ } else {
+ __ dsll(scratch, key, kPointerSizeLog2);
+ __ daddu(scratch, elements, scratch);
+ }
+ }
+
+ Representation representation = hinstr->representation();
+ if (representation.IsInteger32() && SmiValuesAre32Bits() &&
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+ ASSERT(!hinstr->RequiresHoleCheck());
+ if (FLAG_debug_code) {
+ Register temp = scratch1();
+ __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
+ __ AssertSmi(temp);
+ }
+
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ }
+
+ __ Load(result, MemOperand(store_base, offset), representation);
+
+ // Check for the hole value.
+ if (hinstr->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ SmiTst(result, scratch);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_typed_elements()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int base_offset) {
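+ // A negative shift_size means the key is a Smi whose value lives in the
+ // upper 32 bits; it is shifted right to untag and scale it in one step.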
+ if (key_is_constant) {
+ return MemOperand(base, (constant_key << element_size) + base_offset);
+ }
+
+ if (base_offset == 0) {
+ if (shift_size >= 0) {
+ __ dsll(scratch0(), key, shift_size);
+ __ Daddu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ } else {
+ if (shift_size == -32) {
+ __ dsra32(scratch0(), key, 0);
+ } else {
+ __ dsra(scratch0(), key, -shift_size);
+ }
+ __ Daddu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+ }
+
+ if (shift_size >= 0) {
+ __ dsll(scratch0(), key, shift_size);
+ __ Daddu(scratch0(), base, scratch0());
+ return MemOperand(scratch0(), base_offset);
+ } else {
+ if (shift_size == -32) {
+ __ dsra32(scratch0(), key, 0);
+ } else {
+ __ dsra(scratch0(), key, -shift_size);
+ }
+ __ Daddu(scratch0(), base, scratch0());
+ return MemOperand(scratch0(), base_offset);
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ ASSERT(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register scratch = scratch0();
+ Register temp = scratch1();
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ __ Dsubu(result, sp, 2 * kPointerSize);
+ } else {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
+ __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elem = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
+ __ Branch(&done, eq, fp, Operand(elem));
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(result,
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, result_in_receiver;
+
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode functions.
+ __ ld(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+ // Do not transform the receiver to object for builtins.
+ int32_t strict_mode_function_mask =
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
+ int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
+
+ __ lbu(at,
+ FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
+ __ And(at, at, Operand(strict_mode_function_mask));
+ __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
+ __ lbu(at,
+ FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
+ __ And(at, at, Operand(native_mask));
+ __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ Branch(&global_object, eq, receiver, Operand(scratch));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+ // Deoptimize if the receiver is not a JS object.
+ __ SmiTst(receiver, scratch);
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+ __ GetObjectType(receiver, scratch, scratch);
+ DeoptimizeIf(lt, instr->environment(),
+ scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&result_in_receiver);
+
+ __ bind(&global_object);
+ __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ ld(result,
+ ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ ld(result,
+ FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ Branch(&result_ok);
+ __ bind(&result_in_receiver);
+ __ mov(result, receiver);
+ __ bind(&result_ok);
+ }
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(a0)); // Used for parameter count.
+ ASSERT(function.is(a1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ push(receiver);
+ __ Move(receiver, length);
+ // The arguments are at a one pointer size offset from elements.
+ __ Daddu(elements, elements, Operand(1 * kPointerSize));
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+ __ dsll(scratch, length, kPointerSizeLog2);
+ __ bind(&loop);
+ __ Daddu(scratch, elements, scratch);
+ __ ld(scratch, MemOperand(scratch));
+ __ push(scratch);
+ __ Dsubu(length, length, Operand(1));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+ __ dsll(scratch, length, kPointerSizeLog2);
+
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in receiver which is a0, as expected
+ // by InvokeFunction.
+ ParameterCount actual(receiver);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ Register argument_reg = EmitLoadRegister(argument, at);
+ __ push(argument_reg);
+ }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ __ li(scratch0(), instr->hydrogen()->pairs());
+ __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+ // The context is the first argument.
+ __ Push(cp, scratch0(), scratch1());
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ A1State a1_state) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ if (can_invoke_directly) {
+ if (a1_state == A1_UNINITIALIZED) {
+ __ li(a1, function);
+ }
+
+ // Change context.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Set a0 to the arguments count if adaptation is not needed. Assumes that a0
+ // is available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ li(a0, Operand(arity));
+ }
+
+ // Invoke function.
+ __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+
+ Label done;
+ Register exponent = scratch0();
+ scratch = no_reg;
+ __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it.
+ __ Move(result, input);
+ __ And(at, exponent, Operand(HeapNumber::kSignMask));
+ __ Branch(&done, eq, at, Operand(zero_reg));
+
+ // Input is negative. Reverse its sign.
+ // Preserve the value of all registers.
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(a1) ? a0 : a1;
+ Register tmp2 = input.is(a2) ? a0 : a2;
+ Register tmp3 = input.is(a3) ? a0 : a3;
+ Register tmp4 = input.is(a4) ? a0 : a4;
+
+ // exponent: floating point exponent value.
+
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ Branch(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
+ // Set the pointer to the new heap number in tmp1.
+ if (!tmp1.is(v0))
+ __ mov(tmp1, v0);
+ // Restore input after the call to the runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+ __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+ __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+ __ StoreToSafepointRegisterSlot(tmp1, result);
+ }
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label done;
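+  // The mov executes in the branch delay slot, so result == input whether or
+  // not the branch is taken.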
+ __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+ __ mov(result, input);
+ __ dsubu(result, zero_reg, input);
+ // Overflow if result is still negative, i.e. 0x80000000.
+ DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LMathAbs* instr_;
+ };
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ FPURegister input = ToDoubleRegister(instr->value());
+ FPURegister result = ToDoubleRegister(instr->result());
+ __ abs_d(result, input);
+ } else if (r.IsSmiOrInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->value());
+ // Smi check.
+ __ JumpIfNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ Register except_flag = ToRegister(instr->temp());
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ result,
+ input,
+ scratch1,
+ double_scratch0(),
+ except_flag);
+
+ // Deopt if the operation did not succeed.
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ Label done;
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ __ mfhc1(scratch1, input); // Get exponent/sign bits.
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+ Register scratch = scratch0();
+ Label done, check_sign_on_zero;
+
+ // Extract exponent bits.
+ __ mfhc1(result, input);
+ __ Ext(scratch,
+ result,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+ Label skip1;
+ __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+ __ mov(result, zero_reg);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Branch(&check_sign_on_zero);
+ } else {
+ __ Branch(&done);
+ }
+ __ bind(&skip1);
+
+ // The following conversion will not work with numbers
+ // outside of ]-2^32, 2^32[.
+ DeoptimizeIf(ge, instr->environment(), scratch,
+ Operand(HeapNumber::kExponentBias + 32));
+
+ // Save the original sign for later comparison.
+ __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
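+  // Adding 0.5 and truncating toward -infinity below implements the
+  // round-half-up behavior of Math.round.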
+ __ Move(double_scratch0(), 0.5);
+ __ add_d(double_scratch0(), input, double_scratch0());
+
+ // Check sign of the result: if the sign changed, the input
+  // value was in ]-0.5, 0[ and the result should be -0.
+ __ mfhc1(result, double_scratch0());
+ // mfhc1 sign-extends, clear the upper bits.
+ __ dsll32(result, result, 0);
+ __ dsrl32(result, result, 0);
+ __ Xor(result, result, Operand(scratch));
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // ARM uses 'mi' here, which is 'lt'
+ DeoptimizeIf(lt, instr->environment(), result,
+ Operand(zero_reg));
+ } else {
+ Label skip2;
+ // ARM uses 'mi' here, which is 'lt'
+ // Negating it results in 'ge'
+ __ Branch(&skip2, ge, result, Operand(zero_reg));
+ __ mov(result, zero_reg);
+ __ Branch(&done);
+ __ bind(&skip2);
+ }
+
+ Register except_flag = scratch;
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ result,
+ double_scratch0(),
+ at,
+ double_scratch1,
+ except_flag);
+
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ __ bind(&check_sign_on_zero);
+ __ mfhc1(scratch, input); // Get exponent/sign bits.
+ __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister temp = ToDoubleRegister(instr->temp());
+
+ ASSERT(!input.is(result));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done;
+ __ Move(temp, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+ // Set up Infinity in the delay slot.
+ // result is overwritten if the branch is not taken.
+ __ neg_d(result, temp);
+
+ // Add +0 to convert -0 to +0.
+ __ add_d(result, input, kDoubleRegZero);
+ __ sqrt_d(result, result);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(f4));
+ ASSERT(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(a2));
+ ASSERT(ToDoubleRegister(instr->left()).is(f2));
+ ASSERT(ToDoubleRegister(instr->result()).is(f0));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(a2, &no_deopt);
+ __ ld(a7, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), a7, Operand(at));
+ __ bind(&no_deopt);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DoubleRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(
+ masm(), input, result, double_scratch1, double_scratch2,
+ temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ Clz(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).is(a1));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ A1_CONTAINS_TARGET);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
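+    // Skip past the Code object header to reach the code entry address.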
+ __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ li(a0, Operand(instr->arity()));
+ }
+
+ // Change context.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Load the code entry address
+ __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ __ li(a0, Operand(instr->arity()));
+ // No cell in a2 for construct type feedback in optimized code
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ __ li(a0, Operand(instr->arity()));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+      // We might need the holey variant of the elements kind;
+      // look at the first argument.
+ __ ld(a5, MemOperand(sp, 0));
+ __ Branch(&packed_case, eq, a5, Operand(zero_reg));
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
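+  // Compute the code entry address from the tagged Code object and store it
+  // in the function.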
+ __ Daddu(code_object, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sd(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ Daddu(result, base, Operand(ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ Daddu(result, base, offset);
+ }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ Register scratch2 = scratch1();
+ Register scratch1 = scratch0();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ if (access.IsExternalMemory()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = MemOperand(object, offset);
+ __ Store(value, operand, representation);
+ return;
+ }
+
+ __ AssertNotSmi(object);
+
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ sdc1(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
+ __ li(scratch1, Operand(transition));
+ __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ // Update the write barrier for the map field.
+ __ RecordWriteForMap(object,
+ scratch1,
+ temp,
+ GetRAState(),
+ kSaveFPRegs);
+ }
+ }
+
+ // Do the store.
+ Register destination = object;
+ if (!access.IsInobject()) {
+ destination = scratch1;
+ __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ }
+ Register value = ToRegister(instr->value());
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
+ instr->hydrogen()->value()->representation().IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ if (FLAG_debug_code) {
+ __ Load(scratch2, FieldMemOperand(destination, offset), representation);
+ __ AssertSmi(scratch2);
+ }
+
+ // Store int value directly to upper half of the smi.
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+
+ MemOperand operand = FieldMemOperand(destination, offset);
+ __ Store(value, operand, representation);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(destination,
+ offset,
+ value,
+ scratch2,
+ GetRAState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ // Name is always in a2.
+ __ li(a2, Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+ Operand operand((int64_t)0);
+ Register reg;
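+  // Compare index against length; when the index is constant, the operands
+  // are swapped and the condition commuted.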
+ if (instr->index()->IsConstantOperand()) {
+ operand = ToOperand(instr->index());
+ reg = ToRegister(instr->length());
+ cc = CommuteCondition(cc);
+ } else {
+ reg = ToRegister(instr->index());
+ operand = ToOperand(instr->length());
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ Branch(&done, NegateCondition(cc), reg, operand);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr->environment(), reg, operand);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+ : element_size_shift;
+ int base_offset = instr->base_offset();
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ Register address = scratch0();
+ FPURegister value(ToDoubleRegister(instr->value()));
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ Daddu(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ if (shift_size < 0) {
+ if (shift_size == -32) {
+ __ dsra32(address, key, 0);
+ } else {
+ __ dsra(address, key, -shift_size);
+ }
+ } else {
+ __ dsll(address, key, shift_size);
+ }
+ __ Daddu(address, external_pointer, address);
+ }
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ cvt_s_d(double_scratch0(), value);
+ __ swc1(double_scratch0(), MemOperand(address, base_offset));
+ } else { // Storing doubles, not floats.
+ __ sdc1(value, MemOperand(address, base_offset));
+ }
+ } else {
+ Register value(ToRegister(instr->value()));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ base_offset);
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ sb(value, mem_operand);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ sh(value, mem_operand);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ sw(value, mem_operand);
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int base_offset = instr->base_offset();
+ Label not_nan, done;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ __ Daddu(scratch, elements,
+ Operand((constant_key << element_size_shift) + base_offset));
+ } else {
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+ : element_size_shift;
+ __ Daddu(scratch, elements, Operand(base_offset));
+ ASSERT((shift_size == 3) || (shift_size == -29));
+ if (shift_size == 3) {
+ __ dsll(at, ToRegister(instr->key()), 3);
+ } else if (shift_size == -29) {
+ __ dsra(at, ToRegister(instr->key()), 29);
+ }
+ __ Daddu(scratch, scratch, at);
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(¬_nan);
+
+    // Only load the canonical NaN if the comparison above detected a NaN.
+ __ bind(&is_nan);
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ sdc1(double_scratch, MemOperand(scratch, 0));
+ __ Branch(&done);
+ }
+
+ __ bind(¬_nan);
+ __ sdc1(value, MemOperand(scratch, 0));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+ : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = instr->base_offset();
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ store_base = elements;
+ } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
+ __ SmiScale(scratch, key, kPointerSizeLog2);
+ __ daddu(store_base, elements, scratch);
+ } else {
+ __ dsll(scratch, key, kPointerSizeLog2);
+ __ daddu(store_base, elements, scratch);
+ }
+ }
+
+ Representation representation = instr->hydrogen()->value()->representation();
+ if (representation.IsInteger32() && SmiValuesAre32Bits()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ if (FLAG_debug_code) {
+ Register temp = scratch1();
+ __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
+ __ AssertSmi(temp);
+ }
+
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+
+ __ Store(value, MemOperand(store_base, offset), representation);
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Daddu(key, store_base, Operand(offset));
+ __ RecordWrite(elements,
+ key,
+ value,
+ GetRAState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // By cases: external (typed) elements, fast double elements, fast elements.
+ if (instr->is_typed_elements()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).is(a2));
+ ASSERT(ToRegister(instr->key()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ Handle<Code> ic = (instr->strict_mode() == STRICT)
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Branch(¬_applicable, ne, scratch, Operand(from_map));
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ li(new_map_reg, Operand(to_map));
+ __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteForMap(object_reg,
+ new_map_reg,
+ scratch,
+ GetRAState(),
+ kDontSaveFPRegs);
+ } else {
+ ASSERT(object_reg.is(a0));
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ li(a1, Operand(to_map));
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+ __ bind(¬_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+ ne, &no_memento_found);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr);
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+ instr->context());
+ __ AssertSmi(v0);
+ __ SmiUntag(v0);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ ASSERT(!char_code.is(result));
+
+ __ Branch(deferred->entry(), hi,
+ char_code, Operand(String::kMaxOneByteCharCode));
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ dsll(scratch, char_code, kPointerSizeLog2);
+ __ Daddu(result, result, scratch);
+ __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Branch(deferred->entry(), eq, result, Operand(scratch));
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ FPURegister single_scratch = double_scratch0().low();
+ if (input->IsStackSlot()) {
+ Register scratch = scratch0();
+ __ ld(scratch, ToMemOperand(input));
+ __ mtc1(scratch, single_scratch);
+ } else {
+ __ mtc1(ToRegister(input), single_scratch);
+ }
+ __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+
+ FPURegister dbl_scratch = double_scratch0();
+ __ mtc1(ToRegister(input), dbl_scratch);
+ __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); // TODO(plind): f22?
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
+ __ SmiTag(result, input);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register src = ToRegister(value);
+ Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
+ DoubleRegister dbl_scratch = double_scratch0();
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ Xor(src, src, Operand(0x80000000));
+ }
+ __ mtc1(src, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
+ } else {
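+    // Unsigned case: convert the uint32 value in src to a double.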
+ __ mtc1(src, dbl_scratch);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ }
+
+ if (FLAG_inline_new) {
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
+ __ Branch(&done);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, zero_reg);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(v0, dst);
+ }
+
+ // Done. Put the value in dbl_scratch into the value of the allocated heap
+ // number.
+ __ bind(&done);
+ __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ // We want the untagged address first for performance
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
+ } else {
+ __ Branch(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+  // Now that we have finished with the object's real address, tag it.
+ __ Daddu(reg, reg, kHeapObjectTag);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ mov(reg, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ Dsubu(v0, v0, kHeapObjectTag);
+ __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
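+    // A uint32 value with the sign bit set does not fit in a smi, so deoptimize.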
+ __ And(at, input, Operand(0x80000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, at);
+ DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ } else {
+ __ SmiTag(output, input);
+ }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ if (instr->needs_check()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ // If the input is a HeapObject, value of scratch won't be zero.
+ __ And(scratch, input, Operand(kHeapObjectTag));
+ __ SmiUntag(result, input);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ SmiUntag(result, input);
+ }
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+ DoubleRegister result_reg,
+ bool can_convert_undefined_to_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode) {
+ Register scratch = scratch0();
+ Label convert, load_smi, done;
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ // Heap number map check.
+ __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ if (can_convert_undefined_to_nan) {
+ __ Branch(&convert, ne, scratch, Operand(at));
+ } else {
+ DeoptimizeIf(ne, env, scratch, Operand(at));
+ }
+ // Load heap number.
+ __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ mfc1(at, result_reg);
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ mfhc1(scratch, result_reg); // Get exponent/sign bits.
+ DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ }
+ __ Branch(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ }
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ }
+ // Smi to double register conversion
+ __ bind(&load_smi);
+ // scratch: untagged value of input_reg
+ __ mtc1(scratch, result_reg);
+ __ cvt_d_w(result_reg, result_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+ Register input_reg = ToRegister(instr->value());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->temp());
+ DoubleRegister double_scratch = double_scratch0();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+ ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+ Label done;
+
+ // The input is a tagged HeapObject.
+ // Heap number map check.
+ __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ // This 'at' value and scratch1 map value are used for tests in both clauses
+ // of the if.
+
+ if (instr->truncating()) {
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ Label no_heap_number, check_bools, check_false;
+ // Check HeapNumber map.
+ __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+ __ mov(scratch2, input_reg); // In delay slot.
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ Branch(&done);
+
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
+ __ bind(&no_heap_number);
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&check_bools, ne, input_reg, Operand(at));
+ ASSERT(ToRegister(instr->result()).is(input_reg));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
+
+ __ bind(&check_bools);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&check_false, ne, scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ li(input_reg, Operand(1)); // In delay slot.
+
+ __ bind(&check_false);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+
+ // Load the double value.
+ __ ldc1(double_scratch,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ input_reg,
+ double_scratch,
+ scratch1,
+ double_scratch2,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed.
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+ __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ }
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ ASSERT(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+ // Let the deferred code handle the HeapObject case.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+
+ // Smi to int32 conversion.
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ DoubleRegister result_reg = ToDoubleRegister(result);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ EmitNumberUntagD(input_reg, result_reg,
+ instr->hydrogen()->can_convert_undefined_to_nan(),
+ instr->hydrogen()->deoptimize_on_minus_zero(),
+ instr->environment(),
+ mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ Register except_flag = LCodeGen::scratch1();
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ result_reg,
+ double_input,
+ scratch1,
+ double_scratch0(),
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed (except_flag != 0).
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ Branch(&done, ne, result_reg, Operand(zero_reg));
+ __ mfhc1(scratch1, double_input); // Get exponent/sign bits.
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = LCodeGen::scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ Register except_flag = LCodeGen::scratch1();
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ result_reg,
+ double_input,
+ scratch1,
+ double_scratch0(),
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed (except_flag != 0).
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ Branch(&done, ne, result_reg, Operand(zero_reg));
+ __ mfhc1(scratch1, double_input); // Get exponent/sign bits.
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+ __ SmiTag(result_reg, result_reg);
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->value();
+ __ SmiTst(ToRegister(input), at);
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ SmiTst(ToRegister(input), at);
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ }
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ GetObjectType(input, scratch, scratch);
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+ } else {
+ DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
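+    // A power-of-two mask means a single bit distinguishes the types, so test
+    // just that bit.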
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ And(at, scratch, mask);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
+ at, Operand(zero_reg));
+ } else {
+ __ And(scratch, scratch, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+ }
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Register reg = ToRegister(instr->value());
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ li(at, Operand(Handle<Object>(cell)));
+ __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
+ DeoptimizeIf(ne, instr->environment(), reg,
+ Operand(at));
+ } else {
+ DeoptimizeIf(ne, instr->environment(), reg,
+ Operand(object));
+ }
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(object);
+ __ mov(cp, zero_reg);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(v0, scratch0());
+ }
+ __ SmiTst(scratch0(), at);
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ Register map_reg = scratch0();
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ Register reg = ToRegister(input);
+ __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ Label success;
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
+ }
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ Branch(deferred->entry(), ne, map_reg, Operand(map));
+ } else {
+ DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+ }
+
+ __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register unclamped_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register scratch = scratch0();
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ Label is_smi, done, heap_number;
+
+ // Both smi and heap number cases are handled.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
+
+ // Check for heap number
+ __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ DeoptimizeIf(ne, instr->environment(), input_reg,
+ Operand(factory()->undefined_value()));
+ __ mov(result_reg, zero_reg);
+ __ jmp(&done);
+
+ // Heap number
+ __ bind(&heap_number);
+ __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+ HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ jmp(&done);
+
+ __ bind(&is_smi);
+ __ ClampUint8(result_reg, scratch);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ FmoveHigh(result_reg, value_reg);
+ } else {
+ __ FmoveLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ __ Move(result_reg, lo_reg, hi_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ li(scratch, Operand(size - kHeapObjectTag));
+ } else {
+ __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+ }
+ __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
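+    // Fill the allocated object with one-pointer filler maps, walking
+    // backwards from the end.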
+ Label loop;
+ __ bind(&loop);
+ __ Dsubu(scratch, scratch, Operand(kPointerSize));
+ __ Daddu(at, result, Operand(scratch));
+ __ sd(scratch2, MemOperand(at));
+ __ Branch(&loop, ge, scratch, Operand(zero_reg));
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ li(v0, Operand(Smi::FromInt(size)));
+ __ Push(v0);
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
+ }
+
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ li(v0, Operand(Smi::FromInt(flags)));
+ __ Push(v0);
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ push(a0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Label materialized;
+ // Registers will be used as follows:
+ // a7 = literals array.
+ // a1 = regexp literal.
+ // a0 = regexp literal clone.
+ // a2 and a4-a6 are used as temporaries.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ li(a7, instr->hydrogen()->literals());
+ __ ld(a1, FieldMemOperand(a7, literal_offset));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&materialized, ne, a1, Operand(at));
+
+ // Create regexp literal using runtime function
+ // Result will be in v0.
+ __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(a5, Operand(instr->hydrogen()->pattern()));
+ __ li(a4, Operand(instr->hydrogen()->flags()));
+ __ Push(a7, a6, a5, a4);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ mov(a1, v0);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ Push(a1, a0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ pop(a1);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ ld(a3, FieldMemOperand(a1, i));
+ __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
+ __ sd(a3, FieldMemOperand(v0, i));
+ __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
+ __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
+ }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->is_generator());
+ __ li(a2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ li(a2, Operand(instr->hydrogen()->shared_info()));
+ __ li(a1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, a2, a1);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ Register input = ToRegister(instr->value());
+ __ push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+
+ Register cmp1 = no_reg;
+ Operand cmp2 = Operand(no_reg);
+
+ Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_),
+ input,
+ instr->type_literal(),
+ &cmp1,
+ &cmp2);
+
+ ASSERT(cmp1.is_valid());
+ ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(instr, final_branch_condition, cmp1, cmp2);
+ }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name,
+ Register* cmp1,
+ Operand* cmp2) {
+ // This function utilizes the delay slot heavily. This is used to load
+ // values that are always usable without depending on the type of the input
+ // register.
+ Condition final_branch_condition = kNoCondition;
+ Register scratch = scratch0();
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
+ __ JumpIfSmi(input, true_label);
+ __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ *cmp1 = input;
+ *cmp2 = Operand(at);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->string_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, false_label,
+ ge, scratch, Operand(FIRST_NONSTRING_TYPE));
+    // input is an object, so we can load the bit field even if we take the
+ // other branch.
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, input, scratch);
+ *cmp1 = scratch;
+ *cmp2 = Operand(SYMBOL_TYPE);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->boolean_string())) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ *cmp1 = at;
+ *cmp2 = Operand(input);
+ final_branch_condition = eq;
+
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ *cmp1 = at;
+ *cmp2 = Operand(input);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->undefined_string())) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ // The first instruction of JumpIfSmi is an And - it is safe in the delay
+ // slot.
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
+ final_branch_condition = ne;
+
+ } else if (String::Equals(type_name, factory->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, scratch, input);
+ __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
+ *cmp1 = input;
+ *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->object_string())) {
+ __ JumpIfSmi(input, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ }
+ Register map = input;
+ __ GetObjectType(input, map, scratch);
+ __ Branch(false_label,
+ lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(USE_DELAY_SLOT, false_label,
+ gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // map is still valid, so the BitField can be loaded in the delay slot.
+ // Check for undetectable objects => false.
+ __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
+ final_branch_condition = eq;
+
+ } else {
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
+ __ Branch(false_label);
+ }
+
+ return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->temp());
+
+ EmitIsConstructCall(temp1, scratch0());
+
+ EmitBranch(instr, eq, temp1,
+ Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+ ASSERT(!temp1.is(temp2));
+ // Get the frame pointer for the calling frame.
+ __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne, temp2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&done, hs, sp, Operand(at));
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr);
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and the safepoint
+    // in the deferred code.
+ }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Register result = ToRegister(instr->result());
+ Register object = ToRegister(instr->object());
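+  // Deoptimize when the for-in object is undefined, null, a Smi, or a
+  // JSProxy; those cases are handled by the generic (unoptimized) path.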
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), object, Operand(at));
+
+ Register null_value = a5;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
+
+ __ And(at, object, kSmiTagMask);
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ GetObjectType(object, a1, a1);
+ DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
+
+ Label use_cache, call_runtime;
+ ASSERT(object.is(a0));
+ __ CheckEnumCache(null_value, &call_runtime);
+
+ __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Branch(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(object);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ ASSERT(result.is(v0));
+ __ LoadRoot(at, Heap::kMetaMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
+ __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+ __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ jmp(&done);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ ld(result,
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ ld(result,
+ FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object, index);
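+  // The runtime call below needs no context; clear cp so the safepoint does
+  // not keep a stale context alive.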
+ __ mov(cp, zero_reg);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
+ Label out_of_object, done;
+
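+  // The low bit of the smi-encoded field index marks a mutable double field;
+  // such fields are loaded through the deferred runtime call.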
+ __ And(scratch, index, Operand(Smi::FromInt(1)));
+ __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
+ __ dsra(index, index, 1);
+
+ __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
+ __ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot.
+ __ Daddu(scratch, object, scratch);
+ __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+ __ Branch(&done);
+
+ __ bind(&out_of_object);
+ __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ // Index is equal to negated out of object property index plus 1.
+ __ Dsubu(scratch, result, scratch);
+ __ ld(result, FieldMemOperand(scratch,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ li(at, scope_info);
+ __ Push(at, ToRegister(instr->function()));
+ CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
+#define V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
+
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/mips64/lithium-gap-resolver-mips64.h"
+#include "src/mips64/lithium-mips64.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ RAStatus GetRAState() const {
+ return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
+ }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // LOperand is loaded into dbl_scratch, unless already a double register.
+ DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+ FloatRegister flt_scratch,
+ DoubleRegister dbl_scratch);
+ int32_t ToRepresentation_donotuse(LConstantOperand* op,
+ const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
+
+ bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
+
+ void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
+
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int base_offset);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ StrictMode strict_mode() const { return info()->strict_mode(); }
+
+ Scope* scope() const { return scope_; }
+
+ Register scratch0() { return kLithiumScratchReg; }
+ Register scratch1() { return kLithiumScratchReg2; }
+ DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
+
+ LInstruction* GetNextInstruction();
+
+ void EmitClassOfTest(Label* if_true,
+ Label* if_false,
+ Handle<String> class_name,
+ Register input,
+ Register temporary,
+ Register temporary2);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ enum A1State {
+ A1_UNINITIALIZED,
+ A1_CONTAINS_TARGET
+ };
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in a1.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ A1State a1_state);
+
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ Register ToRegister(int index) const;
+ DoubleRegister ToDoubleRegister(int index) const;
+
+ MemOperand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
+ void EmitIntegerMathAbs(LMathAbs* instr);
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
+ template<class InstrType>
+ void EmitBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2);
+ template<class InstrType>
+ void EmitBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2);
+ template<class InstrType>
+ void EmitFalseBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2);
+ void EmitCmpI(LOperand* left, LOperand* right);
+ void EmitNumberUntagD(Register input,
+ DoubleRegister result,
+ bool allow_undefined_as_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ // Returns two registers in cmp1 and cmp2 that can be used in the
+ // Branch instruction after EmitTypeofIs.
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name,
+ Register* cmp1,
+ Operand* cmp2);
+
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed);
+
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp1, Register temp2);
+
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset,
+ AllocationSiteMode mode);
+ // Emit optimized code for integer division.
+ // Inputs are signed.
+ // All registers are clobbered.
+ // If 'remainder' is no_reg, it is not computed.
+ void EmitSignedIntegerDivisionByConstant(Register result,
+ Register dividend,
+ int32_t divisor,
+ Register remainder,
+ Register scratch,
+ LEnvironment* environment);
+
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters: {
+ StoreRegistersStateStub stub1(codegen_->masm_->isolate(),
+ kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ StoreRegistersStateStub stub2(codegen_->masm_->isolate(),
+ kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters: {
+ RestoreRegistersStateStub stub1(codegen_->masm_->isolate(),
+ kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ RestoreRegistersStateStub stub2(codegen_->masm_->isolate(),
+ kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() {}
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/mips64/lithium-codegen-mips64.h"
+#include "src/mips64/lithium-gap-resolver-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32, owner->zone()),
+ root_index_(0),
+ in_cycle_(false),
+ saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
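+  // All moves have been emitted; clear the worklist for the next resolve.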
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+ // the stack, to free up the register.
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+  // moves_[root_index_]. After performing all moves in the tree rooted
+  // in that move, we restore the value to that source.
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
+ if (saved_destination_->IsRegister()) {
+ __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+ kLithiumScratchDouble);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ sdc1(kLithiumScratchDouble,
+ cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ sd(source_register, cgen_->ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ ld(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ if (!destination_operand.OffsetIsInt16Encodable()) {
+ // 'at' is overwritten while saving the value to the destination.
+ // Therefore we can't use 'at'. It is OK if the read from the source
+ // destroys 'at', since that happens before the value is read.
+ // This uses only a single reg of the double reg-pair.
+ __ ldc1(kLithiumScratchDouble, source_operand);
+ __ sdc1(kLithiumScratchDouble, destination_operand);
+ } else {
+ __ ld(at, source_operand);
+ __ sd(at, destination_operand);
+ }
+ } else {
+ __ ld(kLithiumScratchReg, source_operand);
+ __ sd(kLithiumScratchReg, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsSmi(constant_source)) {
+ __ li(dst, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32(constant_source)) {
+ __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ li(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ Move(result, v);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ if (cgen_->IsSmi(constant_source)) {
+ __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
+ __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+ } else if (cgen_->IsInteger32(constant_source)) {
+ __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
+ __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+ } else {
+ __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
+ __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+ }
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ sdc1(source_register, destination_operand);
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ // kLithiumScratchDouble was used to break the cycle,
+ // but kLithiumScratchReg is free.
+ MemOperand source_high_operand =
+ cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ lw(kLithiumScratchReg, source_operand);
+ __ sw(kLithiumScratchReg, destination_operand);
+ __ lw(kLithiumScratchReg, source_high_operand);
+ __ sw(kLithiumScratchReg, destination_high_operand);
+ } else {
+ __ ldc1(kLithiumScratchDouble, source_operand);
+ __ sdc1(kLithiumScratchDouble, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
+#define V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
+
+#include "src/v8.h"
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/hydrogen-osr.h"
+#include "src/lithium-allocator-inl.h"
+#include "src/mips64/lithium-codegen-mips64.h"
+#include "src/mips64/lithium-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
+ case Token::SHL: return "sll-t";
+ case Token::SAR: return "sra-t";
+ case Token::SHR: return "srl-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(),
+ true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ OStringStream os;
+ os << hydrogen()->access() << " <- ";
+ stream->Add(os.c_str());
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", base_offset());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot for a double-width slot.
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+ return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ ASSERT(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+ return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
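+  // Values flagged to be emitted at their uses are compiled lazily here,
+  // right before the instruction that consumes them.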
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateResultInstruction<1>* instr, Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator,
+ &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
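+      // Shift counts are taken modulo 32, matching JavaScript semantics.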
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
+
+    // A logical shift right by 0 can also deoptimize if the unsigned result
+    // cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = UseFixedDouble(instr->right(), f4);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+      // to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
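+  // The generic (tagged) case is lowered to a call, so the inputs and the
+  // result must live in the fixed registers the call sequence expects.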
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, a1);
+ LOperand* right_operand = UseFixed(right, a0);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ ASSERT(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
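+  // Every call gets a lazy bailout point. If the call has observable side
+  // effects, the environment of the following HSimulate is replayed for it.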
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
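+  // Branches on values whose type is statically known (or untagged) cannot
+  // deoptimize; otherwise a map check may be needed and an environment is
+  // attached.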
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
+ }
+ return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(
+ new(zone()) LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result =
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
+ UseFixed(instr->right(), a1));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result =
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), a0),
+ FixedTemp(a4));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ LOperand* receiver = UseFixed(instr->receiver(), a0);
+ LOperand* length = UseFixed(instr->length(), a2);
+ LOperand* elements = UseFixed(instr->elements(), a3);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses()
+ ? NULL
+ : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
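+  // Operand 0 is the call target; the remaining operands are pinned to the
+  // parameter registers prescribed by the call descriptor.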
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), a1);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = TempDoubleRegister();
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
+ LOperand* input = UseFixedDouble(instr->value(), f8);
+ LOperand* temp = TempDoubleRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ return DefineFixedDouble(result, f4);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LMathFloor* result = new(zone()) LMathFloor(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempDoubleRegister();
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), a1);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), a1);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), a1);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ return DefineAsRegister(new(zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+      // Constants -1, 0 and 1 can use the constant operand even when the
+      // result can overflow. Other constants can use it only when overflow
+      // is impossible.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
+ AssignEnvironment(mul);
+ }
+ return DefineAsRegister(mul);
+
+ } else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips64r2) {
+ if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
+ HAdd* add = HAdd::cast(instr->uses().value());
+ if (instr == add->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded
+ // into a multiply-add.
+ return NULL;
+ }
+ if (instr == add->right() && !add->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add.
+ return NULL;
+ }
+ }
+ }
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new(zone()) LSubI(left, right);
+ LInstruction* result = DefineAsRegister(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
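+// Builds the fused multiply-add chosen by DoMul()/DoAdd() on r2, where the
+// multiplication result feeds directly into the addition.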
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips64r2) {
+ if (instr->left()->IsMul())
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+ if (instr->right()->IsMul()) {
+ ASSERT(!instr->left()->IsMul());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+ }
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), f4) :
+ UseFixed(instr->right(), a2);
+ LPower* result = new(zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, f0),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
+ temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
+ temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsUndetectableAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LStringCompareAndBranch* result =
+ new(zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), a0);
+ LDateField* result =
+ new(zone()) LDateField(object, FixedTemp(a1), instr->index());
+ return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new(zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
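+// Lower representation changes between tagged, smi, int32 and double values
+// to the appropriate tagging, untagging and conversion instructions.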
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ HValue* val = instr->value();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+ (kMaxInt == Smi::kMaxValue));
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ ASSERT(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+ } else {
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+ }
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new(zone()) LCheckInstanceType(value);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ // Revisit this decision, here and 8 lines below.
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+ TempDoubleRegister()));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsSmiOrTagged());
+ LClampTToUint8* result =
+ new(zone()) LClampTToUint8(reg, TempDoubleRegister());
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
+ parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new(zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object = UseFixed(instr->global_object(),
+ LoadIC::ReceiverRegister());
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* value = UseRegister(instr->value());
+ // Use a temp to check the value in the cell in the case where we perform
+ // a hole check.
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
+ : new(zone()) LStoreGlobalCell(value, NULL);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value);
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), v0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LInstruction* result = NULL;
+
+ if (!instr->is_typed_elements()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseRegister(instr->elements());
+ } else {
+ ASSERT(instr->representation().IsSmiOrTagged() ||
+ instr->representation().IsInteger32());
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ }
+
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), v0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_typed_elements()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* val = NULL;
+ LOperand* key = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ val = UseRegister(instr->value());
+ } else {
+ ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+ instr->value()->representation().IsInteger32());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+
+ return new(zone()) LStoreKeyed(object, key, val);
+ }
+
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj = UseFixed(instr->object(), a2);
+ LOperand* key = UseFixed(instr->key(), a1);
+ LOperand* val = UseFixed(instr->value(), a0);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
+}
+
+
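+// A simple map-change transition only rewrites the object's map word, so it
+// is emitted inline with a temp for the new map; any other transition may
+// have to allocate and convert the backing store and is emitted as a call.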
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+ return result;
+ } else {
+ LOperand* object = UseFixed(instr->object(), a0);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, NULL);
+ return MarkAsCall(result, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map = instr->has_transition() &&
+ instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = is_in_object
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
+
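+  // UseTempRegister hands out a register the store code is allowed to
+  // clobber; that is needed when a write barrier runs or the field
+  // representation is Smi.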
+ LOperand* val;
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
+
+  // We need a temporary register for the write barrier of the map field.
+ LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new(zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj = UseFixed(instr->object(), a1);
+ LOperand* val = UseFixed(instr->value(), a0);
+
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+ instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
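+// Ordinary parameters live in the caller-provided stack slots; stub
+// parameters arrive in the fixed registers named by the stub's interface
+// descriptor.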
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor->GetParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ }
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = UseRegister(instr->arguments());
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), a0);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), a0));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new(zone()) LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+
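+// Function-entry stack checks are emitted as calls with the context fixed in
+// cp; back-edge checks only need a pointer map and a deopt environment, so
+// any location will do for the context.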
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
+ }
+}
+
+
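+// Entering an inlined function emits no code; it only installs the inner
+// environment produced by CopyForInlining and records the inlined closure on
+// the chunk.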
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
+ instr->function(),
+ undefined,
+ instr->inlining_kind());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ ASSERT(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->enumerable(), a0);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_LITHIUM_MIPS64_H_
+#define V8_MIPS64_LITHIUM_MIPS64_H_
+
+#include "src/hydrogen.h"
+#include "src/lithium.h"
+#include "src/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
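+// X-macro listing every concrete Lithium instruction in this port. Each
+// V(Name) entry is expanded below into a kName opcode enumerator and an
+// IsName() predicate on LInstruction.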
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(Allocate) \
+ V(AllocateBlockContext) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
+ V(CallFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckInstanceType) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleToI) \
+ V(DoubleBits) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathExp) \
+ V(MathClz32) \
+ V(MathFloor) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(MultiplyAddD) \
+ V(NumberTagD) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
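+// Placed in each concrete instruction class to supply opcode(), the
+// CompileToNative() declaration, Mnemonic(), and a checked cast from
+// LInstruction*.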
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
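+// Gives an instruction typed access to the HInstruction it was built from,
+// e.g. DECLARE_HYDROGEN_ACCESSOR(Add) yields HAdd* hydrogen() const.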
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(hydrogen_value()); \
+ }
+
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ }
+
+ virtual ~LInstruction() {}
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
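+  // For example, V(AddI) expands to:
+  //   bool IsAddI() const { return opcode() == kAddI; }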
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ // Iterator interface.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
+
+
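+// A gap holds up to four parallel-move lists (BEFORE, START, END, AFTER)
+// that the register allocator fills in to shuffle values around the
+// neighbouring instructions.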
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel V8_FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
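+// Base class for instructions that end a block by branching to one of two
+// successors; it resolves the hydrogen successors to chunk block ids and
+// caches the corresponding assembly labels.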
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+ public:
+ LModI(LOperand* left,
+ LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->representation().IsDouble();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathFloor(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathPowHalf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ temps_[0] = temp;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Smi* index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+ Smi* index_;
+};
+
+
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadFunctionPrototype(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+ virtual void PrintDataTo(StringStream* stream);
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
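+  // argument_count() includes the receiver, so the JS-level arity is one
+  // less.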
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
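+  // inputs_[0] holds the call target; the remaining operands correspond to
+  // the values described by the call descriptor's environment.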
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTaggedToI(LOperand* value,
+ LOperand* temp,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberUntagD(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
+ bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* context,
+ LOperand* new_map_temp) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = new_map_temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckMaps(LOperand* value = NULL) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
+
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ allocator_(allocator) { }
+
+ Isolate* isolate() const { return graph_->isolate(); }
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ LPlatformChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // Operand created by UseRegister is guaranteed to be live until the end of
+ // instruction. This means that register allocator will not reuse it's
+ // register for any other operand inside instruction.
+ // Operand created by UseRegisterAtStart is guaranteed to be live only at
+ // instruction start. Register allocator is free to assign the same register
+ // to some other operand used inside instruction (i.e. temporary or
+ // output).
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+ // An input operand in register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly and we do not attach environment to this
+ // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+ void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr);
+
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_LITHIUM_MIPS_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
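+  // Load the low 32 bits zero-extended and the high 32 bits separately, then
+  // shift the high word into place and combine the two halves.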
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
+ generating_stub_(false),
+ has_frame_(false) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
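+  // Store the low 32 bits, then shift the high 32 bits down and store them
+  // into the following word.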
+ }
+}
+
+
+void MacroAssembler::Load(Register dst,
+ const MemOperand& src,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ lb(dst, src);
+ } else if (r.IsUInteger8()) {
+ lbu(dst, src);
+ } else if (r.IsInteger16()) {
+ lh(dst, src);
+ } else if (r.IsUInteger16()) {
+ lhu(dst, src);
+ } else if (r.IsInteger32()) {
+ lw(dst, src);
+ } else {
+ ld(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src,
+ const MemOperand& dst,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ sb(src, dst);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ sh(src, dst);
+ } else if (r.IsInteger32()) {
+ sw(src, dst);
+ } else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ sd(src, dst);
+ }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index) {
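+  // s6 is the root register; scale the index by the pointer size to address
+  // the root list entry.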
+ ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index,
+ Condition cond,
+ Register src1, const Operand& src2) {
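+  // Branch over the load below when the condition does not hold.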
+ Branch(2, NegateCondition(cond), src1, src2);
+ ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ sd(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond,
+ Register src1, const Operand& src2) {
+ Branch(2, NegateCondition(cond), src1, src2);
+ sd(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the
+ // stack, so adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ if (num_unsaved > 0) {
+ Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
+ MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ MultiPop(kSafepointSavedRegisters);
+ if (num_unsaved > 0) {
+ Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ PushSafepointRegisters();
+ Dsubu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i++) {
+ FPURegister reg = FPURegister::FromAllocationIndex(i);
+ sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i++) {
+ FPURegister reg = FPURegister::FromAllocationIndex(i);
+ ldc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ Daddu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
+ PopSafepointRegisters();
+}
+
+
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+ Register dst) {
+ sd(src, SafepointRegistersAndDoublesSlot(dst));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ sd(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ ld(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the highest encoding,
+ // which means that lowest encodings are closest to the stack pointer.
+ return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ UNIMPLEMENTED_MIPS();
+ // General purpose registers are pushed last on the stack.
+ int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
+ int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+ return MemOperand(sp, doubles_size + register_offset);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch) {
+ ASSERT(cc == eq || cc == ne);
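+  // Mask the object's address down to its space start and compare it with
+  // the start of new space.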
+ And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+ Branch(branch, cc, scratch,
+ Operand(ExternalReference::new_space_start(isolate())));
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ ASSERT(!AreAliased(value, dst, t8, object));
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
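+  // Compute the untagged address of the field being written.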
+ Daddu(dst, object, Operand(offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+ Branch(&ok, eq, t8, Operand(zero_reg));
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object,
+ dst,
+ value,
+ ra_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(value, Operand(BitCast<int64_t>(kZapValue + 4)));
+ li(dst, Operand(BitCast<int64_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber 4 registers: object, map, dst, ip. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ ASSERT(!dst.is(at));
+ ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ dst,
+ Operand(isolate()->factory()->meta_map()));
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ if (emit_debug_code()) {
+ ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ map,
+ Operand(at));
+ }
+
+ Label done;
+
+ // A single check of the map's pages interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+
+ Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ Branch(&ok, eq, at, Operand(zero_reg));
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
+ li(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ ASSERT(!AreAliased(object, address, value, t8));
+ ASSERT(!AreAliased(object, address, value, t9));
+
+ if (emit_debug_code()) {
+ ld(at, MemOperand(address));
+ Assert(
+ eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
+ }
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ }
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq,
+ &done);
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
+ CallStub(&stub);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(address, Operand(BitCast<int64_t>(kZapValue + 12)));
+ li(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ Label done;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ li(t8, Operand(store_buffer));
+ ld(scratch, MemOperand(t8));
+ // Store pointer to buffer and increment buffer top.
+ sd(address, MemOperand(scratch));
+ Daddu(scratch, scratch, kPointerSize);
+ // Write back new top of buffer.
+ sd(scratch, MemOperand(t8));
+  // Check whether the store buffer has overflowed; if it has, fall through
+  // and call the overflow stub below.
+ And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ ASSERT(!scratch.is(t8));
+ if (and_then == kFallThroughAtEnd) {
+ Branch(&done, eq, t8, Operand(zero_reg));
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Ret(eq, t8, Operand(zero_reg));
+ }
+ push(ra);
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(isolate(), fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(ra);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Allocation support.
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ Label same_contexts;
+
+ ASSERT(!holder_reg.is(scratch));
+ ASSERT(!holder_reg.is(at));
+ ASSERT(!scratch.is(at));
+
+ // Load current lexical context from the stack frame.
+ ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
+ scratch, Operand(zero_reg));
+#endif
+
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ ld(scratch, FieldMemOperand(scratch, offset));
+ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ push(holder_reg); // Temporarily save holder on the stack.
+ // Read the first word and compare to the native_context_map.
+ ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
+ holder_reg, Operand(at));
+ pop(holder_reg); // Restore holder.
+ }
+
+ // Check if both contexts are the same.
+ ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ Branch(&same_contexts, eq, scratch, Operand(at));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ push(holder_reg); // Temporarily save holder on the stack.
+ mov(holder_reg, at); // Move at to its holding place.
+ LoadRoot(at, Heap::kNullValueRootIndex);
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull,
+ holder_reg, Operand(at));
+
+ ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
+ holder_reg, Operand(at));
+    // Restoring 'at' is not needed; 'at' is reloaded below.
+ pop(holder_reg); // Restore holder.
+ // Restore at to holder's context.
+ ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ ld(scratch, FieldMemOperand(scratch, token_offset));
+ ld(at, FieldMemOperand(at, token_offset));
+ Branch(miss, ne, scratch, Operand(at));
+
+ bind(&same_contexts);
+}
+
+
+void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
+ // First of all we assign the hash seed to scratch.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ SmiUntag(scratch);
+
+ // Xor original key with a seed.
+ xor_(reg0, reg0, scratch);
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ // The algorithm uses 32-bit integer values.
+ nor(scratch, reg0, zero_reg);
+ sll(at, reg0, 15);
+ addu(reg0, scratch, at);
+
+ // hash = hash ^ (hash >> 12);
+ srl(at, reg0, 12);
+ xor_(reg0, reg0, at);
+
+ // hash = hash + (hash << 2);
+ sll(at, reg0, 2);
+ addu(reg0, reg0, at);
+
+ // hash = hash ^ (hash >> 4);
+ srl(at, reg0, 4);
+ xor_(reg0, reg0, at);
+
+ // hash = hash * 2057;
+ sll(scratch, reg0, 11);
+ sll(at, reg0, 3);
+ addu(reg0, reg0, at);
+ addu(reg0, reg0, scratch);
+
+ // hash = hash ^ (hash >> 16);
+ srl(at, reg0, 16);
+ xor_(reg0, reg0, at);
+}
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //          Allowed to be the same as 'elements' or 'key'.
+  //          Unchanged on bailout so 'elements' or 'key' can be used
+  //          in further computation.
+ //
+ // Scratch registers:
+ //
+ // reg0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // reg1 - Used to hold the capacity mask of the dictionary.
+ //
+ // reg2 - Used for the index into the dictionary.
+ // at - Temporary (avoid MacroAssembler instructions also using 'at').
+ Label done;
+
+ GetNumberHash(reg0, reg1);
+
+ // Compute the capacity mask.
+ ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+ SmiUntag(reg1, reg1);
+ Dsubu(reg1, reg1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Use reg2 for index calculations and keep the hash intact in reg0.
+ mov(reg2, reg0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+ }
+ and_(reg2, reg2, reg1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ dsll(at, reg2, 1); // 2x.
+ daddu(reg2, reg2, at); // reg2 = reg2 * 3.
+
+ // Check if the key is identical to the name.
+ dsll(at, reg2, kPointerSizeLog2);
+ daddu(reg2, elements, at);
+
+ ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
+ if (i != kNumberDictionaryProbes - 1) {
+ Branch(&done, eq, key, Operand(at));
+ } else {
+ Branch(miss, ne, key, Operand(at));
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ // reg2: elements + (index * kPointerSize).
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
+ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ ld(result, FieldMemOperand(reg2, kValueOffset));
+}
+
+
+// ---------------------------------------------------------------------------
+// Instruction macros.
+
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ addu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ addiu(rd, rs, rt.imm64_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ addu(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ daddu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ daddiu(rd, rs, rt.imm64_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ daddu(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ subu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ subu(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ dsubu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      daddiu(rd, rs, -rt.imm64_);  // No dsubiu instr, use daddiu(x, y, -imm).
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ dsubu(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant == kLoongson) {
+ mult(rs, rt.rm());
+ mflo(rd);
+ } else {
+ mul(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant == kLoongson) {
+ mult(rs, at);
+ mflo(rd);
+ } else {
+ mul(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant == kLoongson) {
+ dmult(rs, rt.rm());
+ mflo(rd);
+ } else {
+ // TODO(yuyin):
+ // dmul(rd, rs, rt.rm());
+ dmult(rs, rt.rm());
+ mflo(rd);
+ }
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant == kLoongson) {
+ dmult(rs, at);
+ mflo(rd);
+ } else {
+ // TODO(yuyin):
+ // dmul(rd, rs, at);
+ dmult(rs, at);
+ mflo(rd);
+ }
+ }
+}
+
+
+void MacroAssembler::Mult(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ mult(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ mult(rs, at);
+ }
+}
+
+
+void MacroAssembler::Dmult(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ dmult(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ dmult(rs, at);
+ }
+}
+
+
+void MacroAssembler::Multu(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ multu(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ multu(rs, at);
+ }
+}
+
+
+void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ dmultu(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ dmultu(rs, at);
+ }
+}
+
+
+void MacroAssembler::Div(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ div(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ div(rs, at);
+ }
+}
+
+
+void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ ddiv(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ ddiv(rs, at);
+ }
+}
+
+
+void MacroAssembler::Divu(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ divu(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ divu(rs, at);
+ }
+}
+
+
+void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ ddivu(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ ddivu(rs, at);
+ }
+}
+
+
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ and_(rd, rs, rt.rm());
+ } else {
+ if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ andi(rd, rs, rt.imm64_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ and_(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ or_(rd, rs, rt.rm());
+ } else {
+ if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ ori(rd, rs, rt.imm64_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ or_(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ xor_(rd, rs, rt.rm());
+ } else {
+ if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ xori(rd, rs, rt.imm64_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ xor_(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ nor(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ nor(rd, rs, at);
+ }
+}
+
+
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+ ASSERT(rt.is_reg());
+ ASSERT(!at.is(rs));
+ ASSERT(!at.is(rt.rm()));
+ li(at, -1);
+ xor_(rs, rt.rm(), at);
+}
+
+
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ slti(rd, rs, rt.imm64_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ slt(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ sltiu(rd, rs, rt.imm64_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ sltu(rd, rs, at);
+ }
+ }
+}
+
+
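+// 32-bit rotate right. MIPS64r2 provides rotr/rotrv; older variants
+// synthesize the rotation from shifts and an or.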
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+ if (kArchVariant == kMips64r2) {
+ if (rt.is_reg()) {
+ rotrv(rd, rs, rt.rm());
+ } else {
+ rotr(rd, rs, rt.imm64_);
+ }
+ } else {
+ if (rt.is_reg()) {
+ subu(at, zero_reg, rt.rm());
+ sllv(at, rs, at);
+ srlv(rd, rs, rt.rm());
+ or_(rd, rd, at);
+ } else {
+ if (rt.imm64_ == 0) {
+ srl(rd, rs, 0);
+ } else {
+ srl(at, rs, rt.imm64_);
+ sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
+ or_(rd, rd, at);
+ }
+ }
+ }
+}
+
+
+void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ drotrv(rd, rs, rt.rm());
+ } else {
+ drotr(rd, rs, rt.imm64_);
+ }
+}
+
+
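+// Prefetch. Loongson has no pref instruction, so emit a harmless load to
+// zero_reg instead.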
+void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
+ if (kArchVariant == kLoongson) {
+ lw(zero_reg, rs);
+ } else {
+ pref(hint, rs);
+ }
+}
+
+
+// ------------Pseudo-instructions-------------
+
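+// Load a 32-bit word from a potentially unaligned address using the
+// little-endian lwr/lwl pair.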
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+ lwr(rd, rs);
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+ swr(rd, rs);
+ swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+// Load a 64-bit word from an unaligned address. This only handles the case
+// where the address is 32-bit aligned but not 64-bit aligned.
+void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
+ // Assert that the offset from the start of the object is NOT 64-bit aligned.
+ // Only use with known misalignment, since there is a performance cost.
+ ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
+ // TODO(plind): endian dependency.
+ lwu(rd, rs);
+ lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ dsll32(scratch, scratch, 0);
+ Daddu(rd, rd, scratch);
+}
+
+
+// Store a 64-bit word to an unaligned address. This only handles the case
+// where the address is 32-bit aligned but not 64-bit aligned.
+void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
+ // Assert that the offset from the start of the object is NOT 64-bit aligned.
+ // Only use with known misalignment, since there is a performance cost.
+ ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
+ // TODO(plind): endian dependency.
+ sw(rd, rs);
+ dsrl32(scratch, rd, 0);
+ sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+}
+
+
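+// Load a Smi or heap object handle into dst. Objects in new space are
+// loaded indirectly through a cell so the GC can update the reference.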
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ li(dst, Operand(value), mode);
+ } else {
+ ASSERT(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ li(dst, Operand(cell));
+ ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ li(dst, Operand(value));
+ }
+ }
+}
+
+
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
+ ASSERT(!j.is_reg());
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
+ // Normal load of an immediate value which does not need Relocation Info.
+ if (is_int32(j.imm64_)) {
+ if (is_int16(j.imm64_)) {
+ daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
+ } else if (!(j.imm64_ & kHiMask)) {
+ ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
+ } else if (!(j.imm64_ & kImm16Mask)) {
+ lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+ } else {
+ lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ & kImm16Mask));
+ }
+ } else {
+ lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ }
+ } else if (MustUseReg(j.rmode_)) {
+ RecordRelocInfo(j.rmode_, j.imm64_);
+ lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ } else if (mode == ADDRESS_LOAD) {
+ // We always emit the same number of instructions, since this code may later
+ // be patched to load a different value that needs all 4 instructions.
+ lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ } else {
+ lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ }
+}
+
+
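+// Push the registers in 'regs' on the stack; the lowest-numbered register
+// ends up closest to the new stack pointer.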
+void MacroAssembler::MultiPush(RegList regs) {
+ int16_t num_to_push = NumberOfBitsSet(regs);
+ int16_t stack_offset = num_to_push * kPointerSize;
+
+ Dsubu(sp, sp, Operand(stack_offset));
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ sd(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPushReversed(RegList regs) {
+ int16_t num_to_push = NumberOfBitsSet(regs);
+ int16_t stack_offset = num_to_push * kPointerSize;
+
+ Dsubu(sp, sp, Operand(stack_offset));
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ sd(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPop(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ ld(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ daddiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::MultiPopReversed(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ ld(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ daddiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::MultiPushFPU(RegList regs) {
+ int16_t num_to_push = NumberOfBitsSet(regs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ Dsubu(sp, sp, Operand(stack_offset));
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kDoubleSize;
+ sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPushReversedFPU(RegList regs) {
+ int16_t num_to_push = NumberOfBitsSet(regs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ Dsubu(sp, sp, Operand(stack_offset));
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kDoubleSize;
+ sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPopFPU(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ daddiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::MultiPopReversedFPU(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ daddiu(sp, sp, stack_offset);
+}
+
+
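+// Flush the instruction cache for 'instructions' instructions starting at
+// 'address' by calling out to the C flush_icache function. Caller-saved
+// registers and ra are preserved around the call.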
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+ RegList saved_regs = kJSCallerSaved | ra.bit();
+ MultiPush(saved_regs);
+ AllowExternalCallThatCantCauseGC scope(this);
+
+ // Save to a0 in case address == a4.
+ Move(a0, address);
+ PrepareCallCFunction(2, a4);
+
+ li(a1, instructions * kInstrSize);
+ CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+ MultiPop(saved_regs);
+}
+
+
+void MacroAssembler::Ext(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size < 33);
+ ext_(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Ins(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size <= 32);
+ ASSERT(size != 0);
+ ins_(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+ FPURegister fs,
+ FPURegister scratch) {
+ // Move the data from fs to t8.
+ mfc1(t8, fs);
+ Cvt_d_uw(fd, t8, scratch);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+ Register rs,
+ FPURegister scratch) {
+ // Convert rs to an FP value in fd.
+ // We do this by converting rs minus the MSB to avoid sign conversion,
+ // then adding 2^31 to the result (if needed).
+
+ ASSERT(!fd.is(scratch));
+ ASSERT(!rs.is(t9));
+ ASSERT(!rs.is(at));
+
+ // Save rs's MSB to t9.
+ Ext(t9, rs, 31, 1);
+ // Remove rs's MSB.
+ Ext(at, rs, 0, 31);
+ // Move the result to fd.
+ mtc1(at, fd);
+ mthc1(zero_reg, fd);
+
+ // Convert fd to a real FP value.
+ cvt_d_w(fd, fd);
+
+ Label conversion_done;
+
+ // If rs's MSB was 0, it's done.
+ // Otherwise we need to add that to the FP register.
+ Branch(&conversion_done, eq, t9, Operand(zero_reg));
+
+ // Load 2^31 into scratch as its float representation.
+ li(at, 0x41E00000);
+ mtc1(zero_reg, scratch);
+ mthc1(at, scratch);
+ // Add it to fd.
+ add_d(fd, fd, scratch);
+
+ bind(&conversion_done);
+}
+
+
+void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
+ round_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
+ floor_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
+ ceil_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
+ trunc_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_l_ud(FPURegister fd,
+ FPURegister fs,
+ FPURegister scratch) {
+ // Load to GPR.
+ dmfc1(t8, fs);
+ // Reset sign bit.
+ li(at, 0x7fffffffffffffff);
+ and_(t8, t8, at);
+ dmtc1(t8, fs);
+ trunc_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+ FPURegister fs,
+ FPURegister scratch) {
+ Trunc_uw_d(fs, t8, scratch);
+ mtc1(t8, fd);
+}
+
+
+void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
+ trunc_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
+ round_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
+ floor_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
+ ceil_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+ Register rs,
+ FPURegister scratch) {
+ ASSERT(!fd.is(scratch));
+ ASSERT(!rs.is(at));
+
+ // Load 2^31 into scratch as its float representation.
+ li(at, 0x41E00000);
+ mtc1(zero_reg, scratch);
+ mthc1(at, scratch);
+ // Test if scratch > fd.
+ // If fd < 2^31 we can convert it normally.
+ Label simple_convert;
+ BranchF(&simple_convert, NULL, lt, fd, scratch);
+
+ // First we subtract 2^31 from fd, then trunc it to rs
+ // and add 2^31 to rs.
+ sub_d(scratch, fd, scratch);
+ trunc_w_d(scratch, scratch);
+ mfc1(rs, scratch);
+ Or(rs, rs, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_d(scratch, fd);
+ mfc1(rs, scratch);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (0) { // TODO(plind): find reasonable arch-variant symbol names.
+ madd_d(fd, fr, fs, ft);
+ } else {
+ // Must not clobber the source registers.
+ ASSERT(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ add_d(fd, fr, scratch);
+ }
+}
+
+
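+// Floating-point compare and branch. If 'nan' is non-NULL, unordered (NaN)
+// comparisons branch there first; 'target' may be NULL when only the NaN
+// check is needed.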
+void MacroAssembler::BranchF(Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cc == al) {
+ Branch(bd, target);
+ return;
+ }
+
+ ASSERT(nan || target);
+ // Check for unordered (NaN) cases.
+ if (nan) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ }
+
+ if (target) {
+ // NaN cases have either been handled above or are assumed to have been
+ // handled by the caller.
+ // Unsigned conditions are treated as their signed counterparts.
+ switch (cc) {
+ case lt:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case gt:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case ge:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case le:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ }
+ }
+
+ if (bd == PROTECT) {
+ nop();
+ }
+}
+
+
+void MacroAssembler::Move(FPURegister dst, double imm) {
+ static const DoubleRepresentation minus_zero(-0.0);
+ static const DoubleRepresentation zero(0.0);
+ DoubleRepresentation value_rep(imm);
+ // Handle special values first.
+ bool force_load = dst.is(kDoubleRegZero);
+ if (value_rep == zero && !force_load) {
+ mov_d(dst, kDoubleRegZero);
+ } else if (value_rep == minus_zero && !force_load) {
+ neg_d(dst, kDoubleRegZero);
+ } else {
+ uint32_t lo, hi;
+ DoubleAsTwoUInt32(imm, &lo, &hi);
+ // Move the low part of the double into the lower bits of the corresponding
+ // FPU register.
+ if (lo != 0) {
+ li(at, Operand(lo));
+ mtc1(at, dst);
+ } else {
+ mtc1(zero_reg, dst);
+ }
+ // Move the high part of the double into the high bits of the corresponding
+ // FPU register.
+ if (hi != 0) {
+ li(at, Operand(hi));
+ mthc1(at, dst);
+ } else {
+ mthc1(zero_reg, dst);
+ }
+ }
+}
+
+
+void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+ if (kArchVariant == kLoongson) {
+ Label done;
+ Branch(&done, ne, rt, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movz(rd, rs, rt);
+ }
+}
+
+
+void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+ if (kArchVariant == kLoongson) {
+ Label done;
+ Branch(&done, eq, rt, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movn(rd, rs, rt);
+ }
+}
+
+
+void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+ movt(rd, rs, cc);
+}
+
+
+void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+ movf(rd, rs, cc);
+}
+
+
+void MacroAssembler::Clz(Register rd, Register rs) {
+ clz(rd, rs);
+}
+
+
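+// Convert double_input to a signed 32-bit integer in 'result' using the
+// given rounding mode. On return, except_flag is zero if the conversion
+// succeeded; otherwise it holds the masked FCSR exception bits (inexact is
+// ignored when check_inexact is kDontCheckForInexactConversion).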
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+ Register result,
+ DoubleRegister double_input,
+ Register scratch,
+ DoubleRegister double_scratch,
+ Register except_flag,
+ CheckForInexactConversion check_inexact) {
+ ASSERT(!result.is(scratch));
+ ASSERT(!double_input.is(double_scratch));
+ ASSERT(!except_flag.is(scratch));
+
+ Label done;
+
+ // Clear the except flag (0 = no exception)
+ mov(except_flag, zero_reg);
+
+ // Test for values that can be exactly represented as a signed 32-bit integer.
+ cvt_w_d(double_scratch, double_input);
+ mfc1(result, double_scratch);
+ cvt_d_w(double_scratch, double_scratch);
+ BranchF(&done, NULL, eq, double_input, double_scratch);
+
+ int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
+
+ if (check_inexact == kDontCheckForInexactConversion) {
+ // Ignore inexact exceptions.
+ except_mask &= ~kFCSRInexactFlagMask;
+ }
+
+ // Save FCSR.
+ cfc1(scratch, FCSR);
+ // Disable FPU exceptions.
+ ctc1(zero_reg, FCSR);
+
+ // Do operation based on rounding mode.
+ switch (rounding_mode) {
+ case kRoundToNearest:
+ Round_w_d(double_scratch, double_input);
+ break;
+ case kRoundToZero:
+ Trunc_w_d(double_scratch, double_input);
+ break;
+ case kRoundToPlusInf:
+ Ceil_w_d(double_scratch, double_input);
+ break;
+ case kRoundToMinusInf:
+ Floor_w_d(double_scratch, double_input);
+ break;
+ } // End of switch-statement.
+
+ // Retrieve FCSR.
+ cfc1(except_flag, FCSR);
+ // Restore FCSR.
+ ctc1(scratch, FCSR);
+ // Move the converted value into the result register.
+ mfc1(result, double_scratch);
+
+ // Check for fpu exceptions.
+ And(except_flag, except_flag, Operand(except_mask));
+
+ bind(&done);
+}
+
+
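+// Try to truncate double_input to a signed 32-bit integer in 'result' using
+// trunc_w_d. Jumps to 'done' on success; falls through on overflow,
+// underflow or NaN so the caller can emit a slow path.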
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ Register scratch = at;
+ Register scratch2 = t9;
+
+ // Clear cumulative exception flags and save the FCSR.
+ cfc1(scratch2, FCSR);
+ ctc1(zero_reg, FCSR);
+ // Try a conversion to a signed integer.
+ trunc_w_d(single_scratch, double_input);
+ mfc1(result, single_scratch);
+ // Retrieve and restore the FCSR.
+ cfc1(scratch, FCSR);
+ ctc1(scratch2, FCSR);
+ // Check for overflow and NaNs.
+ And(scratch,
+ scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions we are done.
+ Branch(done, eq, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through, the inline version didn't succeed; call the stub instead.
+ push(ra);
+ Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ sdc1(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+ CallStub(&stub);
+
+ Daddu(sp, sp, Operand(kDoubleSize));
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+ Label done;
+ DoubleRegister double_scratch = f12;
+ ASSERT(!result.is(object));
+
+ ldc1(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+ // If we fell through, the inline version didn't succeed; call the stub instead.
+ push(ra);
+ DoubleToIStub stub(isolate(),
+ object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label done;
+ ASSERT(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+ TruncateHeapNumberToI(result, object);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+ Register src,
+ int num_least_bits) {
+ // Ext(dst, src, kSmiTagSize, num_least_bits);
+ SmiUntag(dst, src);
+ And(dst, dst, Operand((1 << num_least_bits) - 1));
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+ Register src,
+ int num_least_bits) {
+ ASSERT(!src.is(dst));
+ And(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
+ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
+
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+ BranchShort(offset, bdslot);
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchShort(L, bdslot);
+ } else {
+ Jr(L, bdslot);
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ Jr(L, bdslot);
+ } else {
+ BranchShort(L, bdslot);
+ }
+ }
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchShort(L, cond, rs, rt, bdslot);
+ } else {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
+ } else {
+ BranchShort(L, cond, rs, rt, bdslot);
+ }
+ }
+}
+
+
+void MacroAssembler::Branch(Label* L,
+ Condition cond,
+ Register rs,
+ Heap::RootListIndex index,
+ BranchDelaySlot bdslot) {
+ LoadRoot(at, index);
+ Branch(L, cond, rs, Operand(at), bdslot);
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+ b(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ ASSERT(!rs.is(zero_reg));
+ Register r2 = no_reg;
+ Register scratch = at;
+
+ if (rt.is_reg()) {
+ // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
+ // rt.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ r2 = rt.rm_;
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ // No code needs to be emitted.
+ return;
+ } else {
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (rt.imm64_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm64_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm64_)) {
+ slti(scratch, rs, rt.imm64_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm64_ == 0) {
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm64_)) {
+ slti(scratch, rs, rt.imm64_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm64_ == 0) {
+ blez(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm64_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm64_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm64_)) {
+ sltiu(scratch, rs, rt.imm64_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm64_ == 0) {
+ // No code needs to be emitted.
+ return;
+ } else if (is_int16(rt.imm64_)) {
+ sltiu(scratch, rs, rt.imm64_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm64_ == 0) {
+ b(offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
+ // We use branch_offset as an argument for the branch instructions to be sure
+ // it is called just before generating the branch instruction, as needed.
+
+ b(shifted_branch_offset(L, false));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset = 0;
+ Register r2 = no_reg;
+ Register scratch = at;
+ if (rt.is_reg()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ r2 = rt.rm_;
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ // No code needs to be emitted.
+ return;
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm64_)) {
+ slti(scratch, rs, rt.imm64_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm64_)) {
+ slti(scratch, rs, rt.imm64_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bne(rs, zero_reg, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm64_)) {
+ sltiu(scratch, rs, rt.imm64_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm64_ == 0) {
+ // No code needs to be emitted.
+ return;
+ } else if (is_int16(rt.imm64_)) {
+ sltiu(scratch, rs, rt.imm64_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ beq(rs, zero_reg, offset);
+ } else {
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ // Check that the offset actually fits in an int16_t.
+ ASSERT(is_int16(offset));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L, bdslot);
+ } else {
+ Jalr(L, bdslot);
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ Jalr(L, bdslot);
+ } else {
+ BranchAndLinkShort(L, bdslot);
+ }
+ }
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ } else {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ }
+ }
+}
+
+
+// We need to use a bgezal or bltzal, but they can't be used directly with the
+// slt instructions. We could use sub or add instead but we would miss overflow
+// cases, so we keep slt and add an intermediate third instruction.
+void MacroAssembler::BranchAndLinkShort(int16_t offset,
+ BranchDelaySlot bdslot) {
+ bal(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Register r2 = no_reg;
+ Register scratch = at;
+
+ if (rt.is_reg()) {
+ r2 = rt.rm_;
+ } else if (cond != cc_always) {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ slt(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ bal(shifted_branch_offset(L, false));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset = 0;
+ Register r2 = no_reg;
+ Register scratch = at;
+ if (rt.is_reg()) {
+ r2 = rt.rm_;
+ } else if (cond != cc_always) {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ slt(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ daddiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+ // Check that the offset actually fits in an int16_t.
+ ASSERT(is_int16(offset));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jump(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jump(intptr_t target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+ }
+ // The first instruction of 'li' may be placed in the delay slot.
+ // This is not an issue; t9 is expected to be clobbered anyway.
+ li(t9, Operand(target, rmode));
+ Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ bind(&skip);
+}
+
+
+void MacroAssembler::Jump(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ AllowDeferredHandleDereference embedding_raw_address;
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
+}
+
+
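+// Size, in bytes, of the code generated by the matching Call(); used to
+// assert that the emitted call sequence has the expected length.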
+int MacroAssembler::CallSize(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ int size = 0;
+
+ if (cond == cc_always) {
+ size += 1;
+ } else {
+ size += 3;
+ }
+
+ if (bd == PROTECT)
+ size += 1;
+
+ return size * kInstrSize;
+}
+
+
+// Note: To call gcc-compiled C code on mips, you must call through t9.
+void MacroAssembler::Call(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ if (cond == cc_always) {
+ jalr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT)
+ nop();
+
+ ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ int size = CallSize(t9, cond, rs, rt, bd);
+ return size + 4 * kInstrSize;
+}
+
+
+void MacroAssembler::Call(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ int64_t target_int = reinterpret_cast<int64_t>(target);
+ // Must record previous source positions before the
+ // li() generates a new code target.
+ positions_recorder()->WriteRecordedPositions();
+ li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
+ Call(t9, cond, rs, rt, bd);
+ ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ AllowDeferredHandleDereference using_raw_address;
+ return CallSize(reinterpret_cast<Address>(code.location()),
+ rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+ AllowDeferredHandleDereference embedding_raw_address;
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::Ret(Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ Jump(ra, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint64_t imm28;
+ imm28 = jump_address(L);
+ imm28 &= kImm28Mask;
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ j(imm28);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
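+// Long branch to label L: load the 64-bit target address into 'at' and jump
+// through it.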
+void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint64_t imm64;
+ imm64 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ li(at, Operand(imm64), ADDRESS_LOAD);
+ }
+ jr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint64_t imm64;
+ imm64 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ li(at, Operand(imm64), ADDRESS_LOAD);
+ }
+ jalr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
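+// Drop 'drop' stack slots and return, doing the stack adjustment in the
+// branch delay slot of the return.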
+void MacroAssembler::DropAndRet(int drop) {
+ Ret(USE_DELAY_SLOT);
+ daddiu(sp, sp, drop * kPointerSize);
+}
+
+
+void MacroAssembler::DropAndRet(int drop,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ // Both Drop and Ret need to be conditional.
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), r1, r2);
+ }
+
+ Drop(drop);
+ Ret();
+
+ if (cond != cc_always) {
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::Drop(int count,
+ Condition cond,
+ Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ daddiu(sp, sp, count * kPointerSize);
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::Swap(Register reg1,
+ Register reg2,
+ Register scratch) {
+ if (scratch.is(no_reg)) {
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mov(scratch, reg1);
+ mov(reg1, reg2);
+ mov(reg2, scratch);
+ }
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ BranchAndLink(target);
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ li(at, Operand(handle));
+ push(at);
+}
+
+
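+// Split src into two halves, each pushed in the upper 32 bits of a stack
+// slot so that both words look like Smis to the GC. Clobbers 'src'.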
+void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
+ ASSERT(!src.is(scratch));
+ mov(scratch, src);
+ dsrl32(src, src, 0);
+ dsll32(src, src, 0);
+ push(src);
+ dsll32(scratch, scratch, 0);
+ push(scratch);
+}
+
+
+void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
+ ASSERT(!dst.is(scratch));
+ pop(scratch);
+ dsrl32(scratch, scratch, 0);
+ pop(dst);
+ dsrl32(dst, dst, 0);
+ dsll32(dst, dst, 0);
+ or_(dst, dst, scratch);
+}
+
+
+void MacroAssembler::DebugBreak() {
+ PrepareCEntryArgs(0);
+ PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
+ CEntryStub ces(isolate(), 1);
+ ASSERT(AllowThisStubCall(&ces));
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+
+// ---------------------------------------------------------------------------
+// Exception handling.
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve a0-a3 and s0.
+ // a5-a7 are available. We will build up the handler from the bottom by
+ // pushing on the stack.
+ // Set up the code object (a5) and the state (a6) for pushing.
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+ li(a5, Operand(CodeObject()), CONSTANT_SIZE);
+ li(a6, Operand(state));
+
+ // Push the frame pointer, context, state, and code object.
+ if (kind == StackHandler::JS_ENTRY) {
+ ASSERT_EQ(Smi::FromInt(0), 0);
+ // The second zero_reg indicates no context.
+ // The first zero_reg is the NULL frame pointer.
+ // The operands are reversed to match the order of MultiPush/Pop.
+ Push(zero_reg, zero_reg, a6, a5);
+ } else {
+ MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
+ }
+
+ // Link the current handler as the next handler.
+ li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ ld(a5, MemOperand(a6));
+ push(a5);
+ // Set this new handler as the current one.
+ sd(sp, MemOperand(a6));
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a1);
+ Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ sd(a1, MemOperand(at));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // v0 = exception, a1 = code object, a2 = state.
+ Uld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
+ Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ dsrl(a2, a2, StackHandler::kKindWidth); // Handler index.
+ dsll(a2, a2, kPointerSizeLog2);
+ Daddu(a2, a3, a2);
+ ld(a2, MemOperand(a2)); // Smi-tagged offset.
+ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
+ dsra32(t9, a2, 0);
+ Daddu(t9, t9, a1);
+ Jump(t9); // Jump.
+}
+
+
+void MacroAssembler::Throw(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in v0.
+ Move(v0, value);
+
+ // Drop the stack pointer to the top of the top handler.
+ li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ ld(sp, MemOperand(a3));
+
+ // Restore the next handler.
+ pop(a2);
+ sd(a2, MemOperand(a3));
+
+ // Get the code object (a1) and state (a2). Restore the context and frame
+ // pointer.
+ MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
+
+ // If the handler is a JS frame, restore the context to the frame.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ Label done;
+ Branch(&done, eq, cp, Operand(zero_reg));
+ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&done);
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in v0.
+ if (!value.is(v0)) {
+ mov(v0, value);
+ }
+ // Drop the stack pointer to the top of the top stack handler.
+ li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ ld(sp, MemOperand(a3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind);
+ bind(&fetch_next);
+ ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ And(a2, a2, Operand(StackHandler::KindField::kMask));
+ Branch(&fetch_next, ne, a2, Operand(zero_reg));
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(a2);
+ sd(a2, MemOperand(a3));
+
+ // Get the code object (a1) and state (a2). Clear the context and frame
+ // pointer (0 was saved in the handler).
+ MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
+ }
+
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(t9));
+ ASSERT(!scratch2.is(t9));
+ ASSERT(!result.is(t9));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT(0 == (object_size & kObjectAlignmentMask));
+
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+ // used. MIPS has no ldm, so no additional checks are needed.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ intptr_t top =
+ reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ li(topaddr, Operand(allocation_top));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ ld(result, MemOperand(topaddr));
+ ld(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. t9 is used
+ // immediately below, so this use of t9 does not make the register contents
+ // differ between debug and release mode.
+ ld(t9, MemOperand(topaddr));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ ld(t9, MemOperand(topaddr, limit - top));
+ }
+
+ ASSERT(kPointerSize == kDoubleSize);
+ if (emit_debug_code()) {
+ And(at, result, Operand(kDoubleAlignmentMask));
+ Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top.
+ Daddu(scratch2, result, Operand(object_size));
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ sd(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Daddu(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
+ }
+
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!object_size.is(t9));
+ ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+ // used. MIPS has no ldm, so no additional checks are needed.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top =
+ reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ li(topaddr, Operand(allocation_top));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ ld(result, MemOperand(topaddr));
+ ld(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. t9 is used
+ // immediately below, so this use of t9 does not make the register contents
+ // differ between debug and release mode.
+ ld(t9, MemOperand(topaddr));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ ld(t9, MemOperand(topaddr, limit - top));
+ }
+
+ ASSERT(kPointerSize == kDoubleSize);
+ if (emit_debug_code()) {
+ And(at, result, Operand(kDoubleAlignmentMask));
+ Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ dsll(scratch2, object_size, kPointerSizeLog2);
+ Daddu(scratch2, result, scratch2);
+ } else {
+ Daddu(scratch2, result, Operand(object_size));
+ }
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ And(t9, scratch2, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ }
+ sd(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Daddu(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ And(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ ld(scratch, MemOperand(scratch));
+ Check(less, kUndoAllocationOfNonAllocatedMemory,
+ object, Operand(scratch));
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ sd(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ dsll(scratch1, length, 1); // Length in bytes, not chars.
+ daddiu(scratch1, scratch1,
+ kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate two-byte string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string
+ // while observing object alignment.
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kCharSize == 1);
+ daddiu(scratch1, length,
+ kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate ASCII string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
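+  // Internalized strings have both the kIsNotStringMask and
+  // kIsNotInternalizedMask bits clear; any other unique name must be a Symbol.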
+ Label succeed;
+ And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ Branch(&succeed, eq, at, Operand(zero_reg));
+ Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
+
+ bind(&succeed);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* need_gc,
+ TaggingMode tagging_mode) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+ // Store heap number map in the allocated object.
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (tagging_mode == TAG_RESULT) {
+ sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
+ sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst,
+ Register src,
+ RegList temps,
+ int field_count) {
+ ASSERT((temps & dst.bit()) == 0);
+ ASSERT((temps & src.bit()) == 0);
+ // Primitive implementation using only one temporary register.
+
+ Register tmp = no_reg;
+ // Find a temp register in temps list.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if ((temps & (1 << i)) != 0) {
+ tmp.code_ = i;
+ break;
+ }
+ }
+ ASSERT(!tmp.is(no_reg));
+
+ for (int i = 0; i < field_count; i++) {
+ ld(tmp, FieldMemOperand(src, i * kPointerSize));
+ sd(tmp, FieldMemOperand(dst, i * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch) {
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+ // Align src before copying in word size chunks.
+ Branch(&byte_loop, le, length, Operand(kPointerSize));
+ bind(&align_loop_1);
+ And(scratch, src, kPointerSize - 1);
+ Branch(&word_loop, eq, scratch, Operand(zero_reg));
+ lbu(scratch, MemOperand(src));
+ Daddu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Daddu(dst, dst, 1);
+ Dsubu(length, length, Operand(1));
+ Branch(&align_loop_1, ne, length, Operand(zero_reg));
+
+ // Copy bytes in word size chunks.
+ bind(&word_loop);
+ if (emit_debug_code()) {
+ And(scratch, src, kPointerSize - 1);
+ Assert(eq, kExpectingAlignmentForCopyBytes,
+ scratch, Operand(zero_reg));
+ }
+ Branch(&byte_loop, lt, length, Operand(kPointerSize));
+ ld(scratch, MemOperand(src));
+ Daddu(src, src, kPointerSize);
+
+  // TODO(kalmard) check if this can be optimized to use sd in most cases.
+ // Can't use unaligned access - copy byte by byte.
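+  // The destination may be unaligned, so the 8-byte word in scratch is stored
+  // one byte at a time, least significant byte first (little-endian target).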
+ sb(scratch, MemOperand(dst, 0));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 4));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 5));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 6));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 7));
+ Daddu(dst, dst, 8);
+
+ Dsubu(length, length, Operand(kPointerSize));
+ Branch(&word_loop);
+
+ // Copy the last bytes if any left.
+ bind(&byte_loop);
+ Branch(&done, eq, length, Operand(zero_reg));
+ bind(&byte_loop_1);
+ lbu(scratch, MemOperand(src));
+ Daddu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Daddu(dst, dst, 1);
+ Dsubu(length, length, Operand(1));
+ Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ bind(&done);
+}
+
+
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ Branch(&entry);
+ bind(&loop);
+ sd(filler, MemOperand(start_offset));
+ Daddu(start_offset, start_offset, kPointerSize);
+ bind(&entry);
+ Branch(&loop, lt, start_offset, Operand(end_offset));
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, ls, scratch,
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
+}
+
+
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* fail,
+ int elements_offset) {
+ Label smi_value, maybe_nan, have_double_value, is_nan, done;
+ Register mantissa_reg = scratch2;
+ Register exponent_reg = scratch3;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number
+ CheckMap(value_reg,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
+ // in the exponent.
+ li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+
+ lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ bind(&have_double_value);
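+  // The smi payload sits in the upper 32 bits, so a single arithmetic shift
+  // right by (32 - kDoubleSizeLog2) both untags the key and scales it to a
+  // byte offset into the double array.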
+ // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
+ Daddu(scratch1, scratch1, elements_reg);
+ sw(mantissa_reg, FieldMemOperand(
+ scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+ uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+ sizeof(kHoleNanLower32);
+ sw(exponent_reg, FieldMemOperand(scratch1, offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
+ // otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
+ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ LoadRoot(at, Heap::kNanValueRootIndex);
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
+ jmp(&have_double_value);
+
+ bind(&smi_value);
+ Daddu(scratch1, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
+ // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
+ Daddu(scratch1, scratch1, scratch2);
+ // scratch1 is now effective address of the double element
+
+ Register untagged_value = elements_reg;
+ SmiUntag(untagged_value, value_reg);
+ mtc1(untagged_value, f2);
+ cvt_d_w(f0, f2);
+ sdc1(f0, MemOperand(scratch1, 0));
+ bind(&done);
+}
+
+
+void MacroAssembler::CompareMapAndBranch(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to) {
+ ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
+}
+
+
+void MacroAssembler::CompareMapAndBranch(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to) {
+ Branch(branch_to, cond, obj_map, Operand(map));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ Label success;
+ CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
+ bind(&success);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+ bind(&fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadRoot(at, index);
+ Branch(fail, ne, scratch, Operand(at));
+}
+
+
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+ if (IsMipsSoftFloatABI) {
+ Move(dst, v0, v1);
+ } else {
+    Move(dst, f0);  // Reg f0 is the FP return value register.
+ }
+}
+
+
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+ if (IsMipsSoftFloatABI) {
+ Move(dst, a0, a1);
+ } else {
+    Move(dst, f12);  // Reg f12 is the first FP argument register.
+ }
+}
+
+
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
+ if (!IsMipsSoftFloatABI) {
+ Move(f12, src);
+ } else {
+ Move(a0, a1, src);
+ }
+}
+
+
+void MacroAssembler::MovToFloatResult(DoubleRegister src) {
+ if (!IsMipsSoftFloatABI) {
+ Move(f0, src);
+ } else {
+ Move(v0, v1, src);
+ }
+}
+
+
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ if (!IsMipsSoftFloatABI) {
+ const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+ if (src2.is(f12)) {
+ ASSERT(!src1.is(fparg2));
+ Move(fparg2, src2);
+ Move(f12, src1);
+ } else {
+ Move(f12, src1);
+ Move(fparg2, src2);
+ }
+ } else {
+ Move(a0, a1, src1);
+ Move(a2, a3, src2);
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes.
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+ // setup registers according to contract with ArgumentsAdaptorTrampoline:
+ // a0: actual arguments count
+ // a1: function (passed through to callee)
+ // a2: expected arguments count
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(a0));
+ ASSERT(expected.is_immediate() || expected.reg().is(a2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ li(a0, Operand(actual.immediate()));
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ li(a2, Operand(expected.immediate()));
+ }
+ }
+ } else if (actual.is_immediate()) {
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+ li(a0, Operand(actual.immediate()));
+ } else {
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
+ }
+
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ li(a3, Operand(code_constant));
+ daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ Call(adaptor);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ Branch(done);
+ }
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+    bind(&regular_invoke);
+ }
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code,
+ &done, &definitely_mismatches, flag,
+ call_wrapper);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+    // Continue here if InvokePrologue handled the invocation itself via the
+    // arguments adaptor because the parameter counts mismatched.
+ bind(&done);
+ }
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ ASSERT(function.is(a1));
+ Register expected_reg = a2;
+ Register code_reg = a3;
+ ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // The argument count is stored as int32_t on 64-bit platforms.
+ // TODO(plind): Smi on 32-bit platforms.
+ lw(expected_reg,
+ FieldMemOperand(code_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ ASSERT(function.is(a1));
+
+ // Get the function and setup the context.
+ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ InvokeCode(a3, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ li(a1, function);
+ InvokeFunction(a1, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail) {
+ ASSERT(kNotStringTag != 0);
+
+ ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ And(scratch, scratch, Operand(kIsNotStringMask));
+ Branch(fail, ne, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail) {
+ ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
+}
+
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ bool miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ GetObjectType(function, result, scratch);
+ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ if (miss_on_bound_function) {
+ ld(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // ld(scratch,
+ // FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ // And(scratch, scratch,
+ // Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+ lwu(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ And(scratch, scratch,
+ Operand(1 << SharedFunctionInfo::kBoundFunction));
+ Branch(miss, ne, scratch, Operand(zero_reg));
+ }
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+ // Get the prototype or initial map from the function.
+ ld(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ LoadRoot(t8, Heap::kTheHoleValueRootIndex);
+ Branch(miss, eq, result, Operand(t8));
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ GetObjectType(result, scratch, scratch);
+ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+ // Get the prototype from the initial map.
+ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ ld(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
+
+void MacroAssembler::GetObjectType(Register object,
+ Register map,
+ Register type_reg) {
+ ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+
+// -----------------------------------------------------------------------------
+// Runtime calls.
+
+void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
+ cond, r1, r2, bd);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ ASSERT(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate()),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate()),
+ next_address);
+
+ ASSERT(function_address.is(a1) || function_address.is(a2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
+ lb(t9, MemOperand(t9, 0));
+ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ li(t9, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mov(t9, function_address);
+ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ li(s3, Operand(next_address));
+ ld(s0, MemOperand(s3, kNextOffset));
+ ld(s1, MemOperand(s3, kLimitOffset));
+ ld(s2, MemOperand(s3, kLevelOffset));
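+  // s0, s1 and s2 now hold the previous next-handle pointer, limit and level;
+  // being callee-saved, they survive the native call below.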
+ Daddu(s2, s2, Operand(1));
+ sd(s2, MemOperand(s3, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, a0);
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(this, t9);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, a0);
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ ld(v0, return_value_operand);
+ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ sd(s0, MemOperand(s3, kNextOffset));
+ if (emit_debug_code()) {
+ ld(a1, MemOperand(s3, kLevelOffset));
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ }
+ Dsubu(s2, s2, Operand(1));
+ sd(s2, MemOperand(s3, kLevelOffset));
+ ld(at, MemOperand(s3, kLimitOffset));
+ Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+ // Check if the function scheduled an exception.
+ bind(&leave_exit_frame);
+ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+ ld(a5, MemOperand(at));
+ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ ld(cp, *context_restore_operand);
+ }
+ li(s0, Operand(stack_space));
+ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
+
+ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ bind(&delete_allocated_handles);
+ sd(s1, MemOperand(s3, kLimitOffset));
+ mov(s0, v0);
+ mov(a0, v0);
+ PrepareCallCFunction(1, s1);
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+ 1);
+ mov(v0, s0);
+ jmp(&leave_exit_frame);
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+  // If the hash field contains an array index, pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it do not
+  // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
+}
+
+
+void MacroAssembler::ObjectToDoubleFPURegister(Register object,
+ FPURegister result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags) {
+ Label done;
+ if ((flags & OBJECT_NOT_SMI) == 0) {
+ Label not_smi;
+    JumpIfNotSmi(object, &not_smi);
+ // Remove smi tag and convert to double.
+ // dsra(scratch1, object, kSmiTagSize);
+ dsra32(scratch1, object, 0);
+ mtc1(scratch1, result);
+ cvt_d_w(result, result);
+ Branch(&done);
+    bind(&not_smi);
+ }
+ // Check for heap number and load double value from it.
+ ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+ // If exponent is all ones the number is either a NaN or +/-Infinity.
+ Register exponent = scratch1;
+ Register mask_reg = scratch2;
+ lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ li(mask_reg, HeapNumber::kExponentMask);
+
+ And(exponent, exponent, mask_reg);
+ Branch(not_number, eq, exponent, Operand(mask_reg));
+ }
+ ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+ bind(&done);
+}
+
+
+void MacroAssembler::SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1) {
+ // dsra(scratch1, smi, kSmiTagSize);
+ dsra32(scratch1, smi, 0);
+ mtc1(scratch1, value);
+ cvt_d_w(value, value);
+}
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+
+ if (left.is(right) && dst.is(left)) {
+ ASSERT(!dst.is(t9));
+ ASSERT(!scratch.is(t9));
+ ASSERT(!left.is(t9));
+ ASSERT(!right.is(t9));
+ ASSERT(!overflow_dst.is(t9));
+ mov(t9, right);
+ right = t9;
+ }
+
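+  // Signed overflow occurred iff the operands have the same sign but the sum
+  // has a different one; (dst ^ left) & (dst ^ right) has its sign bit set
+  // exactly in that case, so overflow_dst is negative on overflow.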
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ daddu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ daddu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ daddu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+ ASSERT(!scratch.is(left));
+ ASSERT(!scratch.is(right));
+
+ // This happens with some crankshaft code. Since Subu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ mov(overflow_dst, zero_reg);
+ return;
+ }
+
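+  // Signed overflow occurred iff the operands have different signs and the
+  // difference's sign differs from left's; (dst ^ left) & (left ^ right) has
+  // its sign bit set exactly in that case, so overflow_dst is negative on
+  // overflow.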
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ dsubu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, right); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ dsubu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ dsubu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All parameters are on the stack. v0 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference(f, isolate()));
+ CEntryStub stub(isolate(), 1, save_doubles);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ BranchDelaySlot bd) {
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ext);
+
+ CEntryStub stub(isolate(), 1);
+ CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(num_arguments);
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ BranchDelaySlot bd) {
+ PrepareCEntryFunction(builtin);
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ al,
+ zero_reg,
+ Operand(zero_reg),
+ bd);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ GetBuiltinEntry(t9, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(t9));
+ Call(t9);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(t9);
+ }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ ld(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(a1));
+ GetBuiltinFunction(a1, id);
+ // Load the code entry point from the builtins object.
+ ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch1, Operand(value));
+ li(scratch2, Operand(ExternalReference(counter)));
+ sd(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ ld(scratch1, MemOperand(scratch2));
+ Daddu(scratch1, scratch1, Operand(value));
+ sd(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ ld(scratch1, MemOperand(scratch2));
+ Dsubu(scratch1, scratch1, Operand(value));
+ sd(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Debugging.
+
+void MacroAssembler::Assert(Condition cc, BailoutReason reason,
+ Register rs, Operand rt) {
+ if (emit_debug_code())
+ Check(cc, reason, rs, rt);
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ ASSERT(!elements.is(at));
+ Label ok;
+ push(elements);
+ ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ bind(&ok);
+ pop(elements);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cc, BailoutReason reason,
+ Register rs, Operand rt) {
+ Label L;
+ Branch(&L, cc, rs, rt);
+ Abort(reason);
+ // Will not return here.
+ bind(&L);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+ Label abort_start;
+ bind(&abort_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
+#endif
+
+ li(a0, Operand(Smi::FromInt(reason)));
+ push(a0);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context register, cp).
+ Move(dst, cp);
+ }
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ ld(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ ld(scratch,
+ MemOperand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ ld(at, FieldMemOperand(scratch, offset));
+ Branch(no_map_match, ne, map_in_out, Operand(at));
+
+ // Use the transitioned cached map.
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ ld(map_in_out, FieldMemOperand(scratch, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ ld(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ ld(function, FieldMemOperand(function,
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ ld(function, MemOperand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ Branch(&ok);
+ bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ bind(&ok);
+ }
+}
+
+
+void MacroAssembler::StubPrologue() {
+ Push(ra, fp, cp);
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+  PredictableCodeSizeScope predictable_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Load the stub address to t9 and call it,
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ li(t9,
+ Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
+ ADDRESS_LOAD);
+ nop(); // Prevent jalr to jal optimization.
+ jalr(t9, a0);
+ nop(); // Branch delay slot nop.
+ nop(); // Pad the empty space.
+ } else {
+ Push(ra, fp, cp, a1);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ // Adjust fp to point to caller's fp.
+ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
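+  // Frame layout after the stores below (growing down):
+  //   [sp + 4*kPointerSize]: ra
+  //   [sp + 3*kPointerSize]: caller's fp
+  //   [sp + 2*kPointerSize]: context (cp)
+  //   [sp + 1*kPointerSize]: frame type (smi)
+  //   [sp + 0*kPointerSize]: code object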
+ daddiu(sp, sp, -5 * kPointerSize);
+ li(t8, Operand(Smi::FromInt(type)));
+ li(t9, Operand(CodeObject()), CONSTANT_SIZE);
+ sd(ra, MemOperand(sp, 4 * kPointerSize));
+ sd(fp, MemOperand(sp, 3 * kPointerSize));
+ sd(cp, MemOperand(sp, 2 * kPointerSize));
+ sd(t8, MemOperand(sp, 1 * kPointerSize));
+ sd(t9, MemOperand(sp, 0 * kPointerSize));
+ // Adjust FP to point to saved FP.
+ Daddu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ mov(sp, fp);
+ ld(fp, MemOperand(sp, 0 * kPointerSize));
+ ld(ra, MemOperand(sp, 1 * kPointerSize));
+ daddiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ int stack_space) {
+ // Set up the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+ // [fp - 1 (==kSPOffset)] - sp of the called function
+ // [fp - 2 (==kCodeOffset)] - CodeObject
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
+
+ // Save registers.
+ daddiu(sp, sp, -4 * kPointerSize);
+ sd(ra, MemOperand(sp, 3 * kPointerSize));
+ sd(fp, MemOperand(sp, 2 * kPointerSize));
+ daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
+
+ if (emit_debug_code()) {
+ sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
+
+ // Accessed from ExitFrame::code_slot.
+ li(t8, Operand(CodeObject()), CONSTANT_SIZE);
+ sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
+ // Save the frame pointer and the context in top.
+ li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ sd(fp, MemOperand(t8));
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ sd(cp, MemOperand(t8));
+
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (save_doubles) {
+ // The stack is already aligned to 0 modulo 8 for stores with sdc1.
+ int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
+    int space = kNumOfSavedRegisters * kDoubleSize;
+ Dsubu(sp, sp, Operand(space));
+ // Remember: we only need to save every 2nd double FPU value.
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+
+ // Reserve place for the return address, stack space and an optional slot
+ // (used by the DirectCEntryStub to hold the return value if a struct is
+ // returned) and align the frame preparing for calling the runtime function.
+ ASSERT(stack_space >= 0);
+ Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ daddiu(at, sp, kPointerSize);
+ sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+ Register argument_count,
+ bool restore_context,
+ bool do_return) {
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Remember: we only need to restore every 2nd double FPU value.
+ int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
+ Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
+ kNumOfSavedRegisters * kDoubleSize));
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ ldc1(reg, MemOperand(t8, i * kDoubleSize));
+ }
+ }
+
+ // Clear top frame.
+ li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ sd(zero_reg, MemOperand(t8));
+
+ // Restore current context from top and clear it in debug mode.
+ if (restore_context) {
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ ld(cp, MemOperand(t8));
+ }
+#ifdef DEBUG
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ sd(a3, MemOperand(t8));
+#endif
+
+ // Pop the arguments, restore registers, and return.
+ mov(sp, fp); // Respect ABI stack constraint.
+ ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+
+ if (argument_count.is_valid()) {
+ dsll(t8, argument_count, kPointerSizeLog2);
+ daddu(sp, sp, t8);
+ }
+
+ if (do_return) {
+ Ret(USE_DELAY_SLOT);
+    // If returning, the instruction in the delay slot will be the daddiu below.
+ }
+ daddiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ // dsll(scratch1, length, kSmiTagSize);
+ dsll32(scratch1, length, 0);
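+  // Shifting left by 32 smi-tags the untagged length (kSmiShift == 32).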
+ LoadRoot(scratch2, map_index);
+ sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ li(scratch1, Operand(String::kEmptyHashField));
+ sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+ sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one Mips
+ // platform for another Mips platform with a different alignment.
+ return base::OS::ActivationFrameAlignment();
+#else  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+}
+
+
+void MacroAssembler::AssertStackIsAligned() {
+ if (emit_debug_code()) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ ASSERT(IsPowerOf2(frame_alignment));
+ andi(at, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ stop("Unexpected stack alignment");
+ bind(&alignment_as_expected);
+ }
+ }
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+ Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero) {
+ Dsubu(scratch, reg, Operand(1));
+ Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
+ scratch, Operand(zero_reg));
+ and_(at, scratch, reg); // In the delay slot.
+ Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+ ASSERT(!reg.is(overflow));
+ mov(overflow, reg); // Save original value.
+ SmiTag(reg);
+ xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst,
+ Register src,
+ Register overflow) {
+ if (dst.is(src)) {
+ // Fall back to slower case.
+ SmiTagCheckOverflow(dst, overflow);
+ } else {
+ ASSERT(!dst.is(src));
+ ASSERT(!dst.is(overflow));
+ ASSERT(!src.is(overflow));
+ SmiTag(dst, src);
+ xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
+ }
+}
+
+
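+// With 32-bit smis the payload occupies the upper word of the 64-bit cell;
+// UntagSmiMemOperand refers to just that word, so a 32-bit load already
+// yields the untagged value.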
+void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
+ if (SmiValuesAre32Bits()) {
+ lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ } else {
+ lw(dst, src);
+ SmiUntag(dst);
+ }
+}
+
+
+void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
+ if (SmiValuesAre32Bits()) {
+ // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
+ lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ dsll(dst, dst, scale);
+ } else {
+ lw(dst, src);
+ ASSERT(scale >= kSmiTagSize);
+ sll(dst, dst, scale - kSmiTagSize);
+ }
+}
+
+
+// Returns 2 values: the Smi and a scaled version of the int within the Smi.
+void MacroAssembler::SmiLoadWithScale(Register d_smi,
+ Register d_scaled,
+ MemOperand src,
+ int scale) {
+ if (SmiValuesAre32Bits()) {
+ ld(d_smi, src);
+ dsra(d_scaled, d_smi, kSmiShift - scale);
+ } else {
+ lw(d_smi, src);
+ ASSERT(scale >= kSmiTagSize);
+ sll(d_scaled, d_smi, scale - kSmiTagSize);
+ }
+}
+
+
+// Returns 2 values: the untagged Smi (int32) and scaled version of that int.
+void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
+ Register d_scaled,
+ MemOperand src,
+ int scale) {
+ if (SmiValuesAre32Bits()) {
+ lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
+ dsll(d_scaled, d_int, scale);
+ } else {
+ lw(d_int, src);
+    // Need both the int and the scaled int, so use two instructions.
+ SmiUntag(d_int);
+ sll(d_scaled, d_int, scale);
+ }
+}
+
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst,
+ Register src,
+ Label* smi_case) {
+ // ASSERT(!dst.is(src));
+ JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
+ SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
+ Register src,
+ Label* non_smi_case) {
+ // ASSERT(!dst.is(src));
+ JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
+ SmiUntag(dst, src);
+}
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Register scratch,
+ BranchDelaySlot bd) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Register scratch,
+ BranchDelaySlot bd) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+ Register reg2,
+ Label* on_not_both_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+  // TODO(plind): Find some better way to fix this assert issue.
+#if defined(__APPLE__)
+ ASSERT_EQ(1, kSmiTagMask);
+#else
+ ASSERT_EQ((uint64_t)1, kSmiTagMask);
+#endif
+ or_(at, reg1, reg2);
+ JumpIfNotSmi(at, on_not_both_smi);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+ Register reg2,
+ Label* on_either_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+  // TODO(plind): Find some better way to fix this assert issue.
+#if defined(__APPLE__)
+ ASSERT_EQ(1, kSmiTagMask);
+#else
+ ASSERT_EQ((uint64_t)1, kSmiTagMask);
+#endif
+ // Both Smi tags must be 1 (not Smi).
+ and_(at, reg1, reg2);
+ JumpIfSmi(at, on_either_smi);
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(ne, kOperandIsASmi, at, Operand(zero_reg));
+ }
+}
+
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(eq, kOperandIsASmi, at, Operand(zero_reg));
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, a4);
+ Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
+ push(object);
+ ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
+ pop(object);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, a4);
+ Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
+ push(object);
+ ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
+ pop(object);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ push(object);
+ ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
+ pop(object);
+ bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ ASSERT(!reg.is(at));
+ LoadRoot(at, index);
+ Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
+ }
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+  // dsra32 untags the Smi (shift right 32) and divides by two in one step.
+  dsra32(mask, mask, 1);
+ Daddu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ Daddu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ ld(scratch2, MemOperand(scratch1, kPointerSize));
+ ld(scratch1, MemOperand(scratch1, 0));
+ Xor(scratch1, scratch1, Operand(scratch2));
+ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ Daddu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ Branch(not_found);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+  // Shift away the 32-bit Smi tag; the source must be |object|, which holds
+  // the Smi (scratch is not yet initialized on this path).
+  dsra32(scratch, object, 0);
+ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ dsll(scratch, scratch, kPointerSizeLog2 + 1);
+ Daddu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ And(scratch1, first, Operand(second));
+ JumpIfSmi(scratch1, failure);
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+ second,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
+ andi(scratch2, second, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch, type, Operand(kFlatAsciiStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+}
+
+
+static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ num_reg_arguments += 2 * num_double_arguments;
+
+ // O32: Up to four simple arguments are passed in registers a0..a3.
+ // N64: Up to eight simple arguments are passed in registers a0..a7.
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ stack_passed_words += kCArgSlotCount;
+ return stack_passed_words;
+}
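+
+// Example (N64): six integer arguments and two double arguments give
+// num_reg_arguments = 6 + 2 * 2 = 10, so 10 - 8 = 2 words are passed on the
+// stack, plus the kCArgSlotCount reservation.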
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ Register scratch,
+ uint32_t encoding_mask) {
+ Label is_object;
+ SmiTst(string, at);
+ Check(ne, kNonObject, at, Operand(zero_reg));
+
+ ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ andi(at, at, kStringRepresentationMask | kStringEncodingMask);
+ li(scratch, Operand(encoding_mask));
+ Check(eq, kUnexpectedStringType, at, Operand(scratch));
+
+ // TODO(plind): requires Smi size check code for mips32.
+
+ ld(at, FieldMemOperand(string, String::kLengthOffset));
+ Check(lt, kIndexIsTooLarge, index, Operand(at));
+
+ ASSERT(Smi::FromInt(0) == 0);
+ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+  // N64: Up to eight simple arguments are passed in registers a0..a7;
+  //      no argument slots are used.
+ // O32: Up to four simple arguments are passed in registers a0..a3.
+ // Those four arguments must have reserved argument slots on the stack for
+ // mips, even though those argument slots are not normally used.
+ // Both ABIs: Remaining arguments are pushed on the stack, above (higher
+ // address than) the (O32) argument slots. (arg slot calculation handled by
+ // CalculateStackPassedWords()).
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
+ if (frame_alignment > kPointerSize) {
+    // Make the stack end at the alignment boundary and make room for the
+    // stack-passed arguments and the original value of sp.
+ mov(scratch, sp);
+ Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment));
+ sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
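+
+// Usage sketch: call PrepareCallCFunction(n, scratch) to reserve and align the
+// stack as computed above, then CallCFunction(..., n); CallCFunctionHelper()
+// undoes the reservation, reloading the saved sp when re-alignment was done.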
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ li(t8, Operand(function));
+ CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ ASSERT(has_frame());
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+  // The argument slots are presumed to have been set up by
+  // PrepareCallCFunction. The C function must be called via t9, per the
+  // MIPS ABI.
+
+#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+ if (emit_debug_code()) {
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ Label alignment_as_expected;
+ And(at, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort possibly
+ // re-entering here.
+ stop("Unexpected alignment in CallCFunction");
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+ // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the ra register
+  // stays correct.
+
+ if (!function.is(t9)) {
+ mov(t9, function);
+ function = t9;
+ }
+
+ Call(function);
+
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
+
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+
+#undef BRANCH_ARGS_CHECK
+
+
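+// PatchRelocatedValue() and GetRelocatedValue() below operate on the
+// four-instruction ADDRESS_LOAD li sequence: a lui at offset 0, an ori at
+// offset kInstrSize, a shift (not touched here) at offset 2 * kInstrSize and
+// a final ori at offset 3 * kInstrSize; hence FlushICache(li_location, 4).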
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+ Register scratch,
+ Register new_value) {
+ lwu(scratch, MemOperand(li_location));
+ // At this point scratch is a lui(at, ...) instruction.
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, kTheInstructionToPatchShouldBeALui,
+ scratch, Operand(LUI));
+ lwu(scratch, MemOperand(li_location));
+ }
+ dsrl32(t9, new_value, 0);
+ Ins(scratch, t9, 0, kImm16Bits);
+ sw(scratch, MemOperand(li_location));
+
+ lwu(scratch, MemOperand(li_location, kInstrSize));
+ // scratch is now ori(at, ...).
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, kTheInstructionToPatchShouldBeAnOri,
+ scratch, Operand(ORI));
+ lwu(scratch, MemOperand(li_location, kInstrSize));
+ }
+ dsrl(t9, new_value, kImm16Bits);
+ Ins(scratch, t9, 0, kImm16Bits);
+ sw(scratch, MemOperand(li_location, kInstrSize));
+
+ lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+ // scratch is now ori(at, ...).
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, kTheInstructionToPatchShouldBeAnOri,
+ scratch, Operand(ORI));
+ lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+ }
+
+ Ins(scratch, new_value, 0, kImm16Bits);
+ sw(scratch, MemOperand(li_location, kInstrSize * 3));
+
+ // Update the I-cache so the new lui and ori can be executed.
+ FlushICache(li_location, 4);
+}
+
+void MacroAssembler::GetRelocatedValue(Register li_location,
+ Register value,
+ Register scratch) {
+ lwu(value, MemOperand(li_location));
+ if (emit_debug_code()) {
+ And(value, value, kOpcodeMask);
+ Check(eq, kTheInstructionShouldBeALui,
+ value, Operand(LUI));
+ lwu(value, MemOperand(li_location));
+ }
+
+ // value now holds a lui instruction. Extract the immediate.
+ andi(value, value, kImm16Mask);
+ dsll32(value, value, kImm16Bits);
+
+ lwu(scratch, MemOperand(li_location, kInstrSize));
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, kTheInstructionShouldBeAnOri,
+ scratch, Operand(ORI));
+ lwu(scratch, MemOperand(li_location, kInstrSize));
+ }
+ // "scratch" now holds an ori instruction. Extract the immediate.
+ andi(scratch, scratch, kImm16Mask);
+ dsll32(scratch, scratch, 0);
+
+ or_(value, value, scratch);
+
+ lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, kTheInstructionShouldBeAnOri,
+ scratch, Operand(ORI));
+ lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+ }
+ // "scratch" now holds an ori instruction. Extract the immediate.
+ andi(scratch, scratch, kImm16Mask);
+ dsll(scratch, scratch, kImm16Bits);
+
+ or_(value, value, scratch);
+ // Sign extend extracted address.
+ dsra(value, value, kImm16Bits);
+}
+
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met) {
+ // TODO(plind): Fix li() so we can use constant embedded inside And().
+ // And(scratch, object, Operand(~Page::kPageAlignmentMask));
+ li(at, Operand(~Page::kPageAlignmentMask), CONSTANT_SIZE); // plind HACK
+ And(scratch, object, at);
+ ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ And(scratch, scratch, Operand(mask));
+ Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ li(scratch, Operand(map));
+ ld(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ And(scratch, scratch, Operand(Map::Deprecated::kMask));
+ Branch(if_deprecated, ne, scratch, Operand(zero_reg));
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color;
+ // Note that we are using a 4-byte aligned 8-byte load.
+ Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ And(t8, t9, Operand(mask_scratch));
+ Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
+ // Shift left 1 by adding.
+ Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
+ And(t8, t9, Operand(mask_scratch));
+ Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
+
+ bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object) {
+ ASSERT(!AreAliased(value, scratch, t8, no_reg));
+ Label is_data_object;
+ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ Branch(&is_data_object, eq, t8, Operand(scratch));
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ Branch(not_data_object, ne, t8, Operand(zero_reg));
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ // addr_reg is divided into fields:
+ // |63 page base 20|19 high 8|7 shift 3|2 0|
+ // 'high' gives the index of the cell holding color bits for the object.
+ // 'shift' gives the offset in the cell for this object's color.
+ And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+ Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+ dsll(t8, t8, Bitmap::kBytesPerCellLog2);
+ Daddu(bitmap_reg, bitmap_reg, t8);
+ li(t8, Operand(1));
+ dsllv(mask_reg, t8, mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Register load_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ // Note that we are using a 4-byte aligned 8-byte load.
+ Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ And(t8, mask_scratch, load_scratch);
+ Branch(&done, ne, t8, Operand(zero_reg));
+
+ if (emit_debug_code()) {
+ // Check for impossible bit pattern.
+ Label ok;
+    // dsll may overflow, making the check conservative.
+ dsll(t8, mask_scratch, 1);
+ And(t8, load_scratch, t8);
+ Branch(&ok, eq, t8, Operand(zero_reg));
+ stop("Impossible marking bit pattern");
+ bind(&ok);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = load_scratch; // Holds map while checking type.
+ Register length = load_scratch; // Holds length of object after testing type.
+ Label is_data_object;
+
+ // Check for heap-number
+ ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ {
+ Label skip;
+ Branch(&skip, ne, t8, Operand(map));
+ li(length, HeapNumber::kSize);
+ Branch(&is_data_object);
+ bind(&skip);
+ }
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ And(t8, instance_type, Operand(kExternalStringTag));
+ {
+ Label skip;
+ Branch(&skip, eq, t8, Operand(zero_reg));
+ li(length, ExternalString::kSize);
+ Branch(&is_data_object);
+ bind(&skip);
+ }
+
+ // Sequential string, either ASCII or UC16.
+  // The length is loaded already untagged (UntagSmiFieldMemOperand below).
+  // For ASCII (char-size of 1) it is used directly as the byte count; for
+  // UC16 (char-size of 2) it is doubled to get the size in bytes.
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
+ And(t8, instance_type, Operand(kStringEncodingMask));
+ {
+ Label skip;
+ Branch(&skip, ne, t8, Operand(zero_reg));
+ // Adjust length for UC16.
+ dsll(t9, t9, 1);
+ bind(&skip);
+ }
+ Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+ ASSERT(!length.is(t8));
+ And(length, length, Operand(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Or(t8, t8, Operand(mask_scratch));
+ Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+ Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Daddu(t8, t8, Operand(length));
+ Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Operand(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
+}
+
+
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+ Register empty_fixed_array_value = a6;
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+ mov(a2, a0);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+
+ EnumLength(a3, a1);
+ Branch(
+ call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+
+ jmp(&start);
+
+ bind(&next);
+ ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLength(a3, a1);
+ Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+
+ bind(&start);
+
+ // Check that there are no elements. Register a2 contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
+ Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
+ Branch(call_runtime, ne, a2, Operand(at));
+
+ bind(&no_elements);
+ ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ Branch(&next, ne, a2, Operand(null_value));
+}
+
+
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+ ASSERT(!output_reg.is(input_reg));
+ Label done;
+ li(output_reg, Operand(255));
+ // Normal branch: nop in delay slot.
+ Branch(&done, gt, input_reg, Operand(output_reg));
+ // Use delay slot in this branch.
+ Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+ mov(output_reg, zero_reg); // In delay slot.
+ mov(output_reg, input_reg); // Value is in range 0..255.
+ bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg) {
+ Label above_zero;
+ Label done;
+ Label in_bounds;
+
+ Move(temp_double_reg, 0.0);
+ BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is <= 0 or NaN: return 0.
+ mov(result_reg, zero_reg);
+ Branch(&done);
+
+ // Double value is >= 255, return 255.
+ bind(&above_zero);
+ Move(temp_double_reg, 255.0);
+ BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+ li(result_reg, Operand(255));
+ Branch(&done);
+
+ // In 0-255 range, round and truncate.
+ bind(&in_bounds);
+ cvt_w_d(temp_double_reg, input_reg);
+ mfc1(result_reg, temp_double_reg);
+ bind(&done);
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found,
+ Condition cond,
+ Label* allocation_memento_present) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ Daddu(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+ li(at, Operand(new_space_allocation_top));
+ ld(at, MemOperand(at));
+ Branch(no_memento_found, gt, scratch_reg, Operand(at));
+ ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ if (allocation_memento_present) {
+ Branch(allocation_memento_present, cond, scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
+ }
+}
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // scratch0 (current) walks the prototype chain, starting at the object.
+ Move(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
+ ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ Branch(&loop_again, ne, current, Operand(factory->null_value()));
+}
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
+ : address_(address),
+ size_(instructions * Assembler::kInstrSize),
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ if (flush_cache_ == FLUSH) {
+ CpuFeatures::FlushICache(address_, size_);
+ }
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr instr) {
+ masm()->emit(instr);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+  // Left disabled on MIPS64: an Address no longer fits in a single 32-bit
+  // Instr, so the 32-bit emit below cannot be used as-is.
+  // masm()->emit(reinterpret_cast<Instr>(addr));
+
+
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+ Instr instr = Assembler::instr_at(masm_.pc_);
+ ASSERT(Assembler::IsBranch(instr));
+ uint32_t opcode = Assembler::GetOpcodeField(instr);
+  // Currently only the 'eq' and 'ne' cond values are supported, and only the
+  // simple branch instructions (with opcode being the branch type).
+ // There are some special cases (see Assembler::IsBranch()) so extending this
+ // would be tricky.
+ ASSERT(opcode == BEQ ||
+ opcode == BNE ||
+ opcode == BLEZ ||
+ opcode == BGTZ ||
+ opcode == BEQL ||
+ opcode == BNEL ||
+ opcode == BLEZL ||
+ opcode == BGTZL);
+ opcode = (cond == eq) ? BEQ : BNE;
+ instr = (instr & ~kOpcodeMask) | opcode;
+ masm_.emit(instr);
+}
+
+
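+// TruncatingDiv computes dividend / divisor truncated toward zero without a
+// divide instruction: take the high word of dividend * multiplier, correct it
+// by +/- dividend when the multiplier's sign differs from the divisor's,
+// arithmetic-shift by ms.shift(), then add the dividend's sign bit
+// (srl(at, dividend, 31)) to turn the flooring result into truncation.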
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(at));
+ ASSERT(!result.is(at));
+ MultiplierAndShift ms(divisor);
+ li(at, Operand(ms.multiplier()));
+ Mult(dividend, Operand(at));
+ mfhi(result);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ Addu(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ Subu(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) sra(result, result, ms.shift());
+ srl(at, dividend, 31);
+ Addu(result, result, Operand(at));
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+
+#include "src/assembler.h"
+#include "src/globals.h"
+#include "src/mips64/assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+// Reserved Register Usage Summary.
+//
+// Registers t8, t9, and at are reserved for use by the MacroAssembler.
+//
+// The programmer should know that the MacroAssembler may clobber these three,
+// but won't touch other registers except in special cases.
+//
+// Per the MIPS ABI, register t9 must be used for indirect function call
+// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
+// trying to update gp register for position-independent-code. Whenever
+// MIPS generated code calls C code, it must be via t9 register.
+
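+// A minimal sketch of that convention (CallCFunction()/CallCFunctionHelper()
+// in the .cc file follow the same pattern; |function| here is hypothetical):
+//
+//   li(t9, Operand(function));  // Load the C entry point.
+//   Call(t9);                   // The indirect call must go through t9.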
+
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode {
+ EMIT_RETURN = true,
+ NO_EMIT_RETURN = false
+};
+
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag
+ DONT_TAG_RESULT
+};
+
+// Flags used for the ObjectToDoubleFPURegister function.
+enum ObjectToDoubleFlags {
+ // No special flags.
+ NO_OBJECT_TO_DOUBLE_FLAGS = 0,
+ // Object is known to be a non smi.
+ OBJECT_NOT_SMI = 1 << 0,
+ // Don't load NaNs or infinities, branch to the non number case instead.
+ AVOID_NANS_AND_INFINITIES = 1 << 1
+};
+
+// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
+enum BranchDelaySlot {
+ USE_DELAY_SLOT,
+ PROTECT
+};
+
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+ // If the constant value can be represented in just 16 bits, then
+ // optimize the li to use a single instruction, rather than lui/ori/dsll
+ // sequence.
+ OPTIMIZE_SIZE = 0,
+ // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
+ // could be loaded with just one, so that this value is patchable later.
+ CONSTANT_SIZE = 1,
+  // For address loads only 4 instructions are required. Used to mark
+ // constant load that will be used as address without relocation
+ // information. It ensures predictable code size, so specific sites
+ // in code are patchable.
+ ADDRESS_LOAD = 2
+};
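+
+// For reference, the ADDRESS_LOAD form is the four-instruction pattern that
+// GetRelocatedValue()/PatchRelocatedValue() expect, roughly:
+//
+//   lui(rd, addr_47_32);       // bits 47..32 (sign-extended)
+//   ori(rd, rd, addr_31_16);   // bits 31..16
+//   dsll(rd, rd, 16);
+//   ori(rd, rd, addr_15_0);    // bits 15..0
+//
+// The authoritative emitter is li() in the corresponding .cc file.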
+
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
+ // Assumes that Smis are shifted by 32 bits and little endianness.
+ STATIC_ASSERT(kSmiShift == 32);
+ return MemOperand(rm, offset + (kSmiShift / kBitsPerByte));
+}
+
+
+inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
+ return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
+}
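+
+// Example: a Smi keeps its 32-bit payload in the upper word, so for a Smi
+// stored at [rm + offset] a 32-bit load through UntagSmiMemOperand(rm, offset)
+// (i.e. from offset + 4 on little-endian) yields the already-untagged value.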
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+// TODO(plind): Currently ONLY used for O32. Should be fixed for
+// n64, and used in RegExp code, and other places
+// with more than 8 arguments.
+inline MemOperand CFunctionArgumentOperand(int index) {
+ ASSERT(index > kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
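+
+// For example, CFunctionArgumentOperand(5) is MemOperand(sp, kCArgsSlotsSize):
+// argument 5 occupies the first slot above the four O32 arg slots, and
+// argument 6 follows at kCArgsSlotsSize + kPointerSize.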
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such functions on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+ // Arguments macros.
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
+
+ // Cases when relocation is not needed.
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target) { \
+ Name(target, bd); \
+ } \
+ void Name(target_type target, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ COND_TYPED_ARGS) { \
+ Name(target, COND_ARGS, bd); \
+ }
+
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
+ DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+
+ DECLARE_BRANCH_PROTOTYPES(Branch)
+ DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+ DECLARE_BRANCH_PROTOTYPES(BranchShort)
+
+#undef DECLARE_BRANCH_PROTOTYPES
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
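+
+  // As an illustration, DECLARE_BRANCH_PROTOTYPES(Branch) above declares,
+  // among others:
+  //   void Branch(Label* target, BranchDelaySlot bd = PROTECT);
+  //   void Branch(Label* target, Condition cond, Register r1,
+  //               const Operand& r2, BranchDelaySlot bd = PROTECT);
+  // together with the equivalent int16_t-offset overloads.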
+
+
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
+ void Jump(Register target, COND_ARGS);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+ static int CallSize(Register target, COND_ARGS);
+ void Call(Register target, COND_ARGS);
+ static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ COND_ARGS);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ COND_ARGS);
+ void Ret(COND_ARGS);
+ inline void Ret(BranchDelaySlot bd, Condition cond = al,
+ Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+ Ret(cond, rs, rt, bd);
+ }
+
+ void Branch(Label* L,
+ Condition cond,
+ Register rs,
+ Heap::RootListIndex index,
+ BranchDelaySlot bdslot = PROTECT);
+
+#undef COND_ARGS
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count,
+ Condition cond = cc_always,
+ Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ // Trivial case of DropAndRet that utilizes the delay slot and only emits
+ // 2 instructions.
+ void DropAndRet(int drop);
+
+ void DropAndRet(int drop,
+ Condition cond,
+ Register reg,
+ const Operand& op);
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
+ void Call(Label* target);
+
+ inline void Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ mov(dst, src);
+ }
+ }
+
+ inline void Move(FPURegister dst, FPURegister src) {
+ if (!dst.is(src)) {
+ mov_d(dst, src);
+ }
+ }
+
+ inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+ mfc1(dst_low, src);
+ mfhc1(dst_high, src);
+ }
+
+ inline void FmoveHigh(Register dst_high, FPURegister src) {
+ mfhc1(dst_high, src);
+ }
+
+ inline void FmoveLow(Register dst_low, FPURegister src) {
+ mfc1(dst_low, src);
+ }
+
+ inline void Move(FPURegister dst, Register src_low, Register src_high) {
+ mtc1(src_low, dst);
+ mthc1(src_high, dst);
+ }
+
+ // Conditional move.
+ void Move(FPURegister dst, double imm);
+ void Movz(Register rd, Register rs, Register rt);
+ void Movn(Register rd, Register rs, Register rt);
+ void Movt(Register rd, Register rs, uint16_t cc = 0);
+ void Movf(Register rd, Register rs, uint16_t cc = 0);
+
+ void Clz(Register rd, Register rs);
+
+ // Jump unconditionally to given label.
+  // We NEED a nop in the branch delay slot, as it is used by v8, for example in
+ // CodeGenerator::ProcessDeferred().
+ // Currently the branch delay slot is filled by the MacroAssembler.
+ // Use rather b(Label) for code generation.
+ void jmp(Label* L) {
+ Branch(L);
+ }
+
+ void Load(Register dst, const MemOperand& src, Representation r);
+ void Store(Register src, const MemOperand& dst, Representation r);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index);
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index,
+ Condition cond, Register src1, const Operand& src2);
+
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+ void StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond, Register src1, const Operand& src2);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object,
+ Register value,
+ Register address);
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ ra_status,
+ save_fp,
+ remembered_set_action,
+ smi_check,
+ pointers_to_here_check_for_value);
+ }
+
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp);
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss);
+
+ void GetNumberHash(Register reg0, Register scratch);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2);
+
+
+ inline void MarkCode(NopMarkerTypes type) {
+ nop(type);
+ }
+
+ // Check if the given instruction is a 'type' marker.
+ // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+ // nop(type)). These instructions are generated to mark special location in
+ // the code, like some special IC code.
+ static inline bool IsMarkedCode(Instr instr, int type) {
+ ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ return IsNop(instr, type);
+ }
+
+
+ static inline int GetCodeMarker(Instr instr) {
+ uint32_t opcode = ((instr & kOpcodeMask));
+ uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+ uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+ uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+ // Return <n> if we have a sll zero_reg, zero_reg, n
+ // else return -1.
+ bool sllzz = (opcode == SLL &&
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+ int type =
+ (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
+ ASSERT((type == -1) ||
+ ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+ return type;
+ }
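+
+  // Usage sketch: MarkCode(type) emits sll(zero_reg, zero_reg, type), and
+  // GetCodeMarker() recovers <type> from the sa field of that marker nop,
+  // while IsMarkedCode() tests for one specific marker value.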
+
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support.
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+  // tag_allocated_object is true the result is tagged as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed. All registers are clobbered also
+ // when control continues at the gc_required label.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
+ void AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // ---------------------------------------------------------------------------
+ // Instruction macros.
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rs, const Operand& rt); \
+ void instr(Register rd, Register rs, Register rt) { \
+ instr(rd, rs, Operand(rt)); \
+ } \
+ void instr(Register rs, Register rt, int32_t j) { \
+ instr(rs, rt, Operand(j)); \
+ }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rs, const Operand& rt); \
+ void instr(Register rs, Register rt) { \
+ instr(rs, Operand(rt)); \
+ } \
+ void instr(Register rs, int32_t j) { \
+ instr(rs, Operand(j)); \
+ }
+
+ DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Daddu);
+ DEFINE_INSTRUCTION(Subu);
+ DEFINE_INSTRUCTION(Dsubu);
+ DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Dmul);
+ DEFINE_INSTRUCTION2(Mult);
+ DEFINE_INSTRUCTION2(Dmult);
+ DEFINE_INSTRUCTION2(Multu);
+ DEFINE_INSTRUCTION2(Dmultu);
+ DEFINE_INSTRUCTION2(Div);
+ DEFINE_INSTRUCTION2(Ddiv);
+ DEFINE_INSTRUCTION2(Divu);
+ DEFINE_INSTRUCTION2(Ddivu);
+
+ DEFINE_INSTRUCTION(And);
+ DEFINE_INSTRUCTION(Or);
+ DEFINE_INSTRUCTION(Xor);
+ DEFINE_INSTRUCTION(Nor);
+ DEFINE_INSTRUCTION2(Neg);
+
+ DEFINE_INSTRUCTION(Slt);
+ DEFINE_INSTRUCTION(Sltu);
+
+  // MIPS64 R2 instruction macros.
+ DEFINE_INSTRUCTION(Ror);
+ DEFINE_INSTRUCTION(Dror);
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
+
+ void Pref(int32_t hint, const MemOperand& rs);
+
+
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
+
+ void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+
+ void Ulw(Register rd, const MemOperand& rs);
+ void Usw(Register rd, const MemOperand& rs);
+ void Uld(Register rd, const MemOperand& rs, Register scratch = at);
+ void Usd(Register rd, const MemOperand& rs, Register scratch = at);
+
+  // Load a 64-bit constant into the rd register.
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
+ }
+ void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
+
+ // Push multiple registers on the stack.
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
+ void MultiPush(RegList regs);
+ void MultiPushReversed(RegList regs);
+
+ void MultiPushFPU(RegList regs);
+ void MultiPushReversedFPU(RegList regs);
+
+ void push(Register src) {
+ Daddu(sp, sp, Operand(-kPointerSize));
+ sd(src, MemOperand(sp, 0));
+ }
+ void Push(Register src) { push(src); }
+
+ // Push a handle.
+ void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Dsubu(sp, sp, Operand(2 * kPointerSize));
+ sd(src1, MemOperand(sp, 1 * kPointerSize));
+ sd(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Dsubu(sp, sp, Operand(3 * kPointerSize));
+ sd(src1, MemOperand(sp, 2 * kPointerSize));
+ sd(src2, MemOperand(sp, 1 * kPointerSize));
+ sd(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Dsubu(sp, sp, Operand(4 * kPointerSize));
+ sd(src1, MemOperand(sp, 3 * kPointerSize));
+ sd(src2, MemOperand(sp, 2 * kPointerSize));
+ sd(src3, MemOperand(sp, 1 * kPointerSize));
+ sd(src4, MemOperand(sp, 0 * kPointerSize));
+ }
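+
+  // For instance, after Push(a0, a1, a2, a3) the stack layout is
+  //   [sp + 3 * kPointerSize] == a0, ..., [sp + 0 * kPointerSize] == a3.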
+
+ void Push(Register src, Condition cond, Register tst1, Register tst2) {
+ // Since we don't have conditional execution we use a Branch.
+ Branch(3, cond, tst1, Operand(tst2));
+ Dsubu(sp, sp, Operand(kPointerSize));
+ sd(src, MemOperand(sp, 0));
+ }
+
+ void PushRegisterAsTwoSmis(Register src, Register scratch = at);
+ void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
+
+ // Pops multiple values from the stack and load them in the
+ // registers specified in regs. Pop order is the opposite as in MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPopReversed(RegList regs);
+
+ void MultiPopFPU(RegList regs);
+ void MultiPopReversedFPU(RegList regs);
+
+ void pop(Register dst) {
+ ld(dst, MemOperand(sp, 0));
+ Daddu(sp, sp, Operand(kPointerSize));
+ }
+ void Pop(Register dst) { pop(dst); }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ ASSERT(!src1.is(src2));
+ ld(src2, MemOperand(sp, 0 * kPointerSize));
+ ld(src1, MemOperand(sp, 1 * kPointerSize));
+ Daddu(sp, sp, 2 * kPointerSize);
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ ld(src3, MemOperand(sp, 0 * kPointerSize));
+ ld(src2, MemOperand(sp, 1 * kPointerSize));
+ ld(src1, MemOperand(sp, 2 * kPointerSize));
+ Daddu(sp, sp, 3 * kPointerSize);
+ }
+
+ void Pop(uint32_t count = 1) {
+ Daddu(sp, sp, Operand(count * kPointerSize));
+ }
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+ void PushSafepointRegistersAndDoubles();
+ void PopSafepointRegistersAndDoubles();
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+ // from C.
+ // Does not handle errors.
+ void FlushICache(Register address, unsigned instructions);
+
+ // MIPS64 R2 instruction macro.
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
+ // Convert unsigned word to double.
+ void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+
+ // Convert double to unsigned long.
+ void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
+
+ void Trunc_l_d(FPURegister fd, FPURegister fs);
+ void Round_l_d(FPURegister fd, FPURegister fs);
+ void Floor_l_d(FPURegister fd, FPURegister fs);
+ void Ceil_l_d(FPURegister fd, FPURegister fs);
+
+ // Convert double to unsigned word.
+ void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+
+ void Trunc_w_d(FPURegister fd, FPURegister fs);
+ void Round_w_d(FPURegister fd, FPURegister fs);
+ void Floor_w_d(FPURegister fd, FPURegister fs);
+ void Ceil_w_d(FPURegister fd, FPURegister fs);
+
+ void Madd_d(FPURegister fd,
+ FPURegister fr,
+ FPURegister fs,
+ FPURegister ft,
+ FPURegister scratch);
+
+ // Wrapper function for the different cmp/branch types.
+ void BranchF(Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
+ // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+ inline void BranchF(BranchDelaySlot bd,
+ Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2) {
+ BranchF(target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ // Truncates a double using a specific rounding mode, and writes the value
+ // to the result register.
+ // The except_flag will contain any exceptions caused by the instruction.
+ // If check_inexact is kDontCheckForInexactConversion, then the inexact
+ // exception is masked.
+ void EmitFPUTruncate(FPURoundingMode rounding_mode,
+ Register result,
+ DoubleRegister double_input,
+ Register scratch,
+ DoubleRegister double_scratch,
+ Register except_flag,
+ CheckForInexactConversion check_inexact
+ = kDontCheckForInexactConversion);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst register.
+ // If |object| is neither smi nor heap number, |not_number| is jumped to
+ // with |object| still intact.
+ void LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number);
+
+ // Loads the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be loaded.
+ void LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be converted.
+ void LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32);
+
+ // Enter exit frame.
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles,
+ int stack_space = 0);
+
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles,
+ Register arg_count,
+ bool restore_context,
+ bool do_return = NO_EMIT_RETURN);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ li(kRootRegister, Operand(roots_array_start));
+ }
+
+ // -------------------------------------------------------------------------
+ // JavaScript invokes.
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail);
+
+ void IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail);
+
+ // -------------------------------------------------------------------------
+ // Debugger Support.
+
+ void DebugBreak();
+
+ // -------------------------------------------------------------------------
+ // Exception handling.
+
+ // Push a new try handler and link into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+ // Passes thrown value to the handler of top of the try handler chain.
+ void Throw(Register value);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain.
+ void ThrowUncatchable(Register value);
+
+ // Copies a fixed number of fields of heap objects from src to dst.
+ void CopyFields(Register dst, Register src, RegList temps, int field_count);
+
+ // Copies a number of bytes from src to dst. All registers are clobbered. On
+ // exit src and dst will point to the place just after where the last byte was
+ // read or written and length will be zero.
+ void CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch);
+
+ // Initialize fields with filler values. Fields starting at |start_offset|
+ // up to, but not including, |end_offset| are overwritten with the value in
+ // |filler|. At the end of the loop, |start_offset| takes the value of
+ // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
+ // -------------------------------------------------------------------------
+ // Support functions.
+
+ // Try to get the function prototype of a function and put the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ bool miss_on_bound_function = false);
+
+ void GetObjectType(Register function,
+ Register map,
+ Register type_reg);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements. Otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* fail,
+ int elements_offset = 0);
+
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
+ // "branch_to" if the result of the comparison is "cond". If multiple map
+ // compares are required, the compare sequences branches to early_success.
+ void CompareMapAndBranch(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMapAndBranch(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // Returns a condition that will be enabled if the object was a string.
+ Condition IsObjectStringType(Register obj,
+ Register type,
+ Register result) {
+ ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ And(type, type, Operand(kIsNotStringMask));
+ ASSERT_EQ(0, kStringTag);
+ return eq;
+ }
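+
+ // A possible caller pattern (a sketch; the registers and label are
+ // placeholders). Since kStringTag is 0, the object is a string exactly when
+ // the masked type left in 'type' compares equal to zero:
+ //   Condition is_string = IsObjectStringType(obj, t0, t1);
+ //   Branch(&string_case, is_string, t0, Operand(zero_reg));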
+
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // Get the number of least significant bits from a register.
+ void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+ void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
+ // Load the value of a number object into a FPU double register. If the
+ // object is not a number a jump to the label not_number is performed
+ // and the FPU double register is unchanged.
+ void ObjectToDoubleFPURegister(
+ Register object,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+
+ // Load the value of a smi object into a FPU double register. The register
+ // scratch1 can be the same register as smi in which case smi will hold the
+ // untagged value afterwards.
+ void SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1);
+
+ // -------------------------------------------------------------------------
+ // Overflow handling functions.
+ // Usage: first call the appropriate arithmetic function, then call one of the
+ // jump functions with the overflow_dst register as the second parameter.
+
+ void AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void BranchOnOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, lt, overflow_check, Operand(zero_reg), bd);
+ }
+
+ void BranchOnNoOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+ }
+
+ void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(lt, overflow_check, Operand(zero_reg), bd);
+ }
+
+ void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(ge, overflow_check, Operand(zero_reg), bd);
+ }
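+
+ // A sketch of the pattern described above (registers and the label are
+ // placeholders); BranchOnOverflow() takes the branch when overflow_dst is
+ // negative:
+ //   AdduAndCheckForOverflow(v0, a0, a1, t9);
+ //   BranchOnOverflow(&slow_path, t9);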
+
+ // -------------------------------------------------------------------------
+ // Runtime calls.
+
+ // See comments at the beginning of CEntryStub::Generate.
+ inline void PrepareCEntryArgs(int num_args) {
+ li(s0, num_args);
+ li(s1, (num_args - 1) * kPointerSize);
+ }
+
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(s2, Operand(ref));
+ }
+
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
+ // Call a code stub.
+ void CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ COND_ARGS);
+
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub, COND_ARGS);
+
+#undef COND_ARGS
+
+ void CallJSExitStub(CodeStub* stub);
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
+
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ BranchDelaySlot bd = PROTECT);
+
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C-function from generated code, align arguments on stack
+ // and add space for the four mips argument slots.
+ // After aligning the frame, non-register arguments must be stored on the
+ // stack, after the argument-slots using helper: CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments,
+ int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments,
+ Register scratch);
+
+ // Arguments 1-4 are placed in registers a0 thru a3 respectively.
+ // Arguments 5..n are stored to the stack using the following:
+ // sw(a4, CFunctionArgumentOperand(5));
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void MovFromFloatResult(DoubleRegister dst);
+ void MovFromFloatParameter(DoubleRegister dst);
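+
+ // A minimal calling-sequence sketch for PrepareCallCFunction()/CallCFunction()
+ // with two integer arguments and no double arguments; the external reference
+ // name below is a placeholder:
+ //   PrepareCallCFunction(2, 0, t8);
+ //   li(a0, Operand(42));
+ //   li(a1, Operand(ExternalReference::isolate_address(isolate())));
+ //   CallCFunction(ExternalReference::some_helper_function(isolate()), 2);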
+
+ // There are two ways of passing double arguments on MIPS, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context. stack_space
+ // - space to be unwound on exit (includes the call JS arguments space and
+ // the additional space allocated for the fast call).
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
+
+ // Jump to the builtin routine.
+ void JumpToExternalReference(const ExternalReference& builtin,
+ BranchDelaySlot bd = PROTECT);
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the code object for the given builtin in the target register and
+ // setup the function in a1.
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ struct Unresolved {
+ int pc;
+ uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
+ const char* name;
+ };
+
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and the 'at' register gets clobbered. Dividend and result must be
+ // different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
+ // -------------------------------------------------------------------------
+ // StatsCounter support.
+
+ void SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+
+
+ // -------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void AssertFastElements(Register elements);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason msg);
+
+ // Verify restrictions about code generated in stubs.
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() { return generating_stub_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
+
+ // ---------------------------------------------------------------------------
+ // Number utilities.
+
+ // Check whether the value of reg is a power of two and not zero. If not
+ // control continues at the label not_power_of_two. If reg is a power of two
+ // the register scratch contains the value of (reg - 1) when control falls
+ // through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero);
+
+ // -------------------------------------------------------------------------
+ // Smi utilities.
+
+ // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+ void SmiTagCheckOverflow(Register reg, Register overflow);
+ void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
+
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ STATIC_ASSERT(kSmiShift == 32);
+ dsll32(dst, src, 0);
+ } else {
+ Addu(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) {
+ SmiTag(reg, reg);
+ }
+
+ // Try to convert int32 to smi. If the value is too large, preserve
+ // the original value and jump to not_a_smi. Destroys scratch and
+ // sets flags.
+ void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
+ TrySmiTag(reg, reg, scratch, not_a_smi);
+ }
+
+ void TrySmiTag(Register dst,
+ Register src,
+ Register scratch,
+ Label* not_a_smi) {
+ if (SmiValuesAre32Bits()) {
+ SmiTag(dst, src);
+ } else {
+ SmiTagCheckOverflow(at, src, scratch);
+ BranchOnOverflow(not_a_smi, scratch);
+ mov(dst, at);
+ }
+ }
+
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre32Bits()) {
+ STATIC_ASSERT(kSmiShift == 32);
+ dsra32(dst, src, 0);
+ } else {
+ sra(dst, src, kSmiTagSize);
+ }
+ }
+
+ void SmiUntag(Register reg) {
+ SmiUntag(reg, reg);
+ }
+
+ // Left-shifted from int32 equivalent of Smi.
+ void SmiScale(Register dst, Register src, int scale) {
+ if (SmiValuesAre32Bits()) {
+ // The int portion is in the upper 32 bits of the 64-bit word.
+ dsra(dst, src, kSmiShift - scale);
+ } else {
+ ASSERT(scale >= kSmiTagSize);
+ sll(dst, src, scale - kSmiTagSize);
+ }
+ }
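+
+ // With 32-bit smi values (SmiValuesAre32Bits()), the int32 payload lives in
+ // the upper word: tagging 5 via dsll32 above yields 0x0000000500000000, and
+ // SmiUntag is a single dsra32 back to 5.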
+
+ // Combine load with untagging or scaling.
+ void SmiLoadUntag(Register dst, MemOperand src);
+
+ void SmiLoadScale(Register dst, MemOperand src, int scale);
+
+ // Returns 2 values: the Smi and a scaled version of the int within the Smi.
+ void SmiLoadWithScale(Register d_smi,
+ Register d_scaled,
+ MemOperand src,
+ int scale);
+
+ // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
+ void SmiLoadUntagWithScale(Register d_int,
+ Register d_scaled,
+ MemOperand src,
+ int scale);
+
+
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
+ inline void NonNegativeSmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
+ }
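+
+ // SmiTst() leaves zero in 'scratch' exactly when 'value' is a smi, so a
+ // typical check (registers and label are placeholders) is:
+ //   SmiTst(a0, t8);
+ //   Branch(&is_smi, eq, t8, Operand(zero_reg));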
+
+ // Untag the source value into destination and jump if source is a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+ // Untag the source value into destination and jump if source is not a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
+ // Jump if the register contains a smi.
+ void JumpIfSmi(Register value,
+ Label* smi_label,
+ Register scratch = at,
+ BranchDelaySlot bd = PROTECT);
+
+ // Jump if the register contains a non-smi.
+ void JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Register scratch = at,
+ BranchDelaySlot bd = PROTECT);
+
+ // Jump if either of the registers contains a non-smi.
+ void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+ // Jump if either of the registers contains a smi.
+ void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ // Abort execution if reg is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
+
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities.
+
+ void JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number);
+
+ // -------------------------------------------------------------------------
+ // String utilities.
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ Register scratch,
+ uint32_t encoding_mask);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Check that they are non-smis.
+ void JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void ClampUint8(Register output_reg, Register input_reg);
+
+ void ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg);
+
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register dst, Register src) {
+ Ext(dst, src, Field::kShift, Field::kSize);
+ }
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register dst, Register src) {
+ static const int shift = Field::kShift;
+ static const int mask = Field::kMask >> shift;
+ dsrl(dst, src, shift);
+ And(dst, dst, Operand(mask));
+ dsll32(dst, dst, 0);
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ DecodeFieldToSmi<Field>(reg, reg);
+ }
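+
+ // Usage sketch: Field is any BitField-style class exposing kShift/kSize
+ // (Map::ElementsKindBits is assumed here as one such field). Given a register
+ // that already holds the raw encoded word:
+ //   DecodeField<Map::ElementsKindBits>(result, bit_field2);  // Extract kind.
+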
+ // Generates function and stub prologue code.
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Patch the relocated value (lui/ori pair).
+ void PatchRelocatedValue(Register li_location,
+ Register scratch,
+ Register new_value);
+ // Get the relocated value (loaded data) from the lui/ori pair.
+ void GetRelocatedValue(Register li_location,
+ Register value,
+ Register scratch);
+
+ // Expects object in a0 and returns map with validated enum cache
+ // in a0. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Register null_value, Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, jump to allocation_memento_present.
+ void TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found,
+ Condition cond = al,
+ Label* allocation_memento_present = NULL);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found, eq, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ void CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
+
+ void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void J(Label* L, BranchDelaySlot bdslot);
+ void Jr(Label* L, BranchDelaySlot bdslot);
+ void Jalr(Label* L, BranchDelaySlot bdslot);
+
+ // Helper functions for generating invokes.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ // Get the code for the given builtin. Reports through the 'resolved' flag
+ // whether the function could be resolved.
+ Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // register holds the position of the first bit. Leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+ MemOperand SafepointRegisterSlot(Register reg);
+ MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+ bool generating_stub_;
+ bool has_frame_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // Needs access to SafepointRegisterStackIndex for compiled frame
+ // traversal.
+ friend class StandardFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+ enum FlushICache {
+ FLUSH,
+ DONT_FLUSH
+ };
+
+ CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache = FLUSH);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ // Emit an instruction directly.
+ void Emit(Instr instr);
+
+ // Emit an address directly.
+ void Emit(Address addr);
+
+ // Change the condition part of an instruction leaving the rest of the current
+ // instruction unchanged.
+ void ChangeBranchCondition(Condition cond);
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
+};
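+
+// A usage sketch for CodePatcher (the address and the emitted instruction are
+// placeholders): patch exactly one instruction; the destructor flushes the
+// I-cache when FLUSH is requested.
+//   CodePatcher patcher(branch_address, 1);
+//   patcher.masm()->b(0);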
+
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/code-stubs.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
+#include "src/mips64/regexp-macro-assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - t3 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - a5 : Pointer to current code object (Code*) including heap object tag.
+ * - a6 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - a7 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t0 : Points to tip of backtrack stack
+ * - t1 : Unused.
+ * - t2 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * TODO(plind): O32 documented here with intent of having single 32/64 codebase
+ * in the future.
+ *
+ * The O32 stack will have the following structure:
+ *
+ * - fp[76] Isolate* isolate (address of the current isolate)
+ * - fp[72] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[68] stack_area_base (High end of the memory area to use as
+ * backtracking stack).
+ * - fp[64] capture array size (may fit multiple sets of matches)
+ * - fp[60] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[44..59] MIPS O32 four argument slots
+ * - fp[40] secondary link/return address used by native call.
+ * --- sp when called ---
+ * - fp[36] return address (ra).
+ * - fp[32] old frame pointer (fp).
+ * - fp[0..31] backup of registers s0..s7.
+ * --- frame pointer ----
+ * - fp[-4] end of input (address of end of string).
+ * - fp[-8] start of input (address of first character in string).
+ * - fp[-12] start index (character index of start).
+ * - fp[-16] void* input_string (location of a handle containing the string).
+ * - fp[-20] success counter (only for global regexps to count matches).
+ * - fp[-24] Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * - fp[-28] At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - fp[-32] register 0 (Only positions must be stored in the first
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ *
+ * The N64 stack will have the following structure:
+ *
+ * - fp[88] Isolate* isolate (address of the current isolate) kIsolate
+ * - fp[80] secondary link/return address used by exit frame on native call. kSecondaryReturnAddress
+ *                                                                           kStackFrameHeader
+ * --- sp when called ---
+ * - fp[72] ra Return from RegExp code (ra). kReturnAddress
+ * - fp[64] s9, old-fp Old fp, callee saved(s9).
+ * - fp[0..63] s0..s7 Callee-saved registers s0..s7.
+ * --- frame pointer ----
+ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
+ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-40] end of input (address of end of string). kInputEnd
+ * - fp[-48] start of input (address of first character in string). kInputStart
+ * - fp[-56] start index (character index of start). kStartIndex
+ * - fp[-64] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-80] Offset of location before start of input (effectively character kInputStartMinusOne
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * --------- The following output registers are 32-bit values. ---------
+ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * Address secondary_return_address, // Only used by native call.
+ * int* capture_output_array,
+ * byte* stack_area_base,
+ * bool direct_call = false,
+ * void* return_address,
+ * Isolate* isolate);
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in mips/simulator-mips.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the ra register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
+ Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(v0, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerMIPS::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Daddu(current_input_offset(),
+ current_input_offset(), Operand(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
+ ASSERT(reg >= 0);
+ ASSERT(reg < num_registers_);
+ if (by != 0) {
+ __ ld(a0, register_location(reg));
+ __ Daddu(a0, a0, Operand(by));
+ __ sd(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(a0);
+ __ Daddu(a0, a0, code_pointer());
+ __ Jump(a0);
+}
+
+
+void RegExpMacroAssemblerMIPS::Bind(Label* label) {
+ __ bind(label);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+ BranchOrBacktrack(¬_at_start, ne, a0, Operand(zero_reg));
+
+ // If we did, are we still at the start of the input?
+ __ ld(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+ __ bind(¬_at_start);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
+ // If we did, are we still at the start of the input?
+ __ ld(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ Daddu(backtrack_stackpointer(),
+ backtrack_stackpointer(),
+ Operand(kIntSize));
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ __ ld(a0, register_location(start_reg)); // Index of start of capture.
+ __ ld(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Dsubu(a1, a1, a0); // Length of capture.
+
+ // If length is zero, either the capture is empty or it is not participating.
+ // In either case succeed immediately.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ __ Daddu(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+ __ Daddu(a0, a0, Operand(end_of_input_address()));
+ __ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+ __ Daddu(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ lbu(a3, MemOperand(a0, 0));
+ __ daddiu(a0, a0, char_size());
+ __ lbu(a4, MemOperand(a2, 0));
+ __ daddiu(a2, a2, char_size());
+
+ __ Branch(&loop_check, eq, a4, Operand(a3));
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(a4, a4, Operand(0x20)); // Also convert input character.
+ __ Branch(&fail, ne, a4, Operand(a3));
+ __ Dsubu(a3, a3, Operand('a'));
+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Dsubu(a3, a3, Operand(224 - 'a'));
+ // Weren't Latin-1 letters.
+ __ Branch(&fail, hi, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
+
+ __ bind(&loop_check);
+ __ Branch(&loop, lt, a0, Operand(a1));
+ __ jmp(&success);
+
+ __ bind(&fail);
+ GoTo(on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Dsubu(current_input_offset(), a2, end_of_input_address());
+ } else {
+ ASSERT(mode_ == UC16);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() | backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, a2);
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // a0: Address byte_offset1 - Address captured substring's start.
+ // a1: Address byte_offset2 - Address of current character position.
+ // a2: size_t byte_length - length of capture in bytes(!).
+ // a3: Isolate* isolate.
+
+ // Address of start of capture.
+ __ Daddu(a0, a0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mov(a2, a1);
+ // Save length in callee-save register for use on return.
+ __ mov(s3, a1);
+ // Address of current input position.
+ __ Daddu(a1, current_input_offset(), Operand(end_of_input_address()));
+ // Isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Restore regexp engine registers.
+ __ MultiPop(regexp_registers_to_retain);
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+ // Check if function returned non-zero for success or zero for failure.
+ BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
+ // On success, increment position by length of capture.
+ __ Daddu(current_input_offset(), current_input_offset(), Operand(s3));
+ }
+
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+
+ // Find length of back-referenced capture.
+ __ ld(a0, register_location(start_reg));
+ __ ld(a1, register_location(start_reg + 1));
+ __ Dsubu(a1, a1, a0); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ __ Daddu(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+
+ // Compute pointers to match string and capture string.
+ __ Daddu(a0, a0, Operand(end_of_input_address()));
+ __ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+ __ Daddu(a1, a1, Operand(a0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == ASCII) {
+ __ lbu(a3, MemOperand(a0, 0));
+ __ daddiu(a0, a0, char_size());
+ __ lbu(a4, MemOperand(a2, 0));
+ __ daddiu(a2, a2, char_size());
+ } else {
+ ASSERT(mode_ == UC16);
+ __ lhu(a3, MemOperand(a0, 0));
+ __ daddiu(a0, a0, char_size());
+ __ lhu(a4, MemOperand(a2, 0));
+ __ daddiu(a2, a2, char_size());
+ }
+ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4));
+ __ Branch(&loop, lt, a0, Operand(a1));
+
+ // Move current character position to position after match.
+ __ Dsubu(current_input_offset(), a2, end_of_input_address());
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_equal, eq, a0, rhs);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_not_equal, ne, a0, rhs);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
+ __ Dsubu(a0, current_character(), Operand(minus));
+ __ And(a0, a0, Operand(mask));
+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ Dsubu(a0, current_character(), Operand(from));
+ // Unsigned lower-or-same condition.
+ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ Dsubu(a0, current_character(), Operand(from));
+ // Unsigned higher condition.
+ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ li(a0, Operand(table));
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ __ And(a1, current_character(), Operand(kTableSize - 1));
+ __ Daddu(a0, a0, a1);
+ } else {
+ __ Daddu(a0, a0, current_character());
+ }
+
+ __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
+}
+
+
+bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
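+ // For example, the 'd' case below matches '0' <= c <= '9' with the single
+ // unsigned test (c - '0') <= ('9' - '0'); any c below '0' wraps around to a
+ // large unsigned value and fails the same comparison.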
+ switch (type) {
+ case 's':
+ // Match space-characters.
+ if (mode_ == ASCII) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ Branch(&success, eq, current_character(), Operand(' '));
+ // Check range 0x09..0x0d.
+ __ Dsubu(a0, current_character(), Operand('\t'));
+ __ Branch(&success, ls, a0, Operand('\r' - '\t'));
+ // \u00a0 (NBSP).
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9').
+ __ Dsubu(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+ return true;
+ case 'D':
+ // Match non ASCII-digits.
+ __ Dsubu(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+ __ Dsubu(a0, a0, Operand(0x0b));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+ __ Dsubu(a0, a0, Operand(0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+ } else {
+ Label done;
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ li(a0, Operand(map));
+ __ Daddu(a0, a0, current_character());
+ __ lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ Branch(&done, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ li(a0, Operand(map));
+ __ Daddu(a0, a0, current_character());
+ __ lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::Fail() {
+ __ li(v0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
+ Label return_v0;
+ if (masm_->has_exception()) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+ // is detected, we will jump into the exit sequence right away.
+ __ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+ // Finalize code - write the entry point code now that we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL,
+ // no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
+ // or don't save.
+ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
+ s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
+ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+
+ if (kMipsAbi == kN64) {
+ // TODO(plind): Should probably alias a4-a7, for clarity.
+ argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
+ }
+
+ __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ // TODO(plind): this 8 is the # of argument regs, should have definition.
+ __ Daddu(frame_pointer(), sp, Operand(8 * kPointerSize));
+ __ mov(a0, zero_reg);
+ __ push(a0); // Make room for success counter and initialize it to 0.
+ __ push(a0); // Make room for "position - 1" constant (value irrelevant).
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ ld(a0, MemOperand(a0));
+ __ Dsubu(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ // Allocate space on stack for registers.
+ __ Dsubu(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ ld(a0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ Dsubu(current_input_offset(), a0, end_of_input_address());
+ // Set a0 to address of char before start of the input string
+ // (effectively string position -1).
+ __ ld(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Dsubu(a0, current_input_offset(), Operand(char_size()));
+ __ dsll(t1, a1, (mode_ == UC16) ? 1 : 0);
+ __ Dsubu(a0, a0, t1);
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ sd(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ // Initialize code pointer register
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1.
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ Daddu(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ sd(a0, MemOperand(a1));
+ __ Daddu(a1, a1, Operand(-kPointerSize));
+ __ Dsubu(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ sd(a0, register_location(i));
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+
+ __ jmp(&start_label_);
+
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // Copy captures to output.
+ __ ld(a1, MemOperand(frame_pointer(), kInputStart));
+ __ ld(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ ld(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Dsubu(a1, end_of_input_address(), a1);
+ // a1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ dsrl(a1, a1, 1);
+ }
+ // a1 is length of input in characters.
+ __ Daddu(a1, a1, Operand(a2));
+ // a1 is length of string in characters.
+
+ ASSERT_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ ld(a2, register_location(i));
+ __ ld(a3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+            // Keep capture start in t3 for the zero-length check later.
+ __ mov(t3, a2);
+ }
+ if (mode_ == UC16) {
+ __ dsra(a2, a2, 1);
+ __ Daddu(a2, a2, a1);
+ __ dsra(a3, a3, 1);
+ __ Daddu(a3, a3, a1);
+ } else {
+ __ Daddu(a2, a1, Operand(a2));
+ __ Daddu(a3, a1, Operand(a3));
+ }
+ // V8 expects the output to be an int32_t array.
+ __ sw(a2, MemOperand(a0));
+ __ Daddu(a0, a0, kIntSize);
+ __ sw(a3, MemOperand(a0));
+ __ Daddu(a0, a0, kIntSize);
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ Daddu(a0, a0, 1);
+ __ sd(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Dsubu(a1, a1, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ __ mov(v0, a0);
+ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
+
+ __ sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ Daddu(a2, a2, num_saved_registers_ * kIntSize);
+ __ sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare a0 to initialize registers with its value in the next run.
+ __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // t3: capture start index
+ // Not a zero-length match, restart.
+ __ Branch(
+ &load_char_start_regexp, ne, current_input_offset(), Operand(t3));
+ // Offset from the end is zero if we already reached the end.
+ __ Branch(&exit_label_, eq, current_input_offset(),
+ Operand(zero_reg));
+ // Advance current position after a zero-length match.
+ __ Daddu(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ Branch(&load_char_start_regexp);
+ } else {
+ __ li(v0, Operand(SUCCESS));
+ }
+ }
+ // Exit and return v0.
+ __ bind(&exit_label_);
+ if (global()) {
+ __ ld(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_v0);
+  // Skip sp past regexp registers and local variables.
+ __ mov(sp, frame_pointer());
+ // Restore registers s0..s7 and return (restoring ra to pc).
+ __ MultiPop(registers_to_retain | ra.bit());
+ __ Ret();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code.
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() | backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+ CallCheckStackGuardState(a0);
+ __ MultiPop(regexp_registers_to_retain);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+
+ // String might have moved: Reload end of string from frame.
+ __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ // Put regexp engine registers on stack first.
+ RegList regexp_registers = current_input_offset().bit() |
+ current_character().bit();
+ __ MultiPush(regexp_registers);
+ Label grow_failed;
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, a0);
+ __ mov(a0, backtrack_stackpointer());
+ __ Daddu(a1, frame_pointer(), Operand(kStackHighEnd));
+ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // Restore regexp registers.
+ __ MultiPop(regexp_registers);
+ // If return NULL, we have failed to grow the stack, and
+ // must exit with a stack-overflow exception.
+ __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), v0);
+ // Restore saved registers and continue.
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ ld(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ __ ld(a0, register_location(reg));
+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ __ ld(a0, register_location(reg));
+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerMIPS::Implementation() {
+ return kMIPSImplementation;
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
+ Pop(a0);
+ __ sd(a0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
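+    // The label is not bound yet: branch over a 4-byte placeholder emitted
+    // into the instruction stream, record its position so the assembler can
+    // patch in the label's offset once it is bound, and load the value
+    // relative to code_pointer() below.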
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label after_constant;
+ __ Branch(&after_constant);
+ int offset = masm_->pc_offset();
+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ __ emit(0);
+ masm_->label_at_put(label, offset);
+ __ bind(&after_constant);
+ if (is_int16(cp_offset)) {
+ __ lwu(a0, MemOperand(code_pointer(), cp_offset));
+ } else {
+ __ Daddu(a0, code_pointer(), cp_offset);
+ __ lwu(a0, MemOperand(a0, 0));
+ }
+ }
+ Push(a0);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ ld(a0, register_location(register_index));
+ Push(a0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
+ __ ld(current_input_offset(), register_location(reg));
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ __ ld(backtrack_stackpointer(), register_location(reg));
+ __ ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Branch(&after_position,
+ ge,
+ current_input_offset(),
+ Operand(-by * char_size()));
+ __ li(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ __ li(a0, Operand(to));
+ __ sd(a0, register_location(register_index));
+}
+
+
+bool RegExpMacroAssemblerMIPS::Succeed() {
+ __ jmp(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ sd(current_input_offset(), register_location(reg));
+ } else {
+ __ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ sd(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ sd(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ __ ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Dsubu(a0, backtrack_stackpointer(), a1);
+ __ sd(a0, register_location(reg));
+}
+
+
+bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
+ return false;
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+ int stack_alignment = base::OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Dsubu(sp, sp, Operand(kPointerSize));
+ ASSERT(IsPowerOf2(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ sd(scratch, MemOperand(sp));
+
+ __ mov(a2, frame_pointer());
+ // Code* of self.
+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ // We need to make room for the return address on the stack.
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ Dsubu(sp, sp, Operand(stack_alignment));
+
+ // Stack pointer now points to cell where return address is to be written.
+  // Arguments are in registers, meaning we treat the return address as
+  // argument 5. Since DirectCEntryStub will handle allocating space for the C
+ // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ __ li(t9, Operand(stack_guard_check));
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm_, t9);
+
+ // DirectCEntryStub allocated space for the C argument slots so we have to
+  // drop them and the return address from the stack by loading the saved sp.
+  // At this point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ ld(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+  // If this is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+ // Current string.
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ Object* result = isolate->stack_guard()->HandleInterrupts();
+
+ if (*code_handle != re_code) { // Return address no longer valid.
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ // If we changed between an ASCII and an UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<int>(re_frame, kStartIndex);
+ const byte* new_address = StringCharacterPosition(*subject_tmp,
+ start_index + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ }
+
+ return 0;
+}
+
+
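+// Returns the frame slot for a regexp register, growing num_registers_
+// lazily; GetCode later reserves stack space for all registers seen here.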
+MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+
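+// The current position is kept as a negative byte offset from the end of the
+// input, so the character at cp_offset is outside the input exactly when
+// current_input_offset() >= -cp_offset * char_size().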
+void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ BranchOrBacktrack(on_outside_input,
+ ge,
+ current_input_offset(),
+ Operand(-cp_offset * char_size()));
+}
+
+
+void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == NULL) {
+ __ Branch(&backtrack_label_, condition, rs, rt);
+ return;
+ }
+ __ Branch(to, condition, rs, rt);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
+ Condition cond,
+ Register rs,
+ const Operand& rt) {
+ __ BranchAndLink(to, cond, rs, rt);
+}
+
+
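+// SafeCallTarget stores return addresses as offsets from the code object, so
+// SafeReturn rebases them before jumping; this keeps absolute code addresses
+// off the stack while GC may move the code.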
+void RegExpMacroAssemblerMIPS::SafeReturn() {
+ __ pop(ra);
+ __ Daddu(t1, ra, Operand(masm_->CodeObject()));
+ __ Jump(t1);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ Dsubu(ra, ra, Operand(masm_->CodeObject()));
+ __ push(ra);
+}
+
+
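+// The backtrack stack grows downwards and holds 32-bit entries (offsets, not
+// pointers), so Push and Pop move backtrack_stackpointer() by kIntSize.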
+void RegExpMacroAssemblerMIPS::Push(Register source) {
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ Daddu(backtrack_stackpointer(),
+ backtrack_stackpointer(),
+ Operand(-kIntSize));
+ __ sw(source, MemOperand(backtrack_stackpointer()));
+}
+
+
+void RegExpMacroAssemblerMIPS::Pop(Register target) {
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ lw(target, MemOperand(backtrack_stackpointer()));
+ __ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ ld(a0, MemOperand(a0));
+ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+
+ __ li(a0, Operand(stack_limit));
+ __ ld(a0, MemOperand(a0));
+ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+ if (cp_offset != 0) {
+ // t3 is not being used to store the capture start index at this point.
+ __ Daddu(t3, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = t3;
+ }
+ // We assume that we cannot do unaligned loads on MIPS, so this function
+ // must only be used to load a single character at a time.
+ ASSERT(characters == 1);
+ __ Daddu(t1, end_of_input_address(), Operand(offset));
+ if (mode_ == ASCII) {
+ __ lbu(current_character(), MemOperand(t1, 0));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ lhu(current_character(), MemOperand(t1, 0));
+ }
+}
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
+#include "src/macro-assembler.h"
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/mips64/assembler-mips64.h"
+#include "src/mips64/macro-assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerMIPS();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+
+ void print_regexp_frame_constants();
+
+ private:
+#if defined(MIPS_ABI_N64)
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s0 to s7, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+
+  // TODO(plind): This 9 is 8 s-regs (s0..s7) plus fp.
+
+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kSecondaryReturnAddress;
+ // Stack parameters placed by caller.
+ static const int kIsolate = kStackFrameHeader + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kFramePointer - kPointerSize;
+ static const int kStackHighEnd = kDirectCall - kPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+ static const int kInputEnd = kRegisterOutput - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+#elif defined(MIPS_ABI_O32)
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s0 to s7, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress + kPointerSize;
+ // Stack parameters placed by caller.
+ static const int kRegisterOutput =
+ kStackFrameHeader + 4 * kPointerSize + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kInputEnd = kFramePointer - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+#else
+# error "undefined MIPS ABI"
+#endif
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+  // The frame-pointer-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return a6; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return a7; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return t2; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return t0; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return a5; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt);
+
+ // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
+ inline void SafeCall(Label* to,
+ Condition cond,
+ Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1).
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <cmath>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/assembler.h"
+#include "src/disasm.h"
+#include "src/globals.h" // Need the BitCast.
+#include "src/mips64/constants-mips64.h"
+#include "src/mips64/simulator-mips64.h"
+
+
+// Only build the simulator if not compiling for real MIPS hardware.
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+// Utility functions.
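+// Two signed values have the same sign iff their XOR is non-negative (the
+// sign bits are equal).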
+bool HaveSameSign(int64_t a, int64_t b) {
+ return ((a ^ b) >= 0);
+}
+
+
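+// Maps an FPU condition code to its bit position in the FCSR: condition 0
+// uses bit 23, conditions 1 through 7 use bits 25 through 31.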
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+
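+// Returns the high 64 bits of the signed 128-bit product u * v, computed from
+// 32-bit half-word partial products so no 128-bit integer type is required.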
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xffffffffL;
+ u1 = u >> 32;
+ v0 = v & 0xffffffffL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffffL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the Windows C
+// Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger {
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+ ~MipsDebugger();
+
+ void Stop(Instruction* instr);
+ void Debug();
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+ static const Instr kNopInstr = 0x0;
+
+ Simulator* sim_;
+
+ int64_t GetRegisterValue(int regnum);
+ int64_t GetFPURegisterValue(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
+ bool GetValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instruction* breakpc);
+ bool DeleteBreakpoint(Instruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+};
+
+
+MipsDebugger::~MipsDebugger() {
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+ char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+ if (file_name != NULL) {
+ coverage_log = fopen(file_name, "aw+");
+ }
+}
+
+
+void MipsDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char** msg_address =
+    reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ char* msg = *msg_address;
+ ASSERT(msg != NULL);
+
+ // Update this stop description.
+  if (!sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
+ }
+
+ if (strlen(msg) > 0) {
+ if (coverage_log != NULL) {
+      fprintf(coverage_log, "%s\n", msg);
+ fflush(coverage_log);
+ }
+ // Overwrite the instruction and address with nops.
+ instr->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
+ }
+ // TODO(yuyin): 2 -> 3?
+  sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstrSize);
+}
+
+
+#else // GENERATED_CODE_COVERAGE
+
+#define UNSUPPORTED() printf("Unsupported instruction.\n");
+
+static void InitializeCoverage() {}
+
+
+void MipsDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+ Instruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
+ }
+ PrintF("Simulator hit %s (%u)\n", msg, code);
+ // TODO(yuyin): 2 -> 3?
+ sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstrSize);
+ Debug();
+}
+#endif // GENERATED_CODE_COVERAGE
+
+
+int64_t MipsDebugger::GetRegisterValue(int regnum) {
+ if (regnum == kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_register(regnum);
+ }
+}
+
+
+int64_t MipsDebugger::GetFPURegisterValue(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+
+float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+
+double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+
+bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValue(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return false;
+}
+
+
+bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != NULL) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->InstructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+
+bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = NULL;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+
+void MipsDebugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+
+void MipsDebugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
+
+
+void MipsDebugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+ PrintF("\n");
+ // at, v0, a0.
+ PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ REG_INFO(1), REG_INFO(2), REG_INFO(4));
+ // v1, a1.
+ PrintF("%34s\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ "", REG_INFO(3), REG_INFO(5));
+ // a2.
+ PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(6));
+ // a3.
+ PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(7));
+ PrintF("\n");
+ // a4-t3, s0-s7
+ for (int i = 0; i < 8; i++) {
+ PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ REG_INFO(8+i), REG_INFO(16+i));
+ }
+ PrintF("\n");
+ // t8, k0, LO.
+ PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ REG_INFO(24), REG_INFO(26), REG_INFO(32));
+ // t9, k1, HI.
+ PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ REG_INFO(25), REG_INFO(27), REG_INFO(33));
+ // sp, fp, gp.
+ PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ REG_INFO(29), REG_INFO(30), REG_INFO(28));
+ // pc.
+ PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ REG_INFO(31), REG_INFO(34));
+
+#undef REG_INFO
+#undef FPU_REG_INFO
+}
+
+
+void MipsDebugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) FPURegisters::Name(n), \
+ GetFPURegisterValue(n), \
+ GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31.
+ // TODO(plind): consider printing 2 columns for space efficiency.
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(0) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(1) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(2) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(3) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(4) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(5) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(6) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(7) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(8) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(9) );
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(11));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(13));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(15));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(17));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(19));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(21));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(23));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(25));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(27));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(29));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(30));
+ PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(31));
+
+#undef REG_INFO
+#undef FPU_REG_INFO
+}
+
+
+void MipsDebugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%016lx %s\n", sim_->get_pc(), buffer.start());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->set_last_debugger_input(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ PrintF("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
+ } else {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
+ PrintF("%s: 0x%08lx %ld \n", arg1, value, value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s: 0x%016lx %16.4e\n",
+ FPURegisters::Name(fpuregnum), value, dvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int64_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ value &= 0xffffffffUL;
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08lx %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
+ }
+ } else if ((strcmp(cmd, "po") == 0)
+ || (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ OFStream os(stdout);
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ os << arg1 << ": \n";
+#ifdef DEBUG
+ obj->Print(os);
+ os << "\n";
+#else
+ os << Brief(obj) << "\n";
+#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = NULL;
+ int64_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%012lx: 0x%016lx %14ld",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int64_t value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ PrintF(" (");
+ if ((value & 1) == 0) {
+ PrintF("smi %d", static_cast<int>(value >> 32));
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) ||
+ (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = NULL;
+ byte* end = NULL;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * Instruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * Instruction::kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08lx %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(NULL)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+        PrintF("No flags on MIPS!\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() -
+ 2 * Instruction::kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc +
+ Instruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->IsStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ // Print registers and disassemble.
+ PrintAllRegs();
+ PrintF("\n");
+
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = NULL;
+ byte* end = NULL;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // no length parameter passed, assume 10 instructions
+ end = cur + (10 * Instruction::kInstrSize);
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08lx %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi\n");
+ PrintF(" step one instruction (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to print all registers\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("stack [<words>]\n");
+        PrintF("  dump stack content, default dump 10 words\n");
+ PrintF("mem <address> [<words>]\n");
+        PrintF("  dump memory content, default dump 10 words\n");
+ PrintF("flags\n");
+ PrintF(" print flags\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+        PrintF("    stop and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+        PrintF("    stop info all/<code> : print info about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+static bool ICacheMatch(void* one, void* two) {
+ ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+ ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ return one == two;
+}
+
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
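+// Returns true when the byte range [start, start + size] lies within a single
+// cache page, i.e. its first and last bytes share the same page address.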
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+
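+// Invalidates the simulated I-cache for [start_addr, start_addr + size): the
+// range is widened to whole cache lines and then flushed one page at a time,
+// so every validity-byte update stays within a single CachePage.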
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+ void* start_addr,
+ size_t size) {
+ int64_t start = reinterpret_cast<int64_t>(start_addr);
+ int64_t intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ ASSERT_EQ((uint64_t)0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
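+
+// Illustrative sketch (editorial, not part of the simulator): a flush request
+// that crosses a page boundary is handled in two steps. The buffer name and
+// layout below are hypothetical.
+//
+//   // 'code' starts CachePage::kLineLength bytes before a page boundary and
+//   // spans 2 * CachePage::kLineLength bytes, so the while loop above flushes
+//   // the tail of the first page and the final FlushOnePage call flushes the
+//   // remainder on the second page.
+//   sim->FlushICache(i_cache, code, 2 * CachePage::kLineLength);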
+
+
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+ ICacheHash(page),
+ true);
+ if (entry->value == NULL) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+ intptr_t start,
+ int size) {
+ ASSERT(size <= CachePage::kPageSize);
+ ASSERT(AllOnOnePage(start, size - 1));
+ ASSERT((start & CachePage::kLineMask) == 0);
+ ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
+ int64_t address = reinterpret_cast<int64_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset),
+ Instruction::kInstrSize));
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ::v8::internal::ExternalReference::set_redirector(isolate,
+ &RedirectExternalReference);
+}
+
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ i_cache_ = isolate_->simulator_i_cache();
+ if (i_cache_ == NULL) {
+ i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
+ }
+ Initialize(isolate);
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+ stack_size_ = FLAG_sim_stack_size * KB;
+ stack_ = reinterpret_cast<char*>(malloc(stack_size_));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = NULL;
+ break_instr_ = 0;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64;
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+ InitializeCoverage();
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
+
+ last_debugger_input_ = NULL;
+}
+
+
+Simulator::~Simulator() {
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(void* external_function, ExternalReference::Type type)
+ : external_function_(external_function),
+ swi_instruction_(rtCallRedirInstr),
+ type_(type),
+ next_(NULL) {
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->
+ FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_),
+ Instruction::kInstrSize);
+ isolate->set_simulator_redirection(this);
+ }
+
+ void* address_of_swi_instruction() {
+ return reinterpret_cast<void*>(&swi_instruction_);
+ }
+
+ void* external_function() { return external_function_; }
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) return current;
+ }
+ return new Redirection(external_function, type);
+ }
+
+ static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+ char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+ char* addr_of_redirection =
+ addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(int64_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
+ private:
+ void* external_function_;
+ uint32_t swi_instruction_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+};
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
+ return redirection->address_of_swi_instruction();
+}
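+
+// Illustrative sketch (editorial, not part of the simulator): the round trip
+// performed by the redirection machinery. 'host_fn' is a hypothetical
+// host-compiled helper.
+//
+//   void* swi = Simulator::RedirectExternalReference(
+//       host_fn, ExternalReference::BUILTIN_CALL);
+//   // Generated code jumps to 'swi'. When the simulator executes the embedded
+//   // rtCallRedirInstr it recovers the original target:
+//   Redirection* r = Redirection::FromSwiInstruction(
+//       reinterpret_cast<Instruction*>(swi));
+//   // r->external_function() == host_fn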
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+  ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int64_t value) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[1];
+ registers_[reg] = registers_[reg] << 32;
+ registers_[reg] += dbl[0];
+}
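+
+// Illustrative example (editorial, not part of the simulator): dbl[0] supplies
+// the low word and dbl[1] the high word of the 64-bit register, e.g.
+//
+//   int halves[2] = { 0x00abcdef, 0x01234567 };
+//   sim->set_dw_register(t0, halves);  // registers_[t0] == 0x0123456700abcdef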
+
+
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ // TODO(plind): big endian issue.
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ *pword = value;
+}
+
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ // TODO(plind): big endian issue.
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ *phiword = value;
+}
+
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::get_register(int reg) const {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ else
+ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
+}
+
+
+double Simulator::get_double_from_register_pair(int reg) {
+ // TODO(plind): bad ABI stuff, refactor or remove.
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[sizeof(registers_[0])];
+  memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, sizeof(registers_[0]));
+ return(dm_val);
+}
+
+
+int64_t Simulator::get_fpu_register(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+uint32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<uint32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
+}
+
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *BitCast<float*>(
+ const_cast<int64_t*>(&FPUregisters_[fpureg]));
+}
+
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *BitCast<double*>(&FPUregisters_[fpureg]);
+}
+
+
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
+// from a0-a3 or f12 and f13 (n64), or f14 (O32).
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
+ if (!IsMipsSoftFloatABI) {
+ const int fparg2 = (kMipsAbi == kN64) ? 13 : 14;
+ *x = get_fpu_register_double(12);
+ *y = get_fpu_register_double(fparg2);
+ *z = get_register(a2);
+ } else {
+ // TODO(plind): bad ABI stuff, refactor or remove.
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+
+ // Registers a0 and a1 -> x.
+ reg_buffer[0] = get_register(a0);
+ reg_buffer[1] = get_register(a1);
+ memcpy(x, buffer, sizeof(buffer));
+ // Registers a2 and a3 -> y.
+ reg_buffer[0] = get_register(a2);
+ reg_buffer[1] = get_register(a3);
+ memcpy(y, buffer, sizeof(buffer));
+ // Register 2 -> z.
+    // Register a2 -> z.
+ memcpy(z, buffer, sizeof(*z));
+ }
+}
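+
+// Illustrative sketch (editorial, not part of the simulator): under the
+// soft-float ABI a double travels as two 32-bit register halves, so a caller
+// does the mirror image of GetFpArgs. The value below is hypothetical.
+//
+//   double d = 1.5;
+//   int32_t halves[2];
+//   memcpy(halves, &d, sizeof(d));   // little-endian: low word first
+//   sim->set_register(a0, halves[0]);
+//   sim->set_register(a1, halves[1]);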
+
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+ if (!IsMipsSoftFloatABI) {
+ set_fpu_register_double(0, result);
+ } else {
+ char buffer[2 * sizeof(registers_[0])];
+ int64_t* reg_buffer = reinterpret_cast<int64_t*>(buffer);
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to v0 and v1.
+ set_register(v0, reg_buffer[0]);
+ set_register(v1, reg_buffer[1]);
+ }
+}
+
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+
+bool Simulator::test_fcsr_bit(uint32_t cc) {
+ return FCSR_ & (1 << cc);
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
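+
+// Worked example (editorial, illustrative only): converting a value that does
+// not fit in an int32 sets both the overflow and the invalid-operation bits,
+// and the function reports the operation as invalid.
+//
+//   double fs = 3.0e10;                       // too large for an int32
+//   double rounded = std::floor(fs + 0.5);
+//   bool invalid = set_fcsr_round_error(fs, rounded);   // returns true
+//   // test_fcsr_bit(kFCSROverflowFlagBit)  == true
+//   // test_fcsr_bit(kFCSRInvalidOpFlagBit) == true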
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
+ bool ret = false;
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const {
+ return registers_[pc];
+}
+
+
+// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
+// interrupt is caused. On others it does a funky rotation thing. For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour. Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator. Since the host is typically IA32 we will not
+// get the correct MIPS-like behaviour on unaligned accesses.
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+void Simulator::DieOrDebug() {
+ if (1) { // Flag for this was removed.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ base::OS::Abort();
+ }
+}
+
+
+void Simulator::TraceRegWr(int64_t value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ SNPrintF(trace_buf_, "%016lx", value);
+ }
+}
+
+
+// TODO(plind): consider making icount_ printing a flag option.
+void Simulator::TraceMemRd(int64_t addr, int64_t value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ SNPrintF(trace_buf_, "%016lx <-- [%016lx] (%ld)",
+ value, addr, icount_);
+ }
+}
+
+
+void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (t) {
+ case BYTE:
+ SNPrintF(trace_buf_, " %02x --> [%016lx]",
+ static_cast<int8_t>(value), addr);
+ break;
+ case HALF:
+ SNPrintF(trace_buf_, " %04x --> [%016lx]",
+ static_cast<int16_t>(value), addr);
+ break;
+ case WORD:
+ SNPrintF(trace_buf_, " %08x --> [%016lx]",
+ static_cast<int32_t>(value), addr);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_, "%016lx --> [%016lx] (%ld)",
+ value, addr, icount_);
+ break;
+ }
+ }
+}
+
+
+// TODO(plind): sign-extend and zero-extend not implemented properly
+// on all the ReadXX functions; I don't think reinterpret_cast does it.
+int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & 0x3) == 0) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ }
+ PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ return 0;
+}
+
+
+uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & 0x3) == 0) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ }
+ PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ return 0;
+}
+
+
+void Simulator::WriteW(int64_t addr, int value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & 0x3) == 0) {
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+
+int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ TraceMemRd(addr, *ptr);
+ return *ptr;
+ }
+ PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ return 0;
+}
+
+
+void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+
+double Simulator::ReadD(int64_t addr, Instruction* instr) {
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned (double) read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ base::OS::Abort();
+ return 0;
+}
+
+
+void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned (double) write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+
+uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ }
+ PrintF("Unaligned unsigned halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ return 0;
+}
+
+
+int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ }
+ PrintF("Unaligned signed halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ return 0;
+}
+
+
+void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ TraceMemWr(addr, value, HALF);
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF(
+ "Unaligned unsigned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+
+void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ TraceMemWr(addr, value, HALF);
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+
+uint32_t Simulator::ReadBU(int64_t addr) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr & 0xff;
+}
+
+
+int32_t Simulator::ReadB(int64_t addr) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+}
+
+
+void Simulator::WriteB(int64_t addr, uint8_t value) {
+ TraceMemWr(addr, value, BYTE);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+
+void Simulator::WriteB(int64_t addr, int8_t value) {
+ TraceMemWr(addr, value, BYTE);
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+ // pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08lx: %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair, which on this 64-bit target is a pair of pointer-sized
+// values returned in the v0/v1 register pair. With the code below we assume
+// that all runtime calls return an ObjectPair worth of result. If they don't,
+// the v1 result register contains a bogus value, which is fine because it is
+// caller-saved.
+
+struct ObjectPair {
+ Object* x;
+ Object* y;
+};
+
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3,
+ int64_t arg4,
+ int64_t arg5);
+
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
+ int64_t arg0, int64_t arg1, void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime. They are also used for debugging with simulator.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+ // There are several instructions that could get us here,
+  // the break_ instruction, or several variants of traps. All are of the
+  // "SPECIAL" opcode class and are distinguished by the function field.
+ int32_t func = instr->FunctionFieldRaw();
+ uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+ // We first check if we met a call_rt_redirected.
+ if (instr->InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int64_t arg0 = get_register(a0);
+ int64_t arg1 = get_register(a1);
+ int64_t arg2 = get_register(a2);
+ int64_t arg3 = get_register(a3);
+ int64_t arg4, arg5;
+
+ if (kMipsAbi == kN64) {
+ arg4 = get_register(a4); // Abi n64 register a4.
+ arg5 = get_register(a5); // Abi n64 register a5.
+ } else { // Abi O32.
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ arg4 = stack_pointer[4];
+ arg5 = stack_pointer[5];
+ }
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ if (!IsMipsSoftFloatABI) {
+ // With the hard floating point calling convention, double
+ // arguments are passed in FPU registers. Fetch the arguments
+ // from there and call the builtin using soft floating point
+ // convention.
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_fpu_register(f14);
+ arg3 = get_fpu_register(f15);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = get_register(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+
+ // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
+ // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
+ // simulator. Soft-float has additional abstraction of ExternalReference,
+ // to support serialization.
+ if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ FUNCTION_ADDR(generic_target), dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ FUNCTION_ADDR(generic_target), dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %d",
+ FUNCTION_ADDR(generic_target), dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int64_t>(iresult));
+ // set_register(v1, static_cast<int64_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08lx\n",
+ reinterpret_cast<void*>(external), arg0);
+ }
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08lx %08lx\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08lx %08lx\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08lx %08lx %08lx\n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p "
+ "args %08lx, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+ FUNCTION_ADDR(target),
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5);
+ }
+ // int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ // set_register(v0, static_cast<int32_t>(result));
+ // set_register(v1, static_cast<int32_t>(result >> 32));
+ ObjectPair result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ set_register(v0, (int64_t)(result.x));
+ set_register(v1, (int64_t)(result.y));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08lx : %08lx\n", get_register(v1), get_register(v0));
+ }
+ set_register(ra, saved_ra);
+ set_pc(get_register(ra));
+
+ } else if (func == BREAK && code <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code, instr);
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint64_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+
+void Simulator::PrintWatchpoint(uint64_t code) {
+ MipsDebugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %ld marker: %3d (instr count: %8ld) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+
+void Simulator::HandleStop(uint64_t code, Instruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (IsEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.Stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
+ }
+}
+
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ int32_t func = instr->FunctionFieldRaw();
+ uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
+ return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+
+bool Simulator::IsEnabledStop(uint64_t code) {
+ ASSERT(code <= kMaxStopCode);
+ ASSERT(code > kMaxWatchpointCode);
+ return !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint64_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+
+void Simulator::DisableStop(uint64_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+
+void Simulator::IncreaseStopCounter(uint64_t code) {
+ ASSERT(code <= kMaxStopCode);
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ PrintF("Stop counter for code %ld has overflowed.\n"
+           "Enabling this code and resetting the counter to 0.\n", code);
+ watched_stops_[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint64_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops_[code].desc);
+ } else {
+ PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
+
+
+void Simulator::SignalExceptions() {
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0) {
+ V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
+ }
+ }
+}
+
+
+// Handle execution based on instruction types.
+
+void Simulator::ConfigureTypeRegister(Instruction* instr,
+ int64_t* alu_out,
+ int64_t* i64hilo,
+ uint64_t* u64hilo,
+ int64_t* next_pc,
+ int64_t* return_addr_reg,
+ bool* do_interrupt,
+ int64_t* i128resultH,
+ int64_t* i128resultL) {
+ // Every local variable declared here needs to be const.
+ // This is to make sure that changed values are sent back to
+ // DecodeTypeRegister correctly.
+
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int64_t rs_reg = instr->RsValue();
+ const int64_t rs = get_register(rs_reg);
+ const uint64_t rs_u = static_cast<uint64_t>(rs);
+ const int64_t rt_reg = instr->RtValue();
+ const int64_t rt = get_register(rt_reg);
+ const uint64_t rt_u = static_cast<uint64_t>(rt);
+ const int64_t rd_reg = instr->RdValue();
+ const uint64_t sa = instr->SaValue();
+
+ const int32_t fs_reg = instr->FsValue();
+
+
+ // ---------- Configuration.
+ switch (op) {
+ case COP1: // Coprocessor instructions.
+ switch (instr->RsFieldRaw()) {
+ case BC1: // Handled in DecodeTypeImmed, should never come here.
+ UNREACHABLE();
+ break;
+ case CFC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ *alu_out = FCSR_;
+ break;
+ case MFC1:
+ *alu_out = static_cast<int64_t>(get_fpu_register_word(fs_reg));
+ break;
+ case DMFC1:
+ *alu_out = get_fpu_register(fs_reg);
+ break;
+ case MFHC1:
+ *alu_out = get_fpu_register_hi_word(fs_reg);
+ break;
+ case CTC1:
+ case MTC1:
+ case DMTC1:
+ case MTHC1:
+ // Do the store in the execution step.
+ break;
+ case S:
+ case D:
+ case W:
+ case L:
+ case PS:
+ // Do everything in the execution step.
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ }
+ break;
+ case COP1X:
+ break;
+ case SPECIAL:
+ switch (instr->FunctionFieldRaw()) {
+ case JR:
+ case JALR:
+ *next_pc = get_register(instr->RsValue());
+ *return_addr_reg = instr->RdValue();
+ break;
+ case SLL:
+ *alu_out = (int32_t)rt << sa;
+ break;
+ case DSLL:
+ *alu_out = rt << sa;
+ break;
+ case DSLL32:
+ *alu_out = rt << sa << 32;
+ break;
+ case SRL:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ *alu_out = (uint32_t)rt_u >> sa;
+ } else {
+ // Logical right-rotate of a word by a fixed number of bits. This
+            // is a special case of SRL, added in MIPS32 Release 2.
+ // RS field is equal to 00001.
+ *alu_out = ((uint32_t)rt_u >> sa) | ((uint32_t)rt_u << (32 - sa));
+ }
+ break;
+ case DSRL:
+ *alu_out = rt_u >> sa;
+ break;
+ case DSRL32:
+ *alu_out = rt_u >> sa >> 32;
+ break;
+ case SRA:
+ *alu_out = (int32_t)rt >> sa;
+ break;
+ case DSRA:
+ *alu_out = rt >> sa;
+ break;
+ case DSRA32:
+ *alu_out = rt >> sa >> 32;
+ break;
+ case SLLV:
+ *alu_out = (int32_t)rt << rs;
+ break;
+ case DSLLV:
+ *alu_out = rt << rs;
+ break;
+ case SRLV:
+ if (sa == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ *alu_out = (uint32_t)rt_u >> rs;
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in MIPS32
+ // Release 2. SA field is equal to 00001.
+ *alu_out =
+ ((uint32_t)rt_u >> rs_u) | ((uint32_t)rt_u << (32 - rs_u));
+ }
+ break;
+ case DSRLV:
+ if (sa == 0) {
+            // Regular logical right-shift of a double word by a variable
+            // number of bits instruction. SA field is always equal to 0.
+            *alu_out = rt_u >> rs;
+          } else {
+            // Logical right-rotate of a double word by a variable number of
+            // bits. This is a special case of the DSRLV instruction, added in
+            // MIPS64 Release 2. SA field is equal to 00001.
+            *alu_out = (rt_u >> rs_u) | (rt_u << (64 - rs_u));
+ }
+ break;
+ case SRAV:
+ *alu_out = (int32_t)rt >> rs;
+ break;
+ case DSRAV:
+ *alu_out = rt >> rs;
+ break;
+ case MFHI:
+ *alu_out = get_register(HI);
+ break;
+ case MFLO:
+ *alu_out = get_register(LO);
+ break;
+ case MULT:
+ // TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo
+ // regs.
+ // TODO(plind) - make the 32-bit MULT ops conform to spec regarding
+          //   checking of 32-bit input values, and the operations left
+          //   undefined by the HW.
+ *i64hilo = static_cast<int64_t>((int32_t)rs) *
+ static_cast<int64_t>((int32_t)rt);
+ break;
+ case MULTU:
+ *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ break;
+ case DMULT:
+ *i128resultH = MultiplyHighSigned(rs, rt);
+ *i128resultL = rs * rt;
+ break;
+ case DMULTU:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case ADD:
+ case DADD:
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
+ }
+ }
+ *alu_out = rs + rt;
+ break;
+ case ADDU: {
+ int32_t alu32_out = rs + rt;
+ // Sign-extend result of 32bit operation into 64bit register.
+ *alu_out = static_cast<int64_t>(alu32_out);
+ }
+ break;
+ case DADDU:
+ *alu_out = rs + rt;
+ break;
+ case SUB:
+ case DSUB:
+ if (!HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
+ }
+ }
+ *alu_out = rs - rt;
+ break;
+ case SUBU: {
+ int32_t alu32_out = rs - rt;
+ // Sign-extend result of 32bit operation into 64bit register.
+ *alu_out = static_cast<int64_t>(alu32_out);
+ }
+ break;
+ case DSUBU:
+ *alu_out = rs - rt;
+ break;
+ case AND:
+ *alu_out = rs & rt;
+ break;
+ case OR:
+ *alu_out = rs | rt;
+ break;
+ case XOR:
+ *alu_out = rs ^ rt;
+ break;
+ case NOR:
+ *alu_out = ~(rs | rt);
+ break;
+ case SLT:
+ *alu_out = rs < rt ? 1 : 0;
+ break;
+ case SLTU:
+ *alu_out = rs_u < rt_u ? 1 : 0;
+ break;
+ // Break and trap instructions.
+          case BREAK:
+            *do_interrupt = true;
+ break;
+ case TGE:
+ *do_interrupt = rs >= rt;
+ break;
+ case TGEU:
+ *do_interrupt = rs_u >= rt_u;
+ break;
+ case TLT:
+ *do_interrupt = rs < rt;
+ break;
+ case TLTU:
+ *do_interrupt = rs_u < rt_u;
+ break;
+ case TEQ:
+ *do_interrupt = rs == rt;
+ break;
+ case TNE:
+ *do_interrupt = rs != rt;
+ break;
+ case MOVN:
+ case MOVZ:
+ case MOVCI:
+ // No action taken on decode.
+ break;
+ case DIV:
+ case DIVU:
+ case DDIV:
+ case DDIVU:
+ // div and divu never raise exceptions.
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case SPECIAL2:
+ switch (instr->FunctionFieldRaw()) {
+ case MUL:
+ // Only the lower 32 bits are kept.
+ *alu_out = (int32_t)rs_u * (int32_t)rt_u;
+ break;
+ case CLZ:
+ // MIPS32 spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ // GCC __builtin_clz: If input is 0, the result is undefined.
+ *alu_out =
+ rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ break;
+ }
+ case EXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ *alu_out = (rs_u & (mask << lsb)) >> lsb;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int64_t rs_reg = instr->RsValue();
+ const int64_t rs = get_register(rs_reg);
+ const uint64_t rs_u = static_cast<uint32_t>(rs);
+ const int64_t rt_reg = instr->RtValue();
+ const int64_t rt = get_register(rt_reg);
+ const uint64_t rt_u = static_cast<uint32_t>(rt);
+ const int64_t rd_reg = instr->RdValue();
+
+ const int32_t fr_reg = instr->FrValue();
+ const int32_t fs_reg = instr->FsValue();
+ const int32_t ft_reg = instr->FtValue();
+ const int64_t fd_reg = instr->FdValue();
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int64_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc
+ int64_t next_pc = 0;
+ int64_t return_addr_reg = 31;
+
+ int64_t i128resultH;
+ int64_t i128resultL;
+
+ // Set up the variables if needed before executing the instruction.
+ ConfigureTypeRegister(instr,
+ &alu_out,
+ &i64hilo,
+ &u64hilo,
+ &next_pc,
+ &return_addr_reg,
+ &do_interrupt,
+ &i128resultH,
+ &i128resultL);
+
+ // ---------- Raise exceptions triggered.
+ SignalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1: // Branch on coprocessor condition.
+ UNREACHABLE();
+ break;
+ case CFC1:
+ set_register(rt_reg, alu_out);
+ break;
+ case MFC1:
+ case DMFC1:
+ case MFHC1:
+ set_register(rt_reg, alu_out);
+ break;
+ case CTC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
+ case MTC1:
+ // Hardware writes upper 32-bits to zero on mtc1.
+ set_fpu_register_hi_word(fs_reg, 0);
+ set_fpu_register_word(fs_reg, registers_[rt_reg]);
+ break;
+ case DMTC1:
+ set_fpu_register(fs_reg, registers_[rt_reg]);
+ break;
+ case MTHC1:
+ set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
+ break;
+ case S:
+ float f;
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_D_S:
+ f = get_fpu_register_float(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(f));
+ break;
+ case CVT_W_S:
+ case CVT_L_S:
+ case TRUNC_W_S:
+ case TRUNC_L_S:
+ case ROUND_W_S:
+ case ROUND_L_S:
+ case FLOOR_W_S:
+ case FLOOR_L_S:
+ case CEIL_W_S:
+ case CEIL_L_S:
+ case CVT_PS_S:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case D:
+ double ft, fs;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ cc = instr->FCccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
+ switch (instr->FunctionFieldRaw()) {
+ case ADD_D:
+ set_fpu_register_double(fd_reg, fs + ft);
+ break;
+ case SUB_D:
+ set_fpu_register_double(fd_reg, fs - ft);
+ break;
+ case MUL_D:
+ set_fpu_register_double(fd_reg, fs * ft);
+ break;
+ case DIV_D:
+ set_fpu_register_double(fd_reg, fs / ft);
+ break;
+ case ABS_D:
+ set_fpu_register_double(fd_reg, fabs(fs));
+ break;
+ case MOV_D:
+ set_fpu_register_double(fd_reg, fs);
+ break;
+ case NEG_D:
+ set_fpu_register_double(fd_reg, -fs);
+ break;
+ case SQRT_D:
+ set_fpu_register_double(fd_reg, sqrt(fs));
+ break;
+ case C_UN_D:
+ set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+ break;
+ case C_EQ_D:
+ set_fcsr_bit(fcsr_cc, (fs == ft));
+ break;
+ case C_UEQ_D:
+ set_fcsr_bit(fcsr_cc,
+ (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+ break;
+ case C_OLT_D:
+ set_fcsr_bit(fcsr_cc, (fs < ft));
+ break;
+ case C_ULT_D:
+ set_fcsr_bit(fcsr_cc,
+ (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+ break;
+ case C_OLE_D:
+ set_fcsr_bit(fcsr_cc, (fs <= ft));
+ break;
+ case C_ULE_D:
+ set_fcsr_bit(fcsr_cc,
+ (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+ break;
+ case CVT_W_D: // Convert double to word.
+ // Rounding modes are not yet supported.
+ ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ // No break.
+ case ROUND_W_D: // Round double to word (round half to even).
+ {
+ double rounded = std::floor(fs + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case TRUNC_W_D: // Truncate double to word (round towards 0).
+ {
+ double rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case FLOOR_W_D: // Round double to word towards negative infinity.
+ {
+ double rounded = std::floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CEIL_W_D: // Round double to word towards positive infinity.
+ {
+ double rounded = std::ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CVT_S_D: // Convert double to float (single).
+ set_fpu_register_float(fd_reg, static_cast<float>(fs));
+ break;
+ case CVT_L_D: // Mips64r2: Truncate double to 64-bit long-word.
+ // Rounding modes are not yet supported.
+ ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ // No break.
+ case ROUND_L_D: { // Mips64r2 instruction.
+ // check error cases
+ double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case TRUNC_L_D: { // Mips64r2 instruction.
+ double rounded = trunc(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case FLOOR_L_D: { // Mips64r2 instruction.
+ double rounded = floor(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case CEIL_L_D: { // Mips64r2 instruction.
+ double rounded = ceil(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case C_F_D:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case W:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_S_W: // Convert word to float (single).
+ alu_out = get_fpu_register_signed_word(fs_reg);
+ set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
+ break;
+ case CVT_D_W: // Convert word to double.
+ alu_out = get_fpu_register_signed_word(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case L:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L: // Mips32r2 instruction.
+ i64 = get_fpu_register(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(i64));
+ break;
+ case CVT_S_L:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ double fr, ft, fs;
+ fr = get_fpu_register_double(fr_reg);
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ set_fpu_register_double(fd_reg, fs * ft + fr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case SPECIAL:
+ switch (instr->FunctionFieldRaw()) {
+ case JR: {
+ Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+ current_pc+Instruction::kInstrSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case JALR: {
+ Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+ current_pc+Instruction::kInstrSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+ set_register(return_addr_reg,
+ current_pc + 2 * Instruction::kInstrSize);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case MULT:
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ break;
+ case MULTU:
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ break;
+ case DMULT:
+ set_register(LO, static_cast<int64_t>(i128resultL));
+ set_register(HI, static_cast<int64_t>(i128resultH));
+ break;
+ case DMULTU:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case DIV:
+ case DDIV:
+ // Divide by zero and overflow was not checked in the configuration
+ // step - div and divu do not raise exceptions. On division by 0
+ // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+ // return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt != 0) {
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ }
+ break;
+ case DIVU:
+ if (rt_u != 0) {
+ set_register(LO, rs_u / rt_u);
+ set_register(HI, rs_u % rt_u);
+ }
+ break;
+ // Break and trap instructions.
+ case BREAK:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ if (do_interrupt) {
+ SoftwareInterrupt(instr);
+ }
+ break;
+ // Conditional moves.
+ case MOVN:
+ if (rt) {
+ set_register(rd_reg, rs);
+ TraceRegWr(rs);
+ }
+ break;
+ case MOVCI: {
+ uint32_t cc = instr->FBccValue();
+ uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+ if (instr->Bit(16)) { // Read Tf bit.
+ if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ } else {
+ if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ }
+ break;
+ }
+ case MOVZ:
+ if (!rt) {
+ set_register(rd_reg, rs);
+ TraceRegWr(rs);
+ }
+ break;
+ default: // For other special opcodes we do the default operation.
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ }
+ break;
+ case SPECIAL2:
+ switch (instr->FunctionFieldRaw()) {
+ case MUL:
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ set_register(LO, Unpredictable);
+ set_register(HI, Unpredictable);
+ break;
+ default: // For other special2 opcodes we do the default operation.
+ set_register(rd_reg, alu_out);
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS:
+ // Ins instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ TraceRegWr(alu_out);
+ break;
+ case EXT:
+ // Ext instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ TraceRegWr(alu_out);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in common
+ // cases.
+ default:
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ }
+}
+
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::DecodeTypeImmediate(Instruction* instr) {
+ // Instruction fields.
+ Opcode op = instr->OpcodeFieldRaw();
+ int64_t rs = get_register(instr->RsValue());
+ uint64_t rs_u = static_cast<uint64_t>(rs);
+ int64_t rt_reg = instr->RtValue(); // Destination register.
+ int64_t rt = get_register(rt_reg);
+ int16_t imm16 = instr->Imm16Value();
+
+ int32_t ft_reg = instr->FtValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int64_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ int64_t addr = 0x0;
+ // Value to be written in memory.
+ uint64_t mem_value = 0x0;
+ // Alignment for 32-bit integers used in LWL, LWR, etc.
+ const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
+
+ // ---------- Configuration (and execution for REGIMM).
+ switch (op) {
+ // ------------- COP1. Coprocessor instructions.
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1: // Branch on coprocessor condition.
+ cc = instr->FBccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
+ cc_value = test_fcsr_bit(fcsr_cc);
+ do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ // ------------- REGIMM class.
+ case REGIMM:
+ switch (instr->RtFieldRaw()) {
+ case BLTZ:
+ do_branch = (rs < 0);
+ break;
+ case BLTZAL:
+ do_branch = rs < 0;
+ break;
+ case BGEZ:
+ do_branch = rs >= 0;
+ break;
+ case BGEZAL:
+ do_branch = rs >= 0;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ switch (instr->RtFieldRaw()) {
+ case BLTZ:
+ case BLTZAL:
+ case BGEZ:
+ case BGEZAL:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ if (instr->IsLinkingInstruction()) {
+ set_register(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ default:
+ break;
+ }
+ break; // case REGIMM.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we don't
+ // need to replace rt with zero.
+ case BEQ:
+ do_branch = (rs == rt);
+ break;
+ case BNE:
+ do_branch = rs != rt;
+ break;
+ case BLEZ:
+ do_branch = rs <= 0;
+ break;
+ case BGTZ:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions.
+ case ADDI:
+ case DADDI:
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] =
+ rs < (Registers::kMinValue - se_imm16);
+ }
+ }
+ alu_out = rs + se_imm16;
+ break;
+ case ADDIU: {
+ int32_t alu32_out = rs + se_imm16;
+ // Sign-extend result of 32bit operation into 64bit register.
+ alu_out = static_cast<int64_t>(alu32_out);
+ }
+ break;
+ case DADDIU:
+ alu_out = rs + se_imm16;
+ break;
+ case SLTI:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case SLTIU:
+ alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ break;
+ case ANDI:
+ alu_out = rs & oe_imm16;
+ break;
+ case ORI:
+ alu_out = rs | oe_imm16;
+ break;
+ case XORI:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case LUI: {
+ int32_t alu32_out = (oe_imm16 << 16);
+ // Sign-extend result of 32bit operation into 64bit register.
+ alu_out = static_cast<int64_t>(alu32_out);
+ }
+ break;
+ // ------------- Memory instructions.
+ case LB:
+ addr = rs + se_imm16;
+ alu_out = ReadB(addr);
+ break;
+ case LH:
+ addr = rs + se_imm16;
+ alu_out = ReadH(addr, instr);
+ break;
+ case LWL: {
+ // al_offset is offset of the effective address within an aligned word.
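+ // LWL merges the unaligned "left" part into rt: the aligned word is read,
+ // shifted left by byte_shift bytes, and combined with the low byte_shift
+ // bytes of rt. E.g. al_offset == 1 gives byte_shift == 2, so the two low
+ // bytes of memory fill the two high bytes of rt while rt's two low bytes
+ // are preserved.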
+ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+ uint8_t byte_shift = kInt32AlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case LW:
+ addr = rs + se_imm16;
+ alu_out = ReadW(addr, instr);
+ break;
+ case LWU:
+ addr = rs + se_imm16;
+ alu_out = ReadWU(addr, instr);
+ break;
+ case LD:
+ addr = rs + se_imm16;
+ alu_out = Read2W(addr, instr);
+ break;
+ case LBU:
+ addr = rs + se_imm16;
+ alu_out = ReadBU(addr);
+ break;
+ case LHU:
+ addr = rs + se_imm16;
+ alu_out = ReadHU(addr, instr);
+ break;
+ case LWR: {
+ // al_offset is offset of the effective address within an aligned word.
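+ // LWR mirrors LWL: the aligned word is shifted right by al_offset bytes
+ // into the low part of rt, and rt's upper bytes (selected by mask) are
+ // preserved. E.g. al_offset == 3 moves only the top byte of memory into
+ // rt's low byte, keeping rt's upper three bytes.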
+ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+ uint8_t byte_shift = kInt32AlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+ alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case SB:
+ addr = rs + se_imm16;
+ break;
+ case SH:
+ addr = rs + se_imm16;
+ break;
+ case SWL: {
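+ // SWL is the store counterpart of LWL: the upper bytes of rt are shifted
+ // down into the low bytes of the aligned memory word, and the remaining
+ // memory bytes (selected by mask) are preserved.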
+ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+ uint8_t byte_shift = kInt32AlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
+ case SW:
+ case SD:
+ addr = rs + se_imm16;
+ break;
+ case SWR: {
+ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case LWC1:
+ addr = rs + se_imm16;
+ alu_out = ReadW(addr, instr);
+ break;
+ case LDC1:
+ addr = rs + se_imm16;
+ fp_out = ReadD(addr, instr);
+ break;
+ case SWC1:
+ case SDC1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // ---------- Raise exceptions triggered.
+ SignalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case BEQ:
+ case BNE:
+ case BLEZ:
+ case BGTZ:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ if (instr->IsLinkingInstruction()) {
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case ADDI:
+ case DADDI:
+ case ADDIU:
+ case DADDIU:
+ case SLTI:
+ case SLTIU:
+ case ANDI:
+ case ORI:
+ case XORI:
+ case LUI:
+ set_register(rt_reg, alu_out);
+ TraceRegWr(alu_out);
+ break;
+ // ------------- Memory instructions.
+ case LB:
+ case LH:
+ case LWL:
+ case LW:
+ case LWU:
+ case LD:
+ case LBU:
+ case LHU:
+ case LWR:
+ set_register(rt_reg, alu_out);
+ break;
+ case SB:
+ WriteB(addr, static_cast<int8_t>(rt));
+ break;
+ case SH:
+ WriteH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case SWL:
+ WriteW(addr, mem_value, instr);
+ break;
+ case SW:
+ WriteW(addr, rt, instr);
+ break;
+ case SD:
+ Write2W(addr, rt, instr);
+ break;
+ case SWR:
+ WriteW(addr, mem_value, instr);
+ break;
+ case LWC1:
+ set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(ft_reg, static_cast<int32_t>(alu_out));
+ break;
+ case LDC1:
+ set_fpu_register_double(ft_reg, fp_out);
+ break;
+ case SWC1:
+ addr = rs + se_imm16;
+ WriteW(addr, get_fpu_register(ft_reg), instr);
+ break;
+ case SDC1:
+ addr = rs + se_imm16;
+ WriteD(addr, get_fpu_register_double(ft_reg), instr);
+ break;
+ default:
+ break;
+ }
+
+
+ if (execute_branch_delay_instruction) {
+ // Execute branch delay slot.
+ // We don't check for end_sim_pc. First it should not be met as the current
+ // pc is valid. Secondly a jump should always execute its branch delay slot.
+ Instruction* branch_delay_instr =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra) {
+ set_pc(next_pc);
+ }
+}
+
+
+ // Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::DecodeTypeJump(Instruction* instr) {
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int64_t pc_high_bits = current_pc & 0xfffffffff0000000;
+ // Next pc.
+ int64_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
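+ // J/JAL replace the low 28 bits of the pc with instr_index << 2 and keep
+ // the upper bits, so jumps stay within the current 256 MB region.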
+
+ // Execute branch delay slot.
+ // We don't check for end_sim_pc. First it should not be met as the current pc
+ // is valid. Secondly a jump should always execute its branch delay slot.
+ Instruction* branch_delay_instr =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->IsLinkingInstruction()) {
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ }
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
+
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(isolate_->simulator_i_cache(), instr);
+ }
+ pc_modified_ = false;
+
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ if (::v8::internal::FLAG_trace_sim) {
+ SNPrintF(trace_buf_, " ");
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ }
+
+ switch (instr->InstructionType()) {
+ case Instruction::kRegisterType:
+ DecodeTypeRegister(instr);
+ break;
+ case Instruction::kImmediateType:
+ DecodeTypeImmediate(instr);
+ break;
+ case Instruction::kJumpType:
+ DecodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(" 0x%08lx %-44s %s\n", reinterpret_cast<intptr_t>(instr),
+ buffer.start(), trace_buf_.start());
+ }
+
+ if (!pc_modified_) {
+ set_register(pc, reinterpret_cast<int64_t>(instr) +
+ Instruction::kInstrSize);
+ }
+}
+
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ InstructionDecode(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+ // we reach the particular instruction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+
+void Simulator::CallInternal(byte* entry) {
+ // Prepare to execute the code at entry.
+ set_register(pc, reinterpret_cast<int64_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ set_register(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ // The code below assumes that r9 is not used as sb (static base) in
+ // simulator code and therefore is regarded as a callee-saved register.
+ int64_t s0_val = get_register(s0);
+ int64_t s1_val = get_register(s1);
+ int64_t s2_val = get_register(s2);
+ int64_t s3_val = get_register(s3);
+ int64_t s4_val = get_register(s4);
+ int64_t s5_val = get_register(s5);
+ int64_t s6_val = get_register(s6);
+ int64_t s7_val = get_register(s7);
+ int64_t gp_val = get_register(gp);
+ int64_t sp_val = get_register(sp);
+ int64_t fp_val = get_register(fp);
+
+ // Set up the callee-saved registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ set_register(s0, callee_saved_value);
+ set_register(s1, callee_saved_value);
+ set_register(s2, callee_saved_value);
+ set_register(s3, callee_saved_value);
+ set_register(s4, callee_saved_value);
+ set_register(s5, callee_saved_value);
+ set_register(s6, callee_saved_value);
+ set_register(s7, callee_saved_value);
+ set_register(gp, callee_saved_value);
+ set_register(fp, callee_saved_value);
+
+ // Start the simulation.
+ Execute();
+
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(callee_saved_value, get_register(s0));
+ CHECK_EQ(callee_saved_value, get_register(s1));
+ CHECK_EQ(callee_saved_value, get_register(s2));
+ CHECK_EQ(callee_saved_value, get_register(s3));
+ CHECK_EQ(callee_saved_value, get_register(s4));
+ CHECK_EQ(callee_saved_value, get_register(s5));
+ CHECK_EQ(callee_saved_value, get_register(s6));
+ CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(gp));
+ CHECK_EQ(callee_saved_value, get_register(fp));
+
+ // Restore callee-saved registers with the original value.
+ set_register(s0, s0_val);
+ set_register(s1, s1_val);
+ set_register(s2, s2_val);
+ set_register(s3, s3_val);
+ set_register(s4, s4_val);
+ set_register(s5, s5_val);
+ set_register(s6, s6_val);
+ set_register(s7, s7_val);
+ set_register(gp, gp_val);
+ set_register(sp, sp_val);
+ set_register(fp, fp_val);
+}
+
+
+int64_t Simulator::Call(byte* entry, int argument_count, ...) {
+ const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments.
+
+ // First four arguments passed in registers in both ABI's.
+ ASSERT(argument_count >= 4);
+ set_register(a0, va_arg(parameters, int64_t));
+ set_register(a1, va_arg(parameters, int64_t));
+ set_register(a2, va_arg(parameters, int64_t));
+ set_register(a3, va_arg(parameters, int64_t));
+
+ if (kMipsAbi == kN64) {
+ // Up to eight arguments passed in registers in N64 ABI.
+ // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
+ if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
+ if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
+ if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
+ if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
+ }
+
+ // Remaining arguments passed on stack.
+ int64_t original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int stack_args_count = (argument_count > kRegisterPassedArguments) ?
+ (argument_count - kRegisterPassedArguments) : 0;
+ int stack_args_size = stack_args_count * sizeof(int64_t) + kCArgsSlotsSize;
+ int64_t entry_stack = original_stack - stack_args_size;
+
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = kRegisterPassedArguments; i < argument_count; i++) {
+ int stack_index = i - kRegisterPassedArguments + kCArgSlotCount;
+ stack_argument[stack_index] = va_arg(parameters, int64_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
+
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+ int64_t result = get_register(v0);
+ return result;
+}
+
+
+double Simulator::CallFP(byte* entry, double d0, double d1) {
+ if (!IsMipsSoftFloatABI) {
+ const FPURegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+ set_fpu_register_double(f12, d0);
+ set_fpu_register_double(fparg2, d1);
+ } else {
+ int buffer[2];
+ ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
+ memcpy(buffer, &d0, sizeof(d0));
+ set_dw_register(a0, buffer);
+ memcpy(buffer, &d1, sizeof(d1));
+ set_dw_register(a2, buffer);
+ }
+ CallInternal(entry);
+ if (!IsMipsSoftFloatABI) {
+ return get_fpu_register_double(f0);
+ } else {
+ return get_double_from_register_pair(v0);
+ }
+}
+
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ int64_t current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+
+#undef UNSUPPORTED
+
+} } // namespace v8::internal
+
+#endif // USE_SIMULATOR
+
+#endif // V8_TARGET_ARCH_MIPS64
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Declares a Simulator for MIPS instructions if we are not generating a native
+// MIPS binary. This Simulator allows us to run and debug MIPS code generation
+// on regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forwards to the real entry
+// on a MIPS HW platform.
+
+#ifndef V8_MIPS_SIMULATOR_MIPS_H_
+#define V8_MIPS_SIMULATOR_MIPS_H_
+
+#include "src/allocation.h"
+#include "src/mips64/constants-mips64.h"
+
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native mips platform.
+
+namespace v8 {
+namespace internal {
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ entry(p0, p1, p2, p3, p4)
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type arm_regexp_matcher.
+// The fifth (or ninth) argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#ifdef MIPS_ABI_N64
+typedef int (*mips_regexp_matcher)(String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+
+#else // O32 Abi.
+
+typedef int (*mips_regexp_matcher)(String* input,
+ int32_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ void* return_address,
+ int* output,
+ int32_t output_size,
+ Address stack_base,
+ int32_t direct_call,
+ Isolate* isolate);
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+
+#endif // MIPS_ABI_N64
+
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on mips uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
+};
+
+} } // namespace v8::internal
+
+ // Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (reinterpret_cast<uintptr_t>(this) >= limit ? \
+ reinterpret_cast<uintptr_t>(this) - limit : 0)
+
+#else // !defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/assembler.h"
+#include "src/hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
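+ // With kPageShift == 12 and kLineShift == 2, a CachePage tracks a 4 KB
+ // page in 4-byte lines (1024 validity bytes).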
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+class Simulator {
+ public:
+ friend class MipsDebugger;
+
+ // Registers are declared in order. See SMRL chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3, a4, a5, a6, a7,
+ t0, t1, t2, t3,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ // Generated code will always use doubles. So we will only use even registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+ f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
+ f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+ f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int64_t value);
+ void set_register_word(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
+ int64_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ // Same for FPURegisters.
+ void set_fpu_register(int fpureg, int64_t value);
+ void set_fpu_register_word(int fpureg, int32_t value);
+ void set_fpu_register_hi_word(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
+ void set_fpu_register_double(int fpureg, double value);
+ int64_t get_fpu_register(int fpureg) const;
+ int32_t get_fpu_register_word(int fpureg) const;
+ int32_t get_fpu_register_signed_word(int fpureg) const;
+ uint32_t get_fpu_register_hi_word(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
+ double get_fpu_register_double(int fpureg) const;
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
+ bool set_fcsr_round64_error(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ // Call on program start.
+ static void Initialize(Isolate* isolate);
+
+ // V8 generally calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 7 parameters. This is a convenience function,
+ // which sets up the simulator state and grabs the result on return.
+ int64_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ double CallFP(byte* entry, double d0, double d1);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
+ // ICache checking.
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint32_t ReadBU(int64_t addr);
+ inline int32_t ReadB(int64_t addr);
+ inline void WriteB(int64_t addr, uint8_t value);
+ inline void WriteB(int64_t addr, int8_t value);
+
+ inline uint16_t ReadHU(int64_t addr, Instruction* instr);
+ inline int16_t ReadH(int64_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
+
+ inline uint32_t ReadWU(int64_t addr, Instruction* instr);
+ inline int32_t ReadW(int64_t addr, Instruction* instr);
+ inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
+ inline int64_t Read2W(int64_t addr, Instruction* instr);
+ inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
+
+ inline double ReadD(int64_t addr, Instruction* instr);
+ inline void WriteD(int64_t addr, double value, Instruction* instr);
+
+ // Helper for debugging memory access.
+ inline void DieOrDebug();
+
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD,
+ DWORD
+ // DFLOAT - Floats may have printing issues due to paired lwc1's
+ };
+
+ void TraceRegWr(int64_t value);
+ void TraceMemWr(int64_t addr, int64_t value, TraceType t);
+ void TraceMemRd(int64_t addr, int64_t value);
+
+ // Operations depending on endianness.
+ // Get Double Higher / Lower word.
+ inline int32_t GetDoubleHIW(double* addr);
+ inline int32_t GetDoubleLOW(double* addr);
+ // Set Double Higher / Lower word.
+ inline int32_t SetDoubleHIW(double* addr);
+ inline int32_t SetDoubleLOW(double* addr);
+
+ // Executing is handled based on the instruction type.
+ void DecodeTypeRegister(Instruction* instr);
+
+ // Helper function for DecodeTypeRegister.
+ void ConfigureTypeRegister(Instruction* instr,
+ int64_t* alu_out,
+ int64_t* i64hilo,
+ uint64_t* u64hilo,
+ int64_t* next_pc,
+ int64_t* return_addr_reg,
+ bool* do_interrupt,
+ int64_t* result128H,
+ int64_t* result128L);
+
+ void DecodeTypeImmediate(Instruction* instr);
+ void DecodeTypeJump(Instruction* instr);
+
+ // Used for breakpoints and traps.
+ void SoftwareInterrupt(Instruction* instr);
+
+ // Stop helper functions.
+ bool IsWatchpoint(uint64_t code);
+ void PrintWatchpoint(uint64_t code);
+ void HandleStop(uint64_t code, Instruction* instr);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint64_t code);
+ void EnableStop(uint64_t code);
+ void DisableStop(uint64_t code);
+ void IncreaseStopCounter(uint64_t code);
+ void PrintStopInfo(uint64_t code);
+
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void BranchDelayInstructionDecode(Instruction* instr) {
+ if (instr->InstructionBits() == nopInstr) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ return;
+ }
+
+ if (instr->IsForbiddenInBranchDelay()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Eror:Unexpected %i opcode in a branch delay slot.",
+ instr->OpcodeValue());
+ }
+ InstructionDecode(instr);
+ }
+
+ // ICache.
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void SignalExceptions();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ ExternalReference::Type type);
+
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
+ void SetFpResult(const double& result);
+
+ void CallInternal(byte* entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ // Allocate 1MB for stack.
+ size_t stack_size_;
+ char* stack_;
+ bool pc_modified_;
+ int64_t icount_;
+ int break_count_;
+ EmbeddedVector<char, 128> trace_buf_;
+
+ // Debugger input.
+ char* last_debugger_input_;
+
+ // Icache simulation.
+ v8::internal::HashMap* i_cache_;
+
+ v8::internal::Isolate* isolate_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+};
+
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+
+#ifdef MIPS_ABI_N64
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+#else // Must be O32 Abi.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+#endif // MIPS_ABI_N64
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+ // stack causes stack overflow errors, since the simulator ignores the input.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(Isolate::Current());
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch() {
+ Simulator::current(Isolate::Current())->PopAddress();
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // !defined(USE_SIMULATOR)
+#endif // V8_MIPS_SIMULATOR_MIPS_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/ic-inl.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register receiver,
+ Register name,
+ // Number of the cache entry, not scaled.
+ Register offset,
+ Register scratch,
+ Register scratch2,
+ Register offset_scratch) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
+ uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
+ uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());
+
+ // Check the relative positions of the address fields.
+ ASSERT(value_off_addr > key_off_addr);
+ ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+ ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+ ASSERT(map_off_addr > key_off_addr);
+ ASSERT((map_off_addr - key_off_addr) % 4 == 0);
+ ASSERT((map_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
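+ // offset_scratch = (offset << 1) + offset = offset * 3.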
+ __ dsll(offset_scratch, offset, 1);
+ __ Daddu(offset_scratch, offset_scratch, offset);
+
+ // Calculate the base address of the entry.
+ __ li(base_addr, Operand(key_offset));
+ __ dsll(at, offset_scratch, kPointerSizeLog2);
+ __ Daddu(base_addr, base_addr, at);
+
+ // Check that the key in the entry matches the name.
+ __ ld(at, MemOperand(base_addr, 0));
+ __ Branch(&miss, ne, name, Operand(at));
+
+ // Check the map matches.
+ __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, at, Operand(scratch2));
+
+ // Get the code entry from the cache.
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+ __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+ __ Branch(&miss, ne, flags_reg, Operand(flags));
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
+
+ // Check that receiver is a JSObject.
+ __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ Register tmp = properties;
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+ __ Branch(miss_label, ne, map, Operand(tmp));
+
+ // Restore the temporarily used register.
+ __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ // ASSERT(sizeof(Entry) == 12);
+ // ASSERT(sizeof(Entry) == 3 * kPointerSize);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+ ASSERT(!extra.is(receiver));
+ ASSERT(!extra.is(name));
+ ASSERT(!extra.is(scratch));
+ ASSERT(!extra2.is(receiver));
+ ASSERT(!extra2.is(name));
+ ASSERT(!extra2.is(scratch));
+ ASSERT(!extra2.is(extra));
+
+ // Check register validity.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
+ ASSERT(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+ extra2, extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Daddu(scratch, scratch, at);
+ uint64_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ dsrl(scratch, scratch, kHeapObjectTagSize);
+ __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+ __ And(scratch, scratch, Operand(mask));
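+ // scratch now holds the primary table index:
+ // (((hash + map) >> kHeapObjectTagSize) ^ (flags >> kHeapObjectTagSize)) & mask.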
+
+ // Probe the primary table.
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kPrimary,
+ receiver,
+ name,
+ scratch,
+ extra,
+ extra2,
+ extra3);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ dsrl(at, name, kHeapObjectTagSize);
+ __ Dsubu(scratch, scratch, at);
+ uint64_t mask2 = kSecondaryTableSize - 1;
+ __ Daddu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+ __ And(scratch, scratch, Operand(mask2));
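+ // scratch now holds the secondary table index:
+ // ((primary_index - (name >> kHeapObjectTagSize)) +
+ //  (flags >> kHeapObjectTagSize)) & mask2.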
+
+ // Probe the secondary table.
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kSecondary,
+ receiver,
+ name,
+ scratch,
+ extra,
+ extra2,
+ extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+ extra2, extra3);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ ld(prototype,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ __ ld(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ __ ld(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ ld(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ ld(scratch, MemOperand(cp, offset));
+ __ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ ld(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ li(at, function);
+ __ Branch(miss, ne, at, Operand(scratch));
+
+ // Load its initial map. The global functions all have initial maps.
+ __ li(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!representation.IsDouble());
+ int offset = index * kPointerSize;
+ if (!inobject) {
+ // Calculate the offset into the properties array.
+ offset = offset + FixedArray::kHeaderSize;
+ __ ld(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ src = dst;
+ }
+ __ ld(dst, FieldMemOperand(src, offset));
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss_label);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Load length directly from the JS array.
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, scratch1);
+}
+
+
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ li(scratch, Operand(cell));
+ __ ld(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(miss, ne, scratch, Operand(at));
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+ // Generate StoreTransition code; the value is passed in the a0 register.
+// After executing generated code, the receiver_reg and name_reg
+// may be clobbered.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
+ // a0 : value.
+ Label exit;
+
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ li(scratch1, constant);
+ __ Branch(miss_label, ne, value_reg, Operand(scratch1));
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ Handle<Map> current;
+ if (!it.Done()) {
+ __ ld(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ // Do the CompareMap() directly within the Branch() functions.
+ current = it.Current();
+ it.Advance();
+ if (it.Done()) {
+ __ Branch(miss_label, ne, scratch1, Operand(current));
+ break;
+ }
+ __ Branch(&do_store, eq, scratch1, Operand(current));
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ mtc1(scratch1, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ push(receiver_reg);
+ __ li(a2, Operand(transition));
+ __ Push(a2, a0);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3, 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ li(scratch1, Operand(transition));
+ __ sd(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(a0));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ if (representation.IsDouble()) {
+ __ sd(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ sd(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array.
+ __ ld(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ sd(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ sd(value_reg, FieldMemOperand(scratch1, offset));
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ bind(&exit);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+}
+
+
+ // Generate StoreField code; the value is passed in the a0 register.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name
+// registers have their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // a0 : value
+ Label exit;
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ FieldIndex index = lookup->GetFieldIndex();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ ld(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ Handle<Map> current;
+ while (true) {
+ // Do the CompareMap() directly within the Branch() functions.
+ current = it.Current();
+ it.Advance();
+ if (it.Done()) {
+ __ Branch(miss_label, ne, scratch1, Operand(current));
+ break;
+ }
+ __ Branch(&do_store, eq, scratch1, Operand(current));
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ // Load the double storage.
+ if (index.is_inobject()) {
+ __ ld(scratch1, FieldMemOperand(receiver_reg, index.offset()));
+ } else {
+ __ ld(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ ld(scratch1, FieldMemOperand(scratch1, index.offset()));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch2, value_reg);
+ __ mtc1(scratch2, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index.is_inobject()) {
+ // Set the property straight into the object.
+ __ sd(value_reg, FieldMemOperand(receiver_reg, index.offset()));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ index.offset(),
+ name_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ // Get the properties array.
+ __ ld(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ sd(value_reg, FieldMemOperand(scratch1, index.offset()));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ index.offset(),
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ bind(&exit);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ li(this->name(), Operand(name));
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ li(scratch, Operand(interceptor));
+ __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!receiver.is(scratch_in));
+ // Preparing to push, adjust sp.
+ __ Dsubu(sp, sp, Operand((argc + 1) * kPointerSize));
+ __ sd(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ sd(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg.
+ }
+ ASSERT(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = a0;
+ Register call_data = a4;
+ Register holder = a2;
+ Register api_function_address = a1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ li(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ li(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ li(call_data, api_call_info);
+ __ ld(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ li(call_data, call_data_obj);
+ }
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+ ExternalReference ref =
+ ExternalReference(&fun,
+ type,
+ masm->isolate());
+ __ li(api_function_address, Operand(ref));
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
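+ // The negative lookup proved |name| is absent on this object; continue
+ // down the chain by loading the prototype from this object's map.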
+ __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ ld(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ Register map_reg = scratch1;
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ } else {
+ __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ li(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ Branch(&success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ Branch(&success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ ld(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3();
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ ld(scratch2(), FieldMemOperand(pointer, kValueOffset));
+ __ Branch(&miss, ne, scratch2(), Operand(callback));
+ }
+
+ HandlerFrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ FieldIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ } else {
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ }
+}
+
+
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ li(v0, value);
+ __ Ret();
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ // Build the AccessorInfo::args_ list on the stack and push the property name
+ // below the exit frame to make the GC aware of them and store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ ASSERT(!scratch2().is(reg));
+ ASSERT(!scratch3().is(reg));
+ ASSERT(!scratch4().is(reg));
+ __ push(receiver());
+ if (heap()->InNewSpace(callback->data())) {
+ __ li(scratch3(), callback);
+ __ ld(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
+ }
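+ // Reserve six stack slots: one for the property name and five for the
+ // remaining PropertyCallbackArguments fields (the receiver pushed above is
+ // the sixth).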
+ __ Dsubu(sp, sp, 6 * kPointerSize);
+ __ sd(scratch3(), MemOperand(sp, 5 * kPointerSize));
+ __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ sd(scratch3(), MemOperand(sp, 4 * kPointerSize));
+ __ sd(scratch3(), MemOperand(sp, 3 * kPointerSize));
+ __ li(scratch4(),
+ Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(scratch4(), MemOperand(sp, 2 * kPointerSize));
+ __ sd(reg, MemOperand(sp, 1 * kPointerSize));
+ __ sd(name(), MemOperand(sp, 0 * kPointerSize));
+ __ Daddu(scratch2(), sp, 1 * kPointerSize);
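+ // scratch2 now points at args_[0] (the holder); the property name sits just
+ // below it at sp.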
+
+ __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
+ // Abi for CallApiGetter.
+ Register getter_address_reg = a2;
+
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ li(getter_address_reg, Operand(ref));
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<Object> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only these two; other cases may be added
+ // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsFound() && lookup->IsCacheable()) {
+ if (lookup->IsField()) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from
+ // the holder and it is needed should the interceptor return without any
+ // result. The CALLBACKS case needs the receiver to be passed into C++ code,
+ // while the FIELD case might cause a miss during the prototype check.
+ bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
+ (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
+ }
+ // Leave the internal frame.
+ }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ }
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+
+ __ Push(receiver(), holder_reg); // Receiver.
+ __ li(at, Operand(callback)); // Callback info.
+ __ push(at);
+ __ li(at, Operand(name));
+ __ Push(at, value());
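+ // Five values are now on the stack for kStoreCallbackProperty:
+ // receiver, holder, callback info, name and value.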
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ld(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(v0);
+
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
+
+ // Return undefined if the maps of the full prototype chain are still the same.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ static Register registers[] = { receiver, name, a3, a0, a4, a5 };
+ return registers;
+}
+
+
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ static Register registers[] = { receiver, name, a3, a0, a4, a5 };
+ return registers;
+}
+
+
+Register StoreStubCompiler::value() {
+ return a0;
+}
+
+
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a1, a2, a3, a4, a5 };
+ return registers;
+}
+
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a2, a1, a3, a4, a5 };
+ return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ld(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<HeapType> type,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete) {
+ Label miss;
+
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
+
+ // Get the value from the cell.
+ __ li(a3, Operand(cell));
+ __ ld(a4, FieldMemOperand(a3, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&miss, eq, a4, Operand(at));
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
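+ // The mov below fills the branch delay slot of the return, so v0 holds the
+ // cell value when the stub returns.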
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a4);
+
+ HandlerFrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ Branch(&miss, ne, this->name(), Operand(name));
+ }
+
+ Label number_case;
+ Register match = scratch1();
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
+
+ Register map_reg = scratch2();
+
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match.
+ // Separate compare from branch, to provide path for above JumpIfSmi().
+ __ Dsubu(match, map_reg, Operand(map));
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
+ eq, match, Operand(zero_reg));
+ }
+ }
+ ASSERT(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
+}
+
+
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
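+ // Compare the receiver map against each handled map: either jump straight
+ // to the handler or load the transition map first and then jump.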
+ for (int i = 0; i < receiver_count; ++i) {
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
+ scratch1(), Operand(receiver_maps->at(i)));
+ } else {
+ Label next_map;
+ __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
+ __ li(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // The return address is in ra
+ Label slow, miss;
+
+ Register key = LoadIC::NameRegister();
+ Register receiver = LoadIC::ReceiverRegister();
+ ASSERT(receiver.is(a1));
+ ASSERT(key.is(a2));
+
+ __ UntagAndJumpIfNotSmi(a6, key, &miss);
+ __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ __ LoadFromNumberDictionary(&slow, a4, key, v0, a6, a3, a5);
+ __ Ret();
+
+ // Slow case, key and receiver still unmodified.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ // Miss case, call the runtime.
+ __ bind(&miss);
+
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
// then we have to check that the strings are aligned before
// comparing them blockwise.
const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT
- uint32_t pa_addr = reinterpret_cast<uint32_t>(a);
- uint32_t pb_addr = reinterpret_cast<uint32_t>(b);
+ uintptr_t pa_addr = reinterpret_cast<uintptr_t>(a);
+ uintptr_t pb_addr = reinterpret_cast<uintptr_t>(b);
if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
#endif
const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT
#include "src/arm64/constants-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/constants-mips64.h" // NOLINT
#endif
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
+#elif V8_TARGET_ARCH_MIPS64
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::fp));
#endif
}
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_MIPS64
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
+ defined(V8_TARGET_ARCH_MIPS64)
// Deserialize a new object from pointer found in code and write
// a pointer to it to the current object. Required only for MIPS or ARM
// with ool constant pool, and omitted on the other architectures because
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
if (!root->IsSmi() && root == heap_object) {
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
+ defined(V8_TARGET_ARCH_MIPS64)
if (from == kFromCode) {
// In order to avoid code bloat in the deserializer we don't have
// support for the encoding that specifies a particular root should
#include "src/arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/simulator-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/simulator-x87.h"
#else
'test-macro-assembler-mips.cc'
],
}],
+ ['v8_target_arch=="mips64el"', {
+ 'sources': [
+ 'test-assembler-mips64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-mips64.cc',
+ 'test-disasm-mips64.cc',
+ 'test-macro-assembler-mips64.cc'
+ ],
+ }],
['v8_target_arch=="x87"', {
'sources': [ ### gcmole(arch:x87) ###
'test-assembler-x87.cc',
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
}], # 'arch == mipsel or arch == mips'
+##############################################################################
+['arch == mips64el', {
+
+ # BUG(2657): Test sometimes times out on MIPS simulator.
+ 'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
+
+ # BUG(v8:3154).
+ 'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
+
+ # BUG(1075): Unresolved crashes on MIPS also.
+ 'test-serialize/Deserialize': [SKIP],
+ 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
+ 'test-serialize/DeserializeAndRunScript2': [SKIP],
+ 'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+}], # 'arch == mips64el'
+
##############################################################################
['arch == x87', {
} else {
uint64_t stored_bits = DoubleToBits(stored_number);
// Check if quiet nan (bits 51..62 all set).
-#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
+ !defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
} else {
uint64_t stored_bits = DoubleToBits(stored_date);
// Check if quiet nan (bits 51..62 all set).
-#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
+ !defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/v8.h"
+
+#include "src/disassembler.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/macro-assembler-mips64.h"
+#include "src/mips64/simulator-mips64.h"
+
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
+
+
+#define __ assm.
+
+
+TEST(MIPS0) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Addition.
+ __ addu(v0, a0, a1);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ int64_t res =
+ reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ ::printf("f() = %ld\n", res);
+ CHECK_EQ(0xabcL, res);
+}
+
+
+TEST(MIPS1) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+ Label L, C;
+
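+ // Sum the integers from a0 down to 1 in a simple loop; for an input of 50
+ // the expected result is 50 * 51 / 2 = 1275.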
+ __ mov(a1, a0);
+ __ li(v0, 0);
+ __ b(&C);
+ __ nop();
+
+ __ bind(&L);
+ __ addu(v0, v0, a1);
+ __ addiu(a1, a1, -1);
+
+ __ bind(&C);
+ __ xori(v1, a1, 0);
+ __ Branch(&L, ne, v1, Operand((int64_t)0));
+ __ nop();
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ int64_t res =
+ reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
+ ::printf("f() = %ld\n", res);
+ CHECK_EQ(1275L, res);
+}
+
+
+TEST(MIPS2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ Label exit, error;
+
+ // ----- Test all instructions.
+
+ // Test lui, ori, and addiu, used in the li pseudo-instruction.
+ // This way we can then safely load registers with chosen values.
+
+ __ ori(a4, zero_reg, 0);
+ __ lui(a4, 0x1234);
+ __ ori(a4, a4, 0);
+ __ ori(a4, a4, 0x0f0f);
+ __ ori(a4, a4, 0xf0f0);
+ __ addiu(a5, a4, 1);
+ __ addiu(a6, a5, -0x10);
+
+ // Load values in temporary registers.
+ __ li(a4, 0x00000004);
+ __ li(a5, 0x00001234);
+ __ li(a6, 0x12345678);
+ __ li(a7, 0x7fffffff);
+ __ li(t0, 0xfffffffc);
+ __ li(t1, 0xffffedcc);
+ __ li(t2, 0xedcba988);
+ __ li(t3, 0x80000000);
+
+ // SPECIAL class.
+ __ srl(v0, a6, 8); // 0x00123456
+ __ sll(v0, v0, 11); // 0x91a2b000
+ __ sra(v0, v0, 3); // 0xf2345600
+ __ srav(v0, v0, a4); // 0xff234560
+ __ sllv(v0, v0, a4); // 0xf2345600
+ __ srlv(v0, v0, a4); // 0x0f234560
+ __ Branch(&error, ne, v0, Operand(0x0f234560));
+ __ nop();
+
+ __ addu(v0, a4, a5); // 0x00001238
+ __ subu(v0, v0, a4); // 0x00001234
+ __ Branch(&error, ne, v0, Operand(0x00001234));
+ __ nop();
+ __ addu(v1, a7, a4); // The 32-bit addu result is sign-extended into the 64-bit reg.
+ __ Branch(&error, ne, v1, Operand(0xffffffff80000003));
+ __ nop();
+ __ subu(v1, t3, a4); // 0x7ffffffc
+ __ Branch(&error, ne, v1, Operand(0x7ffffffc));
+ __ nop();
+
+ __ and_(v0, a5, a6); // 0x0000000000001230
+ __ or_(v0, v0, a5); // 0x0000000000001234
+ __ xor_(v0, v0, a6); // 0x000000001234444c
+ __ nor(v0, v0, a6); // 0xffffffffedcba987
+ __ Branch(&error, ne, v0, Operand(0xffffffffedcba983));
+ __ nop();
+
+ // Shift both 32-bit numbers to the left to preserve the meaning of the next comparison.
+ __ dsll32(a7, a7, 0);
+ __ dsll32(t3, t3, 0);
+
+ __ slt(v0, t3, a7);
+ __ Branch(&error, ne, v0, Operand(0x1));
+ __ nop();
+ __ sltu(v0, t3, a7);
+ __ Branch(&error, ne, v0, Operand(zero_reg));
+ __ nop();
+
+ // Restore original values in registers.
+ __ dsrl32(a7, a7, 0);
+ __ dsrl32(t3, t3, 0);
+ // End of SPECIAL class.
+
+ __ addiu(v0, zero_reg, 0x7421); // 0x00007421
+ __ addiu(v0, v0, -0x1); // 0x00007420
+ __ addiu(v0, v0, -0x20); // 0x00007400
+ __ Branch(&error, ne, v0, Operand(0x00007400));
+ __ nop();
+ __ addiu(v1, a7, 0x1); // 0x80000000 - result is sign-extended.
+ __ Branch(&error, ne, v1, Operand(0xffffffff80000000));
+ __ nop();
+
+ __ slti(v0, a5, 0x00002000); // 0x1
+ __ slti(v0, v0, 0xffff8000); // 0x0
+ __ Branch(&error, ne, v0, Operand(zero_reg));
+ __ nop();
+ __ sltiu(v0, a5, 0x00002000); // 0x1
+ __ sltiu(v0, v0, 0x00008000); // 0x1
+ __ Branch(&error, ne, v0, Operand(0x1));
+ __ nop();
+
+ __ andi(v0, a5, 0xf0f0); // 0x00001030
+ __ ori(v0, v0, 0x8a00); // 0x00009a30
+ __ xori(v0, v0, 0x83cc); // 0x000019fc
+ __ Branch(&error, ne, v0, Operand(0x000019fc));
+ __ nop();
+ __ lui(v1, 0x8123); // Result is sign-extended into the 64-bit register.
+ __ Branch(&error, ne, v1, Operand(0xffffffff81230000));
+ __ nop();
+
+ // Bit twiddling instructions & conditional moves.
+ // Uses a4-t3 as set above.
+ __ Clz(v0, a4); // 29
+ __ Clz(v1, a5); // 19
+ __ addu(v0, v0, v1); // 48
+ __ Clz(v1, a6); // 3
+ __ addu(v0, v0, v1); // 51
+ __ Clz(v1, t3); // 0
+ __ addu(v0, v0, v1); // 51
+ __ Branch(&error, ne, v0, Operand(51));
+ __ Movn(a0, a7, a4); // Move a0<-a7 (a4 is NOT 0).
+ __ Ins(a0, a5, 12, 8); // 0x7ff34fff
+ __ Branch(&error, ne, a0, Operand(0x7ff34fff));
+ __ Movz(a0, t2, t3); // a0 not updated (t3 is NOT 0).
+ __ Ext(a1, a0, 8, 12); // 0x34f
+ __ Branch(&error, ne, a1, Operand(0x34f));
+ __ Movz(a0, t2, v1); // a0<-t2, v1 is 0, from 8 instr back.
+ __ Branch(&error, ne, a0, Operand(t2));
+
+ // Everything was correctly executed. Load the expected result.
+ __ li(v0, 0x31415926);
+ __ b(&exit);
+ __ nop();
+
+ __ bind(&error);
+ // Got an error. Return a wrong result.
+ __ li(v0, 666);
+
+ __ bind(&exit);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ int64_t res =
+ reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ ::printf("f() = %ld\n", res);
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+
+TEST(MIPS3) {
+ // Test floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ double i;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles t.a ... t.f.
+ MacroAssembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ add_d(f8, f4, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
+
+ __ mov_d(f10, f8); // c
+ __ neg_d(f12, f6); // -b
+ __ sub_d(f10, f10, f12);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
+
+ __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
+
+ __ li(a4, 120);
+ __ mtc1(a4, f14);
+ __ cvt_d_w(f14, f14); // f14 = 120.0.
+ __ mul_d(f10, f10, f14);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
+
+ __ div_d(f12, f10, f4);
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
+
+ __ sqrt_d(f14, f12);
+ __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
+ // g = sqrt(f) = 10.97451593465515908537
+
+ if (kArchVariant == kMips64r2) {
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
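+ // madd_d computes f14 = f4 * f6 + f6, i.e. h * i + i = 1.5 * 2.75 + 2.75 = 6.875.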
+ __ madd_d(f14, f6, f4, f6);
+ __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
+ }
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 0.0;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.h = 1.5;
+ t.i = 2.75;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(1.5e14, t.b);
+ CHECK_EQ(1.50275e14, t.c);
+ CHECK_EQ(1.50550e14, t.d);
+ CHECK_EQ(1.8066e16, t.e);
+ CHECK_EQ(120.44, t.f);
+ CHECK_EQ(10.97451593465515908537, t.g);
+ if (kArchVariant == kMips64r2) {
+ CHECK_EQ(6.875, t.h);
+ }
+}
+
+
+TEST(MIPS4) {
+ // Test moves between floating point and integer registers.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)) );
+
+ // Swap f4 and f5 by using three integer registers, a4-a6:
+ // two 32-bit chunks and one 64-bit chunk.
+ // mXhc1 is mips32/64-r2 only, not r1,
+ // but we will not support r1 in practice.
+ __ mfc1(a4, f4);
+ __ mfhc1(a5, f4);
+ __ dmfc1(a6, f5);
+
+ __ mtc1(a4, f5);
+ __ mthc1(a5, f5);
+ __ dmtc1(a6, f4);
+
+ // Store the swapped f4 and f5 back to memory.
+ __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.a = 1.5e22;
+ t.b = 2.75e11;
+ t.c = 17.17;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(2.75e11, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1.5e22, t.c);
+}
+
+
+TEST(MIPS5) {
+ // Test conversions between doubles and integers.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ int i;
+ int j;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ // Load all structure elements to registers.
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, i)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, j)) );
+
+ // Convert double in f4 to int in element i.
+ __ cvt_w_d(f8, f4);
+ __ mfc1(a6, f8);
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, i)) );
+
+ // Convert double in f6 to int in element j.
+ __ cvt_w_d(f10, f6);
+ __ mfc1(a7, f10);
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, j)) );
+
+ // Convert int in original i (a4) to double in a.
+ __ mtc1(a4, f12);
+ __ cvt_d_w(f0, f12);
+ __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
+
+ // Convert int in original j (a5) to double in b.
+ __ mtc1(a5, f14);
+ __ cvt_d_w(f2, f14);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.a = 1.5e4;
+ t.b = 2.75e8;
+ t.i = 12345678;
+ t.j = -100000;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(12345678.0, t.a);
+ CHECK_EQ(-100000.0, t.b);
+ CHECK_EQ(15000, t.i);
+ CHECK_EQ(275000000, t.j);
+}
+
+
+TEST(MIPS6) {
+ // Test simple memory loads and stores.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint32_t ui;
+ int32_t si;
+ int32_t r1;
+ int32_t r2;
+ int32_t r3;
+ int32_t r4;
+ int32_t r5;
+ int32_t r6;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ // Basic word load/store.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
+
+ // lh with positive data.
+ __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
+
+ // lh with negative data.
+ __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
+
+ // lhu with negative data.
+ __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
+
+ // lb with negative data.
+ __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
+
+ // sh writes only half of the word.
+ __ lui(t1, 0x3333);
+ __ ori(t1, t1, 0x3333);
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
+ __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.ui = 0x11223344;
+ t.si = 0x99aabbcc;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(0x11223344, t.r1);
+ CHECK_EQ(0x3344, t.r2);
+ CHECK_EQ(0xffffbbcc, t.r3);
+ CHECK_EQ(0x0000bbcc, t.r4);
+ CHECK_EQ(0xffffffcc, t.r5);
+ CHECK_EQ(0x3333bbcc, t.r6);
+}
+
+
+TEST(MIPS7) {
+ // Test floating point compare and branch instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ int32_t result;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles t.a ... t.f.
+ MacroAssembler assm(isolate, NULL, 0);
+ Label neither_is_nan, less_than, outa_here;
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ c(UN, D, f4, f6);
+ __ bc1f(&neither_is_nan);
+ __ nop();
+ __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+ __ Branch(&outa_here);
+
+ __ bind(&neither_is_nan);
+
+ if (kArchVariant == kLoongson) {
+ __ c(OLT, D, f6, f4);
+ __ bc1t(&less_than);
+ } else {
+ __ c(OLT, D, f6, f4, 2);
+ __ bc1t(&less_than, 2);
+ }
+ __ nop();
+ __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+ __ Branch(&outa_here);
+
+ __ bind(&less_than);
+ __ Addu(a4, zero_reg, Operand(1));
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
+
+
+ // This test-case should have additional tests.
+
+ __ bind(&outa_here);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 2.0;
+ t.d = -4.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.result = 0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1, t.result);
+}
+
+
+TEST(MIPS8) {
+ // Test ROTR and ROTRV instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int32_t input;
+ int32_t result_rotr_4;
+ int32_t result_rotr_8;
+ int32_t result_rotr_12;
+ int32_t result_rotr_16;
+ int32_t result_rotr_20;
+ int32_t result_rotr_24;
+ int32_t result_rotr_28;
+ int32_t result_rotrv_4;
+ int32_t result_rotrv_8;
+ int32_t result_rotrv_12;
+ int32_t result_rotrv_16;
+ int32_t result_rotrv_20;
+ int32_t result_rotrv_24;
+ int32_t result_rotrv_28;
+ } T;
+ T t;
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Basic word load.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );
+
+ // ROTR instruction (called through the Ror macro).
+ __ Ror(a5, a4, 0x0004);
+ __ Ror(a6, a4, 0x0008);
+ __ Ror(a7, a4, 0x000c);
+ __ Ror(t0, a4, 0x0010);
+ __ Ror(t1, a4, 0x0014);
+ __ Ror(t2, a4, 0x0018);
+ __ Ror(t3, a4, 0x001c);
+
+ // Basic word store.
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
+
+ // ROTRV instruction (called through the Ror macro).
+ __ li(t3, 0x0004);
+ __ Ror(a5, a4, t3);
+ __ li(t3, 0x0008);
+ __ Ror(a6, a4, t3);
+ __ li(t3, 0x000C);
+ __ Ror(a7, a4, t3);
+ __ li(t3, 0x0010);
+ __ Ror(t0, a4, t3);
+ __ li(t3, 0x0014);
+ __ Ror(t1, a4, t3);
+ __ li(t3, 0x0018);
+ __ Ror(t2, a4, t3);
+ __ li(t3, 0x001C);
+ __ Ror(t3, a4, t3);
+
+ // Basic word store.
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.input = 0x12345678;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(0x81234567, t.result_rotr_4);
+ CHECK_EQ(0x78123456, t.result_rotr_8);
+ CHECK_EQ(0x67812345, t.result_rotr_12);
+ CHECK_EQ(0x56781234, t.result_rotr_16);
+ CHECK_EQ(0x45678123, t.result_rotr_20);
+ CHECK_EQ(0x34567812, t.result_rotr_24);
+ CHECK_EQ(0x23456781, t.result_rotr_28);
+
+ CHECK_EQ(0x81234567, t.result_rotrv_4);
+ CHECK_EQ(0x78123456, t.result_rotrv_8);
+ CHECK_EQ(0x67812345, t.result_rotrv_12);
+ CHECK_EQ(0x56781234, t.result_rotrv_16);
+ CHECK_EQ(0x45678123, t.result_rotrv_20);
+ CHECK_EQ(0x34567812, t.result_rotrv_24);
+ CHECK_EQ(0x23456781, t.result_rotrv_28);
+}
+
+
+TEST(MIPS9) {
+ // Test BRANCH improvements.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+ Label exit, exit2, exit3;
+
+ __ Branch(&exit, ge, a0, Operand(zero_reg));
+ __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
+ __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
+
+ __ bind(&exit);
+ __ bind(&exit2);
+ __ bind(&exit3);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+}
+
+
+TEST(MIPS10) {
+ // Test conversions between doubles and long integers.
+ // Test how the long ints map to FP register pairs.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double a_converted;
+ double b;
+ int32_t dbl_mant;
+ int32_t dbl_exp;
+ int32_t long_hi;
+ int32_t long_lo;
+ int64_t long_as_int64;
+ int32_t b_long_hi;
+ int32_t b_long_lo;
+ int64_t b_long_as_int64;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ if (kArchVariant == kMips64r2) {
+ // Rewritten for FR=1 FPU mode:
+ // - 32 FP regs of 64 bits each, no odd/even pairs.
+ // - Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode.
+ // Load all structure elements to registers.
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
+
+ // Save the raw bits of the double.
+ __ mfc1(a4, f0);
+ __ mfhc1(a5, f0);
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
+
+ // Convert double in f0 to long, save hi/lo parts.
+ __ cvt_l_d(f0, f0);
+ __ mfc1(a4, f0); // f0 LS 32 bits of long.
+ __ mfhc1(a5, f0); // f0 MS 32 bits of long.
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, long_lo)));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, long_hi)));
+
+ // Combine the high/low ints, convert back to double.
+ __ dsll32(a6, a5, 0); // Move a5 to high bits of a6.
+ __ or_(a6, a6, a4);
+ __ dmtc1(a6, f1);
+ __ cvt_d_l(f1, f1);
+ __ sdc1(f1, MemOperand(a0, OFFSET_OF(T, a_converted)));
+
+
+ // Convert the b long integers to double b.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, b_long_lo)));
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, b_long_hi)));
+ __ mtc1(a4, f8); // f8 LS 32-bits.
+ __ mthc1(a5, f8); // f8 MS 32-bits.
+ __ cvt_d_l(f10, f8);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
+
+ // Convert double b back to long-int.
+ __ ldc1(f31, MemOperand(a0, OFFSET_OF(T, b)));
+ __ cvt_l_d(f31, f31);
+ __ dmfc1(a7, f31);
+ __ sd(a7, MemOperand(a0, OFFSET_OF(T, b_long_as_int64)));
+
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
+ t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
+ t.b_long_lo = 0x00ff00ff;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(0x41DFFFFF, t.dbl_exp);
+ CHECK_EQ(0xFFC00000, t.dbl_mant);
+ CHECK_EQ(0, t.long_hi);
+ CHECK_EQ(0x7fffffff, t.long_lo);
+ CHECK_EQ(2.147483647e9, t.a_converted);
+
+ // 0xFF00FF00FF -> 1.095233372415e12.
+ CHECK_EQ(1.095233372415e12, t.b);
+ CHECK_EQ(0xFF00FF00FF, t.b_long_as_int64);
+ }
+}
+
+
+TEST(MIPS11) {
+ // Test LWL, LWR, SWL and SWR instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int32_t reg_init;
+ int32_t mem_init;
+ int32_t lwl_0;
+ int32_t lwl_1;
+ int32_t lwl_2;
+ int32_t lwl_3;
+ int32_t lwr_0;
+ int32_t lwr_1;
+ int32_t lwr_2;
+ int32_t lwr_3;
+ int32_t swl_0;
+ int32_t swl_1;
+ int32_t swl_2;
+ int32_t swl_3;
+ int32_t swr_0;
+ int32_t swr_1;
+ int32_t swr_2;
+ int32_t swr_3;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+
+ // Test all combinations of LWL and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) );
+
+ // Test all combinations of LWR and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
+
+ // Test all combinations of SWL and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) );
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) );
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) );
+
+ // Test all combinations of SWR and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) );
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) );
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.reg_init = 0xaabbccdd;
+ t.mem_init = 0x11223344;
+
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(0x44bbccdd, t.lwl_0);
+ CHECK_EQ(0x3344ccdd, t.lwl_1);
+ CHECK_EQ(0x223344dd, t.lwl_2);
+ CHECK_EQ(0x11223344, t.lwl_3);
+
+ CHECK_EQ(0x11223344, t.lwr_0);
+ CHECK_EQ(0xaa112233, t.lwr_1);
+ CHECK_EQ(0xaabb1122, t.lwr_2);
+ CHECK_EQ(0xaabbcc11, t.lwr_3);
+
+ CHECK_EQ(0x112233aa, t.swl_0);
+ CHECK_EQ(0x1122aabb, t.swl_1);
+ CHECK_EQ(0x11aabbcc, t.swl_2);
+ CHECK_EQ(0xaabbccdd, t.swl_3);
+
+ CHECK_EQ(0xaabbccdd, t.swr_0);
+ CHECK_EQ(0xbbccdd44, t.swr_1);
+ CHECK_EQ(0xccdd3344, t.swr_2);
+ CHECK_EQ(0xdd223344, t.swr_3);
+}
+
+
+TEST(MIPS12) {
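+ // Exercise mixed push/pop and memory access sequences; the inline comments
+ // mark instruction pairs that are expected to disappear after optimization.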
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int32_t x;
+ int32_t y;
+ int32_t y1;
+ int32_t y2;
+ int32_t y3;
+ int32_t y4;
+ } T;
+ T t;
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ mov(t2, fp); // Save frame pointer.
+ __ mov(fp, a0); // Access struct T by fp.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)) );
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)) );
+
+ __ addu(a5, a4, a7);
+ __ subu(t0, a4, a7);
+ __ nop();
+ __ push(a4); // These instructions disappear after opt.
+ __ Pop();
+ __ addu(a4, a4, a4);
+ __ nop();
+ __ Pop(); // These instructions disappear after opt.
+ __ push(a7);
+ __ nop();
+ __ push(a7); // These instructions disappear after opt.
+ __ pop(a7);
+ __ nop();
+ __ push(a7);
+ __ pop(t0);
+ __ nop();
+ __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ nop();
+ __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ nop();
+ __ push(a5);
+ __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ pop(a5);
+ __ nop();
+ __ push(a5);
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ pop(a5);
+ __ nop();
+ __ push(a5);
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ pop(a6);
+ __ nop();
+ __ push(a6);
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ pop(a5);
+ __ nop();
+ __ push(a5);
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ pop(a7);
+ __ nop();
+
+ __ mov(fp, t2);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.x = 1;
+ t.y = 2;
+ t.y1 = 3;
+ t.y2 = 4;
+ t.y3 = 0XBABA;
+ t.y4 = 0xDEDA;
+
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(3, t.y1);
+}
+
+
+TEST(MIPS13) {
+ // Test Cvt_d_uw and Trunc_uw_d macros.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double cvt_big_out;
+ double cvt_small_out;
+ uint32_t trunc_big_out;
+ uint32_t trunc_small_out;
+ uint32_t cvt_big_in;
+ uint32_t cvt_small_in;
+ } T;
+ T t;
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
+ __ Cvt_d_uw(f10, a4, f22);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
+
+ __ Trunc_uw_d(f10, f10, f22);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
+
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
+ __ Cvt_d_uw(f8, a4, f22);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
+
+ __ Trunc_uw_d(f8, f8, f22);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ t.cvt_big_in = 0xFFFFFFFF;
+ t.cvt_small_in = 333;
+
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
+ CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
+
+ CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
+ CHECK_EQ(static_cast<int>(t.trunc_small_out),
+ static_cast<int>(t.cvt_small_in));
+}
+
+
+TEST(MIPS14) {
+ // Test round, floor, ceil, trunc, cvt.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
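+ // Helper macro: declare one block of result fields per rounding instruction
+ // under test.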
+#define ROUND_STRUCT_ELEMENT(x) \
+ int32_t x##_up_out; \
+ int32_t x##_down_out; \
+ int32_t neg_##x##_up_out; \
+ int32_t neg_##x##_down_out; \
+ uint32_t x##_err1_out; \
+ uint32_t x##_err2_out; \
+ uint32_t x##_err3_out; \
+ uint32_t x##_err4_out; \
+ int32_t x##_invalid_result;
+
+ typedef struct {
+ double round_up_in;
+ double round_down_in;
+ double neg_round_up_in;
+ double neg_round_down_in;
+ double err1_in;
+ double err2_in;
+ double err3_in;
+ double err4_in;
+
+ ROUND_STRUCT_ELEMENT(round)
+ ROUND_STRUCT_ELEMENT(floor)
+ ROUND_STRUCT_ELEMENT(ceil)
+ ROUND_STRUCT_ELEMENT(trunc)
+ ROUND_STRUCT_ELEMENT(cvt)
+ } T;
+ T t;
+
+#undef ROUND_STRUCT_ELEMENT
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+#define RUN_ROUND_TEST(x) \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
+
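+ // Each invocation below instantiates the sequence above for one conversion
+ // instruction (token pasting turns e.g. RUN_ROUND_TEST(round) into calls to
+ // round_w_d), recording both the converted values and the FCSR flag state
+ // for the error inputs.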
+ RUN_ROUND_TEST(round)
+ RUN_ROUND_TEST(floor)
+ RUN_ROUND_TEST(ceil)
+ RUN_ROUND_TEST(trunc)
+ RUN_ROUND_TEST(cvt)
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ t.round_up_in = 123.51;
+ t.round_down_in = 123.49;
+ t.neg_round_up_in = -123.5;
+ t.neg_round_down_in = -123.49;
+ t.err1_in = 123.51;
+ t.err2_in = 1;
+ t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
+ t.err4_in = NAN;
+
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
+#define CHECK_ROUND_RESULT(type) \
+ CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
+ CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
+ CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
+ CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
+ CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);
+
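+ // For reference: err1_in (123.51) is inexact, err2_in (1) converts exactly,
+ // err3_in (2^32) overflows and err4_in (NaN) is invalid; the NaN conversion
+ // is also expected to yield kFPUInvalidResult.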
+ CHECK_ROUND_RESULT(round);
+ CHECK_ROUND_RESULT(floor);
+ CHECK_ROUND_RESULT(ceil);
+ CHECK_ROUND_RESULT(cvt);
+}
+
+
+TEST(MIPS15) {
+ // Test chaining of label usages within instructions (issue 1644).
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, NULL, 0);
+
+ Label target;
+ __ beq(v0, v1, &target);
+ __ nop();
+ __ bne(v0, v1, &target);
+ __ nop();
+ __ bind(&target);
+ __ nop();
+}
+
+
+// ----- mips64 tests -----------------------------------------------
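+// Note: the register names below follow the MIPS n64 calling convention,
+// in which a4-a7 are additional argument registers occupying the encodings
+// that the O32 ABI names t0-t3.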
+
+TEST(MIPS16) {
+ // Test 64-bit memory loads and stores.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int64_t r1;
+ int64_t r2;
+ int64_t r3;
+ int64_t r4;
+ int64_t r5;
+ int64_t r6;
+ uint32_t ui;
+ int32_t si;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ // Basic 32-bit word load/store, with unsigned data.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
+
+ // Check that the data got zero-extended into 64-bit a4.
+ __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)) );
+
+ // Basic 32-bit word load/store, with SIGNED data.
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)) );
+
+ // Check that the data got sign-extended into 64-bit a5.
+ __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)) );
+
+ // 32-bit UNSIGNED word load/store, with SIGNED data.
+ __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)) );
+
+ // Check that the data got zero-extended into 64-bit a6.
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)) );
+
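+ // The halfword/byte loads below address the low-order end of ui/si, so on
+ // this little-endian target lh/lhu/lb observe 0x2211, 0xbbcc and 0xcc
+ // respectively (compare the CHECKs at the end of the test).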
+ // lh with positive data.
+ __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
+
+ // lh with negative data.
+ __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
+
+ // lhu with negative data.
+ __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
+
+ // lb with negative data.
+ __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
+
+ // sh writes only 1/2 of word.
+ __ lui(t1, 0x3333);
+ __ ori(t1, t1, 0x3333);
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
+ __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
+ __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.ui = 0x44332211;
+ t.si = 0x99aabbcc;
+ t.r1 = 0x1111111111111111;
+ t.r2 = 0x2222222222222222;
+ t.r3 = 0x3333333333333333;
+ t.r4 = 0x4444444444444444;
+ t.r5 = 0x5555555555555555;
+ t.r6 = 0x6666666666666666;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ // Unsigned data, 32 & 64.
+ CHECK_EQ(0x1111111144332211L, t.r1);
+ CHECK_EQ(0x0000000000002211L, t.r2);
+
+ // Signed data, 32 & 64.
+ CHECK_EQ(0x33333333ffffbbccL, t.r3);
+ CHECK_EQ(0xffffffff0000bbccL, t.r4);
+
+ // Signed data, 32 & 64.
+ CHECK_EQ(0x55555555ffffffccL, t.r5);
+ CHECK_EQ(0x000000003333bbccL, t.r6);
+}
+
+#undef __
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/base/platform/platform.h"
+#include "src/code-stubs.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/constants-mips64.h"
+#include "src/simulator.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/test-code-stubs.h"
+
+using namespace v8::internal;
+
+#define __ masm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg,
+ bool inline_fastpath) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
+ inline_fastpath);
+
+ byte* start = stub.GetCode()->instruction_start();
+ Label done;
+
+ // Save callee save registers.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // For softfp, move the input value into f12.
+ if (IsMipsSoftFloatABI) {
+ __ Move(f12, a0, a1);
+ }
+ // Push the double argument.
+ __ Dsubu(sp, sp, Operand(kDoubleSize));
+ __ sdc1(f12, MemOperand(sp));
+ __ Move(source_reg, sp);
+
+ // Save registers to make sure they don't get clobbered.
+ int source_reg_offset = kDoubleSize;
+ int reg_num = 2;
+ for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ push(reg);
+ source_reg_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument.
+ __ Dsubu(sp, sp, Operand(kDoubleSize));
+ __ sdc1(f12, MemOperand(sp));
+
+ // Call through to the actual stub
+ if (inline_fastpath) {
+ __ ldc1(f12, MemOperand(source_reg));
+ __ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
+ if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
+ // Restore clobbered source_reg.
+ __ Daddu(source_reg, sp, Operand(source_reg_offset));
+ }
+ }
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+ __ bind(&done);
+
+ __ Daddu(sp, sp, Operand(kDoubleSize));
+
+ // Make sure no registers have been unexpectedly clobbered
+ for (--reg_num; reg_num >= 2; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ // Use a 64-bit load: push() stores a full 64-bit word on mips64.
+ __ ld(at, MemOperand(sp, 0));
+ __ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
+ __ Daddu(sp, sp, Operand(kPointerSize));
+ }
+ }
+
+ __ Daddu(sp, sp, Operand(kDoubleSize));
+
+ __ Move(v0, destination_reg);
+ Label ok;
+ __ Branch(&ok, eq, v0, Operand(zero_reg));
+ __ bind(&ok);
+
+ // Restore callee save registers.
+ __ MultiPop(kCalleeSaved | ra.bit());
+
+ Label ok1;
+ __ Branch(&ok1, eq, v0, Operand(zero_reg));
+ __ bind(&ok1);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ return (reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer)));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
+ double from) {
+#ifdef USE_SIMULATOR
+ Simulator::current(Isolate::Current())->CallFP(FUNCTION_ADDR(func), from, 0.);
+ return Simulator::current(Isolate::Current())->get_register(v0.code());
+#else
+ return (*func)(from);
+#endif
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In the release
+ // build the compiler optimizes it away because it is all constant, but it
+ // does so incorrectly, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {
+ sp, v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
+ Register dest_registers[] = {
+ v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
+
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ false));
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ true));
+ }
+ }
+}
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ EmbeddedVector<char, 128> disasm_buffer;
+
+ disasm.InstructionDecode(disasm_buffer, pc);
+
+ if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ fprintf(stderr,
+ "expected: \n"
+ "%s\n"
+ "disassembled: \n"
+ "%s\n\n",
+ compare_string, disasm_buffer.start());
+ return false;
+ }
+ return true;
+}
+
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
+ Assembler assm(isolate, buffer, 4*1024); \
+ bool failure = false;
+
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte *progcounter = &buffer[pc_offset]; \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+ }
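+
+// The expected strings passed to COMPARE follow the disassembler's output
+// format: the hex-encoded instruction word, whitespace, then the mnemonic
+// and operands (see the uses in TEST(Type0) below).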
+
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN() \
+if (failure) { \
+ V8_Fatal(__FILE__, __LINE__, "MIPS Disassembler tests failed.\n"); \
+ }
+
+
+TEST(Type0) {
+ SET_UP();
+
+ COMPARE(addu(a0, a1, a2),
+ "00a62021 addu a0, a1, a2");
+ COMPARE(daddu(a0, a1, a2),
+ "00a6202d daddu a0, a1, a2");
+ COMPARE(addu(a6, a7, t0),
+ "016c5021 addu a6, a7, t0");
+ COMPARE(daddu(a6, a7, t0),
+ "016c502d daddu a6, a7, t0");
+ COMPARE(addu(v0, v1, s0),
+ "00701021 addu v0, v1, s0");
+ COMPARE(daddu(v0, v1, s0),
+ "0070102d daddu v0, v1, s0");
+
+ COMPARE(subu(a0, a1, a2),
+ "00a62023 subu a0, a1, a2");
+ COMPARE(dsubu(a0, a1, a2),
+ "00a6202f dsubu a0, a1, a2");
+ COMPARE(subu(a6, a7, t0),
+ "016c5023 subu a6, a7, t0");
+ COMPARE(dsubu(a6, a7, t0),
+ "016c502f dsubu a6, a7, t0");
+ COMPARE(subu(v0, v1, s0),
+ "00701023 subu v0, v1, s0");
+ COMPARE(dsubu(v0, v1, s0),
+ "0070102f dsubu v0, v1, s0");
+
+ COMPARE(mult(a0, a1),
+ "00850018 mult a0, a1");
+ COMPARE(dmult(a0, a1),
+ "0085001c dmult a0, a1");
+ COMPARE(mult(a6, a7),
+ "014b0018 mult a6, a7");
+ COMPARE(dmult(a6, a7),
+ "014b001c dmult a6, a7");
+ COMPARE(mult(v0, v1),
+ "00430018 mult v0, v1");
+ COMPARE(dmult(v0, v1),
+ "0043001c dmult v0, v1");
+
+ COMPARE(multu(a0, a1),
+ "00850019 multu a0, a1");
+ COMPARE(dmultu(a0, a1),
+ "0085001d dmultu a0, a1");
+ COMPARE(multu(a6, a7),
+ "014b0019 multu a6, a7");
+ COMPARE(dmultu(a6, a7),
+ "014b001d dmultu a6, a7");
+ COMPARE(multu(v0, v1),
+ "00430019 multu v0, v1");
+ COMPARE(dmultu(v0, v1),
+ "0043001d dmultu v0, v1");
+
+ COMPARE(div(a0, a1),
+ "0085001a div a0, a1");
+ COMPARE(div(a6, a7),
+ "014b001a div a6, a7");
+ COMPARE(div(v0, v1),
+ "0043001a div v0, v1");
+ COMPARE(ddiv(a0, a1),
+ "0085001e ddiv a0, a1");
+ COMPARE(ddiv(a6, a7),
+ "014b001e ddiv a6, a7");
+ COMPARE(ddiv(v0, v1),
+ "0043001e ddiv v0, v1");
+
+ COMPARE(divu(a0, a1),
+ "0085001b divu a0, a1");
+ COMPARE(divu(a6, a7),
+ "014b001b divu a6, a7");
+ COMPARE(divu(v0, v1),
+ "0043001b divu v0, v1");
+ COMPARE(ddivu(a0, a1),
+ "0085001f ddivu a0, a1");
+ COMPARE(ddivu(a6, a7),
+ "014b001f ddivu a6, a7");
+ COMPARE(ddivu(v0, v1),
+ "0043001f ddivu v0, v1");
+
+ if (kArchVariant != kLoongson) {
+ COMPARE(mul(a0, a1, a2),
+ "70a62002 mul a0, a1, a2");
+ COMPARE(mul(a6, a7, t0),
+ "716c5002 mul a6, a7, t0");
+ COMPARE(mul(v0, v1, s0),
+ "70701002 mul v0, v1, s0");
+ }
+
+ COMPARE(addiu(a0, a1, 0x0),
+ "24a40000 addiu a0, a1, 0");
+ COMPARE(addiu(s0, s1, 32767),
+ "26307fff addiu s0, s1, 32767");
+ COMPARE(addiu(a6, a7, -32768),
+ "256a8000 addiu a6, a7, -32768");
+ COMPARE(addiu(v0, v1, -1),
+ "2462ffff addiu v0, v1, -1");
+ COMPARE(daddiu(a0, a1, 0x0),
+ "64a40000 daddiu a0, a1, 0");
+ COMPARE(daddiu(s0, s1, 32767),
+ "66307fff daddiu s0, s1, 32767");
+ COMPARE(daddiu(a6, a7, -32768),
+ "656a8000 daddiu a6, a7, -32768");
+ COMPARE(daddiu(v0, v1, -1),
+ "6462ffff daddiu v0, v1, -1");
+
+ COMPARE(and_(a0, a1, a2),
+ "00a62024 and a0, a1, a2");
+ COMPARE(and_(s0, s1, s2),
+ "02328024 and s0, s1, s2");
+ COMPARE(and_(a6, a7, t0),
+ "016c5024 and a6, a7, t0");
+ COMPARE(and_(v0, v1, a2),
+ "00661024 and v0, v1, a2");
+
+ COMPARE(or_(a0, a1, a2),
+ "00a62025 or a0, a1, a2");
+ COMPARE(or_(s0, s1, s2),
+ "02328025 or s0, s1, s2");
+ COMPARE(or_(a6, a7, t0),
+ "016c5025 or a6, a7, t0");
+ COMPARE(or_(v0, v1, a2),
+ "00661025 or v0, v1, a2");
+
+ COMPARE(xor_(a0, a1, a2),
+ "00a62026 xor a0, a1, a2");
+ COMPARE(xor_(s0, s1, s2),
+ "02328026 xor s0, s1, s2");
+ COMPARE(xor_(a6, a7, t0),
+ "016c5026 xor a6, a7, t0");
+ COMPARE(xor_(v0, v1, a2),
+ "00661026 xor v0, v1, a2");
+
+ COMPARE(nor(a0, a1, a2),
+ "00a62027 nor a0, a1, a2");
+ COMPARE(nor(s0, s1, s2),
+ "02328027 nor s0, s1, s2");
+ COMPARE(nor(a6, a7, t0),
+ "016c5027 nor a6, a7, t0");
+ COMPARE(nor(v0, v1, a2),
+ "00661027 nor v0, v1, a2");
+
+ COMPARE(andi(a0, a1, 0x1),
+ "30a40001 andi a0, a1, 0x1");
+ COMPARE(andi(v0, v1, 0xffff),
+ "3062ffff andi v0, v1, 0xffff");
+
+ COMPARE(ori(a0, a1, 0x1),
+ "34a40001 ori a0, a1, 0x1");
+ COMPARE(ori(v0, v1, 0xffff),
+ "3462ffff ori v0, v1, 0xffff");
+
+ COMPARE(xori(a0, a1, 0x1),
+ "38a40001 xori a0, a1, 0x1");
+ COMPARE(xori(v0, v1, 0xffff),
+ "3862ffff xori v0, v1, 0xffff");
+
+ COMPARE(lui(a0, 0x1),
+ "3c040001 lui a0, 0x1");
+ COMPARE(lui(v0, 0xffff),
+ "3c02ffff lui v0, 0xffff");
+
+ COMPARE(sll(a0, a1, 0),
+ "00052000 sll a0, a1, 0");
+ COMPARE(sll(s0, s1, 8),
+ "00118200 sll s0, s1, 8");
+ COMPARE(sll(a6, a7, 24),
+ "000b5600 sll a6, a7, 24");
+ COMPARE(sll(v0, v1, 31),
+ "000317c0 sll v0, v1, 31");
+ COMPARE(dsll(a0, a1, 0),
+ "00052038 dsll a0, a1, 0");
+ COMPARE(dsll(s0, s1, 8),
+ "00118238 dsll s0, s1, 8");
+ COMPARE(dsll(a6, a7, 24),
+ "000b5638 dsll a6, a7, 24");
+ COMPARE(dsll(v0, v1, 31),
+ "000317f8 dsll v0, v1, 31");
+
+ COMPARE(sllv(a0, a1, a2),
+ "00c52004 sllv a0, a1, a2");
+ COMPARE(sllv(s0, s1, s2),
+ "02518004 sllv s0, s1, s2");
+ COMPARE(sllv(a6, a7, t0),
+ "018b5004 sllv a6, a7, t0");
+ COMPARE(sllv(v0, v1, fp),
+ "03c31004 sllv v0, v1, fp");
+ COMPARE(dsllv(a0, a1, a2),
+ "00c52014 dsllv a0, a1, a2");
+ COMPARE(dsllv(s0, s1, s2),
+ "02518014 dsllv s0, s1, s2");
+ COMPARE(dsllv(a6, a7, t0),
+ "018b5014 dsllv a6, a7, t0");
+ COMPARE(dsllv(v0, v1, fp),
+ "03c31014 dsllv v0, v1, fp");
+
+ COMPARE(srl(a0, a1, 0),
+ "00052002 srl a0, a1, 0");
+ COMPARE(srl(s0, s1, 8),
+ "00118202 srl s0, s1, 8");
+ COMPARE(srl(a6, a7, 24),
+ "000b5602 srl a6, a7, 24");
+ COMPARE(srl(v0, v1, 31),
+ "000317c2 srl v0, v1, 31");
+ COMPARE(dsrl(a0, a1, 0),
+ "0005203a dsrl a0, a1, 0");
+ COMPARE(dsrl(s0, s1, 8),
+ "0011823a dsrl s0, s1, 8");
+ COMPARE(dsrl(a6, a7, 24),
+ "000b563a dsrl a6, a7, 24");
+ COMPARE(dsrl(v0, v1, 31),
+ "000317fa dsrl v0, v1, 31");
+
+ COMPARE(srlv(a0, a1, a2),
+ "00c52006 srlv a0, a1, a2");
+ COMPARE(srlv(s0, s1, s2),
+ "02518006 srlv s0, s1, s2");
+ COMPARE(srlv(a6, a7, t0),
+ "018b5006 srlv a6, a7, t0");
+ COMPARE(srlv(v0, v1, fp),
+ "03c31006 srlv v0, v1, fp");
+ COMPARE(dsrlv(a0, a1, a2),
+ "00c52016 dsrlv a0, a1, a2");
+ COMPARE(dsrlv(s0, s1, s2),
+ "02518016 dsrlv s0, s1, s2");
+ COMPARE(dsrlv(a6, a7, t0),
+ "018b5016 dsrlv a6, a7, t0");
+ COMPARE(dsrlv(v0, v1, fp),
+ "03c31016 dsrlv v0, v1, fp");
+
+ COMPARE(sra(a0, a1, 0),
+ "00052003 sra a0, a1, 0");
+ COMPARE(sra(s0, s1, 8),
+ "00118203 sra s0, s1, 8");
+ COMPARE(sra(a6, a7, 24),
+ "000b5603 sra a6, a7, 24");
+ COMPARE(sra(v0, v1, 31),
+ "000317c3 sra v0, v1, 31");
+ COMPARE(dsra(a0, a1, 0),
+ "0005203b dsra a0, a1, 0");
+ COMPARE(dsra(s0, s1, 8),
+ "0011823b dsra s0, s1, 8");
+ COMPARE(dsra(a6, a7, 24),
+ "000b563b dsra a6, a7, 24");
+ COMPARE(dsra(v0, v1, 31),
+ "000317fb dsra v0, v1, 31");
+
+ COMPARE(srav(a0, a1, a2),
+ "00c52007 srav a0, a1, a2");
+ COMPARE(srav(s0, s1, s2),
+ "02518007 srav s0, s1, s2");
+ COMPARE(srav(a6, a7, t0),
+ "018b5007 srav a6, a7, t0");
+ COMPARE(srav(v0, v1, fp),
+ "03c31007 srav v0, v1, fp");
+ COMPARE(dsrav(a0, a1, a2),
+ "00c52017 dsrav a0, a1, a2");
+ COMPARE(dsrav(s0, s1, s2),
+ "02518017 dsrav s0, s1, s2");
+ COMPARE(dsrav(a6, a7, t0),
+ "018b5017 dsrav a6, a7, t0");
+ COMPARE(dsrav(v0, v1, fp),
+ "03c31017 dsrav v0, v1, fp");
+
+ if (kArchVariant == kMips64r2) {
+ COMPARE(rotr(a0, a1, 0),
+ "00252002 rotr a0, a1, 0");
+ COMPARE(rotr(s0, s1, 8),
+ "00318202 rotr s0, s1, 8");
+ COMPARE(rotr(a6, a7, 24),
+ "002b5602 rotr a6, a7, 24");
+ COMPARE(rotr(v0, v1, 31),
+ "002317c2 rotr v0, v1, 31");
+ COMPARE(drotr(a0, a1, 0),
+ "0025203a drotr a0, a1, 0");
+ COMPARE(drotr(s0, s1, 8),
+ "0031823a drotr s0, s1, 8");
+ COMPARE(drotr(a6, a7, 24),
+ "002b563a drotr a6, a7, 24");
+ COMPARE(drotr(v0, v1, 31),
+ "002317fa drotr v0, v1, 31");
+
+ COMPARE(rotrv(a0, a1, a2),
+ "00c52046 rotrv a0, a1, a2");
+ COMPARE(rotrv(s0, s1, s2),
+ "02518046 rotrv s0, s1, s2");
+ COMPARE(rotrv(a6, a7, t0),
+ "018b5046 rotrv a6, a7, t0");
+ COMPARE(rotrv(v0, v1, fp),
+ "03c31046 rotrv v0, v1, fp");
+ COMPARE(drotrv(a0, a1, a2),
+ "00c52056 drotrv a0, a1, a2");
+ COMPARE(drotrv(s0, s1, s2),
+ "02518056 drotrv s0, s1, s2");
+ COMPARE(drotrv(a6, a7, t0),
+ "018b5056 drotrv a6, a7, t0");
+ COMPARE(drotrv(v0, v1, fp),
+ "03c31056 drotrv v0, v1, fp");
+ }
+
+ COMPARE(break_(0),
+ "0000000d break, code: 0x00000 (0)");
+ COMPARE(break_(261120),
+ "00ff000d break, code: 0x3fc00 (261120)");
+ COMPARE(break_(1047552),
+ "03ff000d break, code: 0xffc00 (1047552)");
+
+ COMPARE(tge(a0, a1, 0),
+ "00850030 tge a0, a1, code: 0x000");
+ COMPARE(tge(s0, s1, 1023),
+ "0211fff0 tge s0, s1, code: 0x3ff");
+ COMPARE(tgeu(a0, a1, 0),
+ "00850031 tgeu a0, a1, code: 0x000");
+ COMPARE(tgeu(s0, s1, 1023),
+ "0211fff1 tgeu s0, s1, code: 0x3ff");
+ COMPARE(tlt(a0, a1, 0),
+ "00850032 tlt a0, a1, code: 0x000");
+ COMPARE(tlt(s0, s1, 1023),
+ "0211fff2 tlt s0, s1, code: 0x3ff");
+ COMPARE(tltu(a0, a1, 0),
+ "00850033 tltu a0, a1, code: 0x000");
+ COMPARE(tltu(s0, s1, 1023),
+ "0211fff3 tltu s0, s1, code: 0x3ff");
+ COMPARE(teq(a0, a1, 0),
+ "00850034 teq a0, a1, code: 0x000");
+ COMPARE(teq(s0, s1, 1023),
+ "0211fff4 teq s0, s1, code: 0x3ff");
+ COMPARE(tne(a0, a1, 0),
+ "00850036 tne a0, a1, code: 0x000");
+ COMPARE(tne(s0, s1, 1023),
+ "0211fff6 tne s0, s1, code: 0x3ff");
+
+ COMPARE(mfhi(a0),
+ "00002010 mfhi a0");
+ COMPARE(mfhi(s2),
+ "00009010 mfhi s2");
+ COMPARE(mfhi(t0),
+ "00006010 mfhi t0");
+ COMPARE(mfhi(v1),
+ "00001810 mfhi v1");
+ COMPARE(mflo(a0),
+ "00002012 mflo a0");
+ COMPARE(mflo(s2),
+ "00009012 mflo s2");
+ COMPARE(mflo(t0),
+ "00006012 mflo t0");
+ COMPARE(mflo(v1),
+ "00001812 mflo v1");
+
+ COMPARE(slt(a0, a1, a2),
+ "00a6202a slt a0, a1, a2");
+ COMPARE(slt(s0, s1, s2),
+ "0232802a slt s0, s1, s2");
+ COMPARE(slt(a6, a7, t0),
+ "016c502a slt a6, a7, t0");
+ COMPARE(slt(v0, v1, a2),
+ "0066102a slt v0, v1, a2");
+ COMPARE(sltu(a0, a1, a2),
+ "00a6202b sltu a0, a1, a2");
+ COMPARE(sltu(s0, s1, s2),
+ "0232802b sltu s0, s1, s2");
+ COMPARE(sltu(a6, a7, t0),
+ "016c502b sltu a6, a7, t0");
+ COMPARE(sltu(v0, v1, a2),
+ "0066102b sltu v0, v1, a2");
+
+ COMPARE(slti(a0, a1, 0),
+ "28a40000 slti a0, a1, 0");
+ COMPARE(slti(s0, s1, 32767),
+ "2a307fff slti s0, s1, 32767");
+ COMPARE(slti(a6, a7, -32768),
+ "296a8000 slti a6, a7, -32768");
+ COMPARE(slti(v0, v1, -1),
+ "2862ffff slti v0, v1, -1");
+ COMPARE(sltiu(a0, a1, 0),
+ "2ca40000 sltiu a0, a1, 0");
+ COMPARE(sltiu(s0, s1, 32767),
+ "2e307fff sltiu s0, s1, 32767");
+ COMPARE(sltiu(a6, a7, -32768),
+ "2d6a8000 sltiu a6, a7, -32768");
+ COMPARE(sltiu(v0, v1, -1),
+ "2c62ffff sltiu v0, v1, -1");
+
+ if (kArchVariant != kLoongson) {
+ COMPARE(movz(a0, a1, a2),
+ "00a6200a movz a0, a1, a2");
+ COMPARE(movz(s0, s1, s2),
+ "0232800a movz s0, s1, s2");
+ COMPARE(movz(a6, a7, t0),
+ "016c500a movz a6, a7, t0");
+ COMPARE(movz(v0, v1, a2),
+ "0066100a movz v0, v1, a2");
+ COMPARE(movn(a0, a1, a2),
+ "00a6200b movn a0, a1, a2");
+ COMPARE(movn(s0, s1, s2),
+ "0232800b movn s0, s1, s2");
+ COMPARE(movn(a6, a7, t0),
+ "016c500b movn a6, a7, t0");
+ COMPARE(movn(v0, v1, a2),
+ "0066100b movn v0, v1, a2");
+
+ COMPARE(movt(a0, a1, 1),
+ "00a52001 movt a0, a1, 1");
+ COMPARE(movt(s0, s1, 2),
+ "02298001 movt s0, s1, 2");
+ COMPARE(movt(a6, a7, 3),
+ "016d5001 movt a6, a7, 3");
+ COMPARE(movt(v0, v1, 7),
+ "007d1001 movt v0, v1, 7");
+ COMPARE(movf(a0, a1, 0),
+ "00a02001 movf a0, a1, 0");
+ COMPARE(movf(s0, s1, 4),
+ "02308001 movf s0, s1, 4");
+ COMPARE(movf(a6, a7, 5),
+ "01745001 movf a6, a7, 5");
+ COMPARE(movf(v0, v1, 6),
+ "00781001 movf v0, v1, 6");
+
+ COMPARE(clz(a0, a1),
+ "70a42020 clz a0, a1");
+ COMPARE(clz(s6, s7),
+ "72f6b020 clz s6, s7");
+ COMPARE(clz(v0, v1),
+ "70621020 clz v0, v1");
+ }
+
+ if (kArchVariant == kMips64r2) {
+ COMPARE(ins_(a0, a1, 31, 1),
+ "7ca4ffc4 ins a0, a1, 31, 1");
+ COMPARE(ins_(s6, s7, 30, 2),
+ "7ef6ff84 ins s6, s7, 30, 2");
+ COMPARE(ins_(v0, v1, 0, 32),
+ "7c62f804 ins v0, v1, 0, 32");
+ COMPARE(ext_(a0, a1, 31, 1),
+ "7ca407c0 ext a0, a1, 31, 1");
+ COMPARE(ext_(s6, s7, 30, 2),
+ "7ef60f80 ext s6, s7, 30, 2");
+ COMPARE(ext_(v0, v1, 0, 32),
+ "7c62f800 ext v0, v1, 0, 32");
+ }
+
+ VERIFY_RUN();
+}
__ Pop(xzr, root);
__ Ret();
__ SetStackPointer(old_stack_pointer);
-#elif V8_TARGET_ARCH_MIPS
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
__ push(kRootRegister);
__ InitializeRootRegister();
- __ li(v0, Operand(0));
+ __ mov(v0, zero_reg);
__ li(t1, Operand(string.at(0)));
StringHelper::GenerateHashInit(masm, v0, t1);
for (int i = 1; i < string.length(); i++) {
__ Pop(xzr, root);
__ Ret();
__ SetStackPointer(old_stack_pointer);
-#elif V8_TARGET_ARCH_MIPS
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
__ push(kRootRegister);
__ InitializeRootRegister();
__ li(v0, Operand(key));
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Handle<Smi>::cast(value)->value());
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64) && \
+ !defined(V8_TARGET_ARCH_MIPS64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = factory->NewNumberFromInt(Smi::kMinValue - 1);
CHECK(value->IsHeapNumber());
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/macro-assembler.h"
+#include "src/mips64/macro-assembler-mips64.h"
+#include "src/mips64/simulator-mips64.h"
+
+
+using namespace v8::internal;
+
+typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
+
+#define __ masm->
+
+
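+// to_non_zero() maps any index into the range [1, 255], so the source buffer
+// never contains zero bytes and all_zeroes() can detect stray writes outside
+// the requested copy range.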
+static byte to_non_zero(int n) {
+ return static_cast<unsigned>(n) % 255 + 1;
+}
+
+
+static bool all_zeroes(const byte* beg, const byte* end) {
+ CHECK(beg);
+ CHECK(beg <= end);
+ while (beg < end) {
+ if (*beg++ != 0)
+ return false;
+ }
+ return true;
+}
+
+
+TEST(CopyBytes) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+
+ const int data_size = 1 * KB;
+ size_t act_size;
+
+ // Allocate two blocks to copy data between.
+ byte* src_buffer =
+ static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
+ CHECK(src_buffer);
+ CHECK(act_size >= static_cast<size_t>(data_size));
+ byte* dest_buffer =
+ static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
+ CHECK(dest_buffer);
+ CHECK(act_size >= static_cast<size_t>(data_size));
+
+ // Storage for a0 and a1.
+ byte* a0_;
+ byte* a1_;
+
+ MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler* masm = &assembler;
+
+ // Code to be generated: the CopyBytes sequence, followed by stores of the
+ // final values of a0 and a1, respectively.
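+ // (Assumed calling convention, matching the CALL_GENERATED_CODE invocation
+ // below: a0 = source, a1 = destination, a2 = byte count. CopyBytes leaves
+ // a0 and a1 pointing just past the copied data, which is what gets checked.)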
+ __ CopyBytes(a0, a1, a2, a3);
+ __ li(a2, Operand(reinterpret_cast<int64_t>(&a0_)));
+ __ li(a3, Operand(reinterpret_cast<int64_t>(&a1_)));
+ __ sd(a0, MemOperand(a2));
+ __ jr(ra);
+ __ sd(a1, MemOperand(a3));
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ ::F f = FUNCTION_CAST< ::F>(code->entry());
+
+ // Initialise source data with non-zero bytes.
+ for (int i = 0; i < data_size; i++) {
+ src_buffer[i] = to_non_zero(i);
+ }
+
+ const int fuzz = 11;
+
+ for (int size = 0; size < 600; size++) {
+ for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
+ for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
+ memset(dest_buffer, 0, data_size);
+ CHECK(dest + size < dest_buffer + data_size);
+ (void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(src),
+ reinterpret_cast<int64_t>(dest),
+ size, 0, 0);
+ // a0 and a1 should point at the first byte after the copied data.
+ CHECK_EQ(src + size, a0_);
+ CHECK_EQ(dest + size, a1_);
+ // Check that we haven't written outside the target area.
+ CHECK(all_zeroes(dest_buffer, dest));
+ CHECK(all_zeroes(dest + size, dest_buffer + data_size));
+ // Check the target area.
+ CHECK_EQ(0, memcmp(src, dest, size));
+ }
+ }
+ }
+
+ // Check that the source data hasn't been clobbered.
+ for (int i = 0; i < data_size; i++) {
+ CHECK(src_buffer[i] == to_non_zero(i));
+ }
+}
+
+
+TEST(LoadConstants) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+
+ int64_t refConstants[64];
+ int64_t result[64];
+
+ int64_t mask = 1;
+ for (int i = 0; i < 64; i++) {
+ refConstants[i] = ~(mask << i);
+ }
+
+ MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler* masm = &assembler;
+
+ __ mov(a4, a0);
+ for (int i = 0; i < 64; i++) {
+ // Load constant.
+ __ li(a5, Operand(refConstants[i]));
+ __ sd(a5, MemOperand(a4));
+ __ Daddu(a4, a4, Operand(kPointerSize));
+ }
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ ::F f = FUNCTION_CAST< ::F>(code->entry());
+ (void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(result),
+ 0, 0, 0, 0);
+ // Check results.
+ for (int i = 0; i < 64; i++) {
+ CHECK(refConstants[i] == result[i]);
+ }
+}
+
+
+TEST(LoadAddress) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+
+ MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler* masm = &assembler;
+ Label to_jump, skip;
+ __ mov(a4, a0);
+
+ __ Branch(&skip);
+ __ bind(&to_jump);
+ __ nop();
+ __ nop();
+ __ jr(ra);
+ __ nop();
+ __ bind(&skip);
+ __ li(a4, Operand(masm->jump_address(&to_jump)), ADDRESS_LOAD);
+ int check_size = masm->InstructionsGeneratedSince(&skip);
+ CHECK_EQ(check_size, 4);
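+ // ADDRESS_LOAD is expected to emit a fixed-length (here, 4-instruction) li
+ // sequence so that the embedded address stays patchable; the CHECK above
+ // pins that length.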
+ __ jr(a4);
+ __ nop();
+ __ stop("invalid");
+ __ stop("invalid");
+ __ stop("invalid");
+ __ stop("invalid");
+ __ stop("invalid");
+
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ ::F f = FUNCTION_CAST< ::F>(code->entry());
+ (void) CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0);
+ // Check results.
+}
+
+#undef __
#include "src/mips/macro-assembler-mips.h"
#include "src/mips/regexp-macro-assembler-mips.h"
#endif
+#if V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64.h"
+#include "src/mips64/macro-assembler-mips64.h"
+#include "src/mips64/regexp-macro-assembler-mips64.h"
+#endif
#if V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_MIPS64
+typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_X87
typedef RegExpMacroAssemblerX87 ArchRegExpMacroAssembler;
#endif
'array-constructor': [PASS, TIMEOUT],
# Very slow on ARM and MIPS, contains no architecture dependent code.
- 'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips', TIMEOUT]],
+ 'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', TIMEOUT]],
##############################################################################
# This test expects to reach a certain recursion depth, which may not work
'math-floor-of-div-minus-zero': [SKIP],
}], # 'arch == mipsel or arch == mips'
+##############################################################################
+['arch == mips64el', {
+
+ # Slow tests which time out in debug mode.
+ 'try': [PASS, ['mode == debug', SKIP]],
+ 'debug-scripts-request': [PASS, ['mode == debug', SKIP]],
+ 'array-constructor': [PASS, ['mode == debug', SKIP]],
+
+ # Times out often in release mode on MIPS.
+ 'compiler/regress-stacktrace-methods': [PASS, PASS, ['mode == release', TIMEOUT]],
+ 'array-splice': [PASS, TIMEOUT],
+
+ # Long running test.
+ 'mirror-object': [PASS, TIMEOUT],
+ 'string-indexof-2': [PASS, TIMEOUT],
+
+ # BUG(3251035): Timeouts in long looping crankshaft optimization
+ # tests. Skipping because having them time out takes too long on the
+ # buildbot.
+ 'compiler/alloc-number': [PASS, SLOW],
+ 'compiler/array-length': [PASS, SLOW],
+ 'compiler/assignment-deopt': [PASS, SLOW],
+ 'compiler/deopt-args': [PASS, SLOW],
+ 'compiler/inline-compare': [PASS, SLOW],
+ 'compiler/inline-global-access': [PASS, SLOW],
+ 'compiler/optimized-function-calls': [PASS, SLOW],
+ 'compiler/pic': [PASS, SLOW],
+ 'compiler/property-calls': [PASS, SLOW],
+ 'compiler/recursive-deopt': [PASS, SLOW],
+ 'compiler/regress-4': [PASS, SLOW],
+ 'compiler/regress-funcaller': [PASS, SLOW],
+ 'compiler/regress-rep-change': [PASS, SLOW],
+ 'compiler/regress-arguments': [PASS, SLOW],
+ 'compiler/regress-funarguments': [PASS, SLOW],
+ 'compiler/regress-3249650': [PASS, SLOW],
+ 'compiler/simple-deopt': [PASS, SLOW],
+ 'regress/regress-490': [PASS, SLOW],
+ 'regress/regress-634': [PASS, SLOW],
+ 'regress/regress-create-exception': [PASS, SLOW],
+ 'regress/regress-3218915': [PASS, SLOW],
+ 'regress/regress-3247124': [PASS, SLOW],
+
+ # Requires a bigger stack size in the Genesis, and if the stack size is
+ # increased the test takes too long to run. However, the problem the test
+ # covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
+
+ # Currently always deopts on minus zero.
+ 'math-floor-of-div-minus-zero': [SKIP],
+}], # 'arch == mips64el'
+
+['arch == mips64el and simulator_run == False', {
+ # Random failures on HW, need investigation.
+ 'debug-*': [SKIP],
+}],
+
##############################################################################
# Native Client uses the ARM simulator so will behave similarly to arm
# on mjsunit tests.
}], # 'arch == arm64'
-['arch == mipsel', {
+['arch == mipsel or arch == mips64el', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
-}], # 'arch == mipsel'
+}], # 'arch == mipsel or arch == mips64el'
['arch == mips', {
'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
-['arch == arm or arch == mipsel or arch == mips or arch == arm64', {
+['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
'../../src/mips/stub-cache-mips.cc',
],
}],
+ ['v8_target_arch=="mips64el"', {
+ 'sources': [ ### gcmole(arch:mips64el) ###
+ '../../src/mips64/assembler-mips64.cc',
+ '../../src/mips64/assembler-mips64.h',
+ '../../src/mips64/assembler-mips64-inl.h',
+ '../../src/mips64/builtins-mips64.cc',
+ '../../src/mips64/codegen-mips64.cc',
+ '../../src/mips64/codegen-mips64.h',
+ '../../src/mips64/code-stubs-mips64.cc',
+ '../../src/mips64/code-stubs-mips64.h',
+ '../../src/mips64/constants-mips64.cc',
+ '../../src/mips64/constants-mips64.h',
+ '../../src/mips64/cpu-mips64.cc',
+ '../../src/mips64/debug-mips64.cc',
+ '../../src/mips64/deoptimizer-mips64.cc',
+ '../../src/mips64/disasm-mips64.cc',
+ '../../src/mips64/frames-mips64.cc',
+ '../../src/mips64/frames-mips64.h',
+ '../../src/mips64/full-codegen-mips64.cc',
+ '../../src/mips64/ic-mips64.cc',
+ '../../src/mips64/lithium-codegen-mips64.cc',
+ '../../src/mips64/lithium-codegen-mips64.h',
+ '../../src/mips64/lithium-gap-resolver-mips64.cc',
+ '../../src/mips64/lithium-gap-resolver-mips64.h',
+ '../../src/mips64/lithium-mips64.cc',
+ '../../src/mips64/lithium-mips64.h',
+ '../../src/mips64/macro-assembler-mips64.cc',
+ '../../src/mips64/macro-assembler-mips64.h',
+ '../../src/mips64/regexp-macro-assembler-mips64.cc',
+ '../../src/mips64/regexp-macro-assembler-mips64.h',
+ '../../src/mips64/simulator-mips64.cc',
+ '../../src/mips64/stub-cache-mips64.cc',
+ ],
+ }],
['v8_target_arch=="x64" or v8_target_arch=="x32"', {
'sources': [ ### gcmole(arch:x64) ###
'../../src/x64/assembler-x64-inl.h',
"x87",
"mips",
"mipsel",
+ "mips64el",
"nacl_ia32",
"nacl_x64",
"x64",
"arm",
"mips",
"mipsel",
+ "mips64el",
"nacl_ia32",
"nacl_x64",
"x87",
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
- "arm", "arm64", "ia32", "mips", "mipsel", "x64", "x87", "nacl_ia32",
+ "arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
"nacl_x64", "macos", "windows", "linux"]:
VARIABLES[var] = var